#!/usr/bin/env python
import sys
import os
import fileinput
from array import *
from optparse import OptionParser
from OSUT3Analysis.Configuration.configurationOptions import *
from OSUT3Analysis.Configuration.processingUtilities import *
from OSUT3Analysis.Configuration.formattingUtilities import *

parser = OptionParser()

#input/output options
parser.add_option("-l", "--localConfig", dest="localConfig",
                  help="local configuration file")
parser.add_option("-c", "--condorDir", dest="condorDir",
                  help="condor output directory")
parser.add_option("-o", "--output-file", dest="outputFileName",
                  help="specify an output file name for the table file, default is 'bgMCTable.tex'")

#columns to include
parser.add_option("-b", "--box", action="store_true", dest="showDatasetColors", default=False,
                  help="include a column of dataset colors; NOTE: the colors must be predefined in your latex file!")
parser.add_option("-s", "--shortName", action="store_true", dest="showShortNames", default=False,
                  help="include a column of component dataset nicknames (using the legend entry text)")
parser.add_option("-a", "--summedName", action="store_true", dest="showSummedNames", default=False,
                  help="include a column of composite dataset nicknames (using the legend entry text)")
parser.add_option("-f", "--fullNames", action="store_true", dest="showFullNames", default=False,
                  help="include a column of full dataset names")

parser.add_option("-x", "--xsection", action="store_true", dest="showXsections", default=False,
                  help="include a column of process cross sections")
parser.add_option("-e", "--efflumi", action="store_true", dest="showEffLumi", default=False,
                  help="include a column of effective luminosities")
parser.add_option("-n", "--numEvents", action="store_true", dest="showNumEvents", default=False,
                  help="include a column of the number of generated events")
parser.add_option("-w", "--weight", action="store_true", dest="showWeight", default=False,
                  help="include a column of luminosity weighting factors")

#other options
parser.add_option("-r", "--replace", action="append", dest="replacements",
                  help="specify strings to be replaced by asterisks and moved into the caption")

#parse the command-line arguments
(arguments, args) = parser.parse_args()
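
# example invocation (the script and file names below are placeholders, not part of the package):
#   python makeBGMCTable.py -l localOptions.py -c myCondorDir -a -s -n -x -e -w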

#import from local config
if arguments.localConfig:
    sys.path.append(os.getcwd())
    # strip only a trailing ".py" (rstrip('.py') would also eat trailing '.', 'p', or 'y' characters of the module name)
    localConfigModule = arguments.localConfig[:-3] if arguments.localConfig.endswith(".py") else arguments.localConfig
    exec("from " + localConfigModule + " import *")

#set condor directory
condor_dir = set_condor_output_dir(arguments)

#set output file name
outputFileName = "bgMCTable.tex"
if arguments.outputFileName:
    outputFileName = arguments.outputFileName

outputFile = condor_dir + "/" + outputFileName

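# ROOT is only used below to check that each condor output file opens cleanly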
from ROOT import TFile

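# LaTeX fragments used when assembling the table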
hLine = "\\hline\n"
endLine = " \\\\ "
newLine = " \n"


#### check which bgMC input datasets have valid output files
processed_datasets = []
for dataset in datasets:
    if types[dataset] != "bgMC":
        continue
    fileName = condor_dir + "/" + dataset + ".root"
    if not os.path.exists(fileName):
        continue
    testFile = TFile(fileName)
    if not testFile.IsZombie():
        processed_datasets.append(dataset)

if len(processed_datasets) == 0:
    sys.exit("Can't find any output root files for the given list of datasets")

#set the text for the luminosity label
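# \pbinv and \fbinv are assumed to be macros already defined in the target LaTeX document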
if intLumi < 1000.:
    LumiText = "{0:.1f}".format(intLumi) + " \\pbinv"
else:
    LumiInFb = intLumi/1000.
    LumiText = "{0:.1f}".format(LumiInFb) + " \\fbinv"

#setting up the column alignments for the table
numColumns = 0  #incremented once for each column enabled below
columnStructure = "{"
if arguments.showDatasetColors:
    numColumns = numColumns + 1
    columnStructure = columnStructure + "c"
if arguments.showShortNames:
    numColumns = numColumns + 1
    columnStructure = columnStructure + "c"
if arguments.showSummedNames:
    numColumns = numColumns + 1
    columnStructure = columnStructure + "c"
if arguments.showFullNames:
    numColumns = numColumns + 1
    columnStructure = columnStructure + "l"
if arguments.showNumEvents:
    numColumns = numColumns + 1
    columnStructure = columnStructure + "c"
if arguments.showXsections:
    numColumns = numColumns + 1
    columnStructure = columnStructure + "c"
if arguments.showEffLumi:
    numColumns = numColumns + 1
    columnStructure = columnStructure + "c"
if arguments.showWeight:
    numColumns = numColumns + 1
    columnStructure = columnStructure + "c"
columnStructure = columnStructure + "}"


fout = open(outputFile, "w")
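# \makebox[0pt]{...} is presumably used so that a tabular wider than the text width overhangs it symmetrically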
fout.write("\\makebox[0pt]{\\renewcommand{\\arraystretch}{1.2}\\begin{tabular}"+columnStructure+newLine+hLine)

#write the column headers to the output file
headerLine = ""
if arguments.showDatasetColors:
    headerLine = headerLine + "\\shortstack{Composite \\\\ Dataset \\\\ Color} & "
if arguments.showSummedNames:
    headerLine = headerLine + "\\shortstack{Composite \\\\ Dataset \\\\ Nickname} & "
if arguments.showShortNames:
    headerLine = headerLine + "\\shortstack{Individual \\\\ Dataset \\\\ Nickname} & "
if arguments.showFullNames:
    headerLine = headerLine + "\\multicolumn{1}{c}{Individual Dataset Source Name} & "
if arguments.showNumEvents:
    headerLine = headerLine + "\\shortstack{Generated \\\\ Events} & "
if arguments.showXsections:
    headerLine = headerLine + "\\shortstack{Cross \\\\ Section \\\\ (pb)} & "
if arguments.showEffLumi:
    headerLine = headerLine + "\\shortstack{Effective \\\\ Luminosity \\\\ (\\fbinv)} & "
if arguments.showWeight:
    headerLine = headerLine + "\\shortstack{Weighting \\\\ Factor \\\\ for " + LumiText + "} & "

fout.write(headerLine.rstrip(" & ")+endLine+newLine+hLine)

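#write one block of rows per background MC dataset (composite datasets get one row per component)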
for dataset in processed_datasets:
    datasetLines = ""
    numComponents = 1
    component_datasets = []

    #if there are component datasets, we'll need all of them
    if dataset in composite_dataset_definitions:
        numComponents = len(composite_dataset_definitions[dataset])
        for component in composite_dataset_definitions[dataset]:
            component_datasets.append(component)
    else:
        component_datasets.append(dataset)

    #include the fancy multirow business for dataset nicknames
    if arguments.showDatasetColors:
        datasetLines = datasetLines + "\\multirow{"+str(numComponents)+"}{*}{\\color{"+str(colors[dataset])+"}{\\LARGE $\\blacksquare$}} & "
    if arguments.showSummedNames:
        rawlabel = "$" + labels[dataset] + "$"
        label = rawlabel.replace("#","\\").replace("\\rightarrow","{\\rightarrow}").replace(" ","\\ ")
        datasetLines = datasetLines + "\\multirow{"+str(numComponents)+"}{*}{"+label+"} & "

    #loop over each component (even if there's just one) and add the appropriate content to the table
    counter = 0
    for component in component_datasets:
        counter = counter + 1
        if arguments.showDatasetColors and counter > 1:
            datasetLines = datasetLines + " & "
        if arguments.showSummedNames and counter > 1:
            datasetLines = datasetLines + " & "
        if arguments.showShortNames:
            datasetLines = datasetLines + "$" + labels[component].replace("#","\\").replace("\\rightarrow","{\\rightarrow}").replace(" ","\\ ").replace("Pt","\\pt") + "$" + " & "
        if arguments.showFullNames:
            datasetLines = datasetLines + "\\texttt{" + formatString(dataset_names[component]).lstrip("/").replace("/","_").replace("_","\\_") + "}" + " & "
        if arguments.showNumEvents:
            fileName = condor_dir + "/" + component + "/numberOfEvents.txt"
            with open(fileName) as numEventsFile:
                content = numEventsFile.readlines()
            numEvents = content[0].strip("\n")
            datasetLines = datasetLines + formatNumber(numEvents) + " & "
        if arguments.showXsections:
            fileName = condor_dir + "/" + component + "/crossSectionInPicobarn.txt"
            with open(fileName) as crossSectionFile:
                content = crossSectionFile.readlines()
            crossSection = content[0].strip("\n")
            datasetLines = datasetLines + formatNumber(crossSection) + " & "
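        # effective luminosity = (generated events) / (cross section in pb), divided by 1000 to convert pb^-1 to fb^-1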
        if arguments.showEffLumi:
            fileName = condor_dir + "/" + component + "/numberOfEvents.txt"
            with open(fileName) as numEventsFile:
                content = numEventsFile.readlines()
            numEvents = content[0].strip("\n")
            fileName = condor_dir + "/" + component + "/crossSectionInPicobarn.txt"
            with open(fileName) as crossSectionFile:
                content = crossSectionFile.readlines()
            crossSection = content[0].strip("\n")
            datasetLines = datasetLines + formatNumber(str(round_sigfigs(float(numEvents)/float(crossSection)/1000.,3)).rstrip("0").rstrip(".")) + " & "
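        # weighting factor = intLumi * (cross section) / (generated events), i.e. the factor that scales this sample to the target luminosity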
        if arguments.showWeight:
            fileName = condor_dir + "/" + component + "/numberOfEvents.txt"
            with open(fileName) as numEventsFile:
                content = numEventsFile.readlines()
            numEvents = content[0].strip("\n")
            fileName = condor_dir + "/" + component + "/crossSectionInPicobarn.txt"
            with open(fileName) as crossSectionFile:
                content = crossSectionFile.readlines()
            crossSection = content[0].strip("\n")
            datasetLines = datasetLines + formatNumber(str(round_sigfigs(intLumi*float(crossSection)/float(numEvents),3)).rstrip("0").rstrip(".")) + " & "

    datasetLines = datasetLines.rstrip(" & ")
    datasetLines = datasetLines + endLine + newLine

    fout.write(datasetLines)
    fout.write(hLine)

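#list each replaced string as an asterisk-marked line at the bottom of the table, for reference in the caption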
if arguments.replacements:
    replacementsText = ""
    filler = "*"
    for replacement in arguments.replacements:
        line = "\\multicolumn{"+str(numColumns)+"}{l}{" + " \\texttt{" + filler + " " + replacement.replace("/","_").replace("_","\\_") + "}}" + endLine + newLine
        filler = filler + "*"
        replacementsText = replacementsText + line
    fout.write(replacementsText)

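#close the tabular environment and the enclosing makebox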
fout.write("\\end{tabular}}")
|
240 |
fout.close()
|
241 |
|
242 |
|
243 |
|