 from JobType import JobType
-from crab_logger import Logger
 from crab_exceptions import *
 from crab_util import *
-from BlackWhiteListParser import BlackWhiteListParser
 import common
 import Scram
-from LFNBaseName import *
+from Splitter import JobSplitter
+from IMProv.IMProvNode import IMProvNode
 import os, string, glob

 class Cmssw(JobType):
     def __init__(self, cfg_params, ncjobs,skip_blocks, isNew):
         JobType.__init__(self, 'CMSSW')
-        common.logger.debug(3,'CMSSW::__init__')
+        common.logger.debug('CMSSW::__init__')
         self.skip_blocks = skip_blocks
-
-        self.argsList = []
+        self.argsList = 1

         self._params = {}
         self.cfg_params = cfg_params
-        # init BlackWhiteListParser
-        self.blackWhiteListParser = BlackWhiteListParser(cfg_params)

-        self.MaxTarBallSize = float(self.cfg_params.get('EDG.maxtarballsize',9.5))
+        ### Temporary patch to automatically skip the ISB size check:
+        server=self.cfg_params.get('CRAB.server_name',None)
+        size = 9.5
+        if server or common.scheduler.name().upper() in ['LSF','CAF']: size = 99999
+        ### D.S.
+        self.MaxTarBallSize = float(self.cfg_params.get('GRID.maxtarballsize',size))

         # number of jobs requested to be created, limit obj splitting
         self.ncjobs = ncjobs

-        log = common.logger
-
         self.scram = Scram.Scram(cfg_params)
         self.additional_inbox_files = []
         self.scriptExe = ''
         self.executable = ''
         self.executable_arch = self.scram.getArch()
-        self.tgz_name = 'default.tgz'
+        self.tgz_name = 'default.tar.gz'
+        self.tar_name = 'default.tar'
         self.scriptName = 'CMSSW.sh'
         self.pset = ''
         self.datasetPath = ''

+        self.tgzNameWithPath = common.work_space.pathForTgz()+self.tgz_name
         # set FJR file name
         self.fjrFileName = 'crab_fjr.xml'

         self.version = self.scram.getSWVersion()
+        common.logger.log(10-1,"CMSSW version is: "+str(self.version))
+
         version_array = self.version.split('_')
         self.CMSSW_major = 0
         self.CMSSW_minor = 0
@@ ... @@
             msg = "Cannot parse CMSSW version string: " + self.version + " for major and minor release number!"
             raise CrabException(msg)

+        if self.CMSSW_major < 1 or (self.CMSSW_major == 1 and self.CMSSW_minor < 5):
+            msg = "CRAB supports CMSSW >= 1_5_x only. Use an older CRAB version."
+            raise CrabException(msg)
+        """
+        As CMSSW versions are dropped we can drop more code:
+        1.X dropped: drop support for running .cfg on WN
+        2.0 dropped: drop all support for cfg here and in writeCfg
+        2.0 dropped: Recheck the random number seed support
+        """
+
         ### collect Data cards

-        if not cfg_params.has_key('CMSSW.datasetpath'):
-            msg = "Error: datasetpath not defined "
-            raise CrabException(msg)

         ### Temporary: added to remove input file control in the case of PU
         self.dataset_pu = cfg_params.get('CMSSW.dataset_pu', None)

         tmp = cfg_params['CMSSW.datasetpath']
-        log.debug(6, "CMSSW::CMSSW(): datasetPath = "+tmp)
-        if string.lower(tmp)=='none':
+        common.logger.log(10-1, "CMSSW::CMSSW(): datasetPath = "+tmp)
+
+        if tmp =='':
+            msg = "Error: datasetpath not defined "
+            raise CrabException(msg)
+        elif string.lower(tmp)=='none':
             self.datasetPath = None
             self.selectNoInput = 1
         else:
@@ ... @@
             self.selectNoInput = 0

         self.dataTiers = []
-        self.debugWrap = ''
-        self.debug_wrapper = cfg_params.get('USER.debug_wrapper',False)
-        if self.debug_wrapper: self.debugWrap='--debug'
+
+        self.debugWrap=''
+        self.debug_wrapper = int(cfg_params.get('USER.debug_wrapper',0))
+        if self.debug_wrapper == 1: self.debugWrap='--debug'
+
         ## now the application
+        self.managedGenerators = ['madgraph', 'comphep', 'lhe']
+        self.generator = cfg_params.get('CMSSW.generator','pythia').lower()
         self.executable = cfg_params.get('CMSSW.executable','cmsRun')
-        log.debug(6, "CMSSW::CMSSW(): executable = "+self.executable)
+        common.logger.log(10-1, "CMSSW::CMSSW(): executable = "+self.executable)

         if not cfg_params.has_key('CMSSW.pset'):
             raise CrabException("PSet file missing. Cannot run cmsRun ")
         self.pset = cfg_params['CMSSW.pset']
-        log.debug(6, "Cmssw::Cmssw(): PSet file = "+self.pset)
+        common.logger.log(10-1, "Cmssw::Cmssw(): PSet file = "+self.pset)
         if self.pset.lower() != 'none' :
             if (not os.path.exists(self.pset)):
                 raise CrabException("User defined PSet file "+self.pset+" does not exist")
@@ ... @@
                 raise CrabException(msg)
             self.additional_inbox_files.append(string.strip(self.scriptExe))

+        self.AdditionalArgs = cfg_params.get('USER.script_arguments',None)
+        if self.AdditionalArgs : self.AdditionalArgs = string.replace(self.AdditionalArgs,',',' ')
+
         if self.datasetPath == None and self.pset == None and self.scriptExe == '' :
             msg ="Error. script_exe not defined"
             raise CrabException(msg)

         # use parent files...
-        self.useParent = self.cfg_params.get('CMSSW.use_parent',False)
+        self.useParent = int(self.cfg_params.get('CMSSW.use_parent',0))

         ## additional input files
         if cfg_params.has_key('USER.additional_input_files'):
@@ ... @@
                 self.additional_inbox_files.append(string.strip(file))
                 pass
             pass
-            common.logger.debug(5,"Additional input files: "+str(self.additional_inbox_files))
+            common.logger.debug("Additional input files: "+str(self.additional_inbox_files))
             pass

-        ## Events per job
-        if cfg_params.has_key('CMSSW.events_per_job'):
-            self.eventsPerJob =int( cfg_params['CMSSW.events_per_job'])
-            self.selectEventsPerJob = 1
-        else:
-            self.eventsPerJob = -1
-            self.selectEventsPerJob = 0
-
-        ## number of jobs
-        if cfg_params.has_key('CMSSW.number_of_jobs'):
-            self.theNumberOfJobs =int( cfg_params['CMSSW.number_of_jobs'])
-            self.selectNumberOfJobs = 1
-        else:
-            self.theNumberOfJobs = 0
-            self.selectNumberOfJobs = 0
-
-        if cfg_params.has_key('CMSSW.total_number_of_events'):
-            self.total_number_of_events = int(cfg_params['CMSSW.total_number_of_events'])
-            self.selectTotalNumberEvents = 1
-            if self.selectNumberOfJobs == 1:
-                if (self.total_number_of_events != -1) and int(self.total_number_of_events) < int(self.theNumberOfJobs):
-                    msg = 'Must specify at least one event per job. total_number_of_events > number_of_jobs '
-                    raise CrabException(msg)
-        else:
-            self.total_number_of_events = 0
-            self.selectTotalNumberEvents = 0
-
-        if self.pset != None:
-            if ( (self.selectTotalNumberEvents + self.selectEventsPerJob + self.selectNumberOfJobs) != 2 ):
-                msg = 'Must define exactly two of total_number_of_events, events_per_job, or number_of_jobs.'
-                raise CrabException(msg)
-        else:
-            if (self.selectNumberOfJobs == 0):
-                msg = 'Must specify number_of_jobs.'
-                raise CrabException(msg)

         ## New method of dealing with seeds
         self.incrementSeeds = []
@@ ... @@
             tmp.strip()
             self.incrementSeeds.append(tmp)

-        ## Old method of dealing with seeds
-        ## FUTURE: This is for old CMSSW and old CRAB. Can throw exceptions after a couple of CRAB releases and then
-        ## remove
-        self.sourceSeed = cfg_params.get('CMSSW.pythia_seed',None)
-        if self.sourceSeed:
-            print "pythia_seed is a deprecated parameter. Use preserve_seeds or increment_seeds in the future.\n","Added to increment_seeds."
-            self.incrementSeeds.append('sourceSeed')
-            self.incrementSeeds.append('theSource')
-
-        self.sourceSeedVtx = cfg_params.get('CMSSW.vtx_seed',None)
-        if self.sourceSeedVtx:
-            print "vtx_seed is a deprecated parameter. Use preserve_seeds or increment_seeds in the future.\n","Added to increment_seeds."
-            self.incrementSeeds.append('VtxSmeared')
-
-        self.sourceSeedG4 = cfg_params.get('CMSSW.g4_seed',None)
-        if self.sourceSeedG4:
-            print "g4_seed is a deprecated parameter. Use preserve_seeds or increment_seeds in the future.\n","Added to increment_seeds."
-            self.incrementSeeds.append('g4SimHits')
-
-        self.sourceSeedMix = cfg_params.get('CMSSW.mix_seed',None)
-        if self.sourceSeedMix:
-            print "mix_seed is a deprecated parameter. Use preserve_seeds or increment_seeds in the future.\n","Added to increment_seeds."
-            self.incrementSeeds.append('mix')
-
         self.firstRun = cfg_params.get('CMSSW.first_run',None)

-
         # Copy/return
         self.copy_data = int(cfg_params.get('USER.copy_data',0))
         self.return_data = int(cfg_params.get('USER.return_data',0))

+        self.conf = {}
+        self.conf['pubdata'] = None
+        # number of jobs requested to be created, limit obj splitting DD
         #DBSDLS-start
         ## Initialize the variables that are extracted from DBS/DLS and needed in other places of the code
         self.maxEvents=0  # max events available   ( --> check the requested nb. of evts in Creator.py)
@@ ... @@
         if self.datasetPath:
             blockSites = self.DataDiscoveryAndLocation(cfg_params)
         #DBSDLS-end
+        self.conf['blockSites']=blockSites

         ## Select Splitting
+        splitByRun = int(cfg_params.get('CMSSW.split_by_run',0))
+
         if self.selectNoInput:
             if self.pset == None:
-                self.jobSplittingForScript()
+                self.algo = 'ForScript'
             else:
-                self.jobSplittingNoInput()
+                self.algo = 'NoInput'
+                self.conf['managedGenerators']=self.managedGenerators
+                self.conf['generator']=self.generator
+        elif splitByRun ==1:
+            self.algo = 'RunBased'
         else:
-            self.jobSplittingByBlocks(blockSites)
+            self.algo = 'EventBased'

+        # self.algo = 'LumiBased'
+        splitter = JobSplitter(self.cfg_params,self.conf)
+        self.dict = splitter.Algos()[self.algo]()
+
+        self.argsFile= '%s/arguments.xml'%common.work_space.shareDir()
+        self.rootArgsFilename= 'arguments'
         # modify Pset only the first time
-        if isNew:
-            if self.pset != None:
-                import PsetManipulator as pp
-                PsetEdit = pp.PsetManipulator(self.pset)
-                try:
-                    # Add FrameworkJobReport to parameter-set, set max events.
-                    # Reset later for data jobs by writeCFG which does all modifications
-                    PsetEdit.addCrabFJR(self.fjrFileName) # FUTURE: Job report addition not needed by CMSSW>1.5
-                    PsetEdit.maxEvent(self.eventsPerJob)
-                    PsetEdit.psetWriter(self.configFilename())
-                    ## If present, add TFileService to output files
-                    if not int(cfg_params.get('CMSSW.skip_TFileService_output',0)):
-                        tfsOutput = PsetEdit.getTFileService()
-                        if tfsOutput:
-                            if tfsOutput in self.output_file:
-                                common.logger.debug(5,"Output from TFileService "+tfsOutput+" already in output files")
-                            else:
-                                outfileflag = True #output found
-                                self.output_file.append(tfsOutput)
-                                common.logger.message("Adding "+tfsOutput+" to output files (from TFileService)")
-                            pass
-                        pass
-                    ## If present and requested, add PoolOutputModule to output files
-                    if int(cfg_params.get('CMSSW.get_edm_output',0)):
-                        edmOutput = PsetEdit.getPoolOutputModule()
-                        if edmOutput:
-                            if edmOutput in self.output_file:
-                                common.logger.debug(5,"Output from PoolOutputModule "+edmOutput+" already in output files")
-                            else:
-                                self.output_file.append(edmOutput)
-                                common.logger.message("Adding "+edmOutput+" to output files (from PoolOutputModule)")
-                            pass
-                        pass
-                except CrabException:
-                    msg='Error while manipulating ParameterSet: exiting...'
-                    raise CrabException(msg)
-        ## Prepare inputSandbox TarBall (only the first time)
-        self.tgzNameWithPath = self.getTarBall(self.executable)
+        if (isNew and self.pset != None): self.ModifyPset()
+
+        ## Prepare inputSandbox TarBall (only the first time)
+        self.tarNameWithPath = self.getTarBall(self.executable)
+
+
+    def ModifyPset(self):
+        import PsetManipulator as pp
+        PsetEdit = pp.PsetManipulator(self.pset)
+        try:
+            # Add FrameworkJobReport to parameter-set, set max events.
+            # Reset later for data jobs by writeCFG which does all modifications
+            PsetEdit.maxEvent(1)
+            PsetEdit.skipEvent(0)
+            PsetEdit.psetWriter(self.configFilename())
+            ## If present, add TFileService to output files
+            if not int(self.cfg_params.get('CMSSW.skip_TFileService_output',0)):
+                tfsOutput = PsetEdit.getTFileService()
+                if tfsOutput:
+                    if tfsOutput in self.output_file:
+                        common.logger.debug("Output from TFileService "+tfsOutput+" already in output files")
+                    else:
+                        outfileflag = True #output found
+                        self.output_file.append(tfsOutput)
+                        common.logger.info("Adding "+tfsOutput+" (from TFileService) to list of output files")
+                    pass
+                pass
+            ## If present and requested, add PoolOutputModule to output files
+            edmOutput = PsetEdit.getPoolOutputModule()
+            if int(self.cfg_params.get('CMSSW.get_edm_output',0)):
+                if edmOutput:
+                    if edmOutput in self.output_file:
+                        common.logger.debug("Output from PoolOutputModule "+edmOutput+" already in output files")
+                    else:
+                        self.output_file.append(edmOutput)
+                        common.logger.info("Adding "+edmOutput+" (from PoolOutputModule) to list of output files")
+                    pass
+                pass
+            # not required: check anyhow if present, to avoid accidental T2 overload
+            else:
+                if edmOutput and (edmOutput not in self.output_file):
+                    msg = "ERROR: a PoolOutputModule is present in your ParameteSet %s \n"%self.pset
+                    msg +="         but the file produced ( %s ) is not in the list of output files\n"%edmOutput
+                    msg += "WARNING: please remove it. If you want to keep it, add the file to output_files or use CMSSW.get_edm_output\n"
+                    if int(self.cfg_params.get('CMSSW.ignore_edm_output',0)):
+                        msg +=" CMSSW.ignore_edm_output==True : Hope you know what you are doing...\n"
+                        common.logger.info(msg)
+                    else:
+                        raise CrabException(msg)
+                pass
+            pass
+
+            if (PsetEdit.getBadFilesSetting()):
+                msg = "WARNING: You have set skipBadFiles to True. This will continue processing on some errors and you may not be notified."
+                common.logger.info(msg)
+
+        except CrabException, msg:
+            common.logger.info(str(msg))
+            msg='Error while manipulating ParameterSet (see previous message, if any): exiting...'
+            raise CrabException(msg)
+
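Note: the constructor above now only picks an algorithm label and hands the actual work to the new Splitter module, looking the algorithm up by name and calling it in one step (`splitter.Algos()[self.algo]()`). JobSplitter's internals are not part of this changeset, so the sketch below is only an illustration of the dispatch pattern, with hypothetical method names; what the caller does rely on (see split() and numberOfJobs() further down) is that the chosen algorithm returns a dict carrying 'njobs', 'args', 'params' and 'jobDestination'.

    class JobSplitter:
        def __init__(self, cfg_params, args):
            self.cfg_params = cfg_params
            self.args = args   # carries blockSites, pubdata, generator info, ...

        def jobSplittingByEvent(self):
            # every algorithm returns the dict shape the caller expects
            return {'njobs': 0, 'args': [], 'params': [], 'jobDestination': []}

        # illustrative stubs; the real module implements each one separately
        jobSplittingByRun = jobSplittingNoInput = jobSplittingForScript = jobSplittingByEvent

        def Algos(self):
            # label -> bound method; caller picks one by name and invokes it
            return {'EventBased': self.jobSplittingByEvent,
                    'RunBased':   self.jobSplittingByRun,
                    'NoInput':    self.jobSplittingNoInput,
                    'ForScript':  self.jobSplittingForScript}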

     def DataDiscoveryAndLocation(self, cfg_params):

         import DataDiscovery
         import DataLocation
-        common.logger.debug(10,"CMSSW::DataDiscoveryAndLocation()")
+        common.logger.log(10-1,"CMSSW::DataDiscoveryAndLocation()")

         datasetPath=self.datasetPath

         ## Contact the DBS
-        common.logger.message("Contacting Data Discovery Services ...")
+        common.logger.info("Contacting Data Discovery Services ...")
         try:
             self.pubdata=DataDiscovery.DataDiscovery(datasetPath, cfg_params,self.skip_blocks)
             self.pubdata.fetchDBSInfo()
@@ ... @@
             raise CrabException(msg)

         self.filesbyblock=self.pubdata.getFiles()
-        self.eventsbyblock=self.pubdata.getEventsPerBlock()
-        self.eventsbyfile=self.pubdata.getEventsPerFile()
-        self.parentFiles=self.pubdata.getParent()
+        #print self.filesbyblock
+        self.conf['pubdata']=self.pubdata

         ## get max number of events
         self.maxEvents=self.pubdata.getMaxEvents()
@@ ... @@
         try:
             dataloc=DataLocation.DataLocation(self.filesbyblock.keys(),cfg_params)
             dataloc.fetchDLSInfo()
+
         except DataLocation.DataLocationError , ex:
             msg = 'ERROR ***: failed Data Location in DLS \n %s '%ex.getErrorMessage()
             raise CrabException(msg)


-        sites = dataloc.getSites()
+        unsorted_sites = dataloc.getSites()
+        sites = self.filesbyblock.fromkeys(self.filesbyblock,'')
+        for lfn in self.filesbyblock.keys():
+            if unsorted_sites.has_key(lfn):
+                sites[lfn]=unsorted_sites[lfn]
+            else:
+                sites[lfn]=[]
+
+        if len(sites)==0:
+            msg = 'ERROR ***: no location for any of the blocks of this dataset: \n\t %s \n'%datasetPath
+            msg += "\tMaybe the dataset is located only at T1's (or at T0), where analysis jobs are not allowed\n"
+            msg += "\tPlease check DataDiscovery page https://cmsweb.cern.ch/dbs_discovery/\n"
+            raise CrabException(msg)
+
         allSites = []
         listSites = sites.values()
         for listSite in listSites:
             for oneSite in listSite:
                 allSites.append(oneSite)
-        allSites = self.uniquelist(allSites)
+        [allSites.append(it) for it in allSites if not allSites.count(it)]

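Note: the rewritten duplicate-removal line above is a no-op as written: every `it` drawn from allSites has a count of at least one, so `not allSites.count(it)` is never true and nothing is ever appended. If the intent is the order-preserving de-duplication that the removed self.uniquelist() call provided, a working equivalent (a sketch, not part of the changeset) would be:

    # order-preserving de-duplication of allSites
    seen = set()
    allSites = [s for s in allSites if s not in seen and not seen.add(s)]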
-        # screen output
-        common.logger.message("Requested dataset: " + datasetPath + " has " + str(self.maxEvents) + " events in " + str(len(self.filesbyblock.keys())) + " blocks.\n")
-
-        return sites
-
-    def jobSplittingByBlocks(self, blockSites):
-        """
-        Perform job splitting. Jobs run over an integer number of files
-        and no more than one block.
-        ARGUMENT: blockSites: dictionary with blocks as keys and list of host sites as values
-        REQUIRES: self.selectTotalNumberEvents, self.selectEventsPerJob, self.selectNumberofJobs,
-                  self.total_number_of_events, self.eventsPerJob, self.theNumberOfJobs,
-                  self.maxEvents, self.filesbyblock
-        SETS: self.jobDestination - Site destination(s) for each job (a list of lists)
-              self.total_number_of_jobs - Total # of jobs
-              self.list_of_args - File(s) job will run on (a list of lists)
-        """
-
-        # ---- Handle the possible job splitting configurations ---- #
-        if (self.selectTotalNumberEvents):
-            totalEventsRequested = self.total_number_of_events
-        if (self.selectEventsPerJob):
-            eventsPerJobRequested = self.eventsPerJob
-            if (self.selectNumberOfJobs):
-                totalEventsRequested = self.theNumberOfJobs * self.eventsPerJob
-
-        # If user requested all the events in the dataset
-        if (totalEventsRequested == -1):
-            eventsRemaining=self.maxEvents
-        # If user requested more events than are in the dataset
-        elif (totalEventsRequested > self.maxEvents):
-            eventsRemaining = self.maxEvents
-            common.logger.message("Requested "+str(self.total_number_of_events)+ " events, but only "+str(self.maxEvents)+" events are available.")
-        # If user requested less events than are in the dataset
-        else:
-            eventsRemaining = totalEventsRequested
-
-        # If user requested more events per job than are in the dataset
-        if (self.selectEventsPerJob and eventsPerJobRequested > self.maxEvents):
-            eventsPerJobRequested = self.maxEvents
-
-        # For user info at end
-        totalEventCount = 0
-
-        if (self.selectTotalNumberEvents and self.selectNumberOfJobs):
-            eventsPerJobRequested = int(eventsRemaining/self.theNumberOfJobs)
-
-        if (self.selectNumberOfJobs):
-            common.logger.message("May not create the exact number_of_jobs requested.")
-
-        if ( self.ncjobs == 'all' ) :
-            totalNumberOfJobs = 999999999
-        else :
-            totalNumberOfJobs = self.ncjobs
-
-        blocks = blockSites.keys()
-        blockCount = 0
-        # Backup variable in case self.maxEvents counted events in a non-included block
-        numBlocksInDataset = len(blocks)
-
-        jobCount = 0
-        list_of_lists = []
-
-        # list tracking which jobs are in which jobs belong to which block
-        jobsOfBlock = {}
-
-        # ---- Iterate over the blocks in the dataset until ---- #
-        # ---- we've met the requested total # of events    ---- #
-        while ( (eventsRemaining > 0) and (blockCount < numBlocksInDataset) and (jobCount < totalNumberOfJobs)):
-            block = blocks[blockCount]
-            blockCount += 1
-            if block not in jobsOfBlock.keys() :
-                jobsOfBlock[block] = []
-
-            if self.eventsbyblock.has_key(block) :
-                numEventsInBlock = self.eventsbyblock[block]
-                common.logger.debug(5,'Events in Block File '+str(numEventsInBlock))
-
-                files = self.filesbyblock[block]
-                numFilesInBlock = len(files)
-                if (numFilesInBlock <= 0):
-                    continue
-                fileCount = 0
-
-                # ---- New block => New job ---- #
-                parString = ""
-                # counter for number of events in files currently worked on
-                filesEventCount = 0
-                # flag if next while loop should touch new file
-                newFile = 1
-                # job event counter
-                jobSkipEventCount = 0
-
-                # ---- Iterate over the files in the block until we've met the requested ---- #
-                # ---- total # of events or we've gone over all the files in this block  ---- #
-                pString=''
-                while ( (eventsRemaining > 0) and (fileCount < numFilesInBlock) and (jobCount < totalNumberOfJobs) ):
-                    file = files[fileCount]
-                    if self.useParent:
-                        parent = self.parentFiles[file]
-                        for f in parent :
-                            pString += '\\\"' + f + '\\\"\,'
-                        common.logger.debug(6, "File "+str(file)+" has the following parents: "+str(parent))
-                        common.logger.write("File "+str(file)+" has the following parents: "+str(parent))
-                    if newFile :
-                        try:
-                            numEventsInFile = self.eventsbyfile[file]
-                            common.logger.debug(6, "File "+str(file)+" has "+str(numEventsInFile)+" events")
-                            # increase filesEventCount
-                            filesEventCount += numEventsInFile
-                            # Add file to current job
-                            parString += '\\\"' + file + '\\\"\,'
-                            newFile = 0
-                        except KeyError:
-                            common.logger.message("File "+str(file)+" has unknown number of events: skipping")
-
-                    eventsPerJobRequested = min(eventsPerJobRequested, eventsRemaining)
-                    # if less events in file remain than eventsPerJobRequested
-                    if ( filesEventCount - jobSkipEventCount < eventsPerJobRequested):
-                        # if last file in block
-                        if ( fileCount == numFilesInBlock-1 ) :
-                            # end job using last file, use remaining events in block
-                            # close job and touch new file
-                            fullString = parString[:-2]
-                            if self.useParent:
-                                fullParentString = pString[:-2]
-                                list_of_lists.append([fullString,fullParentString,str(-1),str(jobSkipEventCount)])
-                            else:
-                                list_of_lists.append([fullString,str(-1),str(jobSkipEventCount)])
-                            common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(filesEventCount - jobSkipEventCount)+" events (last file in block).")
-                            self.jobDestination.append(blockSites[block])
-                            common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
-                            # fill jobs of block dictionary
-                            jobsOfBlock[block].append(jobCount+1)
-                            # reset counter
-                            jobCount = jobCount + 1
-                            totalEventCount = totalEventCount + filesEventCount - jobSkipEventCount
-                            eventsRemaining = eventsRemaining - filesEventCount + jobSkipEventCount
-                            jobSkipEventCount = 0
-                            # reset file
-                            pString = ""
-                            parString = ""
-                            filesEventCount = 0
-                            newFile = 1
-                            fileCount += 1
-                        else :
-                            # go to next file
-                            newFile = 1
-                            fileCount += 1
-                    # if events in file equal to eventsPerJobRequested
-                    elif ( filesEventCount - jobSkipEventCount == eventsPerJobRequested ) :
-                        # close job and touch new file
-                        fullString = parString[:-2]
-                        if self.useParent:
-                            fullParentString = pString[:-2]
-                            list_of_lists.append([fullString,fullParentString,str(eventsPerJobRequested),str(jobSkipEventCount)])
-                        else:
-                            list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
-                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
-                        self.jobDestination.append(blockSites[block])
-                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
-                        jobsOfBlock[block].append(jobCount+1)
-                        # reset counter
-                        jobCount = jobCount + 1
-                        totalEventCount = totalEventCount + eventsPerJobRequested
-                        eventsRemaining = eventsRemaining - eventsPerJobRequested
-                        jobSkipEventCount = 0
-                        # reset file
-                        pString = ""
-                        parString = ""
-                        filesEventCount = 0
-                        newFile = 1
-                        fileCount += 1
-
-                    # if more events in file remain than eventsPerJobRequested
-                    else :
-                        # close job but don't touch new file
-                        fullString = parString[:-2]
-                        if self.useParent:
-                            fullParentString = pString[:-2]
-                            list_of_lists.append([fullString,fullParentString,str(eventsPerJobRequested),str(jobSkipEventCount)])
-                        else:
-                            list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
-                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
-                        self.jobDestination.append(blockSites[block])
-                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
-                        jobsOfBlock[block].append(jobCount+1)
-                        # increase counter
-                        jobCount = jobCount + 1
-                        totalEventCount = totalEventCount + eventsPerJobRequested
-                        eventsRemaining = eventsRemaining - eventsPerJobRequested
-                        # calculate skip events for last file
-                        # use filesEventCount (contains several files), jobSkipEventCount and eventsPerJobRequest
-                        jobSkipEventCount = eventsPerJobRequested - (filesEventCount - jobSkipEventCount - self.eventsbyfile[file])
-                        # remove all but the last file
-                        filesEventCount = self.eventsbyfile[file]
-                        if self.useParent:
-                            for f in parent : pString += '\\\"' + f + '\\\"\,'
-                        parString = '\\\"' + file + '\\\"\,'
-                    pass # END if
-                pass # END while (iterate over files in the block)
-            pass # END while (iterate over blocks in the dataset)
-        self.ncjobs = self.total_number_of_jobs = jobCount
-        if (eventsRemaining > 0 and jobCount < totalNumberOfJobs ):
-            common.logger.message("Could not run on all requested events because some blocks not hosted at allowed sites.")
-        common.logger.message(str(jobCount)+" job(s) can run on "+str(totalEventCount)+" events.\n")

         # screen output
-        screenOutput = "List of jobs and available destination sites:\n\n"
-
-        # keep trace of block with no sites to print a warning at the end
-        noSiteBlock = []
-        bloskNoSite = []
-
-        blockCounter = 0
-        for block in blocks:
-            if block in jobsOfBlock.keys() :
-                blockCounter += 1
-                screenOutput += "Block %5i: jobs %20s: sites: %s\n" % (blockCounter,spanRanges(jobsOfBlock[block]),
-                    ','.join(self.blackWhiteListParser.checkWhiteList(self.blackWhiteListParser.checkBlackList(blockSites[block],block),block)))
-                if len(self.blackWhiteListParser.checkWhiteList(self.blackWhiteListParser.checkBlackList(blockSites[block],block),block)) == 0:
-                    noSiteBlock.append( spanRanges(jobsOfBlock[block]) )
-                    bloskNoSite.append( blockCounter )
-
-        common.logger.message(screenOutput)
-        if len(noSiteBlock) > 0 and len(bloskNoSite) > 0:
-            msg = 'WARNING: No sites are hosting any part of data for block:\n                '
-            virgola = ""
-            if len(bloskNoSite) > 1:
-                virgola = ","
-            for block in bloskNoSite:
-                msg += ' ' + str(block) + virgola
-            msg += '\n               Related jobs:\n                 '
-            virgola = ""
-            if len(noSiteBlock) > 1:
-                virgola = ","
-            for range_jobs in noSiteBlock:
-                msg += str(range_jobs) + virgola
-            msg += '\n               will not be submitted and this block of data can not be analyzed!\n'
-            if self.cfg_params.has_key('EDG.se_white_list'):
-                msg += 'WARNING: SE White List: '+self.cfg_params['EDG.se_white_list']+'\n'
-                msg += '(Hint: By whitelisting you force the job to run at this particular site(s).\n'
-                msg += 'Please check if the dataset is available at this site!)\n'
-            if self.cfg_params.has_key('EDG.ce_white_list'):
-                msg += 'WARNING: CE White List: '+self.cfg_params['EDG.ce_white_list']+'\n'
-                msg += '(Hint: By whitelisting you force the job to run at this particular site(s).\n'
-                msg += 'Please check if the dataset is available at this site!)\n'
-
-            common.logger.message(msg)
+        common.logger.info("Requested dataset: " + datasetPath + " has " + str(self.maxEvents) + " events in " + str(len(self.filesbyblock.keys())) + " blocks.\n")

-        self.list_of_args = list_of_lists
-        return
-
-    def jobSplittingNoInput(self):
-        """
-        Perform job splitting based on number of event per job
-        """
-        common.logger.debug(5,'Splitting per events')
-
-        if (self.selectEventsPerJob):
-            common.logger.message('Required '+str(self.eventsPerJob)+' events per job ')
-        if (self.selectNumberOfJobs):
-            common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
-        if (self.selectTotalNumberEvents):
-            common.logger.message('Required '+str(self.total_number_of_events)+' events in total ')
-
-        if (self.total_number_of_events < 0):
-            msg='Cannot split jobs per Events with "-1" as total number of events'
-            raise CrabException(msg)
-
-        if (self.selectEventsPerJob):
-            if (self.selectTotalNumberEvents):
-                self.total_number_of_jobs = int(self.total_number_of_events/self.eventsPerJob)
-            elif(self.selectNumberOfJobs) :
-                self.total_number_of_jobs =self.theNumberOfJobs
-                self.total_number_of_events =int(self.theNumberOfJobs*self.eventsPerJob)
-
-        elif (self.selectNumberOfJobs) :
-            self.total_number_of_jobs = self.theNumberOfJobs
-            self.eventsPerJob = int(self.total_number_of_events/self.total_number_of_jobs)
-
-        common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
-
-        # is there any remainder?
-        check = int(self.total_number_of_events) - (int(self.total_number_of_jobs)*self.eventsPerJob)
-
-        common.logger.debug(5,'Check  '+str(check))
-
-        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created, each for '+str(self.eventsPerJob)+' for a total of '+str(self.total_number_of_jobs*self.eventsPerJob)+' events')
-        if check > 0:
-            common.logger.message('Warning: asked '+str(self.total_number_of_events)+' but can do only '+str(int(self.total_number_of_jobs)*self.eventsPerJob))
-
-        # argument is seed number.$i
-        self.list_of_args = []
-        for i in range(self.total_number_of_jobs):
-            ## Since there is no input, any site is good
-            self.jobDestination.append([""]) #must be empty to write correctly the xml
-            args=[]
-            if (self.firstRun):
-                ## pythia first run
-                args.append(str(self.firstRun)+str(i))
-            self.list_of_args.append(args)
-
-        return
-
-
-    def jobSplittingForScript(self):
-        """
-        Perform job splitting based on number of job
-        """
-        common.logger.debug(5,'Splitting per job')
-        common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
-
-        self.total_number_of_jobs = self.theNumberOfJobs
+        return sites

-        common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))

-        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created')
+    def split(self, jobParams,firstJobID):

-        # argument is seed number.$i
-        self.list_of_args = []
-        for i in range(self.total_number_of_jobs):
-            self.jobDestination.append([""])
-            self.list_of_args.append([str(i)])
-        return
+        jobParams = self.dict['args']
+        njobs = self.dict['njobs']
+        self.jobDestination = self.dict['jobDestination']

-    def split(self, jobParams,firstJobID):
+        if njobs==0:
+            raise CrabException("Ask to split "+str(njobs)+" jobs: aborting")

-        njobs = self.total_number_of_jobs
-        arglist = self.list_of_args
         # create the empty structure
         for i in range(njobs):
             jobParams.append("")

         listID=[]
         listField=[]
+        listDictions=[]
+        exist= os.path.exists(self.argsFile)
         for id in range(njobs):
             job = id + int(firstJobID)
-            jobParams[id] = arglist[id]
             listID.append(job+1)
             job_ToSave ={}
             concString = ' '
             argu=''
+            str_argu = str(job+1)
             if len(jobParams[id]):
-                argu +=   concString.join(jobParams[id] )
-                job_ToSave['arguments']= str(job+1)+' '+argu
+                argu = {'JobID': job+1}
+                for i in range(len(jobParams[id])):
+                    argu[self.dict['params'][i]]=jobParams[id][i]
+                # just for debug
+                str_argu += concString.join(jobParams[id])
+            if argu != '': listDictions.append(argu)
+            job_ToSave['arguments']= str(job+1)
             job_ToSave['dlsDestination']= self.jobDestination[id]
             listField.append(job_ToSave)
-            msg="Job  "+str(job)+" Arguments:  "+str(job+1)+" "+argu+"\n"  \
-            +"  Destination: "+str(self.jobDestination[id])
-            common.logger.debug(5,msg)
+            from ProdCommon.SiteDB.CmsSiteMapper import CmsSEMap
+            cms_se = CmsSEMap()
+            msg="Job  %s  Arguments:  %s\n"%(str(job+1),str_argu)
+            msg+="\t  Destination: %s "%(str(self.jobDestination[id]))
+            SEDestination = [cms_se[dest] for dest in self.jobDestination[id]]
+            msg+="\t  CMSDestination: %s "%(str(SEDestination))
+            common.logger.log(10-1,msg)
+        # write xml
+        if len(listDictions):
+            if exist==False: self.CreateXML()
+            self.addEntry(listDictions)
+            self.addXMLfile()
         common._db.updateJob_(listID,listField)
-        self.argsList = (len(jobParams[0])+1)
+        self.zipTarFile()
+        return
+
+    def addXMLfile(self):
+
+        import tarfile
+        try:
+            tar = tarfile.open(self.tarNameWithPath, "a")
+            tar.add(self.argsFile, os.path.basename(self.argsFile))
+            tar.close()
+        except IOError, exc:
+            msg = 'Could not add %s to %s \n'%(self.argsFile,self.tarNameWithPath)
+            msg += str(exc)
+            raise CrabException(msg)
+        except tarfile.TarError, exc:
+            msg = 'Could not add %s to %s \n'%(self.argsFile,self.tarNameWithPath)
+            msg += str(exc)
+            raise CrabException(msg)

+    def CreateXML(self):
+        """
+        """
+        result = IMProvNode( self.rootArgsFilename )
+        outfile = file( self.argsFile, 'w').write(str(result))
+        return
+
+    def addEntry(self, listDictions):
+        """
+        _addEntry_
+
+        add an entry to the xml file
+        """
+        from IMProv.IMProvLoader import loadIMProvFile
+        ## load xml
+        improvDoc = loadIMProvFile(self.argsFile)
+        entrname= 'Job'
+        for dictions in listDictions:
+            report = IMProvNode(entrname , None, **dictions)
+            improvDoc.addNode(report)
+        outfile = file( self.argsFile, 'w').write(str(improvDoc))
         return
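Note: taken together, CreateXML and addEntry build the per-job arguments file that addXMLfile ships inside the sandbox: an `<arguments>` root (self.rootArgsFilename) holding one `<Job .../>` node per job, whose attributes are the dict entries that split() collects. JobID is set explicitly; the remaining attribute names come from self.dict['params'] and so depend on the splitting algorithm, which makes the names below illustrative only. A standalone stdlib sketch of the same construction:

    from xml.dom.minidom import Document

    doc = Document()
    root = doc.createElement('arguments')        # self.rootArgsFilename
    doc.appendChild(root)
    # one <Job .../> node per job; attribute names here are hypothetical
    for argu in [{'JobID': 1, 'MaxEvents': 100, 'SkipEvents': 0},
                 {'JobID': 2, 'MaxEvents': 100, 'SkipEvents': 100}]:
        node = doc.createElement('Job')
        for key, value in argu.items():
            node.setAttribute(key, str(value))
        root.appendChild(node)
    print doc.toprettyxml(indent='  ')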

     def numberOfJobs(self):
-        return self.total_number_of_jobs
+        return self.dict['njobs']

     def getTarBall(self, exe):
         """
         Return the TarBall with lib and exe
         """
-        self.tgzNameWithPath = common.work_space.pathForTgz()+'share/'+self.tgz_name
-        if os.path.exists(self.tgzNameWithPath):
-            return self.tgzNameWithPath
+        self.tarNameWithPath = common.work_space.pathForTgz()+self.tar_name
+        if os.path.exists(self.tarNameWithPath):
+            return self.tarNameWithPath

         # Prepare a tar gzipped file with user binaries.
         self.buildTar_(exe)

-        return string.strip(self.tgzNameWithPath)
+        return string.strip(self.tarNameWithPath)

     def buildTar_(self, executable):

@@ ... @@

         ## check if working area is release top
         if swReleaseTop == '' or swArea == swReleaseTop:
-            common.logger.debug(3,"swArea = "+swArea+" swReleaseTop ="+swReleaseTop)
+            common.logger.debug("swArea = "+swArea+" swReleaseTop ="+swReleaseTop)
             return

         import tarfile
         try: # create tar ball
-            tar = tarfile.open(self.tgzNameWithPath, "w:gz")
+            #tar = tarfile.open(self.tgzNameWithPath, "w:gz")
+            tar = tarfile.open(self.tarNameWithPath, "w")
             ## First find the executable
             if (self.executable != ''):
                 exeWithPath = self.scram.findFile_(executable)
@@ ... @@
                 ## then check if it's private or not
                 if exeWithPath.find(swReleaseTop) == -1:
                     # the exe is private, so we must ship
-                    common.logger.debug(5,"Exe "+exeWithPath+" to be tarred")
+                    common.logger.debug("Exe "+exeWithPath+" to be tarred")
                     path = swArea+'/'
                     # distinguish case when script is in user project area or given by full path somewhere else
                     if exeWithPath.find(path) >= 0 :
@@ ... @@
                 pass

             ## Now get the libraries: only those in local working area
+            tar.dereference=True
             libDir = 'lib'
             lib = swArea+'/' +libDir
-            common.logger.debug(5,"lib "+lib+" to be tarred")
+            common.logger.debug("lib "+lib+" to be tarred")
             if os.path.exists(lib):
                 tar.add(lib,libDir)
@@ ... @@
             module = swArea + '/' + moduleDir
             if os.path.isdir(module):
                 tar.add(module,moduleDir)
+            tar.dereference=False

             ## Now check if any data dir(s) is present
             self.dataExist = False
@@ ... @@
                     todo_list += [(entryPath + i, i) for i in  os.listdir(swArea+"/src/"+entry)]
                     if name == 'data':
                         self.dataExist=True
-                        common.logger.debug(5,"data "+entry+" to be tarred")
+                        common.logger.debug("data "+entry+" to be tarred")
                         tar.add(swArea+"/src/"+entry,"src/"+entry)
                     pass
                 pass
@@ ... @@
             if not self.pset is None:
                 cfg_file = common.work_space.jobDir()+self.configFilename()
                 tar.add(cfg_file,self.configFilename())
-                common.logger.debug(5,"File added to "+self.tgzNameWithPath+" : "+str(tar.getnames()))

+            try:
+                crab_cfg_file = common.work_space.shareDir()+'/crab.cfg'
+                tar.add(crab_cfg_file,'crab.cfg')
+            except:
+                pass

             ## Add ProdCommon dir to tar
             prodcommonDir = './'
             prodcommonPath = os.environ['CRABDIR'] + '/' + 'external/'
-            neededStuff = ['ProdCommon/__init__.py','ProdCommon/FwkJobRep', 'ProdCommon/CMSConfigTools','ProdCommon/Core','ProdCommon/MCPayloads', 'IMProv']
+            neededStuff = ['ProdCommon/__init__.py','ProdCommon/FwkJobRep', 'ProdCommon/CMSConfigTools', \
+                           'ProdCommon/Core', 'ProdCommon/MCPayloads', 'IMProv', 'ProdCommon/Storage', \
+                           'WMCore/__init__.py','WMCore/Algorithms']
             for file in neededStuff:
                 tar.add(prodcommonPath+file,prodcommonDir+file)
-            common.logger.debug(5,"Files added to "+self.tgzNameWithPath+" : "+str(tar.getnames()))

             ##### ML stuff
             ML_file_list=['report.py', 'DashboardAPI.py', 'Logger.py', 'ProcInfo.py', 'apmon.py']
             path=os.environ['CRABDIR'] + '/python/'
             for file in ML_file_list:
                 tar.add(path+file,file)
-            common.logger.debug(5,"Files added to "+self.tgzNameWithPath+" : "+str(tar.getnames()))

             ##### Utils
-            Utils_file_list=['parseCrabFjr.py','writeCfg.py', 'fillCrabFjr.py']
+            Utils_file_list=['parseCrabFjr.py','writeCfg.py', 'fillCrabFjr.py','cmscp.py']
             for file in Utils_file_list:
                 tar.add(path+file,file)
-            common.logger.debug(5,"Files added to "+self.tgzNameWithPath+" : "+str(tar.getnames()))

             ##### AdditionalFiles
+            tar.dereference=True
             for file in self.additional_inbox_files:
                 tar.add(file,string.split(file,'/')[-1])
-            common.logger.debug(5,"Files added to "+self.tgzNameWithPath+" : "+str(tar.getnames()))
+            tar.dereference=False
+            common.logger.log(10-1,"Files in "+self.tarNameWithPath+" : "+str(tar.getnames()))

             tar.close()
-        except IOError:
-            raise CrabException('Could not create tar-ball '+self.tgzNameWithPath)
-        except tarfile.TarError:
-            raise CrabException('Could not create tar-ball '+self.tgzNameWithPath)
+        except IOError, exc:
+            msg = 'Could not create tar-ball %s \n'%self.tarNameWithPath
+            msg += str(exc)
+            raise CrabException(msg)
+        except tarfile.TarError, exc:
+            msg = 'Could not create tar-ball %s \n'%self.tarNameWithPath
+            msg += str(exc)
+            raise CrabException(msg)
+
+    def zipTarFile(self):
+
+        cmd = "gzip -c %s > %s "%(self.tarNameWithPath,self.tgzNameWithPath)
+        res=runCommand(cmd)

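Note on the design: the sandbox is now written as a plain tar (mode "w") rather than "w:gz", because addXMLfile() must re-open it in append mode ("a") to add arguments.xml at each (re)submission, and tarfile cannot append to an already-compressed archive. Compression therefore happens once, at the end, through the gzip shell-out above. A pure-Python equivalent of that shell-out, as an illustration only (the changeset itself uses runCommand):

    import gzip, shutil

    def zip_tar_file(tar_path, tgz_path):
        # equivalent of: gzip -c <tar_path> > <tgz_path>
        src = open(tar_path, 'rb')
        dst = gzip.open(tgz_path, 'wb')
        try:
            shutil.copyfileobj(src, dst)
        finally:
            src.close()
            dst.close()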
818 |
– |
## check for tarball size |
588 |
|
tarballinfo = os.stat(self.tgzNameWithPath) |
589 |
|
if ( tarballinfo.st_size > self.MaxTarBallSize*1024*1024 ) : |
590 |
< |
raise CrabException('Input sandbox size of ' + str(float(tarballinfo.st_size)/1024.0/1024.0) + ' MB is larger than the allowed ' + str(self.MaxTarBallSize) + ' MB input sandbox limit and not supported by the used GRID submission system. Please make sure that no unnecessary files are in all data directories in your local CMSSW project area as they are automatically packed into the input sandbox.') |
590 |
> |
msg = 'Input sandbox size of ' + str(float(tarballinfo.st_size)/1024.0/1024.0) + ' MB is larger than the allowed ' + str(self.MaxTarBallSize) \ |
591 |
> |
+'MB input sandbox limit \n' |
592 |
> |
msg += ' and not supported by the direct GRID submission system.\n' |
593 |
> |
msg += ' Please use the CRAB server mode by setting server_name=<NAME> in section [CRAB] of your crab.cfg.\n' |
594 |
> |
msg += ' For further infos please see https://twiki.cern.ch/twiki/bin/view/CMS/CrabServer#CRABSERVER_for_Users' |
595 |
> |
raise CrabException(msg) |
596 |
|
|
597 |
|
## create tar-ball with ML stuff |
598 |
|
|
601 |
|
Returns part of a job script which prepares |
602 |
|
the execution environment for the job 'nj'. |
603 |
|
""" |
604 |
+ |
# FUTURE: Drop support for .cfg when possible |
605 |
|
if (self.CMSSW_major >= 2 and self.CMSSW_minor >= 1) or (self.CMSSW_major >= 3): |
606 |
|
psetName = 'pset.py' |
607 |
|
else: |
609 |
|
# Prepare JobType-independent part |
610 |
|
txt = '\n#Written by cms_cmssw::wsSetupEnvironment\n' |
611 |
|
txt += 'echo ">>> setup environment"\n' |
612 |
< |
txt += 'if [ $middleware == LCG ]; then \n' |
612 |
> |
txt += 'if [ $middleware == LCG ] || [ $middleware == CAF ] || [ $middleware == LSF ]; then \n' |
613 |
|
txt += self.wsSetupCMSLCGEnvironment_() |
614 |
|
txt += 'elif [ $middleware == OSG ]; then\n' |
615 |
|
txt += ' WORKING_DIR=`/bin/mktemp -d $OSG_WN_TMP/cms_XXXXXXXXXXXX`\n' |
624 |
|
txt += ' cd $WORKING_DIR\n' |
625 |
|
txt += ' echo ">>> current directory (WORKING_DIR): $WORKING_DIR"\n' |
626 |
|
txt += self.wsSetupCMSOSGEnvironment_() |
627 |
+ |
#Setup SGE Environment |
628 |
+ |
txt += 'elif [ $middleware == SGE ]; then\n' |
629 |
+ |
txt += self.wsSetupCMSLCGEnvironment_() |
630 |
+ |
|
631 |
+ |
txt += 'elif [ $middleware == ARC ]; then\n' |
632 |
+ |
txt += self.wsSetupCMSLCGEnvironment_() |
633 |
+ |
|
634 |
|
txt += 'fi\n' |
635 |
|
|
636 |
|
# Prepare JobType-specific part |
646 |
|
txt += ' func_exit\n' |
647 |
|
txt += 'fi \n' |
648 |
|
txt += 'cd '+self.version+'\n' |
649 |
< |
txt += 'SOFTWARE_DIR=`pwd`\n' |
649 |
> |
txt += 'SOFTWARE_DIR=`pwd`; export SOFTWARE_DIR\n' |
650 |
|
txt += 'echo ">>> current directory (SOFTWARE_DIR): $SOFTWARE_DIR" \n' |
651 |
|
txt += 'eval `'+scram+' runtime -sh | grep -v SCRAMRT_LSB_JOBNAME`\n' |
652 |
|
txt += 'if [ $? != 0 ] ; then\n' |
670 |
|
# Prepare job-specific part |
671 |
|
job = common.job_list[nj] |
672 |
|
if (self.datasetPath): |
673 |
+ |
self.primaryDataset = self.datasetPath.split("/")[1] |
674 |
+ |
DataTier = self.datasetPath.split("/")[2] |
675 |
|
txt += '\n' |
676 |
|
txt += 'DatasetPath='+self.datasetPath+'\n' |
677 |
|
|
678 |
< |
datasetpath_split = self.datasetPath.split("/") |
679 |
< |
|
896 |
< |
txt += 'PrimaryDataset='+datasetpath_split[1]+'\n' |
897 |
< |
txt += 'DataTier='+datasetpath_split[2]+'\n' |
678 |
> |
txt += 'PrimaryDataset='+self.primaryDataset +'\n' |
679 |
> |
txt += 'DataTier='+DataTier+'\n' |
680 |
|
txt += 'ApplicationFamily=cmsRun\n' |
681 |
|
|
682 |
|
else: |
683 |
+ |
self.primaryDataset = 'null' |
684 |
|
txt += 'DatasetPath=MCDataTier\n' |
685 |
|
txt += 'PrimaryDataset=null\n' |
686 |
|
txt += 'DataTier=null\n' |
689 |
|
pset = os.path.basename(job.configFilename()) |
690 |
|
txt += '\n' |
691 |
|
txt += 'cp $RUNTIME_AREA/'+pset+' .\n' |
909 |
– |
if (self.datasetPath): # standard job |
910 |
– |
txt += 'InputFiles=${args[1]}; export InputFiles\n' |
911 |
– |
if (self.useParent): |
912 |
– |
txt += 'ParentFiles=${args[2]}; export ParentFiles\n' |
913 |
– |
txt += 'MaxEvents=${args[3]}; export MaxEvents\n' |
914 |
– |
txt += 'SkipEvents=${args[4]}; export SkipEvents\n' |
915 |
– |
else: |
916 |
– |
txt += 'MaxEvents=${args[2]}; export MaxEvents\n' |
917 |
– |
txt += 'SkipEvents=${args[3]}; export SkipEvents\n' |
918 |
– |
txt += 'echo "Inputfiles:<$InputFiles>"\n' |
919 |
– |
if (self.useParent): txt += 'echo "ParentFiles:<$ParentFiles>"\n' |
920 |
– |
txt += 'echo "MaxEvents:<$MaxEvents>"\n' |
921 |
– |
txt += 'echo "SkipEvents:<$SkipEvents>"\n' |
922 |
– |
else: # pythia like job |
923 |
– |
txt += 'PreserveSeeds=' + ','.join(self.preserveSeeds) + '; export PreserveSeeds\n' |
924 |
– |
txt += 'IncrementSeeds=' + ','.join(self.incrementSeeds) + '; export IncrementSeeds\n' |
925 |
– |
txt += 'echo "PreserveSeeds: <$PreserveSeeds>"\n' |
926 |
– |
txt += 'echo "IncrementSeeds:<$IncrementSeeds>"\n' |
927 |
– |
if (self.firstRun): |
928 |
– |
txt += 'FirstRun=${args[1]}; export FirstRun\n' |
929 |
– |
txt += 'echo "FirstRun: <$FirstRun>"\n' |
930 |
– |
|
931 |
– |
txt += 'mv -f ' + pset + ' ' + psetName + '\n' |
692 |
|
|
693 |
+ |
txt += 'PreserveSeeds=' + ','.join(self.preserveSeeds) + '; export PreserveSeeds\n' |
694 |
+ |
txt += 'IncrementSeeds=' + ','.join(self.incrementSeeds) + '; export IncrementSeeds\n' |
695 |
+ |
txt += 'echo "PreserveSeeds: <$PreserveSeeds>"\n' |
696 |
+ |
txt += 'echo "IncrementSeeds:<$IncrementSeeds>"\n' |
697 |
|
|
698 |
< |
if self.pset != None: |
699 |
< |
# FUTURE: Can simply for 2_1_x and higher |
936 |
< |
txt += '\n' |
937 |
< |
if self.debug_wrapper==True: |
938 |
< |
txt += 'echo "***** cat ' + psetName + ' *********"\n' |
939 |
< |
txt += 'cat ' + psetName + '\n' |
940 |
< |
txt += 'echo "****** end ' + psetName + ' ********"\n' |
941 |
< |
txt += '\n' |
942 |
< |
if (self.CMSSW_major >= 2 and self.CMSSW_minor >= 1) or (self.CMSSW_major >= 3): |
943 |
< |
txt += 'PSETHASH=`edmConfigHash ' + psetName + '` \n' |
944 |
< |
else: |
945 |
< |
txt += 'PSETHASH=`edmConfigHash < ' + psetName + '` \n' |
946 |
< |
txt += 'echo "PSETHASH = $PSETHASH" \n' |
698 |
> |
txt += 'mv -f ' + pset + ' ' + psetName + '\n' |
699 |
> |
else: |
700 |
|
txt += '\n' |
701 |
+ |
txt += 'export AdditionalArgs=%s\n'%(self.AdditionalArgs) |
702 |
+ |
|
703 |
|
return txt |
704 |
|
|
705 |
|
def wsUntarSoftware(self, nj=0): |
712 |
|
|
713 |
|
if os.path.isfile(self.tgzNameWithPath): |
714 |
|
txt += 'echo ">>> tar xzvf $RUNTIME_AREA/'+os.path.basename(self.tgzNameWithPath)+' :" \n' |
715 |
< |
txt += 'tar xzvf $RUNTIME_AREA/'+os.path.basename(self.tgzNameWithPath)+'\n' |
716 |
< |
if self.debug_wrapper: |
715 |
> |
txt += 'tar zxvf $RUNTIME_AREA/'+os.path.basename(self.tgzNameWithPath)+'\n' |
716 |
> |
if self.debug_wrapper==1 : |
717 |
|
txt += 'ls -Al \n' |
718 |
|
txt += 'untar_status=$? \n' |
719 |
|
txt += 'if [ $untar_status -ne 0 ]; then \n' |
767 |
|
txt += 'fi\n' |
768 |
|
txt += '\n' |
769 |
|
|
770 |
+ |
if self.pset != None: |
771 |
+ |
# FUTURE: Drop support for .cfg when possible |
772 |
+ |
if (self.CMSSW_major >= 2 and self.CMSSW_minor >= 1) or (self.CMSSW_major >= 3): |
773 |
+ |
psetName = 'pset.py' |
774 |
+ |
else: |
775 |
+ |
psetName = 'pset.cfg' |
776 |
+ |
# FUTURE: Can simply for 2_1_x and higher |
777 |
+ |
txt += '\n' |
778 |
+ |
if self.debug_wrapper == 1: |
779 |
+ |
txt += 'echo "***** cat ' + psetName + ' *********"\n' |
780 |
+ |
txt += 'cat ' + psetName + '\n' |
781 |
+ |
txt += 'echo "****** end ' + psetName + ' ********"\n' |
782 |
+ |
txt += '\n' |
783 |
+ |
txt += 'echo "***********************" \n' |
784 |
+ |
txt += 'which edmConfigHash \n' |
785 |
+ |
txt += 'echo "***********************" \n' |
786 |
+ |
if (self.CMSSW_major >= 2 and self.CMSSW_minor >= 1) or (self.CMSSW_major >= 3): |
787 |
+ |
txt += 'edmConfigHash ' + psetName + ' \n' |
788 |
+ |
txt += 'PSETHASH=`edmConfigHash ' + psetName + '` \n' |
789 |
+ |
else: |
790 |
+ |
txt += 'PSETHASH=`edmConfigHash < ' + psetName + '` \n' |
791 |
+ |
txt += 'echo "PSETHASH = $PSETHASH" \n' |
792 |
+ |
#### FEDE temporary fix for noEdm files ##### |
793 |
+ |
txt += 'if [ -z "$PSETHASH" ]; then \n' |
794 |
+ |
txt += ' export PSETHASH=null\n' |
795 |
+ |
txt += 'fi \n' |
796 |
+ |
############################################# |
797 |
+ |
txt += '\n' |
798 |
|
return txt |
799 |
|
|
800 |
|
|
806 |
|
|
807 |
|
def executableArgs(self): |
808 |
|
# FUTURE: This function tests the CMSSW version. Can be simplified as we drop support for old versions |
809 |
< |
if self.scriptExe:#CarlosDaniele |
810 |
< |
return self.scriptExe + " $NJob" |
809 |
> |
if self.scriptExe: |
810 |
> |
return self.scriptExe + " $NJob $AdditionalArgs" |
811 |
|
else: |
812 |
|
ex_args = "" |
813 |
< |
# FUTURE: This tests the CMSSW version. Can remove code as versions deprecated |
814 |
< |
# Framework job report |
1032 |
< |
if (self.CMSSW_major >= 1 and self.CMSSW_minor >= 5) or (self.CMSSW_major >= 2): |
1033 |
< |
ex_args += " -j $RUNTIME_AREA/crab_fjr_$NJob.xml" |
1034 |
< |
# Type of config file |
813 |
> |
ex_args += " -j $RUNTIME_AREA/crab_fjr_$NJob.xml" |
814 |
> |
# Type of config file depends on CMSSW version |
815 |
|
if self.CMSSW_major >= 2 : |
816 |
|
ex_args += " -p pset.py" |
817 |
|
else: |
825 |
|
inp_box = [] |
826 |
|
if os.path.isfile(self.tgzNameWithPath): |
827 |
|
inp_box.append(self.tgzNameWithPath) |
828 |
< |
wrapper = os.path.basename(str(common._db.queryTask('scriptName'))) |
1049 |
< |
inp_box.append(common.work_space.pathForTgz() +'job/'+ wrapper) |
828 |
> |
inp_box.append(common.work_space.jobDir() + self.scriptName) |
829 |
|
return inp_box |
830 |
|
|
831 |
|
def outputSandbox(self, nj): |
849 |
|
txt = '\n#Written by cms_cmssw::wsRenameOutput\n' |
850 |
|
txt += 'echo ">>> current directory (SOFTWARE_DIR): $SOFTWARE_DIR" \n' |
851 |
|
txt += 'echo ">>> current directory content:"\n' |
852 |
< |
if self.debug_wrapper: |
852 |
> |
if self.debug_wrapper==1: |
853 |
|
txt += 'ls -Al\n' |
854 |
|
txt += '\n' |
855 |
|
|
875 |
|
txt += 'fi\n' |
876 |
|
file_list = [] |
877 |
|
for fileWithSuffix in (self.output_file): |
878 |
< |
file_list.append(numberFile(fileWithSuffix, '$NJob')) |
878 |
> |
file_list.append(numberFile('$SOFTWARE_DIR/'+fileWithSuffix, '$NJob')) |
879 |
|
|
880 |
< |
txt += 'file_list="'+string.join(file_list,' ')+'"\n' |
880 |
> |
txt += 'file_list="'+string.join(file_list,',')+'"\n' |
881 |
|
txt += '\n' |
882 |
|
txt += 'echo ">>> current directory (SOFTWARE_DIR): $SOFTWARE_DIR" \n' |
883 |
|
txt += 'echo ">>> current directory content:"\n' |
884 |
< |
if self.debug_wrapper: |
884 |
> |
if self.debug_wrapper==1: |
885 |
|
txt += 'ls -Al\n' |
886 |
|
txt += '\n' |
887 |
|
txt += 'cd $RUNTIME_AREA\n' |
903 |
|
'", other.GlueHostApplicationSoftwareRunTimeEnvironment)' |
904 |
|
|
905 |
|
req = req + ' && (other.GlueHostNetworkAdapterOutboundIP)' |
906 |
< |
if common.scheduler.name() == "glitecoll": |
906 |
> |
if ( common.scheduler.name() == "glitecoll" ) or ( common.scheduler.name() == "glite"): |
907 |
|
req += ' && other.GlueCEStateStatus == "Production" ' |
908 |
|
|
909 |
|
return req |
974 |
|
txt += ' echo "==> setup cms environment ok"\n' |
975 |
|
return txt |
976 |
|
|
977 |
< |
def modifyReport(self, nj): |
977 |
> |
def wsModifyReport(self, nj): |
978 |
|
""" |
979 |
|
insert the part of the script that modifies the FrameworkJob Report |
980 |
|
""" |
981 |
< |
txt = '\n#Written by cms_cmssw::modifyReport\n' |
981 |
> |
|
982 |
> |
txt = '' |
983 |
|
publish_data = int(self.cfg_params.get('USER.publish_data',0)) |
984 |
< |
if (publish_data == 1): |
985 |
< |
processedDataset = self.cfg_params['USER.publish_data_name'] |
986 |
< |
### FEDE for publication with LSF and CAF schedulers #### |
987 |
< |
print "common.scheduler.name().upper() = ", common.scheduler.name().upper() |
1208 |
< |
if (common.scheduler.name().upper() == "CAF" or common.scheduler.name().upper() == "LSF"): |
1209 |
< |
print "chiamo LFNBaseName con localUser = true" |
1210 |
< |
LFNBaseName = LFNBase(processedDataset, LocalUser=True) |
1211 |
< |
else : |
1212 |
< |
LFNBaseName = LFNBase(processedDataset) |
1213 |
< |
#### |
984 |
> |
#if (publish_data == 1): |
985 |
> |
if (self.copy_data == 1): |
986 |
> |
txt = '\n#Written by cms_cmssw::wsModifyReport\n' |
987 |
> |
publish_data = int(self.cfg_params.get('USER.publish_data',0)) |
988 |
|
|
989 |
< |
txt += 'if [ $copy_exit_status -eq 0 ]; then\n' |
990 |
< |
txt += ' FOR_LFN=%s_${PSETHASH}/\n'%(LFNBaseName) |
989 |
> |
|
990 |
> |
txt += 'if [ $StageOutExitStatus -eq 0 ]; then\n' |
991 |
> |
txt += ' FOR_LFN=$LFNBaseName\n' |
992 |
|
txt += 'else\n' |
993 |
|
txt += ' FOR_LFN=/copy_problems/ \n' |
1219 |
– |
txt += ' SE=""\n' |
1220 |
– |
txt += ' SE_PATH=""\n' |
994 |
|
txt += 'fi\n' |
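Because the hunk interleaves old and new lines, the net FOR_LFN logic is easier to read in one piece: the test now keys on $StageOutExitStatus instead of $copy_exit_status, the LFN base comes from $LFNBaseName set at stage-out time, and the SE/SE_PATH resets are gone. A sketch of the emitted fragment as it stands after this change:

    def forLfnFragment():
        # on successful stage-out use the LFN base computed earlier,
        # otherwise park the outputs under /copy_problems/
        txt  = 'if [ $StageOutExitStatus -eq 0 ]; then\n'
        txt += '   FOR_LFN=$LFNBaseName\n'
        txt += 'else\n'
        txt += '   FOR_LFN=/copy_problems/ \n'
        txt += 'fi\n'
        return txt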
995 |
|
|
996 |
|
txt += 'echo ">>> Modify Job Report:" \n' |
997 |
|
txt += 'chmod a+x $RUNTIME_AREA/ProdCommon/FwkJobRep/ModifyJobReport.py\n' |
1225 |
– |
txt += 'ProcessedDataset='+processedDataset+'\n' |
1226 |
– |
txt += 'echo "ProcessedDataset = $ProcessedDataset"\n' |
998 |
|
txt += 'echo "SE = $SE"\n' |
999 |
|
txt += 'echo "SE_PATH = $SE_PATH"\n' |
1000 |
|
txt += 'echo "FOR_LFN = $FOR_LFN" \n' |
1001 |
|
txt += 'echo "CMSSW_VERSION = $CMSSW_VERSION"\n\n' |
1002 |
< |
txt += 'echo "$RUNTIME_AREA/ProdCommon/FwkJobRep/ModifyJobReport.py $RUNTIME_AREA/crab_fjr_$NJob.xml $NJob $FOR_LFN $PrimaryDataset $DataTier $ProcessedDataset $ApplicationFamily $executable $CMSSW_VERSION $PSETHASH $SE $SE_PATH"\n' |
1003 |
< |
txt += '$RUNTIME_AREA/ProdCommon/FwkJobRep/ModifyJobReport.py $RUNTIME_AREA/crab_fjr_$NJob.xml $NJob $FOR_LFN $PrimaryDataset $DataTier $ProcessedDataset $ApplicationFamily $executable $CMSSW_VERSION $PSETHASH $SE $SE_PATH\n' |
1002 |
> |
|
1003 |
> |
|
1004 |
> |
args = 'fjr $RUNTIME_AREA/crab_fjr_$NJob.xml n_job $NJob for_lfn $FOR_LFN PrimaryDataset $PrimaryDataset ApplicationFamily $ApplicationFamily ApplicationName $executable cmssw_version $CMSSW_VERSION psethash $PSETHASH se_name $SE se_path $SE_PATH' |
1005 |
> |
if (publish_data == 1): |
1006 |
> |
processedDataset = self.cfg_params['USER.publish_data_name'] |
1007 |
> |
txt += 'ProcessedDataset='+processedDataset+'\n' |
1008 |
> |
txt += 'echo "ProcessedDataset = $ProcessedDataset"\n' |
1009 |
> |
args += ' UserProcessedDataset $USER-$ProcessedDataset-$PSETHASH' |
1010 |
> |
|
1011 |
> |
txt += 'echo "$RUNTIME_AREA/ProdCommon/FwkJobRep/ModifyJobReport.py '+str(args)+'"\n' |
1012 |
> |
txt += '$RUNTIME_AREA/ProdCommon/FwkJobRep/ModifyJobReport.py '+str(args)+'\n' |
1013 |
|
txt += 'modifyReport_result=$?\n' |
1014 |
|
txt += 'if [ $modifyReport_result -ne 0 ]; then\n' |
1015 |
|
txt += ' modifyReport_result=70500\n' |
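ModifyJobReport.py is now driven by key/value pairs rather than a fixed positional list, and the UserProcessedDataset pair is added only when publication is switched on. A hedged sketch of the argument assembly, using exactly the keys visible in the hunk:

    def modifyReportArgs(publish_data):
        args = 'fjr $RUNTIME_AREA/crab_fjr_$NJob.xml n_job $NJob' \
               ' for_lfn $FOR_LFN PrimaryDataset $PrimaryDataset' \
               ' ApplicationFamily $ApplicationFamily ApplicationName $executable' \
               ' cmssw_version $CMSSW_VERSION psethash $PSETHASH' \
               ' se_name $SE se_path $SE_PATH'
        if publish_data == 1:
            # dataset name chosen by the user via USER.publish_data_name
            args += ' UserProcessedDataset $USER-$ProcessedDataset-$PSETHASH'
        return args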
1030 |
|
txt += 'if [ -s $RUNTIME_AREA/crab_fjr_$NJob.xml ]; then\n' |
1031 |
|
txt += ' if [ -s $RUNTIME_AREA/parseCrabFjr.py ]; then\n' |
1032 |
|
txt += ' cmd_out=`python $RUNTIME_AREA/parseCrabFjr.py --input $RUNTIME_AREA/crab_fjr_$NJob.xml --dashboard $MonitorID,$MonitorJobID '+self.debugWrap+'`\n' |
1033 |
< |
if self.debug_wrapper : |
1033 |
> |
if self.debug_wrapper==1 : |
1034 |
|
txt += ' echo "Result of parsing the FrameworkJobReport crab_fjr.xml: $cmd_out"\n' |
1035 |
|
txt += ' executable_exit_status=`python $RUNTIME_AREA/parseCrabFjr.py --input $RUNTIME_AREA/crab_fjr_$NJob.xml --exitcode`\n' |
1036 |
|
txt += ' if [ $executable_exit_status -eq 50115 ];then\n' |
1046 |
|
txt += ' echo "CRAB python script to parse CRAB FrameworkJobReport crab_fjr.xml is not available, using exit code of executable from command line."\n' |
1047 |
|
txt += ' fi\n' |
1048 |
|
#### Patch to check input data reading for CMSSW16x. Hopefully we'll remove it asap |
1049 |
< |
|
1050 |
< |
if (self.datasetPath and not self.dataset_pu ): |
1049 |
> |
txt += ' if [ $executable_exit_status -eq 0 ];then\n' |
1050 |
> |
txt += ' echo ">>> Executable succeded $executable_exit_status"\n' |
1051 |
> |
## This can no longer work, given the changes to the Job arguments |
1052 |
> |
""" |
1053 |
> |
if (self.datasetPath and not (self.dataset_pu or self.useParent==1)) : |
1054 |
|
# VERIFY PROCESSED DATA |
1055 |
< |
txt += ' if [ $executable_exit_status -eq 0 ];then\n' |
1056 |
< |
txt += ' echo ">>> Verify list of processed files:"\n' |
1057 |
< |
txt += ' echo $InputFiles |tr -d \'\\\\\' |tr \',\' \'\\n\'|tr -d \'"\' > input-files.txt\n' |
1058 |
< |
txt += ' python $RUNTIME_AREA/parseCrabFjr.py --input $RUNTIME_AREA/crab_fjr_$NJob.xml --lfn > processed-files.txt\n' |
1059 |
< |
txt += ' cat input-files.txt | sort | uniq > tmp.txt\n' |
1060 |
< |
txt += ' mv tmp.txt input-files.txt\n' |
1061 |
< |
txt += ' echo "cat input-files.txt"\n' |
1062 |
< |
txt += ' echo "----------------------"\n' |
1063 |
< |
txt += ' cat input-files.txt\n' |
1064 |
< |
txt += ' cat processed-files.txt | sort | uniq > tmp.txt\n' |
1065 |
< |
txt += ' mv tmp.txt processed-files.txt\n' |
1066 |
< |
txt += ' echo "----------------------"\n' |
1067 |
< |
txt += ' echo "cat processed-files.txt"\n' |
1068 |
< |
txt += ' echo "----------------------"\n' |
1069 |
< |
txt += ' cat processed-files.txt\n' |
1070 |
< |
txt += ' echo "----------------------"\n' |
1071 |
< |
txt += ' diff -q input-files.txt processed-files.txt\n' |
1072 |
< |
txt += ' fileverify_status=$?\n' |
1073 |
< |
txt += ' if [ $fileverify_status -ne 0 ]; then\n' |
1074 |
< |
txt += ' executable_exit_status=30001\n' |
1075 |
< |
txt += ' echo "ERROR ==> not all input files processed"\n' |
1076 |
< |
txt += ' echo " ==> list of processed files from crab_fjr.xml differs from list in pset.cfg"\n' |
1077 |
< |
txt += ' echo " ==> diff input-files.txt processed-files.txt"\n' |
1078 |
< |
txt += ' fi\n' |
1079 |
< |
txt += ' fi\n' |
1297 |
< |
txt += '\n' |
1055 |
> |
txt += ' echo ">>> Verify list of processed files:"\n' |
1056 |
> |
txt += ' echo $InputFiles |tr -d \'\\\\\' |tr \',\' \'\\n\'|tr -d \'"\' > input-files.txt\n' |
1057 |
> |
txt += ' python $RUNTIME_AREA/parseCrabFjr.py --input $RUNTIME_AREA/crab_fjr_$NJob.xml --lfn > processed-files.txt\n' |
1058 |
> |
txt += ' cat input-files.txt | sort | uniq > tmp.txt\n' |
1059 |
> |
txt += ' mv tmp.txt input-files.txt\n' |
1060 |
> |
txt += ' echo "cat input-files.txt"\n' |
1061 |
> |
txt += ' echo "----------------------"\n' |
1062 |
> |
txt += ' cat input-files.txt\n' |
1063 |
> |
txt += ' cat processed-files.txt | sort | uniq > tmp.txt\n' |
1064 |
> |
txt += ' mv tmp.txt processed-files.txt\n' |
1065 |
> |
txt += ' echo "----------------------"\n' |
1066 |
> |
txt += ' echo "cat processed-files.txt"\n' |
1067 |
> |
txt += ' echo "----------------------"\n' |
1068 |
> |
txt += ' cat processed-files.txt\n' |
1069 |
> |
txt += ' echo "----------------------"\n' |
1070 |
> |
txt += ' diff -qbB input-files.txt processed-files.txt\n' |
1071 |
> |
txt += ' fileverify_status=$?\n' |
1072 |
> |
txt += ' if [ $fileverify_status -ne 0 ]; then\n' |
1073 |
> |
txt += ' executable_exit_status=30001\n' |
1074 |
> |
txt += ' echo "ERROR ==> not all input files processed"\n' |
1075 |
> |
txt += ' echo " ==> list of processed files from crab_fjr.xml differs from list in pset.cfg"\n' |
1076 |
> |
txt += ' echo " ==> diff input-files.txt processed-files.txt"\n' |
1077 |
> |
txt += ' fi\n' |
1078 |
> |
""" |
1079 |
> |
txt += ' fi\n' |
1080 |
|
txt += 'else\n' |
1081 |
|
txt += ' echo "CRAB FrameworkJobReport crab_fjr.xml is not available, using exit code of executable from command line."\n' |
1082 |
|
txt += 'fi\n' |
1083 |
|
txt += '\n' |
1084 |
+ |
txt += 'if [ $executable_exit_status -ne 0 ] && [ $executable_exit_status -ne 50115 ] && [ $executable_exit_status -ne 50117 ] && [ $executable_exit_status -ne 30001 ];then\n' |
1085 |
+ |
txt += ' echo ">>> Executable failed $executable_exit_status"\n' |
1086 |
+ |
txt += ' echo "ExeExitCode=$executable_exit_status" | tee -a $RUNTIME_AREA/$repo\n' |
1087 |
+ |
txt += ' echo "EXECUTABLE_EXIT_STATUS = $executable_exit_status"\n' |
1088 |
+ |
txt += ' job_exit_code=$executable_exit_status\n' |
1089 |
+ |
txt += ' func_exit\n' |
1090 |
+ |
txt += 'fi\n\n' |
1091 |
|
txt += 'echo "ExeExitCode=$executable_exit_status" | tee -a $RUNTIME_AREA/$repo\n' |
1092 |
|
txt += 'echo "EXECUTABLE_EXIT_STATUS = $executable_exit_status"\n' |
1093 |
|
txt += 'job_exit_code=$executable_exit_status\n' |
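The new abort block stops the wrapper early (func_exit) on any non-zero executable exit status except the codes that are dealt with later in the script. The meanings below are inferred from context, not stated in the diff: 50115/50117 look like framework-job-report problems and 30001 is the input-file verification failure set above. As a sketch of the policy:

    # exit codes exempted from the early abort; handled further down
    HANDLED_LATER = (50115, 50117, 30001)  # meanings assumed from context

    def shouldAbort(executable_exit_status):
        return executable_exit_status != 0 and \
               executable_exit_status not in HANDLED_LATER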
1100 |
|
def getParams(self): |
1101 |
|
return self._params |
1102 |
|
|
1103 |
< |
def uniquelist(self, old): |
1315 |
< |
""" |
1316 |
< |
remove duplicates from a list |
1317 |
< |
""" |
1318 |
< |
nd={} |
1319 |
< |
for e in old: |
1320 |
< |
nd[e]=0 |
1321 |
< |
return nd.keys() |
1322 |
< |
|
1323 |
< |
def outList(self): |
1103 |
> |
def outList(self,list=False): |
1104 |
|
""" |
1105 |
|
check the dimension of the output files |
1106 |
|
""" |
1109 |
|
listOutFiles = [] |
1110 |
|
stdout = 'CMSSW_$NJob.stdout' |
1111 |
|
stderr = 'CMSSW_$NJob.stderr' |
1112 |
+ |
if len(self.output_file) <= 0: |
1113 |
+ |
msg ="WARNING: no output files name have been defined!!\n" |
1114 |
+ |
msg+="\tno output files will be reported back/staged\n" |
1115 |
+ |
common.logger.info(msg) |
1116 |
|
if (self.return_data == 1): |
1117 |
|
for file in (self.output_file+self.output_file_sandbox): |
1118 |
|
listOutFiles.append(numberFile(file, '$NJob')) |
1126 |
|
txt += 'echo "output files: '+string.join(listOutFiles,' ')+'"\n' |
1127 |
|
txt += 'filesToCheck="'+string.join(listOutFiles,' ')+'"\n' |
1128 |
|
txt += 'export filesToCheck\n' |
1129 |
+ |
|
1130 |
+ |
if list : return self.output_file |
1131 |
|
return txt |
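Taken together, the outList hunks add a warning when no output files are configured and a list flag that returns the raw python list instead of the shell fragment, so callers can reuse the same method for both purposes. A consolidated sketch (class context assumed; the stdout/stderr handling between the hunks is truncated in the diff and omitted here):

    def outList(self, list=False):
        """
        check the dimension of the output files
        """
        listOutFiles = []
        if len(self.output_file) <= 0:
            msg  = "WARNING: no output file names have been defined!!\n"
            msg += "\tno output files will be reported back/staged\n"
            common.logger.info(msg)
        if self.return_data == 1:
            for file in (self.output_file + self.output_file_sandbox):
                listOutFiles.append(numberFile(file, '$NJob'))
        txt  = 'echo "output files: ' + string.join(listOutFiles, ' ') + '"\n'
        txt += 'filesToCheck="' + string.join(listOutFiles, ' ') + '"\n'
        txt += 'export filesToCheck\n'
        if list:
            return self.output_file
        return txt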