  from crab_logger import Logger
  from crab_exceptions import *
  from crab_util import *
- import math
  import common
- import PsetManipulator
- 
- import DBSInfo
- import DataDiscovery
- import DataLocation
  import Scram

- import glob, os, string, re
+ import os, string, glob

  class Cmssw(JobType):
      def __init__(self, cfg_params, ncjobs):
          JobType.__init__(self, 'CMSSW')
          common.logger.debug(3,'CMSSW::__init__')

-         # Marco.
          self._params = {}
          self.cfg_params = cfg_params

+         try:
+             self.MaxTarBallSize = float(self.cfg_params['EDG.maxtarballsize'])
+         except KeyError:
+             self.MaxTarBallSize = 9.5
+ 
          # number of jobs requested to be created, limit obj splitting
          self.ncjobs = ncjobs

          log = common.logger

          self.scram = Scram.Scram(cfg_params)
-         scramArea = ''
          self.additional_inbox_files = []
          self.scriptExe = ''
          self.executable = ''
+         self.executable_arch = self.scram.getArch()
          self.tgz_name = 'default.tgz'
+         self.additional_tgz_name = 'additional.tgz'
+         self.scriptName = 'CMSSW.sh'
          self.pset = ''        #script use case Da
          self.datasetPath = '' #script use case Da

@@ ... @@
          self.setParam_('application', self.version)

          ### collect Data cards
+ 
+         ## get DBS mode
+         try:
+             self.use_dbs_1 = int(self.cfg_params['CMSSW.use_dbs_1'])
+         except KeyError:
+             self.use_dbs_1 = 0
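
A review note on the two config reads just added: the try/except-KeyError-with-a-default idiom introduced for EDG.maxtarballsize and CMSSW.use_dbs_1 is repeated below for every optional seed and run parameter. A small helper would cut the repetition; a minimal sketch, assuming cfg_params behaves like a plain dict (getOptionalParam is a hypothetical name, not part of CRAB):

    def getOptionalParam(cfg_params, key, default, cast=int):
        # Return cfg_params[key] coerced with `cast`, or `default` if absent.
        try:
            return cast(cfg_params[key])
        except KeyError:
            return default

    # e.g. use_dbs_1 = getOptionalParam(cfg_params, 'CMSSW.use_dbs_1', 0)
    #      max_tarball = getOptionalParam(cfg_params, 'EDG.maxtarballsize', 9.5, float)
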
+ 
          try:
              tmp =  cfg_params['CMSSW.datasetpath']
              log.debug(6, "CMSSW::CMSSW(): datasetPath = "+tmp)
@@ ... @@
              self.setParam_('dataset', 'None')
              self.setParam_('owner', 'None')
          else:
-             datasetpath_split = self.datasetPath.split("/")
-             self.setParam_('dataset', datasetpath_split[1])
-             self.setParam_('owner', datasetpath_split[-1])
- 
+             try:
+                 datasetpath_split = self.datasetPath.split("/")
+                 # standard style
+                 if self.use_dbs_1 == 1 :
+                     self.setParam_('dataset', datasetpath_split[1])
+                     self.setParam_('owner', datasetpath_split[-1])
+                 else:
+                     self.setParam_('dataset', datasetpath_split[1])
+                     self.setParam_('owner', datasetpath_split[2])
+             except:
+                 self.setParam_('dataset', self.datasetPath)
+                 self.setParam_('owner', self.datasetPath)
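
For reference, the two path layouts this new branch distinguishes: a DBS-2 path carries the processed-dataset name in its third field, while a DBS-1 path ends with the owner. A quick check of the indexing (the sample paths are illustrative only):

    dbs2_path = "/PrimaryDS/ProcessedDS/DataTier".split("/")
    # split("/") on a leading-slash path yields a leading '' element,
    # so [1] is the primary dataset and [2] the second field.
    assert dbs2_path[1] == "PrimaryDS" and dbs2_path[2] == "ProcessedDS"

    dbs1_path = "/PrimaryDS/DataTier/Owner".split("/")
    assert dbs1_path[-1] == "Owner"   # DBS-1 branch takes the last field as owner
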
+ 
          self.setTaskid_()
          self.setParam_('taskId', self.cfg_params['taskId'])

@@ ... @@
          # other output files to be returned via sandbox or copied to SE
          try:
              self.output_file = []
- 
- 
              tmp = cfg_params['CMSSW.output_file']
              if tmp != '':
                  tmpOutFiles = string.split(cfg_params['CMSSW.output_file'],',')
@@ ... @@
                  self.output_file.append(tmp)
                  pass
              else:
-                 log.message("No output file defined: only stdout/err and the CRAB Framework Job Report will be available")
+                 log.message("No output file defined: only stdout/err and the CRAB Framework Job Report will be available\n")
                  pass
              pass
          except KeyError:
-             log.message("No output file defined: only stdout/err and the CRAB Framework Job Report will be available")
+             log.message("No output file defined: only stdout/err and the CRAB Framework Job Report will be available\n")
              pass

          # script_exe file as additional file in inputSandbox
@@ ... @@
              self.scriptExe = cfg_params['USER.script_exe']
              if self.scriptExe != '':
                  if not os.path.isfile(self.scriptExe):
-                     msg ="WARNING. file "+self.scriptExe+" not found"
+                     msg ="ERROR. file "+self.scriptExe+" not found"
                      raise CrabException(msg)
                  self.additional_inbox_files.append(string.strip(self.scriptExe))
          except KeyError:
              self.scriptExe = ''
+ 
          #CarlosDaniele
          if self.datasetPath == None and self.pset == None and self.scriptExe == '' :
-             msg ="WARNING. script_exe not defined"
+             msg ="Error. script_exe not defined"
              raise CrabException(msg)

          ## additional input files
@@ ... @@
              tmp = string.strip(tmp)
              dirname = ''
              if not tmp[0]=="/": dirname = "."
-             files = glob.glob(os.path.join(dirname, tmp))
+             files = []
+             if string.find(tmp,"*")>-1:
+                 files = glob.glob(os.path.join(dirname, tmp))
+                 if len(files)==0:
+                     raise CrabException("No additional input file found with this pattern: "+tmp)
+             else:
+                 files.append(tmp)
              for file in files:
                  if not os.path.exists(file):
                      raise CrabException("Additional input file not found: "+file)
                  pass
+                 # fname = string.split(file, '/')[-1]
+                 # storedFile = common.work_space.pathForTgz()+'share/'+fname
+                 # shutil.copyfile(file, storedFile)
                  self.additional_inbox_files.append(string.strip(file))
                  pass
              pass
@@ ... @@
          except KeyError:
              self.sourceSeedVtx = None
              common.logger.debug(5,"No vertex seed given")
+ 
+         try:
+             self.sourceSeedG4 = int(cfg_params['CMSSW.g4_seed'])
+         except KeyError:
+             self.sourceSeedG4 = None
+             common.logger.debug(5,"No g4 sim hits seed given")
+ 
+         try:
+             self.sourceSeedMix = int(cfg_params['CMSSW.mix_seed'])
+         except KeyError:
+             self.sourceSeedMix = None
+             common.logger.debug(5,"No mix seed given")
+ 
+         try:
+             self.firstRun = int(cfg_params['CMSSW.first_run'])
+         except KeyError:
+             self.firstRun = None
+             common.logger.debug(5,"No first run given")
          if self.pset != None: #CarlosDaniele
-             self.PsetEdit = PsetManipulator.PsetManipulator(self.pset) #Daniele Pset
+             ver = string.split(self.version,"_")
+             if (int(ver[1])>=1 and int(ver[2])>=5):
+                 import PsetManipulator150 as pp
+             else:
+                 import PsetManipulator as pp
+             PsetEdit = pp.PsetManipulator(self.pset) #Daniele Pset
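
One caveat on the version switch above: string.split(self.version, "_") turns "CMSSW_1_5_2" into ['CMSSW', '1', '5', '2'], but the test int(ver[1])>=1 and int(ver[2])>=5 would pick the old manipulator for a hypothetical CMSSW_2_0_0, since minor 0 fails >=5. The same pattern reappears in executableArgs() below. Comparing (major, minor) as a tuple avoids it; a sketch, not the committed code:

    def needs_pset_150(version):
        # version like "CMSSW_1_5_2"; tuple comparison orders (major, minor)
        # correctly, so (2, 0) >= (1, 5) holds even though 0 < 5.
        parts = version.split("_")
        major, minor = int(parts[1]), int(parts[2])
        return (major, minor) >= (1, 5)

    assert needs_pset_150("CMSSW_1_5_2")
    assert needs_pset_150("CMSSW_2_0_0")
    assert not needs_pset_150("CMSSW_1_3_1")
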

          #DBSDLS-start
          ## Initialize the variables that are extracted from DBS/DLS and needed in other places of the code
@@ ... @@
                  self.jobSplittingForScript()
              else:
                  self.jobSplittingNoInput()
-         else: self.jobSplittingByBlocks(blockSites)
+         else:
+             self.jobSplittingByBlocks(blockSites)

          # modify Pset
          if self.pset != None: #CarlosDaniele
              try:
                  if (self.datasetPath): # standard job
                      # allow processing a fraction of events in a file
-                     self.PsetEdit.inputModule("INPUT")
-                     self.PsetEdit.maxEvent("INPUTMAXEVENTS")
-                     self.PsetEdit.skipEvent("INPUTSKIPEVENTS")
+                     PsetEdit.inputModule("INPUT")
+                     PsetEdit.maxEvent("INPUTMAXEVENTS")
+                     PsetEdit.skipEvent("INPUTSKIPEVENTS")
                  else:  # pythia like job
-                     self.PsetEdit.maxEvent(self.eventsPerJob)
+                     PsetEdit.maxEvent(self.eventsPerJob)
+                     if (self.firstRun):
+                         PsetEdit.pythiaFirstRun("INPUTFIRSTRUN")  #First Run
                      if (self.sourceSeed) :
-                         self.PsetEdit.pythiaSeed("INPUT")
+                         PsetEdit.pythiaSeed("INPUT")
                          if (self.sourceSeedVtx) :
-                             self.PsetEdit.pythiaSeedVtx("INPUTVTX")
+                             PsetEdit.vtxSeed("INPUTVTX")
+                         if (self.sourceSeedG4) :
+                             PsetEdit.g4Seed("INPUTG4")
+                         if (self.sourceSeedMix) :
+                             PsetEdit.mixSeed("INPUTMIX")
                  # add FrameworkJobReport to parameter-set
-                 self.PsetEdit.addCrabFJR(self.fjrFileName)
-                 self.PsetEdit.psetWriter(self.configFilename())
+                 PsetEdit.addCrabFJR(self.fjrFileName)
+                 PsetEdit.psetWriter(self.configFilename())
              except:
                  msg='Error while manipulating ParameterSet: exiting...'
                  raise CrabException(msg)

      def DataDiscoveryAndLocation(self, cfg_params):

+         import DataDiscovery
+         import DataDiscovery_DBS2
+         import DataLocation
          common.logger.debug(10,"CMSSW::DataDiscoveryAndLocation()")

          datasetPath=self.datasetPath

-         ## TODO
-         dataTiersList = ""
-         dataTiers = dataTiersList.split(',')
- 
          ## Contact the DBS
-         common.logger.message("Contacting DBS...")
+         common.logger.message("Contacting Data Discovery Services ...")
          try:
-             self.pubdata=DataDiscovery.DataDiscovery(datasetPath, dataTiers, cfg_params)
+ 
+             if self.use_dbs_1 == 1 :
+                 self.pubdata=DataDiscovery.DataDiscovery(datasetPath, cfg_params)
+             else :
+                 self.pubdata=DataDiscovery_DBS2.DataDiscovery_DBS2(datasetPath, cfg_params)
              self.pubdata.fetchDBSInfo()

          except DataDiscovery.NotExistingDatasetError, ex :
              msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
              raise CrabException(msg)
- 
          except DataDiscovery.NoDataTierinProvenanceError, ex :
              msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
              raise CrabException(msg)
          except DataDiscovery.DataDiscoveryError, ex:
-             msg = 'ERROR ***: failed Data Discovery in DBS %s'%ex.getErrorMessage()
+             msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
+             raise CrabException(msg)
+         except DataDiscovery_DBS2.NotExistingDatasetError_DBS2, ex :
+             msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
+             raise CrabException(msg)
+         except DataDiscovery_DBS2.NoDataTierinProvenanceError_DBS2, ex :
+             msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
+             raise CrabException(msg)
+         except DataDiscovery_DBS2.DataDiscoveryError_DBS2, ex:
+             msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
              raise CrabException(msg)
- 
-         ## get list of all required data in the form of dbs paths (dbs path = /dataset/datatier/owner)
-         ## self.DBSPaths=self.pubdata.getDBSPaths()
-         common.logger.message("Required data are :"+self.datasetPath)

          self.filesbyblock=self.pubdata.getFiles()
          self.eventsbyblock=self.pubdata.getEventsPerBlock()
          self.eventsbyfile=self.pubdata.getEventsPerFile()
-         # print str(self.filesbyblock)
-         # print 'self.eventsbyfile',len(self.eventsbyfile)
-         # print str(self.eventsbyfile)

          ## get max number of events
          self.maxEvents=self.pubdata.getMaxEvents() ## self.maxEvents used in Creator.py
-         common.logger.message("The number of available events is %s\n"%self.maxEvents)

-         common.logger.message("Contacting DLS...")
          ## Contact the DLS and build a list of sites hosting the fileblocks
          try:
              dataloc=DataLocation.DataLocation(self.filesbyblock.keys(),cfg_params)
@@ ... @@
          sites = dataloc.getSites()
          allSites = []
          listSites = sites.values()
-         for list in listSites:
-             for oneSite in list:
+         for listSite in listSites:
+             for oneSite in listSite:
                  allSites.append(oneSite)
          allSites = self.uniquelist(allSites)

-         common.logger.message("Sites ("+str(len(allSites))+") hosting part/all of dataset: "+str(allSites))
-         common.logger.debug(6, "List of Sites: "+str(allSites))
+         # screen output
+         common.logger.message("Requested dataset: " + datasetPath + " has " + str(self.maxEvents) + " events in " + str(len(self.filesbyblock.keys())) + " blocks.\n")
+ 
          return sites

      def jobSplittingByBlocks(self, blockSites):
@@ ... @@
          jobCount = 0
          list_of_lists = []

+         # list tracking which jobs belong to which block
+         jobsOfBlock = {}
+ 
          # ---- Iterate over the blocks in the dataset until ---- #
          # ---- we've met the requested total # of events    ---- #
          while ( (eventsRemaining > 0) and (blockCount < numBlocksInDataset) and (jobCount < totalNumberOfJobs)):
              block = blocks[blockCount]
              blockCount += 1
+             if block not in jobsOfBlock.keys() :
+                 jobsOfBlock[block] = []

- 
-             numEventsInBlock = self.eventsbyblock[block]
-             common.logger.debug(5,'Events in Block File '+str(numEventsInBlock))
+             if self.eventsbyblock.has_key(block) :
+                 numEventsInBlock = self.eventsbyblock[block]
+                 common.logger.debug(5,'Events in Block File '+str(numEventsInBlock))

-             files = self.filesbyblock[block]
-             numFilesInBlock = len(files)
-             if (numFilesInBlock <= 0):
-                 continue
-             fileCount = 0
- 
-             # ---- New block => New job ---- #
-             parString = "\\{"
-             # counter for number of events in files currently worked on
-             filesEventCount = 0
-             # flag if next while loop should touch new file
-             newFile = 1
-             # job event counter
-             jobSkipEventCount = 0
+                 files = self.filesbyblock[block]
+                 numFilesInBlock = len(files)
+                 if (numFilesInBlock <= 0):
+                     continue
+                 fileCount = 0
+ 
+                 # ---- New block => New job ---- #
+                 parString = "\\{"
+                 # counter for number of events in files currently worked on
+                 filesEventCount = 0
+                 # flag if next while loop should touch new file
+                 newFile = 1
+                 # job event counter
+                 jobSkipEventCount = 0

-             # ---- Iterate over the files in the block until we've met the requested ---- #
-             # ---- total # of events or we've gone over all the files in this block  ---- #
-             while ( (eventsRemaining > 0) and (fileCount < numFilesInBlock) and (jobCount < totalNumberOfJobs) ):
-                 file = files[fileCount]
-                 if newFile :
-                     try:
-                         numEventsInFile = self.eventsbyfile[file]
-                         common.logger.debug(6, "File "+str(file)+" has "+str(numEventsInFile)+" events")
-                         # increase filesEventCount
-                         filesEventCount += numEventsInFile
-                         # Add file to current job
-                         parString += '\\\"' + file + '\\\"\,'
-                         newFile = 0
-                     except KeyError:
-                         common.logger.message("File "+str(file)+" has unknown number of events: skipping")
+                 # ---- Iterate over the files in the block until we've met the requested ---- #
+                 # ---- total # of events or we've gone over all the files in this block  ---- #
+                 while ( (eventsRemaining > 0) and (fileCount < numFilesInBlock) and (jobCount < totalNumberOfJobs) ):
+                     file = files[fileCount]
+                     if newFile :
+                         try:
+                             numEventsInFile = self.eventsbyfile[file]
+                             common.logger.debug(6, "File "+str(file)+" has "+str(numEventsInFile)+" events")
+                             # increase filesEventCount
+                             filesEventCount += numEventsInFile
+                             # Add file to current job
+                             parString += '\\\"' + file + '\\\"\,'
+                             newFile = 0
+                         except KeyError:
+                             common.logger.message("File "+str(file)+" has unknown number of events: skipping")


-                 # if less events in file remain than eventsPerJobRequested
-                 if ( filesEventCount - jobSkipEventCount < eventsPerJobRequested ) :
-                     # if last file in block
-                     if ( fileCount == numFilesInBlock-1 ) :
-                         # end job using last file, use remaining events in block
+                     # if less events in file remain than eventsPerJobRequested
+                     if ( filesEventCount - jobSkipEventCount < eventsPerJobRequested ) :
+                         # if last file in block
+                         if ( fileCount == numFilesInBlock-1 ) :
+                             # end job using last file, use remaining events in block
+                             # close job and touch new file
+                             fullString = parString[:-2]
+                             fullString += '\\}'
+                             list_of_lists.append([fullString,str(-1),str(jobSkipEventCount)])
+                             common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(filesEventCount - jobSkipEventCount)+" events (last file in block).")
+                             self.jobDestination.append(blockSites[block])
+                             common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
+                             # fill jobs of block dictionary
+                             jobsOfBlock[block].append(jobCount+1)
+                             # reset counter
+                             jobCount = jobCount + 1
+                             totalEventCount = totalEventCount + filesEventCount - jobSkipEventCount
+                             eventsRemaining = eventsRemaining - filesEventCount + jobSkipEventCount
+                             jobSkipEventCount = 0
+                             # reset file
+                             parString = "\\{"
+                             filesEventCount = 0
+                             newFile = 1
+                             fileCount += 1
+                         else :
+                             # go to next file
+                             newFile = 1
+                             fileCount += 1
+                     # if events in file equal to eventsPerJobRequested
+                     elif ( filesEventCount - jobSkipEventCount == eventsPerJobRequested ) :
                          # close job and touch new file
                          fullString = parString[:-2]
                          fullString += '\\}'
-                         list_of_lists.append([fullString,str(-1),str(jobSkipEventCount)])
-                         common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(filesEventCount - jobSkipEventCount)+" events (last file in block).")
+                         list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
+                         common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
                          self.jobDestination.append(blockSites[block])
                          common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
+                         jobsOfBlock[block].append(jobCount+1)
                          # reset counter
                          jobCount = jobCount + 1
-                         totalEventCount = totalEventCount + filesEventCount - jobSkipEventCount
-                         eventsRemaining = eventsRemaining - filesEventCount + jobSkipEventCount
+                         totalEventCount = totalEventCount + eventsPerJobRequested
+                         eventsRemaining = eventsRemaining - eventsPerJobRequested
                          jobSkipEventCount = 0
                          # reset file
                          parString = "\\{"
                          filesEventCount = 0
                          newFile = 1
                          fileCount += 1
+ 
+                     # if more events in file remain than eventsPerJobRequested
                      else :
-                     # go to next file
-                     newFile = 1
-                     fileCount += 1
-                 # if events in file equal to eventsPerJobRequested
-                 elif ( filesEventCount - jobSkipEventCount == eventsPerJobRequested ) :
-                     # close job and touch new file
-                     fullString = parString[:-2]
-                     fullString += '\\}'
-                     list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
-                     common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
-                     self.jobDestination.append(blockSites[block])
-                     common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
-                     # reset counter
-                     jobCount = jobCount + 1
-                     totalEventCount = totalEventCount + eventsPerJobRequested
-                     eventsRemaining = eventsRemaining - eventsPerJobRequested
-                     jobSkipEventCount = 0
-                     # reset file
-                     parString = "\\{"
-                     filesEventCount = 0
-                     newFile = 1
-                     fileCount += 1
- 
-                 # if more events in file remain than eventsPerJobRequested
-                 else :
-                     # close job but don't touch new file
-                     fullString = parString[:-2]
-                     fullString += '\\}'
-                     list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
-                     common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
-                     self.jobDestination.append(blockSites[block])
-                     common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
-                     # increase counter
-                     jobCount = jobCount + 1
-                     totalEventCount = totalEventCount + eventsPerJobRequested
-                     eventsRemaining = eventsRemaining - eventsPerJobRequested
-                     # calculate skip events for last file
-                     # use filesEventCount (contains several files), jobSkipEventCount and eventsPerJobRequest
-                     jobSkipEventCount = eventsPerJobRequested - (filesEventCount - jobSkipEventCount - self.eventsbyfile[file])
-                     # remove all but the last file
-                     filesEventCount = self.eventsbyfile[file]
-                     parString = "\\{"
-                     parString += '\\\"' + file + '\\\"\,'
-                 pass # END if
-             pass # END while (iterate over files in the block)
+                         # close job but don't touch new file
+                         fullString = parString[:-2]
+                         fullString += '\\}'
+                         list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
+                         common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
+                         self.jobDestination.append(blockSites[block])
+                         common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
+                         jobsOfBlock[block].append(jobCount+1)
+                         # increase counter
+                         jobCount = jobCount + 1
+                         totalEventCount = totalEventCount + eventsPerJobRequested
+                         eventsRemaining = eventsRemaining - eventsPerJobRequested
+                         # calculate skip events for last file
+                         # use filesEventCount (contains several files), jobSkipEventCount and eventsPerJobRequest
+                         jobSkipEventCount = eventsPerJobRequested - (filesEventCount - jobSkipEventCount - self.eventsbyfile[file])
+                         # remove all but the last file
+                         filesEventCount = self.eventsbyfile[file]
+                         parString = "\\{"
+                         parString += '\\\"' + file + '\\\"\,'
+                     pass # END if
+                 pass # END while (iterate over files in the block)
          pass # END while (iterate over blocks in the dataset)
          self.ncjobs = self.total_number_of_jobs = jobCount
          if (eventsRemaining > 0 and jobCount < totalNumberOfJobs ):
              common.logger.message("Could not run on all requested events because some blocks not hosted at allowed sites.")
-         common.logger.message("\n"+str(jobCount)+" job(s) can run on "+str(totalEventCount)+" events.\n")
+         common.logger.message(str(jobCount)+" job(s) can run on "+str(totalEventCount)+" events.\n")

+         # screen output
+         screenOutput = "List of jobs and available destination sites:\n\n"
+ 
+         blockCounter = 0
+         for block in blocks:
+             if block in jobsOfBlock.keys() :
+                 blockCounter += 1
+                 screenOutput += "Block %5i: jobs %20s: sites: %s\n" % (blockCounter,spanRanges(jobsOfBlock[block]),','.join(blockSites[block]))
+ 
+         common.logger.message(screenOutput)
+ 
          self.list_of_args = list_of_lists
          return
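
The splitting loop above is easier to audit against a small standalone model. This sketch reproduces the same arithmetic — accumulate files until eventsPerJobRequested is reached, and carry jobSkipEventCount into the next job when a file is only partly consumed — while ignoring the eventsRemaining and totalNumberOfJobs caps of the real loop (illustrative code, not CRAB's):

    def split_block(events_per_file, events_per_job):
        # Model of the loop above: a job spec is (files, maxEvents, skipEvents);
        # maxEvents = -1 means "use the remaining events in the block".
        jobs = []
        job_files = []          # files collected for the job being built
        files_event_count = 0   # total events in job_files
        job_skip = 0            # events of job_files already used by earlier jobs
        for i, n in enumerate(events_per_file):
            job_files.append(i)
            files_event_count += n
            # close as many full jobs as this file allows
            while files_event_count - job_skip >= events_per_job:
                jobs.append((list(job_files), events_per_job, job_skip))
                # next job reuses the current file, skipping what was consumed
                # (cf. jobSkipEventCount in the diff)
                job_skip = events_per_job - (files_event_count - job_skip - n)
                job_files = [i]
                files_event_count = n
                if files_event_count - job_skip == 0:   # file exactly exhausted
                    job_files, files_event_count, job_skip = [], 0, 0
                    break
        if job_files:
            # last file in the block closes the final, partial job
            jobs.append((job_files, -1, job_skip))
        return jobs

    # one block of three files with 20 events each, 50 events per job:
    assert split_block([20, 20, 20], 50) == [([0, 1, 2], 50, 0), ([2], -1, 10)]
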

@@ ... @@
              raise CrabException(msg)

          if (self.selectEventsPerJob):
-             self.total_number_of_jobs = int(self.total_number_of_events/self.eventsPerJob)
+             if (self.selectTotalNumberEvents):
+                 self.total_number_of_jobs = int(self.total_number_of_events/self.eventsPerJob)
+             elif(self.selectNumberOfJobs) :
+                 self.total_number_of_jobs = self.theNumberOfJobs
+                 self.total_number_of_events = int(self.theNumberOfJobs*self.eventsPerJob)
+ 
          elif (self.selectNumberOfJobs) :
              self.total_number_of_jobs = self.theNumberOfJobs
              self.eventsPerJob = int(self.total_number_of_events/self.total_number_of_jobs)
- 
+ 
          common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))

          # is there any remainder?
@@ ... @@
              ## Since there is no input, any site is good
              # self.jobDestination.append(["Any"])
              self.jobDestination.append([""]) #must be empty to write correctly the xml
+             args=[]
+             if (self.firstRun):
+                 ## pythia first run
+                 #self.list_of_args.append([(str(self.firstRun)+str(i))])
+                 args.append(str(self.firstRun)+str(i))
+             else:
+                 ## no first run
+                 #self.list_of_args.append([str(i)])
+                 args.append(str(i))
              if (self.sourceSeed):
+                 args.append(str(self.sourceSeed)+str(i))
                  if (self.sourceSeedVtx):
-                     ## pythia + vtx random seed
-                     self.list_of_args.append([
-                                               str(self.sourceSeed)+str(i),
-                                               str(self.sourceSeedVtx)+str(i)
-                                               ])
-                 else:
-                     ## only pythia random seed
-                     self.list_of_args.append([(str(self.sourceSeed)+str(i))])
-             else:
-                 ## no random seed
-                 self.list_of_args.append([str(i)])
-             #print self.list_of_args
+                     ## + vtx random seed
+                     args.append(str(self.sourceSeedVtx)+str(i))
+                 if (self.sourceSeedG4):
+                     ## + G4 random seed
+                     args.append(str(self.sourceSeedG4)+str(i))
+                 if (self.sourceSeedMix):
+                     ## + Mix random seed
+                     args.append(str(self.sourceSeedMix)+str(i))
+                 pass
+             pass
+             self.list_of_args.append(args)
+             pass
+ 
+             # print self.list_of_args

          return
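
Note the ordering contract created here: each job's argument vector is [firstRun-or-index, seed, vtxSeed, g4Seed, mixSeed], with every optional entry present only when the corresponding parameter is configured, and the last three only consulted when a primary seed is set. wsSetupEnvironment() below consumes the same vector positionally through its running seedIndex, so the two sides must stay in lockstep. A sketch of the convention (illustrative, not CRAB code):

    def build_args(i, first_run=None, seed=None, vtx=None, g4=None, mix=None):
        # Mirrors jobSplittingNoInput: the job index (or firstRun+index) comes
        # first; vtx/g4/mix are only consulted when a primary seed is configured.
        args = [str(first_run) + str(i) if first_run else str(i)]
        if seed:
            args.append(str(seed) + str(i))
            for s in (vtx, g4, mix):
                if s:
                    args.append(str(s) + str(i))
        return args

    # The wrapper script reads ${args[1]}, ${args[2]}, ... in the same order,
    # advancing seedIndex once per configured seed.
    assert build_args(3, seed=111, vtx=222) == ['3', '1113', '2223']
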

@@ ... @@
          """

          # if it exist, just return it
-         self.tgzNameWithPath = common.work_space.shareDir()+self.tgz_name
+         #
+         # Marco. Let's start to use relative path for Boss XML files
+         #
+         self.tgzNameWithPath = common.work_space.pathForTgz()+'share/'+self.tgz_name
          if os.path.exists(self.tgzNameWithPath):
              return self.tgzNameWithPath

@@ ... @@
          # First of all declare the user Scram area
          swArea = self.scram.getSWArea_()
          #print "swArea = ", swArea
-         swVersion = self.scram.getSWVersion()
-         #print "swVersion = ", swVersion
+         # swVersion = self.scram.getSWVersion()
+         # print "swVersion = ", swVersion
          swReleaseTop = self.scram.getReleaseTop_()
          #print "swReleaseTop = ", swReleaseTop

@@ ... @@
          if swReleaseTop == '' or swArea == swReleaseTop:
              return

-         filesToBeTarred = []
-         ## First find the executable
-         if (self.executable != ''):
-             exeWithPath = self.scram.findFile_(executable)
-             # print exeWithPath
-             if ( not exeWithPath ):
-                 raise CrabException('User executable '+executable+' not found')
- 
-             ## then check if it's private or not
-             if exeWithPath.find(swReleaseTop) == -1:
-                 # the exe is private, so we must ship
-                 common.logger.debug(5,"Exe "+exeWithPath+" to be tarred")
-                 path = swArea+'/'
-                 exe = string.replace(exeWithPath, path,'')
-                 filesToBeTarred.append(exe)
-                 pass
-             else:
-                 # the exe is from release, we'll find it on WN
-                 pass
- 
-         ## Now get the libraries: only those in local working area
-         libDir = 'lib'
-         lib = swArea+'/' +libDir
-         common.logger.debug(5,"lib "+lib+" to be tarred")
-         if os.path.exists(lib):
-             filesToBeTarred.append(libDir)
- 
-         ## Now check if module dir is present
-         moduleDir = 'module'
-         if os.path.isdir(swArea+'/'+moduleDir):
-             filesToBeTarred.append(moduleDir)
- 
-         ## Now check if the Data dir is present
-         dataDir = 'src/Data/'
-         if os.path.isdir(swArea+'/'+dataDir):
-             filesToBeTarred.append(dataDir)
- 
-         ## copy ProdAgent dir to swArea
-         cmd = '\cp -rf ' + os.environ['CRABDIR'] + '/ProdAgentApi ' + swArea
-         cmd_out = runCommand(cmd)
-         if cmd_out != '':
-             common.logger.message('ProdAgentApi directory could not be copied to local CMSSW project directory.')
-             common.logger.message('No FrameworkJobreport parsing is possible on the WorkerNode.')
- 
-         ## Now check if the Data dir is present
-         paDir = 'ProdAgentApi'
-         if os.path.isdir(swArea+'/'+paDir):
-             filesToBeTarred.append(paDir)
- 
-         ## Create the tar-ball
-         if len(filesToBeTarred)>0:
-             cwd = os.getcwd()
-             os.chdir(swArea)
-             tarcmd = 'tar zcvf ' + self.tgzNameWithPath + ' '
-             for line in filesToBeTarred:
-                 tarcmd = tarcmd + line + ' '
-             cout = runCommand(tarcmd)
-             if not cout:
-                 raise CrabException('Could not create tar-ball')
-             os.chdir(cwd)
-         else:
-             common.logger.debug(5,"No files to be tarred")
+         import tarfile
+         try: # create tar ball
+             tar = tarfile.open(self.tgzNameWithPath, "w:gz")
+             ## First find the executable
+             if (self.executable != ''):
+                 exeWithPath = self.scram.findFile_(executable)
+                 if ( not exeWithPath ):
+                     raise CrabException('User executable '+executable+' not found')
+ 
+                 ## then check if it's private or not
+                 if exeWithPath.find(swReleaseTop) == -1:
+                     # the exe is private, so we must ship
+                     common.logger.debug(5,"Exe "+exeWithPath+" to be tarred")
+                     path = swArea+'/'
+                     # distinguish case when script is in user project area or given by full path somewhere else
+                     if exeWithPath.find(path) >= 0 :
+                         exe = string.replace(exeWithPath, path,'')
+                         tar.add(path+exe,os.path.basename(executable))
+                     else :
+                         tar.add(exeWithPath,os.path.basename(executable))
+                     pass
+                 else:
+                     # the exe is from release, we'll find it on WN
+                     pass
+ 
+             ## Now get the libraries: only those in local working area
+             libDir = 'lib'
+             lib = swArea+'/' +libDir
+             common.logger.debug(5,"lib "+lib+" to be tarred")
+             if os.path.exists(lib):
+                 tar.add(lib,libDir)
+ 
+             ## Now check if module dir is present
+             moduleDir = 'module'
+             module = swArea + '/' + moduleDir
+             if os.path.isdir(module):
+                 tar.add(module,moduleDir)
+ 
+             ## Now check if any data dir(s) is present
+             swAreaLen=len(swArea)
+             for root, dirs, files in os.walk(swArea):
+                 if "data" in dirs:
+                     common.logger.debug(5,"data "+root+"/data"+" to be tarred")
+                     tar.add(root+"/data",root[swAreaLen:]+"/data")
+ 
+             ## Add ProdAgent dir to tar
+             paDir = 'ProdAgentApi'
+             pa = os.environ['CRABDIR'] + '/' + 'ProdAgentApi'
+             if os.path.isdir(pa):
+                 tar.add(pa,paDir)
+ 
+             ### FEDE FOR DBS PUBLICATION
+             ## Add PRODCOMMON dir to tar
+             prodcommonDir = 'ProdCommon'
+             prodcommonPath = os.environ['CRABDIR'] + '/' + 'ProdCommon'
+             if os.path.isdir(prodcommonPath):
+                 tar.add(prodcommonPath,prodcommonDir)
+             #############################
+ 
+             common.logger.debug(5,"Files added to "+self.tgzNameWithPath+" : "+str(tar.getnames()))
+             tar.close()
+         except :
+             raise CrabException('Could not create tar-ball')
+ 
+         ## check for tarball size
+         tarballinfo = os.stat(self.tgzNameWithPath)
+         if ( tarballinfo.st_size > self.MaxTarBallSize*1024*1024 ) :
+             raise CrabException('Input sandbox size of ' + str(float(tarballinfo.st_size)/1024.0/1024.0) + ' MB is larger than the allowed ' + str(self.MaxTarBallSize) + ' MB input sandbox limit and not supported by the used GRID submission system. Please make sure that no unnecessary files are in all data directories in your local CMSSW project area as they are automatically packed into the input sandbox.')
+ 
+         ## create tar-ball with ML stuff
+         self.MLtgzfile = common.work_space.pathForTgz()+'share/MLfiles.tgz'
+         try:
+             tar = tarfile.open(self.MLtgzfile, "w:gz")
+             path=os.environ['CRABDIR'] + '/python/'
+             for file in ['report.py', 'DashboardAPI.py', 'Logger.py', 'ProcInfo.py', 'apmon.py', 'parseCrabFjr.py']:
+                 tar.add(path+file,file)
+             common.logger.debug(5,"Files added to "+self.MLtgzfile+" : "+str(tar.getnames()))
+             tar.close()
+         except :
+             raise CrabException('Could not create ML files tar-ball')

          return
|
|
818 |
+ |
def additionalInputFileTgz(self): |
819 |
+ |
""" |
820 |
+ |
Put all additional files into a tar ball and return its name |
821 |
+ |
""" |
822 |
+ |
import tarfile |
823 |
+ |
tarName= common.work_space.pathForTgz()+'share/'+self.additional_tgz_name |
824 |
+ |
tar = tarfile.open(tarName, "w:gz") |
825 |
+ |
for file in self.additional_inbox_files: |
826 |
+ |
tar.add(file,string.split(file,'/')[-1]) |
827 |
+ |
common.logger.debug(5,"Files added to "+self.additional_tgz_name+" : "+str(tar.getnames())) |
828 |
+ |
tar.close() |
829 |
+ |
return tarName |
830 |
+ |
|
831 |
|
def wsSetupEnvironment(self, nj): |
832 |
|
""" |
833 |
|
Returns part of a job script which prepares |
838 |
|
|
839 |
|
## OLI_Daniele at this level middleware already known |
840 |
|
|
841 |
+ |
txt += 'echo "### Firtst set SCRAM ARCH and BUILD_ARCH ###"\n' |
842 |
+ |
txt += 'echo "Setting SCRAM_ARCH='+self.executable_arch+'"\n' |
843 |
+ |
txt += 'export SCRAM_ARCH='+self.executable_arch+'\n' |
844 |
+ |
txt += 'export BUILD_ARCH='+self.executable_arch+'\n' |
845 |
|
txt += 'if [ $middleware == LCG ]; then \n' |
846 |
|
txt += self.wsSetupCMSLCGEnvironment_() |
847 |
|
txt += 'elif [ $middleware == OSG ]; then\n' |
849 |
|
txt += ' echo "Created working directory: $WORKING_DIR"\n' |
850 |
|
txt += ' if [ ! -d $WORKING_DIR ] ;then\n' |
851 |
|
txt += ' echo "SET_CMS_ENV 10016 ==> OSG $WORKING_DIR could not be created on WN `hostname`"\n' |
852 |
< |
txt += ' echo "JOB_EXIT_STATUS = 10016"\n' |
853 |
< |
txt += ' echo "JobExitCode=10016" | tee -a $RUNTIME_AREA/$repo\n' |
854 |
< |
txt += ' dumpStatus $RUNTIME_AREA/$repo\n' |
852 |
> |
txt += ' echo "JOB_EXIT_STATUS = 10016"\n' |
853 |
> |
txt += ' echo "JobExitCode=10016" | tee -a $RUNTIME_AREA/$repo\n' |
854 |
> |
txt += ' dumpStatus $RUNTIME_AREA/$repo\n' |
855 |
|
txt += ' rm -f $RUNTIME_AREA/$repo \n' |
856 |
|
txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n' |
857 |
|
txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n' |
883 |
|
txt += ' cd $RUNTIME_AREA\n' |
884 |
|
txt += ' /bin/rm -rf $WORKING_DIR\n' |
885 |
|
txt += ' if [ -d $WORKING_DIR ] ;then\n' |
886 |
< |
txt += ' echo "SET_CMS_ENV 10018 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after CMSSW CMSSW_0_6_1 not found on `hostname`"\n' |
887 |
< |
txt += ' echo "JOB_EXIT_STATUS = 10018"\n' |
888 |
< |
txt += ' echo "JobExitCode=10018" | tee -a $RUNTIME_AREA/$repo\n' |
889 |
< |
txt += ' dumpStatus $RUNTIME_AREA/$repo\n' |
886 |
> |
txt += ' echo "SET_CMS_ENV 10018 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after CMSSW CMSSW_0_6_1 not found on `hostname`"\n' |
887 |
> |
txt += ' echo "JOB_EXIT_STATUS = 10018"\n' |
888 |
> |
txt += ' echo "JobExitCode=10018" | tee -a $RUNTIME_AREA/$repo\n' |
889 |
> |
txt += ' dumpStatus $RUNTIME_AREA/$repo\n' |
890 |
|
txt += ' rm -f $RUNTIME_AREA/$repo \n' |
891 |
|
txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n' |
892 |
|
txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n' |
896 |
|
txt += 'fi \n' |
897 |
|
txt += 'echo "CMSSW_VERSION = '+self.version+'"\n' |
898 |
|
txt += 'cd '+self.version+'\n' |
899 |
+ |
########## FEDE FOR DBS2 ###################### |
900 |
+ |
txt += 'SOFTWARE_DIR=`pwd`\n' |
901 |
+ |
txt += 'echo SOFTWARE_DIR=$SOFTWARE_DIR \n' |
902 |
+ |
############################################### |
903 |
|
### needed grep for bug in scramv1 ### |
904 |
+ |
txt += scram+' runtime -sh\n' |
905 |
|
txt += 'eval `'+scram+' runtime -sh | grep -v SCRAMRT_LSB_JOBNAME`\n' |
906 |
+ |
txt += 'echo $PATH\n' |
907 |
|
|
908 |
|
# Handle the arguments: |
909 |
|
txt += "\n" |
925 |
|
txt += ' cd $RUNTIME_AREA\n' |
926 |
|
txt += ' /bin/rm -rf $WORKING_DIR\n' |
927 |
|
txt += ' if [ -d $WORKING_DIR ] ;then\n' |
928 |
< |
txt += ' echo "SET_EXE_ENV 50114 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after Too few arguments for CRAB job wrapper"\n' |
929 |
< |
txt += ' echo "JOB_EXIT_STATUS = 50114"\n' |
930 |
< |
txt += ' echo "JobExitCode=50114" | tee -a $RUNTIME_AREA/$repo\n' |
931 |
< |
txt += ' dumpStatus $RUNTIME_AREA/$repo\n' |
928 |
> |
txt += ' echo "SET_EXE_ENV 50114 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after Too few arguments for CRAB job wrapper"\n' |
929 |
> |
txt += ' echo "JOB_EXIT_STATUS = 50114"\n' |
930 |
> |
txt += ' echo "JobExitCode=50114" | tee -a $RUNTIME_AREA/$repo\n' |
931 |
> |
txt += ' dumpStatus $RUNTIME_AREA/$repo\n' |
932 |
|
txt += ' rm -f $RUNTIME_AREA/$repo \n' |
933 |
|
txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n' |
934 |
|
txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n' |
940 |
|
|
941 |
|
# Prepare job-specific part |
942 |
|
job = common.job_list[nj] |
943 |
+ |
### FEDE FOR DBS OUTPUT PUBLICATION |
944 |
+ |
if (self.datasetPath): |
945 |
+ |
txt += '\n' |
946 |
+ |
txt += 'DatasetPath='+self.datasetPath+'\n' |
947 |
+ |
|
948 |
+ |
datasetpath_split = self.datasetPath.split("/") |
949 |
+ |
|
950 |
+ |
txt += 'PrimaryDataset='+datasetpath_split[1]+'\n' |
951 |
+ |
txt += 'DataTier='+datasetpath_split[2]+'\n' |
952 |
+ |
#txt += 'ProcessedDataset='+datasetpath_split[3]+'\n' |
953 |
+ |
txt += 'ApplicationFamily=cmsRun\n' |
954 |
+ |
|
955 |
+ |
else: |
956 |
+ |
txt += 'DatasetPath=MCDataTier\n' |
957 |
+ |
txt += 'PrimaryDataset=null\n' |
958 |
+ |
txt += 'DataTier=null\n' |
959 |
+ |
#txt += 'ProcessedDataset=null\n' |
960 |
+ |
txt += 'ApplicationFamily=MCDataTier\n' |
961 |
|
if self.pset != None: #CarlosDaniele |
962 |
|
pset = os.path.basename(job.configFilename()) |
963 |
|
txt += '\n' |
964 |
+ |
txt += 'cp $RUNTIME_AREA/'+pset+' .\n' |
965 |
|
if (self.datasetPath): # standard job |
966 |
|
#txt += 'InputFiles=$2\n' |
967 |
|
txt += 'InputFiles=${args[1]}\n' |
968 |
|
txt += 'MaxEvents=${args[2]}\n' |
969 |
|
txt += 'SkipEvents=${args[3]}\n' |
970 |
|
txt += 'echo "Inputfiles:<$InputFiles>"\n' |
971 |
< |
txt += 'sed "s#{\'INPUT\'}#$InputFiles#" $RUNTIME_AREA/'+pset+' > pset_tmp_1.cfg\n' |
971 |
> |
txt += 'sed "s#{\'INPUT\'}#$InputFiles#" '+pset+' > tmp && mv -f tmp '+pset+'\n' |
972 |
|
txt += 'echo "MaxEvents:<$MaxEvents>"\n' |
973 |
< |
txt += 'sed "s#INPUTMAXEVENTS#$MaxEvents#" pset_tmp_1.cfg > pset_tmp_2.cfg\n' |
973 |
> |
txt += 'sed "s#INPUTMAXEVENTS#$MaxEvents#" '+pset+' > tmp && mv -f tmp '+pset+'\n' |
974 |
|
txt += 'echo "SkipEvents:<$SkipEvents>"\n' |
975 |
< |
txt += 'sed "s#INPUTSKIPEVENTS#$SkipEvents#" pset_tmp_2.cfg > pset.cfg\n' |
975 |
> |
txt += 'sed "s#INPUTSKIPEVENTS#$SkipEvents#" '+pset+' > tmp && mv -f tmp '+pset+'\n' |
976 |
|
else: # pythia like job |
977 |
+ |
seedIndex=1 |
978 |
+ |
if (self.firstRun): |
979 |
+ |
txt += 'FirstRun=${args['+str(seedIndex)+']}\n' |
980 |
+ |
txt += 'echo "FirstRun: <$FirstRun>"\n' |
981 |
+ |
txt += 'sed "s#\<INPUTFIRSTRUN\>#$FirstRun#" '+pset+' > tmp && mv -f tmp '+pset+'\n' |
982 |
+ |
seedIndex=seedIndex+1 |
983 |
+ |
|
984 |
|
if (self.sourceSeed): |
985 |
< |
# txt += 'Seed=$2\n' |
986 |
< |
txt += 'Seed=${args[1]}\n' |
987 |
< |
txt += 'echo "Seed: <$Seed>"\n' |
988 |
< |
txt += 'sed "s#\<INPUT\>#$Seed#" $RUNTIME_AREA/'+pset+' > tmp.cfg\n' |
985 |
> |
txt += 'Seed=${args['+str(seedIndex)+']}\n' |
986 |
> |
txt += 'sed "s#\<INPUT\>#$Seed#" '+pset+' > tmp && mv -f tmp '+pset+'\n' |
987 |
> |
seedIndex=seedIndex+1 |
988 |
> |
## the following seeds are not always present |
989 |
|
if (self.sourceSeedVtx): |
990 |
< |
# txt += 'VtxSeed=$3\n' |
827 |
< |
txt += 'VtxSeed=${args[2]}\n' |
990 |
> |
txt += 'VtxSeed=${args['+str(seedIndex)+']}\n' |
991 |
|
txt += 'echo "VtxSeed: <$VtxSeed>"\n' |
992 |
< |
txt += 'sed "s#INPUTVTX#$VtxSeed#" tmp.cfg > pset.cfg\n' |
993 |
< |
else: |
994 |
< |
txt += 'mv tmp.cfg pset.cfg\n' |
995 |
< |
else: |
996 |
< |
txt += '# Copy untouched pset\n' |
997 |
< |
txt += 'cp $RUNTIME_AREA/'+pset+' pset.cfg\n' |
998 |
< |
|
992 |
> |
txt += 'sed "s#\<INPUTVTX\>#$VtxSeed#" '+pset+' > tmp && mv -f tmp '+pset+'\n' |
993 |
> |
seedIndex += 1 |
994 |
> |
if (self.sourceSeedG4): |
995 |
> |
txt += 'G4Seed=${args['+str(seedIndex)+']}\n' |
996 |
> |
txt += 'echo "G4Seed: <$G4Seed>"\n' |
997 |
> |
txt += 'sed "s#\<INPUTG4\>#$G4Seed#" '+pset+' > tmp && mv -f tmp '+pset+'\n' |
998 |
> |
seedIndex += 1 |
999 |
> |
if (self.sourceSeedMix): |
1000 |
> |
txt += 'mixSeed=${args['+str(seedIndex)+']}\n' |
1001 |
> |
txt += 'echo "MixSeed: <$mixSeed>"\n' |
1002 |
> |
txt += 'sed "s#\<INPUTMIX\>#$mixSeed#" '+pset+' > tmp && mv -f tmp '+pset+'\n' |
1003 |
> |
seedIndex += 1 |
1004 |
> |
pass |
1005 |
> |
pass |
1006 |
> |
txt += 'mv -f '+pset+' pset.cfg\n' |
1007 |
|
|
1008 |
|
if len(self.additional_inbox_files) > 0: |
1009 |
< |
for file in self.additional_inbox_files: |
1010 |
< |
relFile = file.split("/")[-1] |
1011 |
< |
txt += 'if [ -e $RUNTIME_AREA/'+relFile+' ] ; then\n' |
841 |
< |
txt += ' cp $RUNTIME_AREA/'+relFile+' .\n' |
842 |
< |
txt += ' chmod +x '+relFile+'\n' |
843 |
< |
txt += 'fi\n' |
1009 |
> |
txt += 'if [ -e $RUNTIME_AREA/'+self.additional_tgz_name+' ] ; then\n' |
1010 |
> |
txt += ' tar xzvf $RUNTIME_AREA/'+self.additional_tgz_name+'\n' |
1011 |
> |
txt += 'fi\n' |
1012 |
|
pass |
1013 |
|
|
1014 |
|
if self.pset != None: #CarlosDaniele |
1019 |
|
txt += 'cat pset.cfg\n' |
1020 |
|
txt += 'echo "****** end pset.cfg ********"\n' |
1021 |
|
txt += '\n' |
1022 |
+ |
### FEDE FOR DBS OUTPUT PUBLICATION |
1023 |
+ |
txt += 'PSETHASH=`EdmConfigHash < pset.cfg` \n' |
1024 |
+ |
txt += 'echo "PSETHASH = $PSETHASH" \n' |
1025 |
+ |
############## |
1026 |
+ |
txt += '\n' |
1027 |
|
# txt += 'echo "***** cat pset1.cfg *********"\n' |
1028 |
|
# txt += 'cat pset1.cfg\n' |
1029 |
|
# txt += 'echo "****** end pset1.cfg ********"\n' |
1030 |
|
return txt |
1031 |
|
|
1032 |
< |
def wsBuildExe(self, nj): |
1032 |
> |
def wsBuildExe(self, nj=0): |
1033 |
|
""" |
1034 |
|
Put in the script the commands to build an executable |
1035 |
|
or a library. |
1065 |
|
txt += ' echo "Successful untar" \n' |
1066 |
|
txt += 'fi \n' |
1067 |
|
txt += '\n' |
1068 |
< |
txt += 'echo "Include ProdAgentApi in PYTHONPATH"\n' |
1068 |
> |
txt += 'echo "Include ProdAgentApi and PRODCOMMON in PYTHONPATH"\n' |
1069 |
|
txt += 'if [ -z "$PYTHONPATH" ]; then\n' |
1070 |
< |
txt += ' export PYTHONPATH=ProdAgentApi\n' |
1070 |
> |
#### FEDE FOR DBS OUTPUT PUBLICATION |
1071 |
> |
txt += ' export PYTHONPATH=$SOFTWARE_DIR/ProdAgentApi:$SOFTWARE_DIR/ProdCommon\n' |
1072 |
> |
#txt += ' export PYTHONPATH=`pwd`/ProdAgentApi:`pwd`/ProdCommon\n' |
1073 |
> |
#txt += ' export PYTHONPATH=ProdAgentApi\n' |
1074 |
|
txt += 'else\n' |
1075 |
< |
txt += ' export PYTHONPATH=ProdAgentApi:${PYTHONPATH}\n' |
1075 |
> |
txt += ' export PYTHONPATH=$SOFTWARE_DIR/ProdAgentApi:$SOFTWARE_DIR/ProdCommon:${PYTHONPATH}\n' |
1076 |
> |
#txt += ' export PYTHONPATH=`pwd`/ProdAgentApi:`pwd`/ProdCommon:${PYTHONPATH}\n' |
1077 |
> |
#txt += ' export PYTHONPATH=ProdAgentApi:${PYTHONPATH}\n' |
1078 |
> |
txt += 'echo "PYTHONPATH=$PYTHONPATH"\n' |
1079 |
> |
################### |
1080 |
|
txt += 'fi\n' |
1081 |
|
txt += '\n' |
1082 |
|
|
1091 |
|
""" |
1092 |
|
|
1093 |
|
def executableName(self): |
1094 |
< |
if self.pset == None: #CarlosDaniele |
1094 |
> |
if self.scriptExe: #CarlosDaniele |
1095 |
|
return "sh " |
1096 |
|
else: |
1097 |
|
return self.executable |
1098 |
|
|
1099 |
|
def executableArgs(self): |
1100 |
< |
if self.pset == None:#CarlosDaniele |
1100 |
> |
if self.scriptExe:#CarlosDaniele |
1101 |
|
return self.scriptExe + " $NJob" |
1102 |
< |
else: |
1103 |
< |
return " -p pset.cfg" |
1102 |
> |
else: |
1103 |
> |
# if >= CMSSW_1_5_X, add -e |
1104 |
> |
version_array = self.scram.getSWVersion().split('_') |
1105 |
> |
major = 0 |
1106 |
> |
minor = 0 |
1107 |
> |
try: |
1108 |
> |
major = int(version_array[1]) |
1109 |
> |
minor = int(version_array[2]) |
1110 |
> |
except: |
1111 |
> |
msg = "Cannot parse CMSSW version string: " + "_".join(version_array) + " for major and minor release number!" |
1112 |
> |
raise CrabException(msg) |
1113 |
> |
if major >= 1 and minor >= 5 : |
1114 |
> |
return " -e -p pset.cfg" |
1115 |
> |
else: |
1116 |
> |
return " -p pset.cfg" |
1117 |
|
|
1118 |
|
def inputSandbox(self, nj): |
1119 |
|
""" |
1125 |
|
## code |
1126 |
|
if os.path.isfile(self.tgzNameWithPath): |
1127 |
|
inp_box.append(self.tgzNameWithPath) |
1128 |
+ |
if os.path.isfile(self.MLtgzfile): |
1129 |
+ |
inp_box.append(self.MLtgzfile) |
1130 |
|
## config |
1131 |
< |
if not self.pset is None: #CarlosDaniele |
1132 |
< |
inp_box.append(common.job_list[nj].configFilename()) |
1131 |
> |
if not self.pset is None: |
1132 |
> |
inp_box.append(common.work_space.pathForTgz() + 'job/' + self.configFilename()) |
1133 |
|
## additional input files |
1134 |
< |
#for file in self.additional_inbox_files: |
1135 |
< |
# inp_box.append(common.work_space.cwdDir()+file) |
1134 |
> |
tgz = self.additionalInputFileTgz() |
1135 |
> |
inp_box.append(tgz) |
1136 |
|
return inp_box |
1137 |
|
|
1138 |
|
def outputSandbox(self, nj): |
1166 |
|
output_file_num = self.numberFile_(fileWithSuffix, '$NJob') |
1167 |
|
txt += '\n' |
1168 |
|
txt += '# check output file\n' |
1169 |
< |
txt += 'ls '+fileWithSuffix+'\n' |
1170 |
< |
txt += 'ls_result=$?\n' |
1171 |
< |
txt += 'if [ $ls_result -ne 0 ] ; then\n' |
1172 |
< |
txt += ' echo "ERROR: Problem with output file"\n' |
1169 |
> |
# txt += 'ls '+fileWithSuffix+'\n' |
1170 |
> |
# txt += 'ls_result=$?\n' |
1171 |
> |
txt += 'if [ -e ./'+fileWithSuffix+' ] ; then\n' |
1172 |
> |
txt += ' mv '+fileWithSuffix+' $RUNTIME_AREA/'+output_file_num+'\n' |
1173 |
> |
txt += 'else\n' |
1174 |
> |
txt += ' exit_status=60302\n' |
1175 |
> |
txt += ' echo "ERROR: Problem with output file '+fileWithSuffix+'"\n' |
1176 |
|
if common.scheduler.boss_scheduler_name == 'condor_g': |
1177 |
|
txt += ' if [ $middleware == OSG ]; then \n' |
1178 |
|
txt += ' echo "prepare dummy output file"\n' |
1179 |
|
txt += ' echo "Processing of job output failed" > $RUNTIME_AREA/'+output_file_num+'\n' |
1180 |
|
txt += ' fi \n' |
983 |
– |
txt += 'else\n' |
984 |
– |
txt += ' cp '+fileWithSuffix+' $RUNTIME_AREA/'+output_file_num+'\n' |
1181 |
|
txt += 'fi\n' |
1182 |
+ |
file_list = [] |
1183 |
+ |
for fileWithSuffix in (self.output_file): |
1184 |
+ |
file_list.append(self.numberFile_(fileWithSuffix, '$NJob')) |
1185 |
+ |
txt += 'file_list="'+string.join(file_list,' ')+'"\n' |
1186 |
|
|
1187 |
|
txt += 'cd $RUNTIME_AREA\n' |
1188 |
< |
txt += 'cd $RUNTIME_AREA\n' |
1188 |
> |
#### FEDE this is the cleanEnv function |
1189 |
|
### OLI_DANIELE |
1190 |
< |
txt += 'if [ $middleware == OSG ]; then\n' |
1191 |
< |
txt += ' cd $RUNTIME_AREA\n' |
1192 |
< |
txt += ' echo "Remove working directory: $WORKING_DIR"\n' |
1193 |
< |
txt += ' /bin/rm -rf $WORKING_DIR\n' |
1194 |
< |
txt += ' if [ -d $WORKING_DIR ] ;then\n' |
1195 |
< |
txt += ' echo "SET_EXE 60999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after cleanup of WN"\n' |
1196 |
< |
txt += ' echo "JOB_EXIT_STATUS = 60999"\n' |
1197 |
< |
txt += ' echo "JobExitCode=60999" | tee -a $RUNTIME_AREA/$repo\n' |
1198 |
< |
txt += ' dumpStatus $RUNTIME_AREA/$repo\n' |
1199 |
< |
txt += ' rm -f $RUNTIME_AREA/$repo \n' |
1200 |
< |
txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n' |
1201 |
< |
txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n' |
1202 |
< |
txt += ' fi\n' |
1203 |
< |
txt += 'fi\n' |
1204 |
< |
txt += '\n' |
1190 |
> |
#txt += 'if [ $middleware == OSG ]; then\n' |
1191 |
> |
#txt += ' cd $RUNTIME_AREA\n' |
1192 |
> |
#txt += ' echo "Remove working directory: $WORKING_DIR"\n' |
1193 |
> |
#txt += ' /bin/rm -rf $WORKING_DIR\n' |
1194 |
> |
#txt += ' if [ -d $WORKING_DIR ] ;then\n' |
1195 |
> |
#txt += ' echo "SET_EXE 60999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after cleanup of WN"\n' |
1196 |
> |
#txt += ' echo "JOB_EXIT_STATUS = 60999"\n' |
1197 |
> |
#txt += ' echo "JobExitCode=60999" | tee -a $RUNTIME_AREA/$repo\n' |
1198 |
> |
#txt += ' dumpStatus $RUNTIME_AREA/$repo\n' |
1199 |
> |
#txt += ' rm -f $RUNTIME_AREA/$repo \n' |
1200 |
> |
#txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n' |
1201 |
> |
#txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n' |
1202 |
> |
#txt += ' fi\n' |
1203 |
> |
#txt += 'fi\n' |
1204 |
> |
#txt += '\n' |
1205 |
|
|
1006 |
– |
file_list = '' |
1007 |
– |
## Add to filelist only files to be possibly copied to SE |
1008 |
– |
for fileWithSuffix in self.output_file: |
1009 |
– |
output_file_num = self.numberFile_(fileWithSuffix, '$NJob') |
1010 |
– |
file_list=file_list+output_file_num+' ' |
1011 |
– |
file_list=file_list[:-1] |
1012 |
– |
txt += 'file_list="'+file_list+'"\n' |
1206 |
|
|
1207 |
|
return txt |
1208 |
|
|
1214 |
|
# take away last extension |
1215 |
|
name = p[0] |
1216 |
|
for x in p[1:-1]: |
1217 |
< |
name=name+"."+x |
1217 |
> |
name=name+"."+x |
1218 |
|
# add "_txt" |
1219 |
|
if len(p)>1: |
1220 |
< |
ext = p[len(p)-1] |
1221 |
< |
result = name + '_' + txt + "." + ext |
1220 |
> |
ext = p[len(p)-1] |
1221 |
> |
result = name + '_' + txt + "." + ext |
1222 |
|
else: |
1223 |
< |
result = name + '_' + txt |
1223 |
> |
result = name + '_' + txt |
1224 |
|
|
1225 |
|
return result |
1226 |
|
|
1227 |
< |
def getRequirements(self): |
1227 |
> |
def getRequirements(self, nj=[]): |
1228 |
|
""" |
1229 |
|
return job requirements to add to jdl files |
1230 |
|
""" |
1233 |
|
req='Member("VO-cms-' + \ |
1234 |
|
self.version + \ |
1235 |
|
'", other.GlueHostApplicationSoftwareRunTimeEnvironment)' |
1236 |
+ |
## SL add requirement for OS version only if SL4 |
1237 |
+ |
reSL4 = re.compile( r'slc4' ) |
1238 |
+ |
if self.executable_arch and reSL4.search(self.executable_arch): |
1239 |
+ |
req+=' && Member("VO-cms-' + \ |
1240 |
+ |
self.executable_arch + \ |
1241 |
+ |
'", other.GlueHostApplicationSoftwareRunTimeEnvironment)' |
1242 |
|
|
1243 |
|
req = req + ' && (other.GlueHostNetworkAdapterOutboundIP)' |
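
Two observations on the SL4 requirement. First, the import hunk at the top of this diff replaced import glob, os, string, re with import os, string, glob, so re.compile here relies on re reaching the module some other way (e.g. via one of the wildcard imports); if it does not, this line raises a NameError at submission time. Second, a sketch of the requirement string the method now assembles, with illustrative version and arch values:

    import re

    version = 'CMSSW_1_5_2'                 # illustrative values
    executable_arch = 'slc4_ia32_gcc345'

    req = ('Member("VO-cms-' + version +
           '", other.GlueHostApplicationSoftwareRunTimeEnvironment)')
    if executable_arch and re.compile(r'slc4').search(executable_arch):
        req += (' && Member("VO-cms-' + executable_arch +
                '", other.GlueHostApplicationSoftwareRunTimeEnvironment)')
    req += ' && (other.GlueHostNetworkAdapterOutboundIP)'
    print req   # Python 2 print, matching the module
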

@@ ... @@
          txt += '   echo "### SETUP CMS OSG  ENVIRONMENT ###"\n'
          txt += '   if [ -f $GRID3_APP_DIR/cmssoft/cmsset_default.sh ] ;then\n'
          txt += '      # Use $GRID3_APP_DIR/cmssoft/cmsset_default.sh to setup cms software\n'
+         txt += '       export SCRAM_ARCH='+self.executable_arch+'\n'
          txt += '       source $GRID3_APP_DIR/cmssoft/cmsset_default.sh '+self.version+'\n'
          txt += '   elif [ -f $OSG_APP/cmssoft/cms/cmsset_default.sh ] ;then\n'
          txt += '      # Use $OSG_APP/cmssoft/cms/cmsset_default.sh to setup cms software\n'
+         txt += '       export SCRAM_ARCH='+self.executable_arch+'\n'
          txt += '       source $OSG_APP/cmssoft/cms/cmsset_default.sh '+self.version+'\n'
          txt += '   else\n'
          txt += '       echo "SET_CMS_ENV 10020 ==> ERROR $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cms/cmsset_default.sh file not found"\n'
@@ ... @@
          txt += '       cd $RUNTIME_AREA\n'
          txt += '       /bin/rm -rf $WORKING_DIR\n'
          txt += '       if [ -d $WORKING_DIR ] ;then\n'
-         txt += '       echo "SET_CMS_ENV 10017 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cms/cmsset_default.sh file not found"\n'
-         txt += '       echo "JOB_EXIT_STATUS = 10017"\n'
-         txt += '       echo "JobExitCode=10017" | tee -a $RUNTIME_AREA/$repo\n'
-         txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
-         txt += '       rm -f $RUNTIME_AREA/$repo \n'
-         txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
-         txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
+         txt += '           echo "SET_CMS_ENV 10017 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cms/cmsset_default.sh file not found"\n'
+         txt += '           echo "JOB_EXIT_STATUS = 10017"\n'
+         txt += '           echo "JobExitCode=10017" | tee -a $RUNTIME_AREA/$repo\n'
+         txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
+         txt += '           rm -f $RUNTIME_AREA/$repo \n'
+         txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
+         txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
          txt += '       fi\n'
          txt += '\n'
          txt += '       exit 1\n'
@@ ... @@
          txt += '   fi\n'
          txt += '   fi\n'
          txt += '   \n'
-         txt += '   string=`cat /etc/redhat-release`\n'
-         txt += '   echo $string\n'
-         txt += '   if [[ $string = *alhalla* ]]; then\n'
-         txt += '       echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
-         txt += '   elif [[ $string = *Enterprise* ]] || [[ $string = *cientific* ]]; then\n'
-         txt += '       export SCRAM_ARCH=slc3_ia32_gcc323\n'
-         txt += '       echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
-         txt += '   else\n'
-         txt += '       echo "SET_CMS_ENV 10033 ==> ERROR OS unknown, LCG environment not initialized"\n'
-         txt += '       echo "JOB_EXIT_STATUS = 10033"\n'
-         txt += '       echo "JobExitCode=10033" | tee -a $RUNTIME_AREA/$repo\n'
-         txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
-         txt += '       rm -f $RUNTIME_AREA/$repo \n'
-         txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
-         txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
-         txt += '       exit 1\n'
-         txt += '   fi\n'
|
txt += ' echo "SET_CMS_ENV 0 ==> setup cms environment ok"\n' |
1343 |
|
txt += ' echo "### END SETUP CMS LCG ENVIRONMENT ###"\n' |
1344 |
|
return txt |
1345 |
|
|
1346 |
+ |
### FEDE FOR DBS OUTPUT PUBLICATION |
1347 |
+ |
def modifyReport(self, nj): |
1348 |
+ |
""" |
1349 |
+ |
insert the part of the script that modifies the FrameworkJob Report |
1350 |
+ |
""" |
1351 |
+ |
|
1352 |
+ |
txt = '' |
1353 |
+ |
txt += 'echo "Modify Job Report" \n' |
1354 |
+ |
#txt += 'chmod a+x $RUNTIME_AREA/'+self.version+'/ProdAgentApi/FwkJobRep/ModifyJobReport.py\n' |
1355 |
+ |
################ FEDE FOR DBS2 ############################################# |
1356 |
+ |
txt += 'chmod a+x $SOFTWARE_DIR/ProdAgentApi/FwkJobRep/ModifyJobReport.py\n' |
1357 |
+ |
############################################################################# |
1358 |
+ |
try: |
1359 |
+ |
publish_data = int(self.cfg_params['USER.publish_data']) |
1360 |
+ |
except KeyError: |
1361 |
+ |
publish_data = 0 |
1362 |
+ |
|
1363 |
+ |
txt += 'if [ -z "$SE" ]; then\n' |
1364 |
+ |
txt += ' SE="" \n' |
1365 |
+ |
txt += 'fi \n' |
1366 |
+ |
txt += 'if [ -z "$SE_PATH" ]; then\n' |
1367 |
+ |
txt += ' SE_PATH="" \n' |
1368 |
+ |
txt += 'fi \n' |
1369 |
+ |
txt += 'echo "SE = $SE"\n' |
1370 |
+ |
txt += 'echo "SE_PATH = $SE_PATH"\n' |
1371 |
+ |
|
1372 |
+ |
if (publish_data == 1): |
1373 |
+ |
#processedDataset = self.cfg_params['USER.processed_datasetname'] |
1374 |
+ |
processedDataset = self.cfg_params['USER.publish_data_name'] |
1375 |
+ |
txt += 'ProcessedDataset='+processedDataset+'\n' |
1376 |
+ |
#### LFN=/store/user/<user>/processedDataset_PSETHASH |
1377 |
+ |
txt += 'if [ "$SE_PATH" == "" ]; then\n' |
1378 |
+ |
#### FEDE: added slash in LFN ############## |
1379 |
+ |
txt += ' FOR_LFN=/copy_problems/ \n' |
1380 |
+ |
txt += 'else \n' |
1381 |
+ |
txt += ' tmp=`echo $SE_PATH | awk -F \'store\' \'{print$2}\'` \n' |
1382 |
+ |
##### FEDE TO BE CHANGED, BECAUSE STORE IS HARDCODED!!!! ######## |
1383 |
+ |
txt += ' FOR_LFN=/store$tmp \n' |
1384 |
+ |
txt += 'fi \n' |
1385 |
+ |
txt += 'echo "ProcessedDataset = $ProcessedDataset"\n' |
1386 |
+ |
txt += 'echo "FOR_LFN = $FOR_LFN" \n' |
1387 |
+ |
txt += 'echo "CMSSW_VERSION = $CMSSW_VERSION"\n\n' |
1388 |
+ |
#txt += 'echo "$RUNTIME_AREA/'+self.version+'/ProdAgentApi/FwkJobRep/ModifyJobReport.py crab_fjr_$NJob.xml $NJob $FOR_LFN $PrimaryDataset $DataTier $ProcessedDataset $ApplicationFamily $executable $CMSSW_VERSION $PSETHASH $SE $SE_PATH"\n' |
1389 |
+ |
txt += 'echo "$SOFTWARE_DIR/ProdAgentApi/FwkJobRep/ModifyJobReport.py crab_fjr_$NJob.xml $NJob $FOR_LFN $PrimaryDataset $DataTier $ProcessedDataset $ApplicationFamily $executable $CMSSW_VERSION $PSETHASH $SE $SE_PATH"\n' |
1390 |
+ |
txt += '$SOFTWARE_DIR/ProdAgentApi/FwkJobRep/ModifyJobReport.py crab_fjr_$NJob.xml $NJob $FOR_LFN $PrimaryDataset $DataTier $ProcessedDataset $ApplicationFamily $executable $CMSSW_VERSION $PSETHASH $SE $SE_PATH\n' |
1391 |
+ |
#txt += '$RUNTIME_AREA/'+self.version+'/ProdAgentApi/FwkJobRep/ModifyJobReport.py crab_fjr_$NJob.xml $NJob $FOR_LFN $PrimaryDataset $DataTier $ProcessedDataset $ApplicationFamily $executable $CMSSW_VERSION $PSETHASH $SE $SE_PATH\n' |
1392 |
+ |
|
1393 |
+ |
txt += 'modifyReport_result=$?\n' |
1394 |
+ |
txt += 'echo modifyReport_result = $modifyReport_result\n' |
1395 |
+ |
txt += 'if [ $modifyReport_result -ne 0 ]; then\n' |
1396 |
+ |
txt += ' exit_status=1\n' |
1397 |
+ |
txt += ' echo "ERROR: Problem with ModifyJobReport"\n' |
1398 |
+ |
txt += 'else\n' |
1399 |
+ |
txt += ' mv NewFrameworkJobReport.xml crab_fjr_$NJob.xml\n' |
1400 |
+ |
txt += 'fi\n' |
1401 |
+ |
else: |
1402 |
+ |
txt += 'ProcessedDataset=no_data_to_publish \n' |
1403 |
+ |
#### FEDE: added slash in LFN ############## |
1404 |
+ |
txt += 'FOR_LFN=/local/ \n' |
1405 |
+ |
txt += 'echo "ProcessedDataset = $ProcessedDataset"\n' |
1406 |
+ |
txt += 'echo "FOR_LFN = $FOR_LFN" \n' |
1407 |
+ |
return txt |
1408 |
+ |
|
1409 |
+ |
def cleanEnv(self): |
1410 |
+ |
### OLI_DANIELE |
1411 |
+ |
txt = '' |
1412 |
+ |
txt += 'if [ $middleware == OSG ]; then\n' |
1413 |
+ |
txt += ' cd $RUNTIME_AREA\n' |
1414 |
+ |
txt += ' echo "Remove working directory: $WORKING_DIR"\n' |
1415 |
+ |
txt += ' /bin/rm -rf $WORKING_DIR\n' |
1416 |
+ |
txt += ' if [ -d $WORKING_DIR ] ;then\n' |
1417 |
+ |
txt += ' echo "SET_EXE 60999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after cleanup of WN"\n' |
1418 |
+ |
txt += ' echo "JOB_EXIT_STATUS = 60999"\n' |
1419 |
+ |
txt += ' echo "JobExitCode=60999" | tee -a $RUNTIME_AREA/$repo\n' |
1420 |
+ |
txt += ' dumpStatus $RUNTIME_AREA/$repo\n' |
1421 |
+ |
txt += ' rm -f $RUNTIME_AREA/$repo \n' |
1422 |
+ |
txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n' |
1423 |
+ |
txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n' |
1424 |
+ |
txt += ' fi\n' |
1425 |
+ |
txt += 'fi\n' |
1426 |
+ |
txt += '\n' |
1427 |
+ |
return txt |
1428 |
+ |
|
1429 |
|
def setParam_(self, param, value): |
1430 |
|
self._params[param] = value |
1431 |
|
|
1438 |
|
def getTaskid(self): |
1439 |
|
return self._taskId |
1440 |
|
|
1174 |
– |
####################################################################### |
1441 |
|
def uniquelist(self, old): |
1442 |
|
""" |
1443 |
|
remove duplicates from a list |