ViewVC Help
View File | Revision Log | Show Annotations | Root Listing
root/cvsroot/COMP/CRAB/python/cms_cmssw.py
(Generate patch)

Comparing COMP/CRAB/python/cms_cmssw.py (file contents):
Revision 1.13 by gutsche, Tue Jun 27 02:31:31 2006 UTC vs.
Revision 1.93 by fanzago, Tue Jun 19 17:22:21 2007 UTC

# Line 3 | Line 3 | from crab_logger import Logger
3   from crab_exceptions import *
4   from crab_util import *
5   import common
6 import PsetManipulator  
7
8 import DBSInfo_EDM
9 import DataDiscovery_EDM
10 import DataLocation_EDM
6   import Scram
7  
8 < import os, string, re
8 > import os, string, re, shutil, glob
9  
10   class Cmssw(JobType):
11 <    def __init__(self, cfg_params):
11 >    def __init__(self, cfg_params, ncjobs):
12          JobType.__init__(self, 'CMSSW')
13          common.logger.debug(3,'CMSSW::__init__')
14  
20        self.analisys_common_info = {}
15          # Marco.
16          self._params = {}
17          self.cfg_params = cfg_params
18 +
19 +        try:
20 +            self.MaxTarBallSize = float(self.cfg_params['EDG.maxtarballsize'])
21 +        except KeyError:
22 +            self.MaxTarBallSize = 9.5
23 +
 24 +        # number of jobs requested to be created; limits job splitting
25 +        self.ncjobs = ncjobs
26 +
27          log = common.logger
28          
29          self.scram = Scram.Scram(cfg_params)
27        scramArea = ''
30          self.additional_inbox_files = []
31          self.scriptExe = ''
32          self.executable = ''
33 +        self.executable_arch = self.scram.getArch()
34          self.tgz_name = 'default.tgz'
35 +        self.scriptName = 'CMSSW.sh'
 36 +        self.pset = ''      #script use case Da  
 37 +        self.datasetPath = '' #script use case Da
38  
39 +        # set FJR file name
40 +        self.fjrFileName = 'crab_fjr.xml'
41  
42          self.version = self.scram.getSWVersion()
43 +        common.taskDB.setDict('codeVersion',self.version)
44          self.setParam_('application', self.version)
36        common.analisys_common_info['sw_version'] = self.version
37        ### FEDE
38        common.analisys_common_info['copy_input_data'] = 0
39        common.analisys_common_info['events_management'] = 1
45  
46          ### collect Data cards
47 +
48 +        ## get DBS mode
49 +        try:
50 +            self.use_dbs_1 = int(self.cfg_params['CMSSW.use_dbs_1'])
51 +        except KeyError:
52 +            self.use_dbs_1 = 0
53 +            
54          try:
55              tmp =  cfg_params['CMSSW.datasetpath']
56              log.debug(6, "CMSSW::CMSSW(): datasetPath = "+tmp)
57              if string.lower(tmp)=='none':
58                  self.datasetPath = None
59 +                self.selectNoInput = 1
60              else:
61                  self.datasetPath = tmp
62 +                self.selectNoInput = 0
63          except KeyError:
64              msg = "Error: datasetpath not defined "  
65              raise CrabException(msg)
# Line 56 | Line 70 | class Cmssw(JobType):
70              self.setParam_('dataset', 'None')
71              self.setParam_('owner', 'None')
72          else:
73 <            datasetpath_split = self.datasetPath.split("/")
74 <            self.setParam_('dataset', datasetpath_split[1])
75 <            self.setParam_('owner', datasetpath_split[-1])
76 <
73 >            try:
74 >                datasetpath_split = self.datasetPath.split("/")
75 >                # standard style
76 >                if self.use_dbs_1 == 1 :
77 >                    self.setParam_('dataset', datasetpath_split[1])
78 >                    self.setParam_('owner', datasetpath_split[-1])
79 >                else:
80 >                    self.setParam_('dataset', datasetpath_split[1])
81 >                    self.setParam_('owner', datasetpath_split[2])
82 >            except:
83 >                self.setParam_('dataset', self.datasetPath)
84 >                self.setParam_('owner', self.datasetPath)
85 >                
86          self.setTaskid_()
87          self.setParam_('taskId', self.cfg_params['taskId'])
88  
# Line 82 | Line 105 | class Cmssw(JobType):
105          try:
106              self.pset = cfg_params['CMSSW.pset']
107              log.debug(6, "Cmssw::Cmssw(): PSet file = "+self.pset)
108 <            if (not os.path.exists(self.pset)):
109 <                raise CrabException("User defined PSet file "+self.pset+" does not exist")
108 >            if self.pset.lower() != 'none' :
109 >                if (not os.path.exists(self.pset)):
110 >                    raise CrabException("User defined PSet file "+self.pset+" does not exist")
111 >            else:
112 >                self.pset = None
113          except KeyError:
114              raise CrabException("PSet file missing. Cannot run cmsRun ")
115  
116          # output files
117 +        ## stuff which must be returned always via sandbox
118 +        self.output_file_sandbox = []
119 +
120 +        # add fjr report by default via sandbox
121 +        self.output_file_sandbox.append(self.fjrFileName)
122 +
123 +        # other output files to be returned via sandbox or copied to SE
124          try:
125              self.output_file = []
93
126              tmp = cfg_params['CMSSW.output_file']
127              if tmp != '':
128                  tmpOutFiles = string.split(cfg_params['CMSSW.output_file'],',')
# Line 100 | Line 132 | class Cmssw(JobType):
132                      self.output_file.append(tmp)
133                      pass
134              else:
135 <                log.message("No output file defined: only stdout/err will be available")
135 >                log.message("No output file defined: only stdout/err and the CRAB Framework Job Report will be available\n")
136                  pass
137              pass
138          except KeyError:
139 <            log.message("No output file defined: only stdout/err will be available")
139 >            log.message("No output file defined: only stdout/err and the CRAB Framework Job Report will be available\n")
140              pass
141  
142          # script_exe file as additional file in inputSandbox
143          try:
144              self.scriptExe = cfg_params['USER.script_exe']
113            self.additional_inbox_files.append(self.scriptExe)
145              if self.scriptExe != '':
146                 if not os.path.isfile(self.scriptExe):
147 <                  msg ="WARNING. file "+self.scriptExe+" not found"
147 >                  msg ="ERROR. file "+self.scriptExe+" not found"
148                    raise CrabException(msg)
149 +               self.additional_inbox_files.append(string.strip(self.scriptExe))
150          except KeyError:
151 <           pass
152 <                  
151 >            self.scriptExe = ''
152 >
153 >        #CarlosDaniele
154 >        if self.datasetPath == None and self.pset == None and self.scriptExe == '' :
155 >           msg ="Error. script_exe  not defined"
156 >           raise CrabException(msg)
157 >
158          ## additional input files
159          try:
160 <            tmpAddFiles = string.split(cfg_params['CMSSW.additional_input_files'],',')
160 >            tmpAddFiles = string.split(cfg_params['USER.additional_input_files'],',')
161              for tmp in tmpAddFiles:
162 <                if not os.path.exists(tmp):
163 <                    raise CrabException("Additional input file not found: "+tmp)
164 <                tmp=string.strip(tmp)
165 <                self.additional_inbox_files.append(tmp)
162 >                tmp = string.strip(tmp)
163 >                dirname = ''
164 >                if not tmp[0]=="/": dirname = "."
165 >                files = []
166 >                if string.find(tmp,"*")>-1:
167 >                    files = glob.glob(os.path.join(dirname, tmp))
168 >                    if len(files)==0:
169 >                        raise CrabException("No additional input file found with this pattern: "+tmp)
170 >                else:
171 >                    files.append(tmp)
172 >                for file in files:
173 >                    if not os.path.exists(file):
174 >                        raise CrabException("Additional input file not found: "+file)
175 >                    pass
176 >                    fname = string.split(file, '/')[-1]
177 >                    storedFile = common.work_space.pathForTgz()+'share/'+fname
178 >                    shutil.copyfile(file, storedFile)
179 >                    self.additional_inbox_files.append(string.strip(storedFile))
180                  pass
181              pass
182 +            common.logger.debug(5,"Additional input files: "+str(self.additional_inbox_files))
183          except KeyError:
184              pass
185  
186          # files per job
187          try:
188 <            self.filesPerJob = int(cfg_params['CMSSW.files_per_jobs']) #Daniele
189 <            self.selectFilesPerJob = 1
188 >            if (cfg_params['CMSSW.files_per_jobs']):
189 >                raise CrabException("files_per_jobs no longer supported.  Quitting.")
190          except KeyError:
191 <            self.filesPerJob = 0
140 <            self.selectFilesPerJob = 0
191 >            pass
192  
193          ## Events per job
194          try:
# Line 147 | Line 198 | class Cmssw(JobType):
198              self.eventsPerJob = -1
199              self.selectEventsPerJob = 0
200      
201 <        # To be implemented
151 <        # ## number of jobs
152 <        # try:
153 <        #     self.numberOfJobs =int( cfg_params['CMSSW.number_of_job'])
154 <        #     self.selectNumberOfJobs = 1
155 <        # except KeyError:
156 <        #     self.selectNumberOfJobs = 0
157 <
158 <        if (self.selectFilesPerJob == self.selectEventsPerJob):
159 <            msg = 'Must define either files_per_jobs or events_per_job'
160 <            raise CrabException(msg)
161 <
162 <        if (self.selectEventsPerJob  and not self.datasetPath == None):
163 <            msg = 'Splitting according to events_per_job available only with None as datasetpath'
164 <            raise CrabException(msg)
165 <    
201 >        ## number of jobs
202          try:
203 <            self.total_number_of_events = int(cfg_params['CMSSW.total_number_of_events'])
203 >            self.theNumberOfJobs =int( cfg_params['CMSSW.number_of_jobs'])
204 >            self.selectNumberOfJobs = 1
205          except KeyError:
206 <            msg = 'Must define total_number_of_events'
207 <            raise CrabException(msg)
208 <        
172 <        CEBlackList = []
206 >            self.theNumberOfJobs = 0
207 >            self.selectNumberOfJobs = 0
208 >
209          try:
210 <            tmpBad = string.split(cfg_params['EDG.ce_black_list'],',')
211 <            for tmp in tmpBad:
176 <                tmp=string.strip(tmp)
177 <                CEBlackList.append(tmp)
210 >            self.total_number_of_events = int(cfg_params['CMSSW.total_number_of_events'])
211 >            self.selectTotalNumberEvents = 1
212          except KeyError:
213 <            pass
213 >            self.total_number_of_events = 0
214 >            self.selectTotalNumberEvents = 0
215  
216 <        self.reCEBlackList=[]
217 <        for bad in CEBlackList:
218 <            self.reCEBlackList.append(re.compile( bad ))
216 >        if self.pset != None: #CarlosDaniele
217 >             if ( (self.selectTotalNumberEvents + self.selectEventsPerJob + self.selectNumberOfJobs) != 2 ):
218 >                 msg = 'Must define exactly two of total_number_of_events, events_per_job, or number_of_jobs.'
219 >                 raise CrabException(msg)
220 >        else:
221 >             if (self.selectNumberOfJobs == 0):
222 >                 msg = 'Must specify  number_of_jobs.'
223 >                 raise CrabException(msg)
224  
225 <        common.logger.debug(5,'CEBlackList: '+str(CEBlackList))
225 >        ## source seed for pythia
226 >        try:
227 >            self.sourceSeed = int(cfg_params['CMSSW.pythia_seed'])
228 >        except KeyError:
229 >            self.sourceSeed = None
230 >            common.logger.debug(5,"No seed given")
231  
187        CEWhiteList = []
232          try:
233 <            tmpGood = string.split(cfg_params['EDG.ce_white_list'],',')
190 <            for tmp in tmpGood:
191 <                tmp=string.strip(tmp)
192 <                CEWhiteList.append(tmp)
233 >            self.sourceSeedVtx = int(cfg_params['CMSSW.vtx_seed'])
234          except KeyError:
235 <            pass
235 >            self.sourceSeedVtx = None
236 >            common.logger.debug(5,"No vertex seed given")
237  
238 <        #print 'CEWhiteList: ',CEWhiteList
239 <        self.reCEWhiteList=[]
240 <        for Good in CEWhiteList:
241 <            self.reCEWhiteList.append(re.compile( Good ))
238 >        try:
239 >            self.sourceSeedG4 = int(cfg_params['CMSSW.g4_seed'])
240 >        except KeyError:
241 >            self.sourceSeedG4 = None
242 >            common.logger.debug(5,"No g4 sim hits seed given")
243  
244 <        common.logger.debug(5,'CEWhiteList: '+str(CEWhiteList))
244 >        try:
245 >            self.sourceSeedMix = int(cfg_params['CMSSW.mix_seed'])
246 >        except KeyError:
247 >            self.sourceSeedMix = None
248 >            common.logger.debug(5,"No mix seed given")
249  
250 <        self.PsetEdit = PsetManipulator.PsetManipulator(self.pset) #Daniele Pset
250 >        try:
251 >            self.firstRun = int(cfg_params['CMSSW.first_run'])
252 >        except KeyError:
253 >            self.firstRun = None
254 >            common.logger.debug(5,"No first run given")
255 >        if self.pset != None: #CarlosDaniele
256 >            import PsetManipulator  
257 >            PsetEdit = PsetManipulator.PsetManipulator(self.pset) #Daniele Pset
258  
259          #DBSDLS-start
260          ## Initialize the variables that are extracted from DBS/DLS and needed in other places of the code
261          self.maxEvents=0  # max events available   ( --> check the requested nb. of evts in Creator.py)
262          self.DBSPaths={}  # all dbs paths requested ( --> input to the site local discovery script)
263 +        self.jobDestination=[]  # Site destination(s) for each job (list of lists)
264          ## Perform the data location and discovery (based on DBS/DLS)
265          ## SL: Don't if NONE is specified as input (pythia use case)
266 <        common.analisys_common_info['sites']=None
266 >        blockSites = {}
267          if self.datasetPath:
268 <            self.DataDiscoveryAndLocation(cfg_params)
268 >            blockSites = self.DataDiscoveryAndLocation(cfg_params)
269          #DBSDLS-end          
270  
271          self.tgzNameWithPath = self.getTarBall(self.executable)
217
218        # modify Pset
219        if (self.datasetPath): # standard job
220            self.PsetEdit.maxEvent(self.eventsPerJob) #Daniele  
221            self.PsetEdit.inputModule("INPUT") #Daniele
222
223        else:  # pythia like job
224            self.PsetEdit.maxEvent(self.eventsPerJob) #Daniele  
225            self.PsetEdit.pythiaSeed("INPUT") #Daniele
226            try:
227                self.sourceSeed = int(cfg_params['CMSSW.pythia_seed'])
228            except KeyError:
229                self.sourceSeed = 123456
230                common.logger.message("No seed given, will use "+str(self.sourceSeed))
231        
232        self.PsetEdit.psetWriter(self.configFilename())
272      
273          ## Select Splitting
274 <        if self.selectFilesPerJob: self.jobSplittingPerFiles()
275 <        elif self.selectEventsPerJob: self.jobSplittingPerEvents()
274 >        if self.selectNoInput:
275 >            if self.pset == None: #CarlosDaniele
276 >                self.jobSplittingForScript()
277 >            else:
278 >                self.jobSplittingNoInput()
279          else:
280 <            msg = 'Don\'t know how to split...'
239 <            raise CrabException(msg)
280 >            self.jobSplittingByBlocks(blockSites)
281  
282 +        # modify Pset
283 +        if self.pset != None: #CarlosDaniele
284 +            try:
285 +                if (self.datasetPath): # standard job
 286 +                    # allow processing a fraction of events in a file
287 +                    PsetEdit.inputModule("INPUT")
288 +                    PsetEdit.maxEvent("INPUTMAXEVENTS")
289 +                    PsetEdit.skipEvent("INPUTSKIPEVENTS")
290 +                else:  # pythia like job
291 +                    PsetEdit.maxEvent(self.eventsPerJob)
292 +                    if (self.firstRun):
293 +                        PsetEdit.pythiaFirstRun("INPUTFIRSTRUN")  #First Run
294 +                    if (self.sourceSeed) :
295 +                        PsetEdit.pythiaSeed("INPUT")
296 +                        if (self.sourceSeedVtx) :
297 +                            PsetEdit.vtxSeed("INPUTVTX")
298 +                        if (self.sourceSeedG4) :
299 +                            self.PsetEdit.g4Seed("INPUTG4")
300 +                        if (self.sourceSeedMix) :
301 +                            self.PsetEdit.mixSeed("INPUTMIX")
302 +                # add FrameworkJobReport to parameter-set
303 +                PsetEdit.addCrabFJR(self.fjrFileName)
304 +                PsetEdit.psetWriter(self.configFilename())
305 +            except:
306 +                msg='Error while manipuliating ParameterSet: exiting...'
307 +                raise CrabException(msg)
308  
309      def DataDiscoveryAndLocation(self, cfg_params):
310  
311 +        import DataDiscovery
312 +        import DataDiscovery_DBS2
313 +        import DataLocation
314          common.logger.debug(10,"CMSSW::DataDiscoveryAndLocation()")
315  
316          datasetPath=self.datasetPath
317  
248        ## TODO
249        dataTiersList = ""
250        dataTiers = dataTiersList.split(',')
251
318          ## Contact the DBS
319 +        common.logger.message("Contacting Data Discovery Services ...")
320          try:
321 <            self.pubdata=DataDiscovery_EDM.DataDiscovery_EDM(datasetPath, dataTiers, cfg_params)
321 >
322 >            if self.use_dbs_1 == 1 :
323 >                self.pubdata=DataDiscovery.DataDiscovery(datasetPath, cfg_params)
324 >            else :
325 >                self.pubdata=DataDiscovery_DBS2.DataDiscovery_DBS2(datasetPath, cfg_params)
326              self.pubdata.fetchDBSInfo()
327  
328 <        except DataDiscovery_EDM.NotExistingDatasetError, ex :
328 >        except DataDiscovery.NotExistingDatasetError, ex :
329              msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
330              raise CrabException(msg)
331 <
332 <        except DataDiscovery_EDM.NoDataTierinProvenanceError, ex :
331 >        except DataDiscovery.NoDataTierinProvenanceError, ex :
332 >            msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
333 >            raise CrabException(msg)
334 >        except DataDiscovery.DataDiscoveryError, ex:
335 >            msg = 'ERROR ***: failed Data Discovery in DBS :  %s'%ex.getErrorMessage()
336 >            raise CrabException(msg)
337 >        except DataDiscovery_DBS2.NotExistingDatasetError_DBS2, ex :
338              msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
339              raise CrabException(msg)
340 <        except DataDiscovery_EDM.DataDiscoveryError, ex:
341 <            msg = 'ERROR ***: failed Data Discovery in DBS  %s'%ex.getErrorMessage()
340 >        except DataDiscovery_DBS2.NoDataTierinProvenanceError_DBS2, ex :
341 >            msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
342 >            raise CrabException(msg)
343 >        except DataDiscovery_DBS2.DataDiscoveryError_DBS2, ex:
344 >            msg = 'ERROR ***: failed Data Discovery in DBS :  %s'%ex.getErrorMessage()
345              raise CrabException(msg)
346  
347 <        ## get list of all required data in the form of dbs paths  (dbs path = /dataset/datatier/owner)
348 <        ## self.DBSPaths=self.pubdata.getDBSPaths()
349 <        common.logger.message("Required data are :"+self.datasetPath)
271 <
272 <        filesbyblock=self.pubdata.getFiles()
273 <        self.AllInputFiles=filesbyblock.values()
274 <        self.files = self.AllInputFiles        
275 <
276 <        ## TEMP
277 <    #    self.filesTmp = filesbyblock.values()
278 <    #    self.files = []
279 <    #    locPath='rfio:cmsbose2.bo.infn.it:/flatfiles/SE00/cms/fanfani/ProdTest/'
280 <    #    locPath=''
281 <    #    tmp = []
282 <    #    for file in self.filesTmp[0]:
283 <    #        tmp.append(locPath+file)
284 <    #    self.files.append(tmp)
285 <        ## END TEMP
347 >        self.filesbyblock=self.pubdata.getFiles()
348 >        self.eventsbyblock=self.pubdata.getEventsPerBlock()
349 >        self.eventsbyfile=self.pubdata.getEventsPerFile()
350  
351          ## get max number of events
288        #common.logger.debug(10,"number of events for primary fileblocks %i"%self.pubdata.getMaxEvents())
352          self.maxEvents=self.pubdata.getMaxEvents() ##  self.maxEvents used in Creator.py
290        common.logger.message("\nThe number of available events is %s"%self.maxEvents)
353  
354          ## Contact the DLS and build a list of sites hosting the fileblocks
355          try:
356 <            dataloc=DataLocation_EDM.DataLocation_EDM(filesbyblock.keys(),cfg_params)
356 >            dataloc=DataLocation.DataLocation(self.filesbyblock.keys(),cfg_params)
357              dataloc.fetchDLSInfo()
358 <        except DataLocation_EDM.DataLocationError , ex:
358 >        except DataLocation.DataLocationError , ex:
359              msg = 'ERROR ***: failed Data Location in DLS \n %s '%ex.getErrorMessage()
360              raise CrabException(msg)
361          
300        allsites=dataloc.getSites()
301        common.logger.debug(5,"sites are %s"%allsites)
302        sites=self.checkBlackList(allsites)
303        common.logger.debug(5,"sites are (after black list) %s"%sites)
304        sites=self.checkWhiteList(sites)
305        common.logger.debug(5,"sites are (after white list) %s"%sites)
362  
363 <        if len(sites)==0:
364 <            msg = 'No sites hosting all the needed data! Exiting... '
365 <            raise CrabException(msg)
363 >        sites = dataloc.getSites()
364 >        allSites = []
365 >        listSites = sites.values()
366 >        for listSite in listSites:
367 >            for oneSite in listSite:
368 >                allSites.append(oneSite)
369 >        allSites = self.uniquelist(allSites)
370  
371 <        common.logger.message("List of Sites hosting the data : "+str(sites))
372 <        common.logger.debug(6, "List of Sites: "+str(sites))
373 <        common.analisys_common_info['sites']=sites    ## used in SchedulerEdg.py in createSchScript
374 <        self.setParam_('TargetCE', ','.join(sites))
315 <        return
371 >        # screen output
372 >        common.logger.message("Requested dataset: " + datasetPath + " has " + str(self.maxEvents) + " events in " + str(len(self.filesbyblock.keys())) + " blocks.\n")
373 >
374 >        return sites
375      
376 <    def jobSplittingPerFiles(self):
376 >    def jobSplittingByBlocks(self, blockSites):
377          """
378 <        Perform job splitting based on number of files to be accessed per job
379 <        """
380 <        common.logger.debug(5,'Splitting per input files')
381 <        common.logger.message('Required '+str(self.filesPerJob)+' files per job ')
382 <        common.logger.message('Required '+str(self.total_number_of_events)+' events in total ')
383 <
384 <        ## TODO: SL need to have (from DBS) a detailed list of how many events per each file
385 <        n_tot_files = (len(self.files[0]))
386 <        #print "n_tot_files = ", n_tot_files
387 <        ## SL: this is wrong if the files have different number of events
388 <        #print "self.maxEvents = ", self.maxEvents
389 <        evPerFile = int(self.maxEvents)/n_tot_files
390 <        #print "evPerFile = int(self.maxEvents)/n_tot_files =  ", evPerFile
391 <
392 <        common.logger.debug(5,'Events per File '+str(evPerFile))
393 <
394 <        ## if asked to process all events, do it
395 <        if self.total_number_of_events == -1:
396 <            self.total_number_of_events=self.maxEvents
397 <            self.total_number_of_jobs = int(n_tot_files)*1/int(self.filesPerJob)
398 <            common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for all available events '+str(self.total_number_of_events)+' events')
399 <        
378 >        Perform job splitting. Jobs run over an integer number of files
379 >        and no more than one block.
380 >        ARGUMENT: blockSites: dictionary with blocks as keys and list of host sites as values
 381 >        REQUIRES: self.selectTotalNumberEvents, self.selectEventsPerJob, self.selectNumberOfJobs,
382 >                  self.total_number_of_events, self.eventsPerJob, self.theNumberOfJobs,
383 >                  self.maxEvents, self.filesbyblock
384 >        SETS: self.jobDestination - Site destination(s) for each job (a list of lists)
385 >              self.total_number_of_jobs - Total # of jobs
386 >              self.list_of_args - File(s) job will run on (a list of lists)
387 >        """
388 >
389 >        # ---- Handle the possible job splitting configurations ---- #
390 >        if (self.selectTotalNumberEvents):
391 >            totalEventsRequested = self.total_number_of_events
392 >        if (self.selectEventsPerJob):
393 >            eventsPerJobRequested = self.eventsPerJob
394 >            if (self.selectNumberOfJobs):
395 >                totalEventsRequested = self.theNumberOfJobs * self.eventsPerJob
396 >
397 >        # If user requested all the events in the dataset
398 >        if (totalEventsRequested == -1):
399 >            eventsRemaining=self.maxEvents
400 >        # If user requested more events than are in the dataset
401 >        elif (totalEventsRequested > self.maxEvents):
402 >            eventsRemaining = self.maxEvents
403 >            common.logger.message("Requested "+str(self.total_number_of_events)+ " events, but only "+str(self.maxEvents)+" events are available.")
404 >        # If user requested less events than are in the dataset
405          else:
406 <            #print "self.total_number_of_events = ", self.total_number_of_events
407 <            #print "evPerFile = ", evPerFile
408 <            self.total_number_of_files = int(self.total_number_of_events/evPerFile)
409 <            #print "self.total_number_of_files = int(self.total_number_of_events/evPerFile) = " , self.total_number_of_files
410 <            ## SL: if ask for less event than what is computed to be available on a
347 <            ##     file, process the first file anyhow.
348 <            if self.total_number_of_files == 0:
349 <                self.total_number_of_files = self.total_number_of_files + 1
350 <                
406 >            eventsRemaining = totalEventsRequested
407 >
408 >        # If user requested more events per job than are in the dataset
409 >        if (self.selectEventsPerJob and eventsPerJobRequested > self.maxEvents):
410 >            eventsPerJobRequested = self.maxEvents
411  
412 <            common.logger.debug(5,'N files  '+str(self.total_number_of_files))
412 >        # For user info at end
413 >        totalEventCount = 0
414  
415 <            check = 0
415 >        if (self.selectTotalNumberEvents and self.selectNumberOfJobs):
416 >            eventsPerJobRequested = int(eventsRemaining/self.theNumberOfJobs)
417 >
418 >        if (self.selectNumberOfJobs):
419 >            common.logger.message("May not create the exact number_of_jobs requested.")
420 >
421 >        if ( self.ncjobs == 'all' ) :
422 >            totalNumberOfJobs = 999999999
423 >        else :
424 >            totalNumberOfJobs = self.ncjobs
425              
356            ## Compute the number of jobs
357            #self.total_number_of_jobs = int(n_tot_files)*1/int(self.filesPerJob)
358            #print "self.total_number_of_files = ", self.total_number_of_files
359            #print "self.filesPerJob = ", self.filesPerJob
360            self.total_number_of_jobs = int(self.total_number_of_files/self.filesPerJob)
361            #print "self.total_number_of_jobs = ", self.total_number_of_jobs
362            common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
363
364            ## is there any remainder?
365            check = int(self.total_number_of_files) - (int(self.total_number_of_jobs)*self.filesPerJob)
366
367            common.logger.debug(5,'Check  '+str(check))
368
369            if check > 0:
370                self.total_number_of_jobs =  self.total_number_of_jobs + 1
371                common.logger.message('Warning: last job will be created with '+str(check)+' files')
426  
427 <            #common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for a total of '+str((self.total_number_of_jobs-1)*self.filesPerJob*evPerFile + check*evPerFile)+' events')
428 <            common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for a total of '+str((self.total_number_of_jobs)*self.filesPerJob*evPerFile + check*evPerFile)+' events')
429 <            pass
427 >        blocks = blockSites.keys()
428 >        blockCount = 0
429 >        # Backup variable in case self.maxEvents counted events in a non-included block
430 >        numBlocksInDataset = len(blocks)
431  
432 +        jobCount = 0
433          list_of_lists = []
434 <        for i in xrange(0, int(n_tot_files), self.filesPerJob):
435 <            parString = "\\{"
434 >
 435 >        # list tracking which jobs belong to which block
436 >        jobsOfBlock = {}
437 >
438 >        # ---- Iterate over the blocks in the dataset until ---- #
439 >        # ---- we've met the requested total # of events    ---- #
440 >        while ( (eventsRemaining > 0) and (blockCount < numBlocksInDataset) and (jobCount < totalNumberOfJobs)):
441 >            block = blocks[blockCount]
442 >            blockCount += 1
443              
444 <            params = self.files[0][i: i+self.filesPerJob]
445 <            for i in range(len(params) - 1):
446 <                parString += '\\\"' + params[i] + '\\\"\,'
444 >            if self.eventsbyblock.has_key(block) :
445 >                numEventsInBlock = self.eventsbyblock[block]
446 >                common.logger.debug(5,'Events in Block File '+str(numEventsInBlock))
447              
448 <            parString += '\\\"' + params[len(params) - 1] + '\\\"\\}'
449 <            list_of_lists.append(parString)
450 <            pass
448 >                files = self.filesbyblock[block]
449 >                numFilesInBlock = len(files)
450 >                if (numFilesInBlock <= 0):
451 >                    continue
452 >                fileCount = 0
453 >
454 >                # ---- New block => New job ---- #
455 >                parString = "\\{"
456 >                # counter for number of events in files currently worked on
457 >                filesEventCount = 0
458 >                # flag if next while loop should touch new file
459 >                newFile = 1
460 >                # job event counter
461 >                jobSkipEventCount = 0
462 >            
463 >                # ---- Iterate over the files in the block until we've met the requested ---- #
464 >                # ---- total # of events or we've gone over all the files in this block  ---- #
465 >                while ( (eventsRemaining > 0) and (fileCount < numFilesInBlock) and (jobCount < totalNumberOfJobs) ):
466 >                    file = files[fileCount]
467 >                    if newFile :
468 >                        try:
469 >                            numEventsInFile = self.eventsbyfile[file]
470 >                            common.logger.debug(6, "File "+str(file)+" has "+str(numEventsInFile)+" events")
471 >                            # increase filesEventCount
472 >                            filesEventCount += numEventsInFile
473 >                            # Add file to current job
474 >                            parString += '\\\"' + file + '\\\"\,'
475 >                            newFile = 0
476 >                        except KeyError:
477 >                            common.logger.message("File "+str(file)+" has unknown number of events: skipping")
478 >                        
479 >
480 >                    # if less events in file remain than eventsPerJobRequested
481 >                    if ( filesEventCount - jobSkipEventCount < eventsPerJobRequested ) :
482 >                        # if last file in block
483 >                        if ( fileCount == numFilesInBlock-1 ) :
484 >                            # end job using last file, use remaining events in block
485 >                            # close job and touch new file
486 >                            fullString = parString[:-2]
487 >                            fullString += '\\}'
488 >                            list_of_lists.append([fullString,str(-1),str(jobSkipEventCount)])
489 >                            common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(filesEventCount - jobSkipEventCount)+" events (last file in block).")
490 >                            self.jobDestination.append(blockSites[block])
491 >                            common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
492 >                            # fill jobs of block dictionary
493 >                            if block in jobsOfBlock.keys() :
494 >                                jobsOfBlock[block].append(jobCount+1)
495 >                            else:
496 >                                jobsOfBlock[block] = [jobCount+1]
497 >                            # reset counter
498 >                            jobCount = jobCount + 1
499 >                            totalEventCount = totalEventCount + filesEventCount - jobSkipEventCount
500 >                            eventsRemaining = eventsRemaining - filesEventCount + jobSkipEventCount
501 >                            jobSkipEventCount = 0
502 >                            # reset file
503 >                            parString = "\\{"
504 >                            filesEventCount = 0
505 >                            newFile = 1
506 >                            fileCount += 1
507 >                        else :
508 >                            # go to next file
509 >                            newFile = 1
510 >                            fileCount += 1
511 >                    # if events in file equal to eventsPerJobRequested
512 >                    elif ( filesEventCount - jobSkipEventCount == eventsPerJobRequested ) :
513 >                        # close job and touch new file
514 >                        fullString = parString[:-2]
515 >                        fullString += '\\}'
516 >                        list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
517 >                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
518 >                        self.jobDestination.append(blockSites[block])
519 >                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
520 >                        if block in jobsOfBlock.keys() :
521 >                            jobsOfBlock[block].append(jobCount+1)
522 >                        else:
523 >                            jobsOfBlock[block] = [jobCount+1]
524 >                        # reset counter
525 >                        jobCount = jobCount + 1
526 >                        totalEventCount = totalEventCount + eventsPerJobRequested
527 >                        eventsRemaining = eventsRemaining - eventsPerJobRequested
528 >                        jobSkipEventCount = 0
529 >                        # reset file
530 >                        parString = "\\{"
531 >                        filesEventCount = 0
532 >                        newFile = 1
533 >                        fileCount += 1
534 >                        
535 >                    # if more events in file remain than eventsPerJobRequested
536 >                    else :
537 >                        # close job but don't touch new file
538 >                        fullString = parString[:-2]
539 >                        fullString += '\\}'
540 >                        list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
541 >                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
542 >                        self.jobDestination.append(blockSites[block])
543 >                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
544 >                        if block in jobsOfBlock.keys() :
545 >                            jobsOfBlock[block].append(jobCount+1)
546 >                        else:
547 >                            jobsOfBlock[block] = [jobCount+1]
548 >                        # increase counter
549 >                        jobCount = jobCount + 1
550 >                        totalEventCount = totalEventCount + eventsPerJobRequested
551 >                        eventsRemaining = eventsRemaining - eventsPerJobRequested
552 >                        # calculate skip events for last file
553 >                        # use filesEventCount (contains several files), jobSkipEventCount and eventsPerJobRequest
554 >                        jobSkipEventCount = eventsPerJobRequested - (filesEventCount - jobSkipEventCount - self.eventsbyfile[file])
555 >                        # remove all but the last file
556 >                        filesEventCount = self.eventsbyfile[file]
557 >                        parString = "\\{"
558 >                        parString += '\\\"' + file + '\\\"\,'
559 >                    pass # END if
560 >                pass # END while (iterate over files in the block)
561 >        pass # END while (iterate over blocks in the dataset)
562 >        self.ncjobs = self.total_number_of_jobs = jobCount
563 >        if (eventsRemaining > 0 and jobCount < totalNumberOfJobs ):
564 >            common.logger.message("Could not run on all requested events because some blocks not hosted at allowed sites.")
565 >        common.logger.message(str(jobCount)+" job(s) can run on "+str(totalEventCount)+" events.\n")
566 >        
567 >        # screen output
568 >        screenOutput = "List of jobs and available destination sites:\n\n"
569 >
570 >        blockCounter = 0
571 >        for block in jobsOfBlock.keys():
572 >            blockCounter += 1
573 >            screenOutput += "Block %5i: jobs %20s: sites: %s\n" % (blockCounter,spanRanges(jobsOfBlock[block]),','.join(blockSites[block]))
574 >
575 >        common.logger.message(screenOutput)
576  
577          self.list_of_args = list_of_lists
390        #print self.list_of_args
578          return
579  
580 <    def jobSplittingPerEvents(self):
580 >    def jobSplittingNoInput(self):
581          """
582          Perform job splitting based on number of event per job
583          """
584          common.logger.debug(5,'Splitting per events')
585          common.logger.message('Required '+str(self.eventsPerJob)+' events per job ')
586 +        common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
587          common.logger.message('Required '+str(self.total_number_of_events)+' events in total ')
588  
589          if (self.total_number_of_events < 0):
590              msg='Cannot split jobs per Events with "-1" as total number of events'
591              raise CrabException(msg)
592  
593 <        self.total_number_of_jobs = int(self.total_number_of_events/self.eventsPerJob)
594 <
595 <        print "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"
596 <        print "self.total_number_of_events = ", self.total_number_of_events
597 <        print "self.eventsPerJob = ", self.eventsPerJob
598 <        print "self.total_number_of_jobs = ", self.total_number_of_jobs
599 <        print "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"
600 <        
593 >        if (self.selectEventsPerJob):
594 >            if (self.selectTotalNumberEvents):
595 >                self.total_number_of_jobs = int(self.total_number_of_events/self.eventsPerJob)
596 >            elif(self.selectNumberOfJobs) :  
597 >                self.total_number_of_jobs =self.theNumberOfJobs
598 >                self.total_number_of_events =int(self.theNumberOfJobs*self.eventsPerJob)
599 >
600 >        elif (self.selectNumberOfJobs) :
601 >            self.total_number_of_jobs = self.theNumberOfJobs
602 >            self.eventsPerJob = int(self.total_number_of_events/self.total_number_of_jobs)
603 >
604          common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
605  
606          # is there any remainder?
# Line 417 | Line 608 | class Cmssw(JobType):
608  
609          common.logger.debug(5,'Check  '+str(check))
610  
611 +        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created, each for '+str(self.eventsPerJob)+' for a total of '+str(self.total_number_of_jobs*self.eventsPerJob)+' events')
612          if check > 0:
613 <            common.logger.message('Warning: asked '+self.total_number_of_events+' but will do only '+(int(self.total_number_of_jobs)*self.eventsPerJob))
422 <
423 <        common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for a total of '+str(self.total_number_of_jobs*self.eventsPerJob)+' events')
613 >            common.logger.message('Warning: asked '+str(self.total_number_of_events)+' but can do only '+str(int(self.total_number_of_jobs)*self.eventsPerJob))
614  
615          # argument is seed number.$i
616          self.list_of_args = []
617          for i in range(self.total_number_of_jobs):
618 <            self.list_of_args.append(int(str(self.sourceSeed)+str(i)))
619 <        print self.list_of_args
618 >            ## Since there is no input, any site is good
619 >           # self.jobDestination.append(["Any"])
620 >            self.jobDestination.append([""]) #must be empty to write correctly the xml
621 >            args=[]
622 >            if (self.firstRun):
623 >                    ## pythia first run
624 >                #self.list_of_args.append([(str(self.firstRun)+str(i))])
625 >                args.append(str(self.firstRun)+str(i))
626 >            else:
627 >                ## no first run
628 >                #self.list_of_args.append([str(i)])
629 >                args.append(str(i))
630 >            if (self.sourceSeed):
631 >                args.append(str(self.sourceSeed)+str(i))
632 >                if (self.sourceSeedVtx):
633 >                    ## + vtx random seed
634 >                    args.append(str(self.sourceSeedVtx)+str(i))
635 >                if (self.sourceSeedG4):
636 >                    ## + G4 random seed
637 >                    args.append(str(self.sourceSeedG4)+str(i))
638 >                if (self.sourceSeedMix):    
639 >                    ## + Mix random seed
640 >                    args.append(str(self.sourceSeedMix)+str(i))
641 >                pass
642 >            pass
643 >            self.list_of_args.append(args)
644 >        pass
645 >            
646 >        # print self.list_of_args
647 >
648 >        return
649 >
650 >
651 >    def jobSplittingForScript(self):#CarlosDaniele
652 >        """
653 >        Perform job splitting based on number of job
654 >        """
655 >        common.logger.debug(5,'Splitting per job')
656 >        common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
657 >
658 >        self.total_number_of_jobs = self.theNumberOfJobs
659 >
660 >        common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
661 >
662 >        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created')
663  
664 +        # argument is seed number.$i
665 +        self.list_of_args = []
666 +        for i in range(self.total_number_of_jobs):
667 +            ## Since there is no input, any site is good
668 +           # self.jobDestination.append(["Any"])
669 +            self.jobDestination.append([""])
670 +            ## no random seed
671 +            self.list_of_args.append([str(i)])
672          return
673  
674      def split(self, jobParams):
# Line 441 | Line 682 | class Cmssw(JobType):
682              jobParams.append("")
683          
684          for job in range(njobs):
685 <            jobParams[job] = str(arglist[job])
685 >            jobParams[job] = arglist[job]
686 >            # print str(arglist[job])
687 >            # print jobParams[job]
688              common.jobDB.setArguments(job, jobParams[job])
689 +            common.logger.debug(5,"Job "+str(job)+" Destination: "+str(self.jobDestination[job]))
690 +            common.jobDB.setDestination(job, self.jobDestination[job])
691  
692          common.jobDB.save()
693          return
694      
695      def getJobTypeArguments(self, nj, sched):
696 <        return common.jobDB.arguments(nj)
696 >        result = ''
697 >        for i in common.jobDB.arguments(nj):
698 >            result=result+str(i)+" "
699 >        return result
700    
701      def numberOfJobs(self):
702          # Fabio
703          return self.total_number_of_jobs
704  
457    def checkBlackList(self, allSites):
458        if len(self.reCEBlackList)==0: return allSites
459        sites = []
460        for site in allSites:
461            common.logger.debug(10,'Site '+site)
462            good=1
463            for re in self.reCEBlackList:
464                if re.search(site):
465                    common.logger.message('CE in black list, skipping site '+site)
466                    good=0
467                pass
468            if good: sites.append(site)
469        if len(sites) == 0:
470            common.logger.debug(3,"No sites found after BlackList")
471        return sites
472
473    def checkWhiteList(self, allSites):
474
475        if len(self.reCEWhiteList)==0: return allSites
476        sites = []
477        for site in allSites:
478            good=0
479            for re in self.reCEWhiteList:
480                if re.search(site):
481                    common.logger.debug(5,'CE in white list, adding site '+site)
482                    good=1
483                if not good: continue
484                sites.append(site)
485        if len(sites) == 0:
486            common.logger.message("No sites found after WhiteList\n")
487        else:
488            common.logger.debug(5,"Selected sites via WhiteList are "+str(sites)+"\n")
489        return sites
490
705      def getTarBall(self, exe):
706          """
707          Return the TarBall with lib and exe
708          """
709          
710          # if it exist, just return it
711 <        self.tgzNameWithPath = common.work_space.shareDir()+self.tgz_name
711 >        #
712 >        # Marco. Let's start to use relative path for Boss XML files
713 >        #
714 >        self.tgzNameWithPath = common.work_space.pathForTgz()+'share/'+self.tgz_name
715          if os.path.exists(self.tgzNameWithPath):
716              return self.tgzNameWithPath
717  
# Line 508 | Line 725 | class Cmssw(JobType):
725          # First of all declare the user Scram area
726          swArea = self.scram.getSWArea_()
727          #print "swArea = ", swArea
728 <        swVersion = self.scram.getSWVersion()
729 <        #print "swVersion = ", swVersion
728 >        # swVersion = self.scram.getSWVersion()
729 >        # print "swVersion = ", swVersion
730          swReleaseTop = self.scram.getReleaseTop_()
731          #print "swReleaseTop = ", swReleaseTop
732          
# Line 517 | Line 734 | class Cmssw(JobType):
734          if swReleaseTop == '' or swArea == swReleaseTop:
735              return
736  
737 <        filesToBeTarred = []
738 <        ## First find the executable
739 <        if (self.executable != ''):
740 <            exeWithPath = self.scram.findFile_(executable)
741 < #           print exeWithPath
742 <            if ( not exeWithPath ):
743 <                raise CrabException('User executable '+executable+' not found')
744 <
745 <            ## then check if it's private or not
746 <            if exeWithPath.find(swReleaseTop) == -1:
747 <                # the exe is private, so we must ship
748 <                common.logger.debug(5,"Exe "+exeWithPath+" to be tarred")
749 <                path = swArea+'/'
750 <                exe = string.replace(exeWithPath, path,'')
751 <                filesToBeTarred.append(exe)
752 <                pass
753 <            else:
754 <                # the exe is from release, we'll find it on WN
755 <                pass
756 <
757 <        ## Now get the libraries: only those in local working area
758 <        libDir = 'lib'
759 <        lib = swArea+'/' +libDir
760 <        common.logger.debug(5,"lib "+lib+" to be tarred")
761 <        if os.path.exists(lib):
762 <            filesToBeTarred.append(libDir)
763 <
764 <        ## Now check if module dir is present
765 <        moduleDir = 'module'
766 <        if os.path.isdir(swArea+'/'+moduleDir):
767 <            filesToBeTarred.append(moduleDir)
768 <
769 <        ## Now check if the Data dir is present
770 <        dataDir = 'src/Data/'
771 <        if os.path.isdir(swArea+'/'+dataDir):
772 <            filesToBeTarred.append(dataDir)
773 <
774 <        ## Create the tar-ball
775 <        if len(filesToBeTarred)>0:
776 <            cwd = os.getcwd()
777 <            os.chdir(swArea)
778 <            tarcmd = 'tar zcvf ' + self.tgzNameWithPath + ' '
779 <            for line in filesToBeTarred:
780 <                tarcmd = tarcmd + line + ' '
781 <            cout = runCommand(tarcmd)
782 <            if not cout:
783 <                raise CrabException('Could not create tar-ball')
784 <            os.chdir(cwd)
785 <        else:
786 <            common.logger.debug(5,"No files to be to be tarred")
737 >        import tarfile
738 >        try: # create tar ball
739 >            tar = tarfile.open(self.tgzNameWithPath, "w:gz")
740 >            ## First find the executable
741 >            if (self.executable != ''):
742 >                exeWithPath = self.scram.findFile_(executable)
743 >                if ( not exeWithPath ):
744 >                    raise CrabException('User executable '+executable+' not found')
745 >    
746 >                ## then check if it's private or not
747 >                if exeWithPath.find(swReleaseTop) == -1:
748 >                    # the exe is private, so we must ship
749 >                    common.logger.debug(5,"Exe "+exeWithPath+" to be tarred")
750 >                    path = swArea+'/'
751 >                    # distinguish case when script is in user project area or given by full path somewhere else
752 >                    if exeWithPath.find(path) >= 0 :
753 >                        exe = string.replace(exeWithPath, path,'')
754 >                        tar.add(path+exe,os.path.basename(executable))
755 >                    else :
756 >                        tar.add(exeWithPath,os.path.basename(executable))
757 >                    pass
758 >                else:
759 >                    # the exe is from release, we'll find it on WN
760 >                    pass
761 >    
762 >            ## Now get the libraries: only those in local working area
763 >            libDir = 'lib'
764 >            lib = swArea+'/' +libDir
765 >            common.logger.debug(5,"lib "+lib+" to be tarred")
766 >            if os.path.exists(lib):
767 >                tar.add(lib,libDir)
768 >    
769 >            ## Now check if module dir is present
770 >            moduleDir = 'module'
771 >            module = swArea + '/' + moduleDir
772 >            if os.path.isdir(module):
773 >                tar.add(module,moduleDir)
774 >
775 >            ## Now check if any data dir(s) is present
776 >            swAreaLen=len(swArea)
777 >            for root, dirs, files in os.walk(swArea):
778 >                if "data" in dirs:
779 >                    common.logger.debug(5,"data "+root+"/data"+" to be tarred")
780 >                    tar.add(root+"/data",root[swAreaLen:]+"/data")
781 >
782 >            ## Add ProdAgent dir to tar
783 >            paDir = 'ProdAgentApi'
784 >            pa = os.environ['CRABDIR'] + '/' + 'ProdAgentApi'
785 >            if os.path.isdir(pa):
786 >                tar.add(pa,paDir)
787 >
788 >            ### FEDE FOR DBS PUBLICATION
789 >            ## Add PRODCOMMON dir to tar
790 >            prodcommonDir = 'ProdCommon'
791 >            prodcommonPath = os.environ['CRABDIR'] + '/' + 'ProdCommon'
792 >            if os.path.isdir(prodcommonPath):
793 >                tar.add(prodcommonPath,prodcommonDir)
794 >            #############################    
795 >        
796 >            common.logger.debug(5,"Files added to "+self.tgzNameWithPath+" : "+str(tar.getnames()))
797 >            tar.close()
798 >        except :
799 >            raise CrabException('Could not create tar-ball')
800 >
801 >        ## check for tarball size
802 >        tarballinfo = os.stat(self.tgzNameWithPath)
803 >        if ( tarballinfo.st_size > self.MaxTarBallSize*1024*1024 ) :
804 >            raise CrabException('Input sandbox size of ' + str(float(tarballinfo.st_size)/1024.0/1024.0) + ' MB is larger than the allowed ' + str(self.MaxTarBallSize) + ' MB input sandbox limit and not supported by the used GRID submission system. Please make sure that no unnecessary files are in all data directories in your local CMSSW project area as they are automatically packed into the input sandbox.')
805 >
806 >        ## create tar-ball with ML stuff
807 >        self.MLtgzfile =  common.work_space.pathForTgz()+'share/MLfiles.tgz'
808 >        try:
809 >            tar = tarfile.open(self.MLtgzfile, "w:gz")
810 >            path=os.environ['CRABDIR'] + '/python/'
811 >            for file in ['report.py', 'DashboardAPI.py', 'Logger.py', 'ProcInfo.py', 'apmon.py', 'parseCrabFjr.py']:
812 >                tar.add(path+file,file)
813 >            common.logger.debug(5,"Files added to "+self.MLtgzfile+" : "+str(tar.getnames()))
814 >            tar.close()
815 >        except :
816 >            raise CrabException('Could not create ML files tar-ball')
817          
818          return
819          
# Line 583 | Line 830 | class Cmssw(JobType):
830          txt += 'if [ $middleware == LCG ]; then \n'
831          txt += self.wsSetupCMSLCGEnvironment_()
832          txt += 'elif [ $middleware == OSG ]; then\n'
833 <        txt += '    time=`date -u +"%s"`\n'
834 <        txt += '    WORKING_DIR=$OSG_WN_TMP/cms_$time\n'
588 <        txt += '    echo "Creating working directory: $WORKING_DIR"\n'
589 <        txt += '    /bin/mkdir -p $WORKING_DIR\n'
833 >        txt += '    WORKING_DIR=`/bin/mktemp  -d $OSG_WN_TMP/cms_XXXXXXXXXXXX`\n'
834 >        txt += '    echo "Created working directory: $WORKING_DIR"\n'
835          txt += '    if [ ! -d $WORKING_DIR ] ;then\n'
836          txt += '        echo "SET_CMS_ENV 10016 ==> OSG $WORKING_DIR could not be created on WN `hostname`"\n'
837 <        txt += '        echo "JOB_EXIT_STATUS = 10016"\n'
838 <        txt += '        echo "JobExitCode=10016" | tee -a $RUNTIME_AREA/$repo\n'
839 <        txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
837 >        txt += '    echo "JOB_EXIT_STATUS = 10016"\n'
838 >        txt += '    echo "JobExitCode=10016" | tee -a $RUNTIME_AREA/$repo\n'
839 >        txt += '    dumpStatus $RUNTIME_AREA/$repo\n'
840          txt += '        rm -f $RUNTIME_AREA/$repo \n'
841          txt += '        echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
597        txt += '        echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
842          txt += '        echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
843          txt += '        exit 1\n'
844          txt += '    fi\n'
# Line 608 | Line 852 | class Cmssw(JobType):
852          scram = self.scram.commandName()
853          txt += '\n\n'
854          txt += 'echo "### SPECIFIC JOB SETUP ENVIRONMENT ###"\n'
855 +        txt += 'echo "Setting SCRAM_ARCH='+self.executable_arch+'"\n'
856 +        txt += 'export SCRAM_ARCH='+self.executable_arch+'\n'
857          txt += scram+' project CMSSW '+self.version+'\n'
858          txt += 'status=$?\n'
859          txt += 'if [ $status != 0 ] ; then\n'
# Line 617 | Line 863 | class Cmssw(JobType):
863          txt += '   dumpStatus $RUNTIME_AREA/$repo\n'
864          txt += '   rm -f $RUNTIME_AREA/$repo \n'
865          txt += '   echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
620        txt += '   echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
866          txt += '   echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
867          ## OLI_Daniele
868          txt += '    if [ $middleware == OSG ]; then \n'
# Line 625 | Line 870 | class Cmssw(JobType):
870          txt += '        cd $RUNTIME_AREA\n'
871          txt += '        /bin/rm -rf $WORKING_DIR\n'
872          txt += '        if [ -d $WORKING_DIR ] ;then\n'
873 <        txt += '            echo "SET_CMS_ENV 10018 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after CMSSW CMSSW_0_6_1 not found on `hostname`"\n'
874 <        txt += '            echo "JOB_EXIT_STATUS = 10018"\n'
875 <        txt += '            echo "JobExitCode=10018" | tee -a $RUNTIME_AREA/$repo\n'
876 <        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
873 >        txt += '        echo "SET_CMS_ENV 10018 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after CMSSW CMSSW_0_6_1 not found on `hostname`"\n'
874 >        txt += '        echo "JOB_EXIT_STATUS = 10018"\n'
875 >        txt += '        echo "JobExitCode=10018" | tee -a $RUNTIME_AREA/$repo\n'
876 >        txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
877          txt += '            rm -f $RUNTIME_AREA/$repo \n'
878          txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
634        txt += '            echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
879          txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
880          txt += '        fi\n'
881          txt += '    fi \n'
# Line 640 | Line 884 | class Cmssw(JobType):
884          txt += 'echo "CMSSW_VERSION =  '+self.version+'"\n'
885          txt += 'cd '+self.version+'\n'
886          ### needed grep for bug in scramv1 ###
887 +        txt += scram+' runtime -sh\n'
888          txt += 'eval `'+scram+' runtime -sh | grep -v SCRAMRT_LSB_JOBNAME`\n'
889 +        txt += 'echo $PATH\n'
890  
891          # Handle the arguments:
892          txt += "\n"
893          txt += "## number of arguments (first argument always jobnumber)\n"
894          txt += "\n"
895 <        txt += "narg=$#\n"
896 <        txt += "if [ $narg -lt 2 ]\n"
895 > #        txt += "narg=$#\n"
896 >        txt += "if [ $nargs -lt 2 ]\n"
897          txt += "then\n"
898 <        txt += "    echo 'SET_EXE_ENV 1 ==> ERROR Too few arguments' +$narg+ \n"
898 >        txt += "    echo 'SET_EXE_ENV 1 ==> ERROR Too few arguments' +$nargs+ \n"
899          txt += '    echo "JOB_EXIT_STATUS = 50113"\n'
900          txt += '    echo "JobExitCode=50113" | tee -a $RUNTIME_AREA/$repo\n'
901          txt += '    dumpStatus $RUNTIME_AREA/$repo\n'
902          txt += '    rm -f $RUNTIME_AREA/$repo \n'
903          txt += '    echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
658        txt += '    echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
904          txt += '    echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
905          ## OLI_Daniele
906          txt += '    if [ $middleware == OSG ]; then \n'
# Line 663 | Line 908 | class Cmssw(JobType):
908          txt += '        cd $RUNTIME_AREA\n'
909          txt += '        /bin/rm -rf $WORKING_DIR\n'
910          txt += '        if [ -d $WORKING_DIR ] ;then\n'
911 <        txt += '            echo "SET_EXE_ENV 50114 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after Too few arguments for CRAB job wrapper"\n'
912 <        txt += '            echo "JOB_EXIT_STATUS = 50114"\n'
913 <        txt += '            echo "JobExitCode=50114" | tee -a $RUNTIME_AREA/$repo\n'
914 <        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
911 >        txt += '        echo "SET_EXE_ENV 50114 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after Too few arguments for CRAB job wrapper"\n'
912 >        txt += '        echo "JOB_EXIT_STATUS = 50114"\n'
913 >        txt += '        echo "JobExitCode=50114" | tee -a $RUNTIME_AREA/$repo\n'
914 >        txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
915          txt += '            rm -f $RUNTIME_AREA/$repo \n'
916          txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
672        txt += '            echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
917          txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
918          txt += '        fi\n'
919          txt += '    fi \n'
# Line 679 | Line 923 | class Cmssw(JobType):
923  
924          # Prepare job-specific part
925          job = common.job_list[nj]
926 <        pset = os.path.basename(job.configFilename())
927 <        txt += '\n'
928 <        if (self.datasetPath): # standard job
929 <            txt += 'InputFiles=$2\n'
930 <            txt += 'echo "Inputfiles:<$InputFiles>"\n'
931 <            txt += 'sed "s#{\'INPUT\'}#$InputFiles#" $RUNTIME_AREA/'+pset+' > pset.cfg\n'
932 <        else:  # pythia like job
933 <            txt += 'Seed=$2\n'
934 <            txt += 'echo "Seed: <$Seed>"\n'
935 <            txt += 'sed "s#INPUT#$Seed#" $RUNTIME_AREA/'+pset+' > pset.cfg\n'
926 >        ### FEDE FOR DBS OUTPUT PUBLICATION
927 >        if (self.datasetPath):
928 >            txt += '\n'
929 >            txt += 'DatasetPath='+self.datasetPath+'\n'
930 >
931 >            datasetpath_split = self.datasetPath.split("/")
932 >            
933 >            txt += 'PrimaryDataset='+datasetpath_split[1]+'\n'
934 >            txt += 'DataTier='+datasetpath_split[2]+'\n'
935 >            txt += 'ProcessedDataset='+datasetpath_split[3]+'\n'
936 >            txt += 'ApplicationFamily=Online\n'
937 >
938 >        else:
939 >            txt += 'DatasetPath=MCDataTier\n'
940 >            txt += 'PrimaryDataset=null\n'
941 >            txt += 'DataTier=null\n'
942 >            txt += 'ProcessedDataset=null\n'
943 >            txt += 'ApplicationFamily=MCDataTier\n'
944 >        ##################    
945 >        if self.pset != None: #CarlosDaniele
946 >            pset = os.path.basename(job.configFilename())
947 >            txt += '\n'
948 >            if (self.datasetPath): # standard job
949 >                #txt += 'InputFiles=$2\n'
950 >                txt += 'InputFiles=${args[1]}\n'
951 >                txt += 'MaxEvents=${args[2]}\n'
952 >                txt += 'SkipEvents=${args[3]}\n'
953 >                txt += 'echo "Inputfiles:<$InputFiles>"\n'
954 >                txt += 'sed "s#{\'INPUT\'}#$InputFiles#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
955 >                txt += 'echo "MaxEvents:<$MaxEvents>"\n'
956 >                txt += 'sed "s#INPUTMAXEVENTS#$MaxEvents#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
957 >                txt += 'echo "SkipEvents:<$SkipEvents>"\n'
958 >                txt += 'sed "s#INPUTSKIPEVENTS#$SkipEvents#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
959 >            else:  # pythia like job
960 >                seedIndex=1
961 >                if (self.firstRun):
962 >                    txt += 'FirstRun=${args['+str(seedIndex)+']}\n'
963 >                    txt += 'echo "FirstRun: <$FirstRun>"\n'
964 >                    txt += 'sed "s#\<INPUTFIRSTRUN\>#$FirstRun#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
965 >                    seedIndex=seedIndex+1
966 >
967 >                if (self.sourceSeed):
968 >                    txt += 'Seed=${args['+str(seedIndex)+']}\n'
969 >                    txt += 'sed "s#\<INPUT\>#$Seed#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
970 >                    seedIndex=seedIndex+1
971 >                    ## the following seeds are not always present
972 >                    if (self.sourceSeedVtx):
973 >                        txt += 'VtxSeed=${args['+str(seedIndex)+']}\n'
974 >                        txt += 'echo "VtxSeed: <$VtxSeed>"\n'
975 >                        txt += 'sed "s#\<INPUTVTX\>#$VtxSeed#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
976 >                        seedIndex += 1
977 >                    if (self.sourceSeedG4):
978 >                        txt += 'G4Seed=${args['+str(seedIndex)+']}\n'
979 >                        txt += 'echo "G4Seed: <$G4Seed>"\n'
980 >                        txt += 'sed "s#\<INPUTG4\>#$G4Seed#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
981 >                        seedIndex += 1
982 >                    if (self.sourceSeedMix):
983 >                        txt += 'mixSeed=${args['+str(seedIndex)+']}\n'
984 >                        txt += 'echo "MixSeed: <$mixSeed>"\n'
985 >                        txt += 'sed "s#\<INPUTMIX\>#$mixSeed#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
986 >                        seedIndex += 1
987 >                    pass
988 >                pass
989 >            txt += 'mv -f '+pset+' pset.cfg\n'
990  
991          if len(self.additional_inbox_files) > 0:
992              for file in self.additional_inbox_files:
993 <                txt += 'if [ -e $RUNTIME_AREA/'+file+' ] ; then\n'
994 <                txt += '   cp $RUNTIME_AREA/'+file+' .\n'
995 <                txt += '   chmod +x '+file+'\n'
993 >                relFile = file.split("/")[-1]
994 >                txt += 'if [ -e $RUNTIME_AREA/'+relFile+' ] ; then\n'
995 >                txt += '   cp $RUNTIME_AREA/'+relFile+' .\n'
996 >                txt += '   chmod +x '+relFile+'\n'
997                  txt += 'fi\n'
998              pass
999  
1000 <        txt += 'echo "### END JOB SETUP ENVIRONMENT ###"\n\n'
1001 <
1002 <        txt += '\n'
1003 <        txt += 'echo "***** cat pset.cfg *********"\n'
1004 <        txt += 'cat pset.cfg\n'
1005 <        txt += 'echo "****** end pset.cfg ********"\n'
1006 <        txt += '\n'
1007 <        # txt += 'echo "***** cat pset1.cfg *********"\n'
1008 <        # txt += 'cat pset1.cfg\n'
1009 <        # txt += 'echo "****** end pset1.cfg ********"\n'
1000 >        if self.pset != None: #CarlosDaniele
1001 >            txt += 'echo "### END JOB SETUP ENVIRONMENT ###"\n\n'
1002 >        
1003 >            txt += '\n'
1004 >            txt += 'echo "***** cat pset.cfg *********"\n'
1005 >            txt += 'cat pset.cfg\n'
1006 >            txt += 'echo "****** end pset.cfg ********"\n'
1007 >            txt += '\n'
1008 >            ### FEDE FOR DBS OUTPUT PUBLICATION
1009 >            txt += 'PSETHASH=`EdmConfigHash < pset.cfg`'
1010 >            ##############
1011 >            txt += '\n'
1012 >            # txt += 'echo "***** cat pset1.cfg *********"\n'
1013 >            # txt += 'cat pset1.cfg\n'
1014 >            # txt += 'echo "****** end pset1.cfg ********"\n'
1015          return txt
1016  
1017 <    def wsBuildExe(self, nj):
1017 >    def wsBuildExe(self, nj=0):
1018          """
1019          Put in the script the commands to build an executable
1020          or a library.
# Line 737 | Line 1041 | class Cmssw(JobType):
1041              txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
1042              txt += '           rm -f $RUNTIME_AREA/$repo \n'
1043              txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
740            txt += '           echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1044              txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1045              txt += '       fi\n'
1046              txt += '   fi \n'
# Line 746 | Line 1049 | class Cmssw(JobType):
1049              txt += 'else \n'
1050              txt += '   echo "Successful untar" \n'
1051              txt += 'fi \n'
1052 +            txt += '\n'
1053 +            txt += 'echo "Include ProdAgentApi and PRODCOMMON in PYTHONPATH"\n'
1054 +            txt += 'if [ -z "$PYTHONPATH" ]; then\n'
1055 +            #### FEDE FOR DBS OUTPUT PUBLICATION
1056 +            txt += '   export PYTHONPATH=`pwd`/ProdAgentApi:`pwd`/ProdCommon\n'
1057 +            #txt += '   export PYTHONPATH=ProdAgentApi\n'
1058 +            txt += 'else\n'
1059 +            txt += '   export PYTHONPATH=`pwd`/ProdAgentApi:`pwd`/ProdCommon:${PYTHONPATH}\n'
1060 +            #txt += '   export PYTHONPATH=ProdAgentApi:${PYTHONPATH}\n'
1061 +            txt += 'echo "PYTHONPATH=$PYTHONPATH"\n'
1062 +            ###################  
1063 +            txt += 'fi\n'
1064 +            txt += '\n'
1065 +
1066              pass
1067          
1068          return txt
# Line 757 | Line 1074 | class Cmssw(JobType):
1074          """
1075          
1076      def executableName(self):
1077 <        return self.executable
1077 >        if self.scriptExe: #CarlosDaniele
1078 >            return "sh "
1079 >        else:
1080 >            return self.executable
1081  
1082      def executableArgs(self):
1083 <        return " -p pset.cfg"
1083 >        if self.scriptExe:#CarlosDaniele
1084 >            return   self.scriptExe + " $NJob"
1085 >        else:
1086 >            return " -p pset.cfg"
1087  
1088      def inputSandbox(self, nj):
1089          """
1090          Returns a list of filenames to be put in JDL input sandbox.
1091          """
1092          inp_box = []
1093 <        # dict added to delete duplicate from input sandbox file list
1094 <        seen = {}
1093 >        # # dict added to delete duplicate from input sandbox file list
1094 >        # seen = {}
1095          ## code
1096          if os.path.isfile(self.tgzNameWithPath):
1097              inp_box.append(self.tgzNameWithPath)
1098 +        if os.path.isfile(self.MLtgzfile):
1099 +            inp_box.append(self.MLtgzfile)
1100          ## config
1101 <        inp_box.append(common.job_list[nj].configFilename())
1101 >        if not self.pset is None:
1102 >            inp_box.append(common.work_space.pathForTgz() + 'job/' + self.configFilename())
1103          ## additional input files
1104 <        #for file in self.additional_inbox_files:
1105 <        #    inp_box.append(common.work_space.cwdDir()+file)
1104 >        for file in self.additional_inbox_files:
1105 >            inp_box.append(file)
1106          return inp_box
1107  
1108      def outputSandbox(self, nj):
# Line 785 | Line 1111 | class Cmssw(JobType):
1111          """
1112          out_box = []
1113  
788        stdout=common.job_list[nj].stdout()
789        stderr=common.job_list[nj].stderr()
790
1114          ## User Declared output files
1115 <        for out in self.output_file:
1115 >        for out in (self.output_file+self.output_file_sandbox):
1116              n_out = nj + 1
1117              out_box.append(self.numberFile_(out,str(n_out)))
1118          return out_box
796        return []
1119  
1120      def prepareSteeringCards(self):
1121          """
# Line 809 | Line 1131 | class Cmssw(JobType):
1131          txt = '\n'
1132          txt += '# directory content\n'
1133          txt += 'ls \n'
1134 <        file_list = ''
1135 <        for fileWithSuffix in self.output_file:
1134 >
1135 >        for fileWithSuffix in (self.output_file+self.output_file_sandbox):
1136              output_file_num = self.numberFile_(fileWithSuffix, '$NJob')
815            file_list=file_list+output_file_num+' '
1137              txt += '\n'
1138              txt += '# check output file\n'
1139              txt += 'ls '+fileWithSuffix+'\n'
1140 <            txt += 'exe_result=$?\n'
1141 <            txt += 'if [ $exe_result -ne 0 ] ; then\n'
1142 <            txt += '   echo "ERROR: No output file to manage"\n'
1143 <            txt += '   echo "JOB_EXIT_STATUS = $exe_result"\n'
823 <            txt += '   echo "JobExitCode=60302" | tee -a $RUNTIME_AREA/$repo\n'
824 <            txt += '   dumpStatus $RUNTIME_AREA/$repo\n'
825 <            txt += '   rm -f $RUNTIME_AREA/$repo \n'
826 <            txt += '   echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
827 <            txt += '   echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
828 <            txt += '   echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
829 <            ### OLI_DANIELE
1140 >            txt += 'ls_result=$?\n'
1141 >            txt += 'if [ $ls_result -ne 0 ] ; then\n'
1142 >            txt += '   exit_status=60302\n'
1143 >            txt += '   echo "ERROR: Problem with output file"\n'
1144              if common.scheduler.boss_scheduler_name == 'condor_g':
1145                  txt += '    if [ $middleware == OSG ]; then \n'
1146                  txt += '        echo "prepare dummy output file"\n'
1147                  txt += '        echo "Processing of job output failed" > $RUNTIME_AREA/'+output_file_num+'\n'
1148                  txt += '    fi \n'
1149              txt += 'else\n'
1150 <            txt += '   cp '+fileWithSuffix+' $RUNTIME_AREA/'+output_file_num+'\n'
1150 >            ### FEDE FOR DBS OUTPUT PUBLICATION
1151 >            txt += '   mv '+fileWithSuffix+' $RUNTIME_AREA\n'
1152 >            txt += '   cp $RUNTIME_AREA/'+fileWithSuffix+' $RUNTIME_AREA/'+output_file_num+'\n'
1153 >            #################################
1154              txt += 'fi\n'
1155        
1156          txt += 'cd $RUNTIME_AREA\n'
840        file_list=file_list[:-1]
841        txt += 'file_list="'+file_list+'"\n'
1157          ### OLI_DANIELE
1158          txt += 'if [ $middleware == OSG ]; then\n'  
1159          txt += '    cd $RUNTIME_AREA\n'
1160          txt += '    echo "Remove working directory: $WORKING_DIR"\n'
1161          txt += '    /bin/rm -rf $WORKING_DIR\n'
1162          txt += '    if [ -d $WORKING_DIR ] ;then\n'
1163 <        txt += '        echo "SET_EXE 60999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after cleanup of WN"\n'
1164 <        txt += '        echo "JOB_EXIT_STATUS = 60999"\n'
1165 <        txt += '        echo "JobExitCode=60999" | tee -a $RUNTIME_AREA/$repo\n'
1166 <        txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
1163 >        txt += '    echo "SET_EXE 60999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after cleanup of WN"\n'
1164 >        txt += '    echo "JOB_EXIT_STATUS = 60999"\n'
1165 >        txt += '    echo "JobExitCode=60999" | tee -a $RUNTIME_AREA/$repo\n'
1166 >        txt += '    dumpStatus $RUNTIME_AREA/$repo\n'
1167          txt += '        rm -f $RUNTIME_AREA/$repo \n'
1168          txt += '        echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
854        txt += '        echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1169          txt += '        echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1170          txt += '    fi\n'
1171          txt += 'fi\n'
1172          txt += '\n'
1173 +
1174 +        file_list = ''
1175 +        ## Add to filelist only files to be possibly copied to SE
1176 +        for fileWithSuffix in self.output_file:
1177 +            output_file_num = self.numberFile_(fileWithSuffix, '$NJob')
1178 +            file_list=file_list+output_file_num+' '
1179 +        file_list=file_list[:-1]
1180 +        txt += 'file_list="'+file_list+'"\n'
1181 +
1182          return txt
1183  
1184      def numberFile_(self, file, txt):
# Line 866 | Line 1189 | class Cmssw(JobType):
1189          # take away last extension
1190          name = p[0]
1191          for x in p[1:-1]:
1192 <           name=name+"."+x
1192 >            name=name+"."+x
1193          # add "_txt"
1194          if len(p)>1:
1195 <          ext = p[len(p)-1]
1196 <          #result = name + '_' + str(txt) + "." + ext
874 <          result = name + '_' + txt + "." + ext
1195 >            ext = p[len(p)-1]
1196 >            result = name + '_' + txt + "." + ext
1197          else:
1198 <          #result = name + '_' + str(txt)
877 <          result = name + '_' + txt
1198 >            result = name + '_' + txt
1199          
1200          return result
1201  
1202 <    def getRequirements(self):
1202 >    def getRequirements(self, nj=[]):
1203          """
1204          return job requirements to add to jdl files
1205          """
1206          req = ''
1207 <        if common.analisys_common_info['sw_version']:
1207 >        if self.version:
1208              req='Member("VO-cms-' + \
1209 <                 common.analisys_common_info['sw_version'] + \
1209 >                 self.version + \
1210                   '", other.GlueHostApplicationSoftwareRunTimeEnvironment)'
1211 <        if common.analisys_common_info['sites']:
1212 <            if len(common.analisys_common_info['sites'])>0:
1213 <                req = req + ' && ('
1214 <                for i in range(len(common.analisys_common_info['sites'])):
1215 <                    req = req + 'other.GlueCEInfoHostName == "' \
1216 <                         + common.analisys_common_info['sites'][i] + '"'
1217 <                    if ( i < (int(len(common.analisys_common_info['sites']) - 1)) ):
897 <                        req = req + ' || '
898 <            req = req + ')'
899 <        #print "req = ", req
1211 >        # if self.executable_arch:
1212 >        #     req='Member("VO-cms-' + \
1213 >        #          self.executable_arch + \
1214 >        #          '", other.GlueHostApplicationSoftwareRunTimeEnvironment)'
1215 >
1216 >        req = req + ' && (other.GlueHostNetworkAdapterOutboundIP)'
1217 >
1218          return req
1219  
1220      def configFilename(self):
# Line 913 | Line 1231 | class Cmssw(JobType):
1231          txt += '   echo "### SETUP CMS OSG  ENVIRONMENT ###"\n'
1232          txt += '   if [ -f $GRID3_APP_DIR/cmssoft/cmsset_default.sh ] ;then\n'
1233          txt += '      # Use $GRID3_APP_DIR/cmssoft/cmsset_default.sh to setup cms software\n'
1234 +        txt += '       export SCRAM_ARCH='+self.executable_arch+'\n'
1235          txt += '       source $GRID3_APP_DIR/cmssoft/cmsset_default.sh '+self.version+'\n'
1236 <        txt += '   elif [ -f $OSG_APP/cmssoft/cmsset_default.sh ] ;then\n'
1237 <        txt += '      # Use $OSG_APP/cmssoft/cmsset_default.sh to setup cms software\n'
1238 <        txt += '       source $OSG_APP/cmssoft/cmsset_default.sh '+self.version+'\n'
1236 >        txt += '   elif [ -f $OSG_APP/cmssoft/cms/cmsset_default.sh ] ;then\n'
1237 >        txt += '      # Use $OSG_APP/cmssoft/cms/cmsset_default.sh to setup cms software\n'
1238 >        txt += '       export SCRAM_ARCH='+self.executable_arch+'\n'
1239 >        txt += '       source $OSG_APP/cmssoft/cms/cmsset_default.sh '+self.version+'\n'
1240          txt += '   else\n'
1241 <        txt += '       echo "SET_CMS_ENV 10020 ==> ERROR $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cmsset_default.sh file not found"\n'
1241 >        txt += '       echo "SET_CMS_ENV 10020 ==> ERROR $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cms/cmsset_default.sh file not found"\n'
1242          txt += '       echo "JOB_EXIT_STATUS = 10020"\n'
1243          txt += '       echo "JobExitCode=10020" | tee -a $RUNTIME_AREA/$repo\n'
1244          txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
1245          txt += '       rm -f $RUNTIME_AREA/$repo \n'
1246          txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
927        txt += '       echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1247          txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1248          txt += '       exit 1\n'
1249          txt += '\n'
# Line 932 | Line 1251 | class Cmssw(JobType):
1251          txt += '       cd $RUNTIME_AREA\n'
1252          txt += '       /bin/rm -rf $WORKING_DIR\n'
1253          txt += '       if [ -d $WORKING_DIR ] ;then\n'
1254 <        txt += '            echo "SET_CMS_ENV 10017 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cmsset_default.sh file not found"\n'
1255 <        txt += '            echo "JOB_EXIT_STATUS = 10017"\n'
1256 <        txt += '            echo "JobExitCode=10017" | tee -a $RUNTIME_AREA/$repo\n'
1257 <        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
1254 >        txt += '        echo "SET_CMS_ENV 10017 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cms/cmsset_default.sh file not found"\n'
1255 >        txt += '        echo "JOB_EXIT_STATUS = 10017"\n'
1256 >        txt += '        echo "JobExitCode=10017" | tee -a $RUNTIME_AREA/$repo\n'
1257 >        txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
1258          txt += '            rm -f $RUNTIME_AREA/$repo \n'
1259          txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
941        txt += '            echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1260          txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1261          txt += '       fi\n'
1262          txt += '\n'
# Line 965 | Line 1283 | class Cmssw(JobType):
1283          txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
1284          txt += '       rm -f $RUNTIME_AREA/$repo \n'
1285          txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
968        txt += '       echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1286          txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1287          txt += '       exit 1\n'
1288          txt += '   else\n'
# Line 977 | Line 1294 | class Cmssw(JobType):
1294          txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
1295          txt += '           rm -f $RUNTIME_AREA/$repo \n'
1296          txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
980        txt += '           echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1297          txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1298          txt += '           exit 1\n'
1299          txt += '       fi\n'
# Line 991 | Line 1307 | class Cmssw(JobType):
1307          txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
1308          txt += '           rm -f $RUNTIME_AREA/$repo \n'
1309          txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
994        txt += '           echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1310          txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1311          txt += '           exit 1\n'
1312          txt += '       fi\n'
1313          txt += '   fi\n'
1314          txt += '   \n'
1000        txt += '   string=`cat /etc/redhat-release`\n'
1001        txt += '   echo $string\n'
1002        txt += '   if [[ $string = *alhalla* ]]; then\n'
1003        txt += '       echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
1004        txt += '   elif [[ $string = *Enterprise* ]] || [[ $string = *cientific* ]]; then\n'
1005        txt += '       export SCRAM_ARCH=slc3_ia32_gcc323\n'
1006        txt += '       echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
1007        txt += '   else\n'
1008        txt += '       echo "SET_CMS_ENV 10033 ==> ERROR OS unknown, LCG environment not initialized"\n'
1009        txt += '       echo "JOB_EXIT_STATUS = 10033"\n'
1010        txt += '       echo "JobExitCode=10033" | tee -a $RUNTIME_AREA/$repo\n'
1011        txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
1012        txt += '       rm -f $RUNTIME_AREA/$repo \n'
1013        txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1014        txt += '       echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1015        txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1016        txt += '       exit 1\n'
1017        txt += '   fi\n'
1315          txt += '   echo "SET_CMS_ENV 0 ==> setup cms environment ok"\n'
1316          txt += '   echo "### END SETUP CMS LCG ENVIRONMENT ###"\n'
1317          return txt
1318  
1319 +    ### FEDE FOR DBS OUTPUT PUBLICATION
1320 +    def modifyReport(self, nj):
1321 +        """
1322 +        insert the part of the script that modifies the FrameworkJob Report
1323 +        """
1324 +        
1325 +        txt = ''
1326 +        txt += 'echo "Modify Job Report" \n'
1327 +        txt += 'chmod a+x $RUNTIME_AREA/'+self.version+'/ProdAgentApi/FwkJobRep/ModifyJobReport.py\n'
1328 +        txt += 'if [ -z "$SE" ]; then\n'
1329 +        txt += '    SE="" \n'
1330 +        txt += 'fi \n'
1331 +        txt += 'if [ -z "$SE_PATH" ]; then\n'
1332 +        txt += '    SE_PATH="" \n'
1333 +        txt += 'fi \n'
1334 +        txt += 'echo "SE = $SE"\n'
1335 +        txt += 'echo "SE_PATH = $SE_PATH"\n'
1336 +        txt += 'FOR_LFN=$DatasetPath/$MonitorID\n'
1337 +        txt += 'echo "FOR_LFN = $FOR_LFN"\n'
1338 +        txt += 'echo "CMSSW_VERSION = $CMSSW_VERSION"\n'
1339 +        txt += 'echo "$RUNTIME_AREA/'+self.version+'/ProdAgentApi/FwkJobRep/ModifyJobReport.py crab_fjr_$NJob.xml $NJob $FOR_LFN $PrimaryDataset $DataTier $ProcessedDataset $ApplicationFamily $executable $CMSSW_VERSION $PSETHASH $SE $SE_PATH"\n'
1340 +        txt += '$RUNTIME_AREA/'+self.version+'/ProdAgentApi/FwkJobRep/ModifyJobReport.py crab_fjr_$NJob.xml $NJob $FOR_LFN $PrimaryDataset $DataTier $ProcessedDataset $ApplicationFamily $executable $CMSSW_VERSION $PSETHASH $SE $SE_PATH\n'
1341 +        txt += 'modifyReport_result=$?\n'
1342 +        txt += 'echo modifyReport_result = $modifyReport_result\n'
1343 +        txt += 'if [ $modify_result -ne 0 ]; then\n'
1344 +        txt += '    exit_status=1\n'
1345 +        txt += '    echo "ERROR: Problem with ModifyJobReport"\n'
1346 +        txt += 'else\n'
1347 +        txt += '    mv NewFrameworkJobReport.xml crab_fjr_$NJob.xml\n'
1348 +        txt += 'fi\n'
1349 +        return txt
1350 +    #################
1351 +
1352      def setParam_(self, param, value):
1353          self._params[param] = value
1354  
# Line 1030 | Line 1360 | class Cmssw(JobType):
1360          
1361      def getTaskid(self):
1362          return self._taskId
1363 +
1364 + #######################################################################
1365 +    def uniquelist(self, old):
1366 +        """
1367 +        remove duplicates from a list
1368 +        """
1369 +        nd={}
1370 +        for e in old:
1371 +            nd[e]=0
1372 +        return nd.keys()

Diff Legend

Removed lines
+ Added lines
< Changed lines
> Changed lines