
Comparing COMP/CRAB/python/cms_cmssw.py (file contents):
Revision 1.7 by gutsche, Tue Jun 13 20:43:00 2006 UTC vs.
Revision 1.92 by gutsche, Tue Jun 19 16:21:49 2007 UTC

# Line 3 | Line 3 | from crab_logger import Logger
3   from crab_exceptions import *
4   from crab_util import *
5   import common
6 import PsetManipulator  
7
8 import DBSInfo_EDM
9 import DataDiscovery_EDM
10 import DataLocation_EDM
6   import Scram
7  
8 < import os, string, re
8 > import os, string, re, shutil, glob
9  
10   class Cmssw(JobType):
11 <    def __init__(self, cfg_params):
11 >    def __init__(self, cfg_params, ncjobs):
12          JobType.__init__(self, 'CMSSW')
13          common.logger.debug(3,'CMSSW::__init__')
14  
20        self.analisys_common_info = {}
15          # Marco.
16          self._params = {}
17          self.cfg_params = cfg_params
18  
19 +        try:
20 +            self.MaxTarBallSize = float(self.cfg_params['EDG.maxtarballsize'])
21 +        except KeyError:
22 +            self.MaxTarBallSize = 9.5
23 +
24 +        # number of jobs requested to be created; limits job splitting
25 +        self.ncjobs = ncjobs
26 +
27          log = common.logger
28          
29          self.scram = Scram.Scram(cfg_params)
28        scramArea = ''
30          self.additional_inbox_files = []
31          self.scriptExe = ''
32          self.executable = ''
33 +        self.executable_arch = self.scram.getArch()
34          self.tgz_name = 'default.tgz'
35 +        self.scriptName = 'CMSSW.sh'
36 +        self.pset = ''      #script use case Da
37 +        self.datasetPath = '' #script use case Da
38  
39 +        # set FJR file name
40 +        self.fjrFileName = 'crab_fjr.xml'
41  
42          self.version = self.scram.getSWVersion()
43 +        common.taskDB.setDict('codeVersion',self.version)
44          self.setParam_('application', self.version)
37        common.analisys_common_info['sw_version'] = self.version
38        ### FEDE
39        common.analisys_common_info['copy_input_data'] = 0
40        common.analisys_common_info['events_management'] = 1
45  
46          ### collect Data cards
47 +
48 +        ## get DBS mode
49          try:
50 <         #   self.owner = cfg_params['CMSSW.owner']
51 <         #   log.debug(6, "CMSSW::CMSSW(): owner = "+self.owner)
52 <         #   self.dataset = cfg_params['CMSSW.dataset']
53 <            self.datasetPath = cfg_params['CMSSW.datasetpath']
54 <            log.debug(6, "CMSSW::CMSSW(): datasetPath = "+self.datasetPath)
50 >            self.use_dbs_1 = int(self.cfg_params['CMSSW.use_dbs_1'])
51 >        except KeyError:
52 >            self.use_dbs_1 = 0
53 >            
54 >        try:
55 >            tmp =  cfg_params['CMSSW.datasetpath']
56 >            log.debug(6, "CMSSW::CMSSW(): datasetPath = "+tmp)
57 >            if string.lower(tmp)=='none':
58 >                self.datasetPath = None
59 >                self.selectNoInput = 1
60 >            else:
61 >                self.datasetPath = tmp
62 >                self.selectNoInput = 0
63          except KeyError:
50        #    msg = "Error: owner and/or dataset not defined "
64              msg = "Error: datasetpath not defined "  
65              raise CrabException(msg)
66  
67          # ML monitoring
68          # split dataset path style: /PreProdR3Minbias/SIM/GEN-SIM
69 <        datasetpath_split = self.datasetPath.split("/")
70 <        self.setParam_('dataset', datasetpath_split[1])
71 <        self.setParam_('owner', datasetpath_split[-1])
72 <
73 <
74 <
69 >        if not self.datasetPath:
70 >            self.setParam_('dataset', 'None')
71 >            self.setParam_('owner', 'None')
72 >        else:
73 >            try:
74 >                datasetpath_split = self.datasetPath.split("/")
75 >                # standard style
76 >                if self.use_dbs_1 == 1 :
77 >                    self.setParam_('dataset', datasetpath_split[1])
78 >                    self.setParam_('owner', datasetpath_split[-1])
79 >                else:
80 >                    self.setParam_('dataset', datasetpath_split[1])
81 >                    self.setParam_('owner', datasetpath_split[2])
82 >            except:
83 >                self.setParam_('dataset', self.datasetPath)
84 >                self.setParam_('owner', self.datasetPath)
85 >                
86 >        self.setTaskid_()
87 >        self.setParam_('taskId', self.cfg_params['taskId'])
88  
89          self.dataTiers = []
64 #       try:
65 #           tmpDataTiers = string.split(cfg_params['CMSSW.data_tier'],',')
66 #           for tmp in tmpDataTiers:
67 #               tmp=string.strip(tmp)
68 #               self.dataTiers.append(tmp)
69 #               pass
70 #           pass
71 #       except KeyError:
72 #           pass
73 #       log.debug(6, "Cmssw::Cmssw(): dataTiers = "+str(self.dataTiers))
90  
91          ## now the application
92          try:
# Line 89 | Line 105 | class Cmssw(JobType):
105          try:
106              self.pset = cfg_params['CMSSW.pset']
107              log.debug(6, "Cmssw::Cmssw(): PSet file = "+self.pset)
108 <            if (not os.path.exists(self.pset)):
109 <                raise CrabException("User defined PSet file "+self.pset+" does not exist")
108 >            if self.pset.lower() != 'none' :
109 >                if (not os.path.exists(self.pset)):
110 >                    raise CrabException("User defined PSet file "+self.pset+" does not exist")
111 >            else:
112 >                self.pset = None
113          except KeyError:
114              raise CrabException("PSet file missing. Cannot run cmsRun ")
115  
116          # output files
117 +        ## stuff which must be returned always via sandbox
118 +        self.output_file_sandbox = []
119 +
120 +        # add fjr report by default via sandbox
121 +        self.output_file_sandbox.append(self.fjrFileName)
122 +
123 +        # other output files to be returned via sandbox or copied to SE
124          try:
125              self.output_file = []
100
126              tmp = cfg_params['CMSSW.output_file']
127              if tmp != '':
128                  tmpOutFiles = string.split(cfg_params['CMSSW.output_file'],',')
# Line 107 | Line 132 | class Cmssw(JobType):
132                      self.output_file.append(tmp)
133                      pass
134              else:
135 <                log.message("No output file defined: only stdout/err will be available")
135 >                log.message("No output file defined: only stdout/err and the CRAB Framework Job Report will be available\n")
136                  pass
137              pass
138          except KeyError:
139 <            log.message("No output file defined: only stdout/err will be available")
139 >            log.message("No output file defined: only stdout/err and the CRAB Framework Job Report will be available\n")
140              pass
141  
142          # script_exe file as additional file in inputSandbox
143          try:
144 <           self.scriptExe = cfg_params['USER.script_exe']
145 <           self.additional_inbox_files.append(self.scriptExe)
144 >            self.scriptExe = cfg_params['USER.script_exe']
145 >            if self.scriptExe != '':
146 >               if not os.path.isfile(self.scriptExe):
147 >                  msg ="ERROR. file "+self.scriptExe+" not found"
148 >                  raise CrabException(msg)
149 >               self.additional_inbox_files.append(string.strip(self.scriptExe))
150          except KeyError:
151 <           pass
152 <        if self.scriptExe != '':
153 <           if os.path.isfile(self.scriptExe):
154 <              pass
155 <           else:
156 <              log.message("WARNING. file "+self.scriptExe+" not found")
157 <              sys.exit()
129 <                  
151 >            self.scriptExe = ''
152 >
153 >        #CarlosDaniele
154 >        if self.datasetPath == None and self.pset == None and self.scriptExe == '' :
155 >           msg = "Error: script_exe not defined"
156 >           raise CrabException(msg)
157 >
158          ## additional input files
159          try:
160 <            tmpAddFiles = string.split(cfg_params['CMSSW.additional_input_files'],',')
160 >            tmpAddFiles = string.split(cfg_params['USER.additional_input_files'],',')
161              for tmp in tmpAddFiles:
162 <                if not os.path.exists(tmp):
163 <                    raise CrabException("Additional input file not found: "+tmp)
164 <                tmp=string.strip(tmp)
165 <                self.additional_inbox_files.append(tmp)
162 >                tmp = string.strip(tmp)
163 >                dirname = ''
164 >                if not tmp[0]=="/": dirname = "."
165 >                files = []
166 >                if string.find(tmp,"*")>-1:
167 >                    files = glob.glob(os.path.join(dirname, tmp))
168 >                    if len(files)==0:
169 >                        raise CrabException("No additional input file found with this pattern: "+tmp)
170 >                else:
171 >                    files.append(tmp)
172 >                for file in files:
173 >                    if not os.path.exists(file):
174 >                        raise CrabException("Additional input file not found: "+file)
175 >                    pass
176 >                    fname = string.split(file, '/')[-1]
177 >                    storedFile = common.work_space.pathForTgz()+'share/'+fname
178 >                    shutil.copyfile(file, storedFile)
179 >                    self.additional_inbox_files.append(string.strip(storedFile))
180                  pass
181              pass
182 +            common.logger.debug(5,"Additional input files: "+str(self.additional_inbox_files))
183          except KeyError:
184              pass
185  
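
The new additional-input handling above expands wildcard patterns from USER.additional_input_files with glob and copies every match into the task's share directory before adding it to the input sandbox. A minimal standalone sketch of the same idea (Python 3, hypothetical helper and paths, not CRAB code):

import glob, os, shutil

def stage_additional_files(patterns, share_dir):
    """Expand wildcard patterns and copy every match into share_dir."""
    staged = []
    os.makedirs(share_dir, exist_ok=True)
    for pattern in patterns:
        pattern = pattern.strip()
        matches = glob.glob(pattern) if "*" in pattern else [pattern]
        if not matches:
            raise RuntimeError("No additional input file found with this pattern: " + pattern)
        for path in matches:
            if not os.path.exists(path):
                raise RuntimeError("Additional input file not found: " + path)
            target = os.path.join(share_dir, os.path.basename(path))
            shutil.copyfile(path, target)
            staged.append(target)
    return staged
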
186 +        # files per job
187          try:
188 <            self.filesPerJob = int(cfg_params['CMSSW.files_per_jobs']) #Daniele
188 >            if (cfg_params['CMSSW.files_per_jobs']):
189 >                raise CrabException("files_per_jobs no longer supported.  Quitting.")
190          except KeyError:
191 <            self.filesPerJob = 1
191 >            pass
192  
193 <        ## Max event   will be total_number_of_events ???  Daniele
193 >        ## Events per job
194 >        try:
195 >            self.eventsPerJob =int( cfg_params['CMSSW.events_per_job'])
196 >            self.selectEventsPerJob = 1
197 >        except KeyError:
198 >            self.eventsPerJob = -1
199 >            self.selectEventsPerJob = 0
200 >    
201 >        ## number of jobs
202          try:
203 <            self.maxEv = cfg_params['CMSSW.event_per_job']
203 >            self.theNumberOfJobs =int( cfg_params['CMSSW.number_of_jobs'])
204 >            self.selectNumberOfJobs = 1
205          except KeyError:
206 <            self.maxEv = "-1"
207 <        ##  
206 >            self.theNumberOfJobs = 0
207 >            self.selectNumberOfJobs = 0
208 >
209          try:
210              self.total_number_of_events = int(cfg_params['CMSSW.total_number_of_events'])
211 +            self.selectTotalNumberEvents = 1
212          except KeyError:
213 <            msg = 'Must define total_number_of_events'
214 <            raise CrabException(msg)
215 <        
216 <        CEBlackList = []
213 >            self.total_number_of_events = 0
214 >            self.selectTotalNumberEvents = 0
215 >
216 >        if self.pset != None: #CarlosDaniele
217 >             if ( (self.selectTotalNumberEvents + self.selectEventsPerJob + self.selectNumberOfJobs) != 2 ):
218 >                 msg = 'Must define exactly two of total_number_of_events, events_per_job, or number_of_jobs.'
219 >                 raise CrabException(msg)
220 >        else:
221 >             if (self.selectNumberOfJobs == 0):
222 >                 msg = 'Must specify number_of_jobs.'
223 >                 raise CrabException(msg)
224 >
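
With a parameter set, the three select* flags must sum to exactly two, so that the missing one of total_number_of_events, events_per_job and number_of_jobs can be derived during splitting. A minimal sketch of that consistency check and derivation (hypothetical helper, not part of CRAB):

def check_splitting(total_events=None, events_per_job=None, number_of_jobs=None):
    """Require exactly two of the three splitting parameters and derive the third."""
    given = [total_events is not None, events_per_job is not None, number_of_jobs is not None]
    if sum(given) != 2:
        raise ValueError("Must define exactly two of total_number_of_events, "
                         "events_per_job, or number_of_jobs.")
    if total_events is None:
        total_events = number_of_jobs * events_per_job
    elif events_per_job is None:
        events_per_job = total_events // number_of_jobs
    else:
        number_of_jobs = total_events // events_per_job
    return total_events, events_per_job, number_of_jobs

# e.g. check_splitting(total_events=1000, number_of_jobs=10) -> (1000, 100, 10)
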
225 >        ## source seed for pythia
226          try:
227 <            tmpBad = string.split(cfg_params['EDG.ce_black_list'],',')
163 <            for tmp in tmpBad:
164 <                tmp=string.strip(tmp)
165 <                CEBlackList.append(tmp)
227 >            self.sourceSeed = int(cfg_params['CMSSW.pythia_seed'])
228          except KeyError:
229 <            pass
229 >            self.sourceSeed = None
230 >            common.logger.debug(5,"No seed given")
231  
232 <        self.reCEBlackList=[]
233 <        for bad in CEBlackList:
171 <            self.reCEBlackList.append(re.compile( bad ))
172 <
173 <        common.logger.debug(5,'CEBlackList: '+str(CEBlackList))
174 <
175 <        CEWhiteList = []
176 <        try:
177 <            tmpGood = string.split(cfg_params['EDG.ce_white_list'],',')
178 <            for tmp in tmpGood:
179 <                tmp=string.strip(tmp)
180 <                CEWhiteList.append(tmp)
232 >        try:
233 >            self.sourceSeedVtx = int(cfg_params['CMSSW.vtx_seed'])
234          except KeyError:
235 <            pass
235 >            self.sourceSeedVtx = None
236 >            common.logger.debug(5,"No vertex seed given")
237  
238 <        #print 'CEWhiteList: ',CEWhiteList
239 <        self.reCEWhiteList=[]
240 <        for Good in CEWhiteList:
241 <            self.reCEWhiteList.append(re.compile( Good ))
238 >        try:
239 >            self.sourceSeedG4 = int(cfg_params['CMSSW.g4_seed'])
240 >        except KeyError:
241 >            self.sourceSeedG4 = None
242 >            common.logger.debug(5,"No g4 sim hits seed given")
243  
244 <        common.logger.debug(5,'CEWhiteList: '+str(CEWhiteList))
244 >        try:
245 >            self.sourceSeedMix = int(cfg_params['CMSSW.mix_seed'])
246 >        except KeyError:
247 >            self.sourceSeedMix = None
248 >            common.logger.debug(5,"No mix seed given")
249  
250 <        self.PsetEdit = PsetManipulator.PsetManipulator(self.pset) #Daniele Pset
250 >        try:
251 >            self.firstRun = int(cfg_params['CMSSW.first_run'])
252 >        except KeyError:
253 >            self.firstRun = None
254 >            common.logger.debug(5,"No first run given")
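
Each of the optional seeds and first_run above follows the same read-an-int-or-fall-back-to-None pattern from cfg_params. A tiny helper expressing that pattern (illustrative only; cfg_params is assumed to behave like a plain dict):

def optional_int(cfg_params, key, logger=None):
    """Return int(cfg_params[key]) if the key is set, otherwise None."""
    try:
        return int(cfg_params[key])
    except KeyError:
        if logger:
            logger.debug(5, "No value given for " + key)
        return None
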
255 >        if self.pset != None: #CarlosDaniele
256 >            import PsetManipulator  
257 >            PsetEdit = PsetManipulator.PsetManipulator(self.pset) #Daniele Pset
258  
259          #DBSDLS-start
260          ## Initialize the variables that are extracted from DBS/DLS and needed in other places of the code
261          self.maxEvents=0  # max events available   ( --> check the requested nb. of evts in Creator.py)
262          self.DBSPaths={}  # all dbs paths requested ( --> input to the site local discovery script)
263 +        self.jobDestination=[]  # Site destination(s) for each job (list of lists)
264          ## Perform the data location and discovery (based on DBS/DLS)
265 <        self.DataDiscoveryAndLocation(cfg_params)
265 >        ## SL: Don't if NONE is specified as input (pythia use case)
266 >        blockSites = {}
267 >        if self.datasetPath:
268 >            blockSites = self.DataDiscoveryAndLocation(cfg_params)
269          #DBSDLS-end          
270  
271          self.tgzNameWithPath = self.getTarBall(self.executable)
272 +    
273 +        ## Select Splitting
274 +        if self.selectNoInput:
275 +            if self.pset == None: #CarlosDaniele
276 +                self.jobSplittingForScript()
277 +            else:
278 +                self.jobSplittingNoInput()
279 +        else:
280 +            self.jobSplittingByBlocks(blockSites)
281  
282 <        self.jobSplitting()  #Daniele job Splitting
283 <        self.PsetEdit.maxEvent(self.maxEv) #Daniele  
284 <        self.PsetEdit.inputModule("INPUT") #Daniele  
285 <        self.PsetEdit.psetWriter(self.configFilename())
286 <        
287 <
282 >        # modify Pset
283 >        if self.pset != None: #CarlosDaniele
284 >            try:
285 >                if (self.datasetPath): # standard job
286 >                    # allow processing a fraction of events in a file
287 >                    PsetEdit.inputModule("INPUT")
288 >                    PsetEdit.maxEvent("INPUTMAXEVENTS")
289 >                    PsetEdit.skipEvent("INPUTSKIPEVENTS")
290 >                else:  # pythia like job
291 >                    PsetEdit.maxEvent(self.eventsPerJob)
292 >                    if (self.firstRun):
293 >                        PsetEdit.pythiaFirstRun("INPUTFIRSTRUN")  #First Run
294 >                    if (self.sourceSeed) :
295 >                        PsetEdit.pythiaSeed("INPUT")
296 >                        if (self.sourceSeedVtx) :
297 >                            PsetEdit.vtxSeed("INPUTVTX")
298 >                        if (self.sourceSeedG4) :
299 >                            PsetEdit.g4Seed("INPUTG4")
300 >                        if (self.sourceSeedMix) :
301 >                            PsetEdit.mixSeed("INPUTMIX")
302 >                # add FrameworkJobReport to parameter-set
303 >                PsetEdit.addCrabFJR(self.fjrFileName)
304 >                PsetEdit.psetWriter(self.configFilename())
305 >            except:
306 >                msg='Error while manipulating ParameterSet: exiting...'
307 >                raise CrabException(msg)
308  
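
The parameter set is written once with placeholders (INPUT, INPUTMAXEVENTS, INPUTSKIPEVENTS and the seed tokens); each job substitutes its own values at runtime, which the generated shell wrapper later in this file does with sed. A rough Python equivalent of that per-job substitution (illustrative only; the exact placeholder spelling in the real pset differs):

def specialize_pset(template_text, input_files, max_events, skip_events):
    """Replace the splitting placeholders in a pset template for one job."""
    file_list = ", ".join('"%s"' % f for f in input_files)
    replacements = {
        "INPUT": "{" + file_list + "}",
        "INPUTMAXEVENTS": str(max_events),
        "INPUTSKIPEVENTS": str(skip_events),
    }
    text = template_text
    # Replace the longest tokens first so INPUT does not clobber INPUTMAXEVENTS
    for token in sorted(replacements, key=len, reverse=True):
        text = text.replace(token, replacements[token])
    return text
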
309      def DataDiscoveryAndLocation(self, cfg_params):
310  
311 +        import DataDiscovery
312 +        import DataDiscovery_DBS2
313 +        import DataLocation
314          common.logger.debug(10,"CMSSW::DataDiscoveryAndLocation()")
315  
214        #datasetPath = "/"+self.owner+"/"+self.dataTiers[0]+"/"+self.dataset
215        
316          datasetPath=self.datasetPath
317  
218        ## TODO
219        dataTiersList = ""
220        dataTiers = dataTiersList.split(',')
221
318          ## Contact the DBS
319 +        common.logger.message("Contacting Data Discovery Services ...")
320          try:
321 <            self.pubdata=DataDiscovery_EDM.DataDiscovery_EDM(datasetPath, dataTiers, cfg_params)
321 >
322 >            if self.use_dbs_1 == 1 :
323 >                self.pubdata=DataDiscovery.DataDiscovery(datasetPath, cfg_params)
324 >            else :
325 >                self.pubdata=DataDiscovery_DBS2.DataDiscovery_DBS2(datasetPath, cfg_params)
326              self.pubdata.fetchDBSInfo()
327  
328 <        except DataDiscovery_EDM.NotExistingDatasetError, ex :
328 >        except DataDiscovery.NotExistingDatasetError, ex :
329              msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
330              raise CrabException(msg)
331 <
332 <        except DataDiscovery_EDM.NoDataTierinProvenanceError, ex :
331 >        except DataDiscovery.NoDataTierinProvenanceError, ex :
332 >            msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
333 >            raise CrabException(msg)
334 >        except DataDiscovery.DataDiscoveryError, ex:
335 >            msg = 'ERROR ***: failed Data Discovery in DBS :  %s'%ex.getErrorMessage()
336 >            raise CrabException(msg)
337 >        except DataDiscovery_DBS2.NotExistingDatasetError_DBS2, ex :
338 >            msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
339 >            raise CrabException(msg)
340 >        except DataDiscovery_DBS2.NoDataTierinProvenanceError_DBS2, ex :
341              msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
342              raise CrabException(msg)
343 <        except DataDiscovery_EDM.DataDiscoveryError, ex:
344 <            msg = 'ERROR ***: failed Data Discovery in DBS  %s'%ex.getErrorMessage()
343 >        except DataDiscovery_DBS2.DataDiscoveryError_DBS2, ex:
344 >            msg = 'ERROR ***: failed Data Discovery in DBS :  %s'%ex.getErrorMessage()
345              raise CrabException(msg)
346  
347 <        ## get list of all required data in the form of dbs paths  (dbs path = /dataset/datatier/owner)
348 <        ## self.DBSPaths=self.pubdata.getDBSPaths()
349 <        common.logger.message("Required data are :"+self.datasetPath)
241 <
242 <        filesbyblock=self.pubdata.getFiles()
243 <        self.AllInputFiles=filesbyblock.values()
244 <        self.files = self.AllInputFiles        
245 <
246 <        ## TEMP
247 <    #    self.filesTmp = filesbyblock.values()
248 <    #    self.files = []
249 <    #    locPath='rfio:cmsbose2.bo.infn.it:/flatfiles/SE00/cms/fanfani/ProdTest/'
250 <    #    locPath=''
251 <    #    tmp = []
252 <    #    for file in self.filesTmp[0]:
253 <    #        tmp.append(locPath+file)
254 <    #    self.files.append(tmp)
255 <        ## END TEMP
347 >        self.filesbyblock=self.pubdata.getFiles()
348 >        self.eventsbyblock=self.pubdata.getEventsPerBlock()
349 >        self.eventsbyfile=self.pubdata.getEventsPerFile()
350  
351          ## get max number of events
258        #common.logger.debug(10,"number of events for primary fileblocks %i"%self.pubdata.getMaxEvents())
352          self.maxEvents=self.pubdata.getMaxEvents() ##  self.maxEvents used in Creator.py
260        common.logger.message("\nThe number of available events is %s"%self.maxEvents)
353  
354          ## Contact the DLS and build a list of sites hosting the fileblocks
355          try:
356 <            dataloc=DataLocation_EDM.DataLocation_EDM(filesbyblock.keys(),cfg_params)
356 >            dataloc=DataLocation.DataLocation(self.filesbyblock.keys(),cfg_params)
357              dataloc.fetchDLSInfo()
358 <        except DataLocation_EDM.DataLocationError , ex:
358 >        except DataLocation.DataLocationError , ex:
359              msg = 'ERROR ***: failed Data Location in DLS \n %s '%ex.getErrorMessage()
360              raise CrabException(msg)
361          
270        allsites=dataloc.getSites()
271        common.logger.debug(5,"sites are %s"%allsites)
272        sites=self.checkBlackList(allsites)
273        common.logger.debug(5,"sites are (after black list) %s"%sites)
274        sites=self.checkWhiteList(sites)
275        common.logger.debug(5,"sites are (after white list) %s"%sites)
362  
363 <        if len(sites)==0:
364 <            msg = 'No sites hosting all the needed data! Exiting... '
365 <            raise CrabException(msg)
363 >        sites = dataloc.getSites()
364 >        allSites = []
365 >        listSites = sites.values()
366 >        for listSite in listSites:
367 >            for oneSite in listSite:
368 >                allSites.append(oneSite)
369 >        allSites = self.uniquelist(allSites)
370  
371 <        common.logger.message("List of Sites hosting the data : "+str(sites))
372 <        common.logger.debug(6, "List of Sites: "+str(sites))
373 <        common.analisys_common_info['sites']=sites    ## used in SchedulerEdg.py in createSchScript
374 <        self.setParam_('TargetCE', ','.join(sites))
285 <        return
371 >        # screen output
372 >        common.logger.message("Requested dataset: " + datasetPath + " has " + str(self.maxEvents) + " events in " + str(len(self.filesbyblock.keys())) + " blocks.\n")
373 >
374 >        return sites
375      
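
DataDiscoveryAndLocation now returns a dictionary keyed by fileblock whose values are lists of hosting sites; the loop above flattens those lists into a de-duplicated site list. The same flattening as a compact standalone sketch (hypothetical site names):

def unique_sites(block_sites):
    """Flatten a {block: [site, ...]} mapping into a de-duplicated site list."""
    seen = []
    for sites in block_sites.values():
        for site in sites:
            if site not in seen:
                seen.append(site)
    return seen

# e.g. unique_sites({"blockA": ["T2_IT_Bari", "T2_DE_DESY"], "blockB": ["T2_DE_DESY"]})
# -> ['T2_IT_Bari', 'T2_DE_DESY']
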
376 <    def jobSplitting(self):
376 >    def jobSplittingByBlocks(self, blockSites):
377          """
378 <        first implemntation for job splitting  
379 <        """    
380 <      #  print 'eventi totali '+str(self.maxEvents)
381 <      #  print 'eventi totali richiesti dallo user '+str(self.total_number_of_events)
382 <        #print 'files per job '+str(self.filesPerJob)
383 <        common.logger.message('Required '+str(self.filesPerJob)+' files per job ')
384 <        common.logger.message('Required '+str(self.total_number_of_events)+' events in total ')
385 <
386 <        ## TODO: SL need to have (from DBS) a detailed list of how many events per each file
387 <        n_tot_files = (len(self.files[0]))
388 <        ## SL: this is wrong if the files have different number of events
389 <        evPerFile = int(self.maxEvents)/n_tot_files
390 <        
391 <        common.logger.debug(5,'Events per File '+str(evPerFile))
392 <
393 <        ## if asked to process all events, do it
394 <        if self.total_number_of_events == -1:
395 <            self.total_number_of_events=self.maxEvents
396 <            self.total_number_of_jobs = int(n_tot_files)*1/int(self.filesPerJob)
397 <            common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for all available events '+str(self.total_number_of_events)+' events')
398 <        
378 >        Perform job splitting. Jobs run over an integer number of files
379 >        and no more than one block.
380 >        ARGUMENT: blockSites: dictionary with blocks as keys and list of host sites as values
381 >        REQUIRES: self.selectTotalNumberEvents, self.selectEventsPerJob, self.selectNumberOfJobs,
382 >                  self.total_number_of_events, self.eventsPerJob, self.theNumberOfJobs,
383 >                  self.maxEvents, self.filesbyblock
384 >        SETS: self.jobDestination - Site destination(s) for each job (a list of lists)
385 >              self.total_number_of_jobs - Total # of jobs
386 >              self.list_of_args - File(s) job will run on (a list of lists)
387 >        """
388 >
389 >        # ---- Handle the possible job splitting configurations ---- #
390 >        if (self.selectTotalNumberEvents):
391 >            totalEventsRequested = self.total_number_of_events
392 >        if (self.selectEventsPerJob):
393 >            eventsPerJobRequested = self.eventsPerJob
394 >            if (self.selectNumberOfJobs):
395 >                totalEventsRequested = self.theNumberOfJobs * self.eventsPerJob
396 >
397 >        # If user requested all the events in the dataset
398 >        if (totalEventsRequested == -1):
399 >            eventsRemaining=self.maxEvents
400 >        # If user requested more events than are in the dataset
401 >        elif (totalEventsRequested > self.maxEvents):
402 >            eventsRemaining = self.maxEvents
403 >            common.logger.message("Requested "+str(self.total_number_of_events)+ " events, but only "+str(self.maxEvents)+" events are available.")
404 >        # If user requested less events than are in the dataset
405          else:
406 <            self.total_number_of_files = int(self.total_number_of_events/evPerFile)
407 <            ## SL: if ask for less event than what is computed to be available on a
408 <            ##     file, process the first file anyhow.
409 <            if self.total_number_of_files == 0:
410 <                self.total_number_of_files = self.total_number_of_files + 1
406 >            eventsRemaining = totalEventsRequested
407 >
408 >        # If user requested more events per job than are in the dataset
409 >        if (self.selectEventsPerJob and eventsPerJobRequested > self.maxEvents):
410 >            eventsPerJobRequested = self.maxEvents
411 >
412 >        # For user info at end
413 >        totalEventCount = 0
414 >
415 >        if (self.selectTotalNumberEvents and self.selectNumberOfJobs):
416 >            eventsPerJobRequested = int(eventsRemaining/self.theNumberOfJobs)
417  
418 <            common.logger.debug(5,'N files  '+str(self.total_number_of_files))
418 >        if (self.selectNumberOfJobs):
419 >            common.logger.message("May not create the exact number_of_jobs requested.")
420  
421 <            check = 0
421 >        if ( self.ncjobs == 'all' ) :
422 >            totalNumberOfJobs = 999999999
423 >        else :
424 >            totalNumberOfJobs = self.ncjobs
425              
321            ## Compute the number of jobs
322            #self.total_number_of_jobs = int(n_tot_files)*1/int(self.filesPerJob)
323            self.total_number_of_jobs = int(self.total_number_of_files/self.filesPerJob)
324            common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
426  
427 <            ## is there any remainder?
428 <            check = int(self.total_number_of_files) - (int(self.total_number_of_jobs)*self.filesPerJob)
427 >        blocks = blockSites.keys()
428 >        blockCount = 0
429 >        # Backup variable in case self.maxEvents counted events in a non-included block
430 >        numBlocksInDataset = len(blocks)
431 >
432 >        jobCount = 0
433 >        list_of_lists = []
434 >
435 >        # dictionary tracking which jobs belong to which block
436 >        jobsOfBlock = {}
437 >
438 >        # ---- Iterate over the blocks in the dataset until ---- #
439 >        # ---- we've met the requested total # of events    ---- #
440 >        while ( (eventsRemaining > 0) and (blockCount < numBlocksInDataset) and (jobCount < totalNumberOfJobs)):
441 >            block = blocks[blockCount]
442 >            blockCount += 1
443 >            
444 >            if self.eventsbyblock.has_key(block) :
445 >                numEventsInBlock = self.eventsbyblock[block]
446 >                common.logger.debug(5,'Events in Block File '+str(numEventsInBlock))
447 >            
448 >                files = self.filesbyblock[block]
449 >                numFilesInBlock = len(files)
450 >                if (numFilesInBlock <= 0):
451 >                    continue
452 >                fileCount = 0
453 >
454 >                # ---- New block => New job ---- #
455 >                parString = "\\{"
456 >                # counter for number of events in files currently worked on
457 >                filesEventCount = 0
458 >                # flag if next while loop should touch new file
459 >                newFile = 1
460 >                # events to skip at the start of this job (in its first file)
461 >                jobSkipEventCount = 0
462 >            
463 >                # ---- Iterate over the files in the block until we've met the requested ---- #
464 >                # ---- total # of events or we've gone over all the files in this block  ---- #
465 >                while ( (eventsRemaining > 0) and (fileCount < numFilesInBlock) and (jobCount < totalNumberOfJobs) ):
466 >                    file = files[fileCount]
467 >                    if newFile :
468 >                        try:
469 >                            numEventsInFile = self.eventsbyfile[file]
470 >                            common.logger.debug(6, "File "+str(file)+" has "+str(numEventsInFile)+" events")
471 >                            # increase filesEventCount
472 >                            filesEventCount += numEventsInFile
473 >                            # Add file to current job
474 >                            parString += '\\\"' + file + '\\\"\,'
475 >                            newFile = 0
476 >                        except KeyError:
477 >                            common.logger.message("File "+str(file)+" has unknown number of events: skipping")
478 >                        
479 >
480 >                    # if less events in file remain than eventsPerJobRequested
481 >                    if ( filesEventCount - jobSkipEventCount < eventsPerJobRequested ) :
482 >                        # if last file in block
483 >                        if ( fileCount == numFilesInBlock-1 ) :
484 >                            # end job using last file, use remaining events in block
485 >                            # close job and touch new file
486 >                            fullString = parString[:-2]
487 >                            fullString += '\\}'
488 >                            list_of_lists.append([fullString,str(-1),str(jobSkipEventCount)])
489 >                            common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(filesEventCount - jobSkipEventCount)+" events (last file in block).")
490 >                            self.jobDestination.append(blockSites[block])
491 >                            common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
492 >                            # fill jobs of block dictionary
493 >                            if block in jobsOfBlock.keys() :
494 >                                jobsOfBlock[block].append(jobCount+1)
495 >                            else:
496 >                                jobsOfBlock[block] = [jobCount+1]
497 >                            # reset counter
498 >                            jobCount = jobCount + 1
499 >                            totalEventCount = totalEventCount + filesEventCount - jobSkipEventCount
500 >                            eventsRemaining = eventsRemaining - filesEventCount + jobSkipEventCount
501 >                            jobSkipEventCount = 0
502 >                            # reset file
503 >                            parString = "\\{"
504 >                            filesEventCount = 0
505 >                            newFile = 1
506 >                            fileCount += 1
507 >                        else :
508 >                            # go to next file
509 >                            newFile = 1
510 >                            fileCount += 1
511 >                    # if events in file equal to eventsPerJobRequested
512 >                    elif ( filesEventCount - jobSkipEventCount == eventsPerJobRequested ) :
513 >                        # close job and touch new file
514 >                        fullString = parString[:-2]
515 >                        fullString += '\\}'
516 >                        list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
517 >                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
518 >                        self.jobDestination.append(blockSites[block])
519 >                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
520 >                        if block in jobsOfBlock.keys() :
521 >                            jobsOfBlock[block].append(jobCount+1)
522 >                        else:
523 >                            jobsOfBlock[block] = [jobCount+1]
524 >                        # reset counter
525 >                        jobCount = jobCount + 1
526 >                        totalEventCount = totalEventCount + eventsPerJobRequested
527 >                        eventsRemaining = eventsRemaining - eventsPerJobRequested
528 >                        jobSkipEventCount = 0
529 >                        # reset file
530 >                        parString = "\\{"
531 >                        filesEventCount = 0
532 >                        newFile = 1
533 >                        fileCount += 1
534 >                        
535 >                    # if more events in file remain than eventsPerJobRequested
536 >                    else :
537 >                        # close job but don't touch new file
538 >                        fullString = parString[:-2]
539 >                        fullString += '\\}'
540 >                        list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
541 >                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
542 >                        self.jobDestination.append(blockSites[block])
543 >                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
544 >                        if block in jobsOfBlock.keys() :
545 >                            jobsOfBlock[block].append(jobCount+1)
546 >                        else:
547 >                            jobsOfBlock[block] = [jobCount+1]
548 >                        # increase counter
549 >                        jobCount = jobCount + 1
550 >                        totalEventCount = totalEventCount + eventsPerJobRequested
551 >                        eventsRemaining = eventsRemaining - eventsPerJobRequested
552 >                        # calculate skip events for last file
553 >                        # use filesEventCount (contains several files), jobSkipEventCount and eventsPerJobRequested
554 >                        jobSkipEventCount = eventsPerJobRequested - (filesEventCount - jobSkipEventCount - self.eventsbyfile[file])
555 >                        # remove all but the last file
556 >                        filesEventCount = self.eventsbyfile[file]
557 >                        parString = "\\{"
558 >                        parString += '\\\"' + file + '\\\"\,'
559 >                    pass # END if
560 >                pass # END while (iterate over files in the block)
561 >        pass # END while (iterate over blocks in the dataset)
562 >        self.ncjobs = self.total_number_of_jobs = jobCount
563 >        if (eventsRemaining > 0 and jobCount < totalNumberOfJobs ):
564 >            common.logger.message("Could not run on all requested events because some blocks are not hosted at allowed sites.")
565 >        common.logger.message(str(jobCount)+" job(s) can run on "+str(totalEventCount)+" events.\n")
566 >        
567 >        # screen output
568 >        screenOutput = "List of jobs and available destination sites:\n\n"
569 >
570 >        blockCounter = 0
571 >        for block in jobsOfBlock.keys():
572 >            blockCounter += 1
573 >            screenOutput += "Block %5i: jobs %20s: sites: %s\n" % (blockCounter,spanRanges(jobsOfBlock[block]),','.join(blockSites[block]))
574 >
575 >        common.logger.message(screenOutput)
576 >
577 >        self.list_of_args = list_of_lists
578 >        return
579 >
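
jobSplittingByBlocks packs whole files into jobs of roughly eventsPerJobRequested events, never crossing a block boundary, and carries a skip-event offset when a file has to be shared between consecutive jobs. A much-reduced sketch of that packing for a single block (no escaping of the file list, no site bookkeeping, and the "all remaining events" case returns the actual count instead of -1; assumes events_by_file covers every file):

def split_block(files, events_by_file, events_per_job):
    """One block -> list of (files, max_events, skip_events) jobs; a file may be
    shared between consecutive jobs via the skip-event offset."""
    jobs = []
    cur_files, cur_events, skip = [], 0, 0
    i = 0
    while i < len(files):
        f = files[i]
        if not cur_files or cur_files[-1] != f:   # add the file unless it was carried over
            cur_files.append(f)
            cur_events += events_by_file[f]
        usable = cur_events - skip
        if usable < events_per_job:
            if i == len(files) - 1:               # last file: flush whatever is left
                jobs.append((cur_files, usable, skip))
                cur_files, cur_events, skip = [], 0, 0
            i += 1                                # otherwise just take the next file
        elif usable == events_per_job:            # exact fit: close the job
            jobs.append((cur_files, events_per_job, skip))
            cur_files, cur_events, skip = [], 0, 0
            i += 1
        else:                                     # overshoot: close job, reuse this file
            jobs.append((cur_files, events_per_job, skip))
            skip = events_per_job - (cur_events - skip - events_by_file[f])
            cur_files, cur_events = [f], events_by_file[f]
    return jobs

# e.g. split_block(['A', 'B'], {'A': 30, 'B': 50}, 40)
# -> [(['A', 'B'], 40, 0), (['B'], 40, 10)]
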
580 >    def jobSplittingNoInput(self):
581 >        """
582 >        Perform job splitting based on number of event per job
583 >        """
584 >        common.logger.debug(5,'Splitting per events')
585 >        common.logger.message('Required '+str(self.eventsPerJob)+' events per job ')
586 >        common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
587 >        common.logger.message('Required '+str(self.total_number_of_events)+' events in total ')
588 >
589 >        if (self.total_number_of_events < 0):
590 >            msg='Cannot split jobs per Events with "-1" as total number of events'
591 >            raise CrabException(msg)
592 >
593 >        if (self.selectEventsPerJob):
594 >            if (self.selectTotalNumberEvents):
595 >                self.total_number_of_jobs = int(self.total_number_of_events/self.eventsPerJob)
596 >            elif(self.selectNumberOfJobs) :  
597 >                self.total_number_of_jobs =self.theNumberOfJobs
598 >                self.total_number_of_events =int(self.theNumberOfJobs*self.eventsPerJob)
599 >
600 >        elif (self.selectNumberOfJobs) :
601 >            self.total_number_of_jobs = self.theNumberOfJobs
602 >            self.eventsPerJob = int(self.total_number_of_events/self.total_number_of_jobs)
603 >
604 >        common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
605  
606 <            common.logger.debug(5,'Check  '+str(check))
606 >        # is there any remainder?
607 >        check = int(self.total_number_of_events) - (int(self.total_number_of_jobs)*self.eventsPerJob)
608  
609 <            if check > 0:
332 <                self.total_number_of_jobs =  self.total_number_of_jobs + 1
333 <                common.logger.message('Warning: last job will be created with '+str(check)+' files')
609 >        common.logger.debug(5,'Check  '+str(check))
610  
611 <            common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for a total of '+str((self.total_number_of_jobs-1)*self.filesPerJob*evPerFile + check*evPerFile)+' events')
611 >        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created, each for '+str(self.eventsPerJob)+' events, for a total of '+str(self.total_number_of_jobs*self.eventsPerJob)+' events')
612 >        if check > 0:
613 >            common.logger.message('Warning: asked for '+str(self.total_number_of_events)+' events but can do only '+str(int(self.total_number_of_jobs)*self.eventsPerJob))
614 >
615 >        # argument is seed number.$i
616 >        self.list_of_args = []
617 >        for i in range(self.total_number_of_jobs):
618 >            ## Since there is no input, any site is good
619 >           # self.jobDestination.append(["Any"])
620 >            self.jobDestination.append([""]) #must be empty to write correctly the xml
621 >            args=[]
622 >            if (self.firstRun):
623 >                    ## pythia first run
624 >                #self.list_of_args.append([(str(self.firstRun)+str(i))])
625 >                args.append(str(self.firstRun)+str(i))
626 >            else:
627 >                ## no first run
628 >                #self.list_of_args.append([str(i)])
629 >                args.append(str(i))
630 >            if (self.sourceSeed):
631 >                args.append(str(self.sourceSeed)+str(i))
632 >                if (self.sourceSeedVtx):
633 >                    ## + vtx random seed
634 >                    args.append(str(self.sourceSeedVtx)+str(i))
635 >                if (self.sourceSeedG4):
636 >                    ## + G4 random seed
637 >                    args.append(str(self.sourceSeedG4)+str(i))
638 >                if (self.sourceSeedMix):    
639 >                    ## + Mix random seed
640 >                    args.append(str(self.sourceSeedMix)+str(i))
641 >                pass
642              pass
643 +            self.list_of_args.append(args)
644 +        pass
645 +            
646 +        # print self.list_of_args
647  
648 <        list_of_lists = []
649 <        for i in xrange(0, int(n_tot_files), self.filesPerJob):
650 <            list_of_lists.append(self.files[0][i: i+self.filesPerJob])
648 >        return
649 >
650 >
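
For dataset-less (generation) jobs the per-job arguments are simply the configured run/seed values with the job index appended as a string, giving each job distinct but reproducible seeds. A small sketch of that argument construction (illustrative; the real method also fills self.jobDestination):

def no_input_args(n_jobs, first_run=None, pythia_seed=None,
                  vtx_seed=None, g4_seed=None, mix_seed=None):
    """Build one argument list per job: run/seed values with the job index appended."""
    arg_lists = []
    for i in range(n_jobs):
        args = [str(first_run) + str(i) if first_run else str(i)]
        if pythia_seed:
            args.append(str(pythia_seed) + str(i))
            for extra in (vtx_seed, g4_seed, mix_seed):
                if extra:
                    args.append(str(extra) + str(i))
        arg_lists.append(args)
    return arg_lists

# e.g. no_input_args(2, pythia_seed=12345, vtx_seed=98765)
# -> [['0', '123450', '987650'], ['1', '123451', '987651']]
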
651 >    def jobSplittingForScript(self):#CarlosDaniele
652 >        """
653 >        Perform job splitting based on number of job
654 >        """
655 >        common.logger.debug(5,'Splitting per job')
656 >        common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
657 >
658 >        self.total_number_of_jobs = self.theNumberOfJobs
659 >
660 >        common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
661  
662 <        self.list_of_files = list_of_lists
663 <      
662 >        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created')
663 >
664 >        # argument is seed number.$i
665 >        self.list_of_args = []
666 >        for i in range(self.total_number_of_jobs):
667 >            ## Since there is no input, any site is good
668 >           # self.jobDestination.append(["Any"])
669 >            self.jobDestination.append([""])
670 >            ## no random seed
671 >            self.list_of_args.append([str(i)])
672          return
673  
674      def split(self, jobParams):
# Line 348 | Line 676 | class Cmssw(JobType):
676          common.jobDB.load()
677          #### Fabio
678          njobs = self.total_number_of_jobs
679 <        filelist = self.list_of_files
679 >        arglist = self.list_of_args
680          # create the empty structure
681          for i in range(njobs):
682              jobParams.append("")
683          
684          for job in range(njobs):
685 <            jobParams[job] = filelist[job]
685 >            jobParams[job] = arglist[job]
686 >            # print str(arglist[job])
687 >            # print jobParams[job]
688              common.jobDB.setArguments(job, jobParams[job])
689 +            common.logger.debug(5,"Job "+str(job)+" Destination: "+str(self.jobDestination[job]))
690 +            common.jobDB.setDestination(job, self.jobDestination[job])
691  
692          common.jobDB.save()
693          return
694      
695      def getJobTypeArguments(self, nj, sched):
696 <        params = common.jobDB.arguments(nj)
697 <        #print params
698 <        parString = "\\{"
699 <        
368 <        for i in range(len(params) - 1):
369 <            parString += '\\\"' + params[i] + '\\\"\,'
370 <        
371 <        parString += '\\\"' + params[len(params) - 1] + '\\\"\\}'
372 <        return parString
696 >        result = ''
697 >        for i in common.jobDB.arguments(nj):
698 >            result=result+str(i)+" "
699 >        return result
700    
701      def numberOfJobs(self):
702          # Fabio
376
703          return self.total_number_of_jobs
378
379
380
381    def checkBlackList(self, allSites):
382        if len(self.reCEBlackList)==0: return allSites
383        sites = []
384        for site in allSites:
385            common.logger.debug(10,'Site '+site)
386            good=1
387            for re in self.reCEBlackList:
388                if re.search(site):
389                    common.logger.message('CE in black list, skipping site '+site)
390                    good=0
391                pass
392            if good: sites.append(site)
393        if len(sites) == 0:
394            common.logger.debug(3,"No sites found after BlackList")
395        return sites
396
397    def checkWhiteList(self, allSites):
398
399        if len(self.reCEWhiteList)==0: return allSites
400        sites = []
401        for site in allSites:
402            good=0
403            for re in self.reCEWhiteList:
404                if re.search(site):
405                    common.logger.debug(5,'CE in white list, adding site '+site)
406                    good=1
407                if not good: continue
408                sites.append(site)
409        if len(sites) == 0:
410            common.logger.message("No sites found after WhiteList\n")
411        else:
412            common.logger.debug(5,"Selected sites via WhiteList are "+str(sites)+"\n")
413        return sites
704  
705      def getTarBall(self, exe):
706          """
# Line 418 | Line 708 | class Cmssw(JobType):
708          """
709          
710          # if it exist, just return it
711 <        self.tgzNameWithPath = common.work_space.shareDir()+self.tgz_name
711 >        #
712 >        # Marco. Let's start to use relative path for Boss XML files
713 >        #
714 >        self.tgzNameWithPath = common.work_space.pathForTgz()+'share/'+self.tgz_name
715          if os.path.exists(self.tgzNameWithPath):
716              return self.tgzNameWithPath
717  
# Line 432 | Line 725 | class Cmssw(JobType):
725          # First of all declare the user Scram area
726          swArea = self.scram.getSWArea_()
727          #print "swArea = ", swArea
728 <        swVersion = self.scram.getSWVersion()
729 <        #print "swVersion = ", swVersion
728 >        # swVersion = self.scram.getSWVersion()
729 >        # print "swVersion = ", swVersion
730          swReleaseTop = self.scram.getReleaseTop_()
731          #print "swReleaseTop = ", swReleaseTop
732          
# Line 441 | Line 734 | class Cmssw(JobType):
734          if swReleaseTop == '' or swArea == swReleaseTop:
735              return
736  
737 <        filesToBeTarred = []
738 <        ## First find the executable
739 <        if (self.executable != ''):
740 <            exeWithPath = self.scram.findFile_(executable)
741 < #           print exeWithPath
742 <            if ( not exeWithPath ):
743 <                raise CrabException('User executable '+executable+' not found')
744 <
745 <            ## then check if it's private or not
746 <            if exeWithPath.find(swReleaseTop) == -1:
747 <                # the exe is private, so we must ship
748 <                common.logger.debug(5,"Exe "+exeWithPath+" to be tarred")
749 <                path = swArea+'/'
750 <                exe = string.replace(exeWithPath, path,'')
751 <                filesToBeTarred.append(exe)
752 <                pass
753 <            else:
754 <                # the exe is from release, we'll find it on WN
755 <                pass
756 <
757 <        ## Now get the libraries: only those in local working area
758 <        libDir = 'lib'
759 <        lib = swArea+'/' +libDir
760 <        common.logger.debug(5,"lib "+lib+" to be tarred")
761 <        if os.path.exists(lib):
762 <            filesToBeTarred.append(libDir)
763 <
764 <        ## Now check if module dir is present
765 <        moduleDir = 'module'
766 <        if os.path.isdir(swArea+'/'+moduleDir):
767 <            filesToBeTarred.append(moduleDir)
768 <
769 <        ## Now check if the Data dir is present
770 <        dataDir = 'src/Data/'
771 <        if os.path.isdir(swArea+'/'+dataDir):
772 <            filesToBeTarred.append(dataDir)
773 <
774 <        ## Create the tar-ball
775 <        if len(filesToBeTarred)>0:
776 <            cwd = os.getcwd()
777 <            os.chdir(swArea)
778 <            tarcmd = 'tar zcvf ' + self.tgzNameWithPath + ' '
779 <            for line in filesToBeTarred:
780 <                tarcmd = tarcmd + line + ' '
781 <            cout = runCommand(tarcmd)
782 <            if not cout:
783 <                raise CrabException('Could not create tar-ball')
784 <            os.chdir(cwd)
785 <        else:
786 <            common.logger.debug(5,"No files to be to be tarred")
737 >        import tarfile
738 >        try: # create tar ball
739 >            tar = tarfile.open(self.tgzNameWithPath, "w:gz")
740 >            ## First find the executable
741 >            if (self.executable != ''):
742 >                exeWithPath = self.scram.findFile_(executable)
743 >                if ( not exeWithPath ):
744 >                    raise CrabException('User executable '+executable+' not found')
745 >    
746 >                ## then check if it's private or not
747 >                if exeWithPath.find(swReleaseTop) == -1:
748 >                    # the exe is private, so we must ship
749 >                    common.logger.debug(5,"Exe "+exeWithPath+" to be tarred")
750 >                    path = swArea+'/'
751 >                    # distinguish case when script is in user project area or given by full path somewhere else
752 >                    if exeWithPath.find(path) >= 0 :
753 >                        exe = string.replace(exeWithPath, path,'')
754 >                        tar.add(path+exe,os.path.basename(executable))
755 >                    else :
756 >                        tar.add(exeWithPath,os.path.basename(executable))
757 >                    pass
758 >                else:
759 >                    # the exe is from release, we'll find it on WN
760 >                    pass
761 >    
762 >            ## Now get the libraries: only those in local working area
763 >            libDir = 'lib'
764 >            lib = swArea+'/' +libDir
765 >            common.logger.debug(5,"lib "+lib+" to be tarred")
766 >            if os.path.exists(lib):
767 >                tar.add(lib,libDir)
768 >    
769 >            ## Now check if module dir is present
770 >            moduleDir = 'module'
771 >            module = swArea + '/' + moduleDir
772 >            if os.path.isdir(module):
773 >                tar.add(module,moduleDir)
774 >
775 >            ## Now check if any data dir(s) is present
776 >            swAreaLen=len(swArea)
777 >            for root, dirs, files in os.walk(swArea):
778 >                if "data" in dirs:
779 >                    common.logger.debug(5,"data "+root+"/data"+" to be tarred")
780 >                    tar.add(root+"/data",root[swAreaLen:]+"/data")
781 >
782 >            ## Add ProdAgent dir to tar
783 >            paDir = 'ProdAgentApi'
784 >            pa = os.environ['CRABDIR'] + '/' + 'ProdAgentApi'
785 >            if os.path.isdir(pa):
786 >                tar.add(pa,paDir)
787 >        
788 >            common.logger.debug(5,"Files added to "+self.tgzNameWithPath+" : "+str(tar.getnames()))
789 >            tar.close()
790 >        except :
791 >            raise CrabException('Could not create tar-ball')
792 >
793 >        ## check for tarball size
794 >        tarballinfo = os.stat(self.tgzNameWithPath)
795 >        if ( tarballinfo.st_size > self.MaxTarBallSize*1024*1024 ) :
796 >            raise CrabException('Input sandbox size of ' + str(float(tarballinfo.st_size)/1024.0/1024.0) + ' MB is larger than the allowed ' + str(self.MaxTarBallSize) + ' MB input sandbox limit and not supported by the used GRID submission system. Please make sure that no unnecessary files are in all data directories in your local CMSSW project area as they are automatically packed into the input sandbox.')
797 >
798 >        ## create tar-ball with ML stuff
799 >        self.MLtgzfile =  common.work_space.pathForTgz()+'share/MLfiles.tgz'
800 >        try:
801 >            tar = tarfile.open(self.MLtgzfile, "w:gz")
802 >            path=os.environ['CRABDIR'] + '/python/'
803 >            for file in ['report.py', 'DashboardAPI.py', 'Logger.py', 'ProcInfo.py', 'apmon.py', 'parseCrabFjr.py']:
804 >                tar.add(path+file,file)
805 >            common.logger.debug(5,"Files added to "+self.MLtgzfile+" : "+str(tar.getnames()))
806 >            tar.close()
807 >        except :
808 >            raise CrabException('Could not create ML files tar-ball')
809          
810          return
811          
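
getTarBall now builds the sandbox with the tarfile module and aborts if the result exceeds EDG.maxtarballsize (in MB, default 9.5). The size guard in isolation, as a short sketch with a hypothetical helper:

import os
import tarfile

def make_sandbox(tgz_path, entries, max_mb=9.5):
    """Pack (path, archive_name) pairs into a gzipped tarball and enforce a size limit."""
    with tarfile.open(tgz_path, "w:gz") as tar:
        for path, arcname in entries:
            tar.add(path, arcname)
    size_mb = os.stat(tgz_path).st_size / 1024.0 / 1024.0
    if size_mb > max_mb:
        raise RuntimeError("Input sandbox size of %.1f MB exceeds the %.1f MB limit"
                           % (size_mb, max_mb))
    return tgz_path
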
# Line 507 | Line 822 | class Cmssw(JobType):
822          txt += 'if [ $middleware == LCG ]; then \n'
823          txt += self.wsSetupCMSLCGEnvironment_()
824          txt += 'elif [ $middleware == OSG ]; then\n'
825 <        txt += '    time=`date -u +"%s"`\n'
826 <        txt += '    WORKING_DIR=$OSG_WN_TMP/cms_$time\n'
512 <        txt += '    echo "Creating working directory: $WORKING_DIR"\n'
513 <        txt += '    /bin/mkdir -p $WORKING_DIR\n'
825 >        txt += '    WORKING_DIR=`/bin/mktemp  -d $OSG_WN_TMP/cms_XXXXXXXXXXXX`\n'
826 >        txt += '    echo "Created working directory: $WORKING_DIR"\n'
827          txt += '    if [ ! -d $WORKING_DIR ] ;then\n'
828          txt += '        echo "SET_CMS_ENV 10016 ==> OSG $WORKING_DIR could not be created on WN `hostname`"\n'
829 <        txt += '        echo "JOB_EXIT_STATUS = 10016"\n'
830 <        txt += '        echo "JobExitCode=10016" | tee -a $RUNTIME_AREA/$repo\n'
831 <        txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
829 >        txt += '    echo "JOB_EXIT_STATUS = 10016"\n'
830 >        txt += '    echo "JobExitCode=10016" | tee -a $RUNTIME_AREA/$repo\n'
831 >        txt += '    dumpStatus $RUNTIME_AREA/$repo\n'
832 >        txt += '        rm -f $RUNTIME_AREA/$repo \n'
833 >        txt += '        echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
834 >        txt += '        echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
835          txt += '        exit 1\n'
836          txt += '    fi\n'
837          txt += '\n'
# Line 528 | Line 844 | class Cmssw(JobType):
844          scram = self.scram.commandName()
845          txt += '\n\n'
846          txt += 'echo "### SPECIFIC JOB SETUP ENVIRONMENT ###"\n'
847 +        txt += 'echo "Setting SCRAM_ARCH='+self.executable_arch+'"\n'
848 +        txt += 'export SCRAM_ARCH='+self.executable_arch+'\n'
849          txt += scram+' project CMSSW '+self.version+'\n'
850          txt += 'status=$?\n'
851          txt += 'if [ $status != 0 ] ; then\n'
# Line 535 | Line 853 | class Cmssw(JobType):
853          txt += '   echo "JOB_EXIT_STATUS = 10034"\n'
854          txt += '   echo "JobExitCode=10034" | tee -a $RUNTIME_AREA/$repo\n'
855          txt += '   dumpStatus $RUNTIME_AREA/$repo\n'
856 +        txt += '   rm -f $RUNTIME_AREA/$repo \n'
857 +        txt += '   echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
858 +        txt += '   echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
859          ## OLI_Daniele
860          txt += '    if [ $middleware == OSG ]; then \n'
861          txt += '        echo "Remove working directory: $WORKING_DIR"\n'
862          txt += '        cd $RUNTIME_AREA\n'
863          txt += '        /bin/rm -rf $WORKING_DIR\n'
864          txt += '        if [ -d $WORKING_DIR ] ;then\n'
865 <        txt += '            echo "SET_CMS_ENV 10018 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after CMSSW CMSSW_0_6_1 not found on `hostname`"\n'
866 <        txt += '            echo "JOB_EXIT_STATUS = 10018"\n'
867 <        txt += '            echo "JobExitCode=10018" | tee -a $RUNTIME_AREA/$repo\n'
868 <        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
865 >        txt += '        echo "SET_CMS_ENV 10018 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after CMSSW CMSSW_0_6_1 not found on `hostname`"\n'
866 >        txt += '        echo "JOB_EXIT_STATUS = 10018"\n'
867 >        txt += '        echo "JobExitCode=10018" | tee -a $RUNTIME_AREA/$repo\n'
868 >        txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
869 >        txt += '            rm -f $RUNTIME_AREA/$repo \n'
870 >        txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
871 >        txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
872          txt += '        fi\n'
873          txt += '    fi \n'
874          txt += '   exit 1 \n'
# Line 552 | Line 876 | class Cmssw(JobType):
876          txt += 'echo "CMSSW_VERSION =  '+self.version+'"\n'
877          txt += 'cd '+self.version+'\n'
878          ### needed grep for bug in scramv1 ###
879 +        txt += scram+' runtime -sh\n'
880          txt += 'eval `'+scram+' runtime -sh | grep -v SCRAMRT_LSB_JOBNAME`\n'
881 +        txt += 'echo $PATH\n'
882  
883          # Handle the arguments:
884          txt += "\n"
885          txt += "## number of arguments (first argument always jobnumber)\n"
886          txt += "\n"
887 <        txt += "narg=$#\n"
888 <        txt += "if [ $narg -lt 2 ]\n"
887 > #        txt += "narg=$#\n"
888 >        txt += "if [ $nargs -lt 2 ]\n"
889          txt += "then\n"
890 <        txt += "    echo 'SET_EXE_ENV 1 ==> ERROR Too few arguments' +$narg+ \n"
890 >        txt += "    echo 'SET_EXE_ENV 1 ==> ERROR Too few arguments' +$nargs+ \n"
891          txt += '    echo "JOB_EXIT_STATUS = 50113"\n'
892          txt += '    echo "JobExitCode=50113" | tee -a $RUNTIME_AREA/$repo\n'
893          txt += '    dumpStatus $RUNTIME_AREA/$repo\n'
894 +        txt += '    rm -f $RUNTIME_AREA/$repo \n'
895 +        txt += '    echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
896 +        txt += '    echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
897          ## OLI_Daniele
898          txt += '    if [ $middleware == OSG ]; then \n'
899          txt += '        echo "Remove working directory: $WORKING_DIR"\n'
900          txt += '        cd $RUNTIME_AREA\n'
901          txt += '        /bin/rm -rf $WORKING_DIR\n'
902          txt += '        if [ -d $WORKING_DIR ] ;then\n'
903 <        txt += '            echo "SET_EXE_ENV 50114 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after Too few arguments for CRAB job wrapper"\n'
904 <        txt += '            echo "JOB_EXIT_STATUS = 50114"\n'
905 <        txt += '            echo "JobExitCode=50114" | tee -a $RUNTIME_AREA/$repo\n'
906 <        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
903 >        txt += '        echo "SET_EXE_ENV 50114 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after Too few arguments for CRAB job wrapper"\n'
904 >        txt += '        echo "JOB_EXIT_STATUS = 50114"\n'
905 >        txt += '        echo "JobExitCode=50114" | tee -a $RUNTIME_AREA/$repo\n'
906 >        txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
907 >        txt += '            rm -f $RUNTIME_AREA/$repo \n'
908 >        txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
909 >        txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
910          txt += '        fi\n'
911          txt += '    fi \n'
912          txt += "    exit 1\n"
# Line 583 | Line 915 | class Cmssw(JobType):
915  
916          # Prepare job-specific part
917          job = common.job_list[nj]
918 <        pset = os.path.basename(job.configFilename())
919 <        txt += '\n'
920 <        txt += 'InputFiles=$2\n'
921 <        txt += 'echo "<$InputFiles>"\n'
922 <        #txt += 'echo sed "s#{\'INPUT\'}#$InputFiles#" $RUNTIME_AREA/'+pset+' \n'
923 <        txt += 'sed "s#{\'INPUT\'}#$InputFiles#" $RUNTIME_AREA/'+pset+' > pset.cfg\n'
924 <        #txt += 'sed "s#{\'INPUT\'}#${InputFiles}#" $RUNTIME_AREA/'+pset+' > pset1.cfg\n'
918 >        if self.pset != None: #CarlosDaniele
919 >            pset = os.path.basename(job.configFilename())
920 >            txt += '\n'
921 >            if (self.datasetPath): # standard job
922 >                #txt += 'InputFiles=$2\n'
923 >                txt += 'InputFiles=${args[1]}\n'
924 >                txt += 'MaxEvents=${args[2]}\n'
925 >                txt += 'SkipEvents=${args[3]}\n'
926 >                txt += 'echo "Inputfiles:<$InputFiles>"\n'
927 >                txt += 'sed "s#{\'INPUT\'}#$InputFiles#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
928 >                txt += 'echo "MaxEvents:<$MaxEvents>"\n'
929 >                txt += 'sed "s#INPUTMAXEVENTS#$MaxEvents#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
930 >                txt += 'echo "SkipEvents:<$SkipEvents>"\n'
931 >                txt += 'sed "s#INPUTSKIPEVENTS#$SkipEvents#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
932 >            else:  # pythia like job
933 >                seedIndex=1
934 >                if (self.firstRun):
935 >                    txt += 'FirstRun=${args['+str(seedIndex)+']}\n'
936 >                    txt += 'echo "FirstRun: <$FirstRun>"\n'
937 >                    txt += 'sed "s#\<INPUTFIRSTRUN\>#$FirstRun#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
938 >                    seedIndex=seedIndex+1
939 >
940 >                if (self.sourceSeed):
941 >                    txt += 'Seed=${args['+str(seedIndex)+']}\n'
942 >                    txt += 'sed "s#\<INPUT\>#$Seed#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
943 >                    seedIndex=seedIndex+1
944 >                    ## the following seeds are not always present
945 >                    if (self.sourceSeedVtx):
946 >                        txt += 'VtxSeed=${args['+str(seedIndex)+']}\n'
947 >                        txt += 'echo "VtxSeed: <$VtxSeed>"\n'
948 >                        txt += 'sed "s#\<INPUTVTX\>#$VtxSeed#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
949 >                        seedIndex += 1
950 >                    if (self.sourceSeedG4):
951 >                        txt += 'G4Seed=${args['+str(seedIndex)+']}\n'
952 >                        txt += 'echo "G4Seed: <$G4Seed>"\n'
953 >                        txt += 'sed "s#\<INPUTG4\>#$G4Seed#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
954 >                        seedIndex += 1
955 >                    if (self.sourceSeedMix):
956 >                        txt += 'mixSeed=${args['+str(seedIndex)+']}\n'
957 >                        txt += 'echo "MixSeed: <$mixSeed>"\n'
958 >                        txt += 'sed "s#\<INPUTMIX\>#$mixSeed#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
959 >                        seedIndex += 1
960 >                    pass
961 >                pass
962 >            txt += 'mv -f '+pset+' pset.cfg\n'
963  
964          if len(self.additional_inbox_files) > 0:
965              for file in self.additional_inbox_files:
966 <                txt += 'if [ -e $RUNTIME_AREA/'+file+' ] ; then\n'
967 <                txt += '   cp $RUNTIME_AREA/'+file+' .\n'
968 <                txt += '   chmod +x '+file+'\n'
966 >                relFile = file.split("/")[-1]
967 >                txt += 'if [ -e $RUNTIME_AREA/'+relFile+' ] ; then\n'
968 >                txt += '   cp $RUNTIME_AREA/'+relFile+' .\n'
969 >                txt += '   chmod +x '+relFile+'\n'
970                  txt += 'fi\n'
971              pass
972  
973 <        txt += 'echo "### END JOB SETUP ENVIRONMENT ###"\n\n'
974 <
975 <        txt += '\n'
976 <        txt += 'echo "***** cat pset.cfg *********"\n'
977 <        txt += 'cat pset.cfg\n'
978 <        txt += 'echo "****** end pset.cfg ********"\n'
979 <        txt += '\n'
980 <        # txt += 'echo "***** cat pset1.cfg *********"\n'
610 <        # txt += 'cat pset1.cfg\n'
611 <        # txt += 'echo "****** end pset1.cfg ********"\n'
973 >        if self.pset != None: #CarlosDaniele
974 >            txt += 'echo "### END JOB SETUP ENVIRONMENT ###"\n\n'
975 >        
976 >            txt += '\n'
977 >            txt += 'echo "***** cat pset.cfg *********"\n'
978 >            txt += 'cat pset.cfg\n'
979 >            txt += 'echo "****** end pset.cfg ********"\n'
980 >            txt += '\n'
981          return txt
982  
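The sed commands emitted above rewrite, at run time, the placeholders that CRAB left in the job's pset.cfg: {'INPUT'}, INPUTMAXEVENTS and INPUTSKIPEVENTS for jobs reading a dataset, or INPUTFIRSTRUN and the seed placeholders (INPUT, INPUTVTX, INPUTG4, INPUTMIX) for generation-only jobs. A rough Python equivalent of the dataset-job substitution (illustrative only, not part of the wrapper) is:

    def fill_pset(cfg_text, input_files, max_events, skip_events):
        # Stand-in for the sed lines above: replace the placeholders CRAB
        # wrote into pset.cfg with the per-job values passed as arguments.
        cfg_text = cfg_text.replace("{'INPUT'}", input_files)
        cfg_text = cfg_text.replace("INPUTMAXEVENTS", str(max_events))
        cfg_text = cfg_text.replace("INPUTSKIPEVENTS", str(skip_events))
        return cfg_text

    # e.g. fill_pset(open('pset.cfg').read(), "'file1.root','file2.root'", 1000, 2000)
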
983 <    def wsBuildExe(self, nj):
983 >    def wsBuildExe(self, nj=0):
984          """
985          Put in the script the commands to build an executable
986          or a library.
# Line 632 | Line 1001 | class Cmssw(JobType):
1001              txt += '       cd $RUNTIME_AREA\n'
1002              txt += '       /bin/rm -rf $WORKING_DIR\n'
1003              txt += '       if [ -d $WORKING_DIR ] ;then\n'
1004 <            txt += '        echo "SET_EXE 50999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after Untarring .tgz file failed"\n'
1005 <            txt += '        echo "JOB_EXIT_STATUS = 50999"\n'
1006 <            txt += '        echo "JobExitCode=50999" | tee -a $RUNTIME_AREA/$repo\n'
1007 <            txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
1004 >            txt += '           echo "SET_EXE 50999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after Untarring .tgz file failed"\n'
1005 >            txt += '           echo "JOB_EXIT_STATUS = 50999"\n'
1006 >            txt += '           echo "JobExitCode=50999" | tee -a $RUNTIME_AREA/$repo\n'
1007 >            txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
1008 >            txt += '           rm -f $RUNTIME_AREA/$repo \n'
1009 >            txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1010 >            txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1011              txt += '       fi\n'
1012              txt += '   fi \n'
1013              txt += '   \n'
# Line 643 | Line 1015 | class Cmssw(JobType):
1015              txt += 'else \n'
1016              txt += '   echo "Successful untar" \n'
1017              txt += 'fi \n'
1018 +            txt += '\n'
1019 +            txt += 'echo "Include ProdAgentApi in PYTHONPATH"\n'
1020 +            txt += 'if [ -z "$PYTHONPATH" ]; then\n'
1021 +            txt += '   export PYTHONPATH=ProdAgentApi\n'
1022 +            txt += 'else\n'
1023 +            txt += '   export PYTHONPATH=ProdAgentApi:${PYTHONPATH}\n'
1024 +            txt += 'fi\n'
1025 +            txt += '\n'
1026 +
1027              pass
1028          
1029          return txt
# Line 654 | Line 1035 | class Cmssw(JobType):
1035          """
1036          
1037      def executableName(self):
1038 <        return self.executable
1038 >        if self.scriptExe: #CarlosDaniele
1039 >            return "sh "
1040 >        else:
1041 >            return self.executable
1042  
1043      def executableArgs(self):
1044 <        return " -p pset.cfg"
1044 >        if self.scriptExe:#CarlosDaniele
1045 >            return   self.scriptExe + " $NJob"
1046 >        else:
1047 >            return " -p pset.cfg"
1048  
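The two methods above determine the command line the wrapper finally runs; schematically (hypothetical glue code, and assuming a cmsRun-style value in self.executable):

    def job_command(jobtype):
        # Hypothetical: combine the pieces defined above into one command line.
        return jobtype.executableName() + ' ' + jobtype.executableArgs()

    # scriptExe set   -> "sh  myscript.sh $NJob"   (script name is an example)
    # scriptExe unset -> "cmsRun  -p pset.cfg"     (if self.executable is cmsRun)
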
1049      def inputSandbox(self, nj):
1050          """
1051          Returns a list of filenames to be put in JDL input sandbox.
1052          """
1053          inp_box = []
1054 <        # dict added to delete duplicate from input sandbox file list
1055 <        seen = {}
1054 >        # # dict added to delete duplicate from input sandbox file list
1055 >        # seen = {}
1056          ## code
1057          if os.path.isfile(self.tgzNameWithPath):
1058              inp_box.append(self.tgzNameWithPath)
1059 +        if os.path.isfile(self.MLtgzfile):
1060 +            inp_box.append(self.MLtgzfile)
1061          ## config
1062 <        inp_box.append(common.job_list[nj].configFilename())
1062 >        if not self.pset is None:
1063 >            inp_box.append(common.work_space.pathForTgz() + 'job/' + self.configFilename())
1064          ## additional input files
1065 <        #for file in self.additional_inbox_files:
1066 <        #    inp_box.append(common.work_space.cwdDir()+file)
1065 >        for file in self.additional_inbox_files:
1066 >            inp_box.append(file)
1067          return inp_box
1068  
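For one job the resulting sandbox list therefore holds the code tarball, the ML tarball when present, the job configuration (unless the task is script-only) and any extra user files; an illustrative value (all paths hypothetical) would be:

    example_inp_box = [
        '/path/to/crab_0_xxx/share/default.tgz',   # self.tgzNameWithPath
        '/path/to/crab_0_xxx/share/ML.tgz',        # self.MLtgzfile, if present
        '/path/to/crab_0_xxx/job/CMSSW.cfg',       # pathForTgz() + 'job/' + configFilename()
        '/home/user/extra_calib.txt',              # from additional_inbox_files
    ]
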
1069      def outputSandbox(self, nj):
# Line 682 | Line 1072 | class Cmssw(JobType):
1072          """
1073          out_box = []
1074  
685        stdout=common.job_list[nj].stdout()
686        stderr=common.job_list[nj].stderr()
687
1075          ## User Declared output files
1076 <        for out in self.output_file:
1076 >        for out in (self.output_file+self.output_file_sandbox):
1077              n_out = nj + 1
1078              out_box.append(self.numberFile_(out,str(n_out)))
1079          return out_box
693        return []
1080  
1081      def prepareSteeringCards(self):
1082          """
# Line 706 | Line 1092 | class Cmssw(JobType):
1092          txt = '\n'
1093          txt += '# directory content\n'
1094          txt += 'ls \n'
1095 <        file_list = ''
1096 <        for fileWithSuffix in self.output_file:
1095 >
1096 >        for fileWithSuffix in (self.output_file+self.output_file_sandbox):
1097              output_file_num = self.numberFile_(fileWithSuffix, '$NJob')
712            file_list=file_list+output_file_num+' '
1098              txt += '\n'
1099              txt += '# check output file\n'
1100              txt += 'ls '+fileWithSuffix+'\n'
1101 <            txt += 'exe_result=$?\n'
1102 <            txt += 'if [ $exe_result -ne 0 ] ; then\n'
1103 <            txt += '   echo "ERROR: No output file to manage"\n'
1104 <            txt += '   echo "JOB_EXIT_STATUS = $exe_result"\n'
1105 <            txt += '   echo "JobExitCode=60302" | tee -a $RUNTIME_AREA/$repo\n'
1106 <            txt += '   dumpStatus $RUNTIME_AREA/$repo\n'
1107 <            ### OLI_DANIELE
1101 >            txt += 'ls_result=$?\n'
1102 >            txt += 'if [ $ls_result -ne 0 ] ; then\n'
1103 >            #txt += '   JOB_EXIT_STATUS=60302\n'
1104 >            ### FEDE
1105 >            txt += '   exit_status=60302\n'
1106 >            ####
1107 >            txt += '   echo "ERROR: Problem with output file"\n'
1108              if common.scheduler.boss_scheduler_name == 'condor_g':
1109                  txt += '    if [ $middleware == OSG ]; then \n'
1110                  txt += '        echo "prepare dummy output file"\n'
# Line 730 | Line 1115 | class Cmssw(JobType):
1115              txt += 'fi\n'
1116        
1117          txt += 'cd $RUNTIME_AREA\n'
1118 <        file_list=file_list[:-1]
734 <        txt += 'file_list="'+file_list+'"\n'
1118 >        txt += 'cd $RUNTIME_AREA\n'
1119          ### OLI_DANIELE
1120          txt += 'if [ $middleware == OSG ]; then\n'  
1121          txt += '    cd $RUNTIME_AREA\n'
1122          txt += '    echo "Remove working directory: $WORKING_DIR"\n'
1123          txt += '    /bin/rm -rf $WORKING_DIR\n'
1124          txt += '    if [ -d $WORKING_DIR ] ;then\n'
1125 <        txt += '        echo "SET_EXE 60999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after cleanup of WN"\n'
1126 <        txt += '        echo "JOB_EXIT_STATUS = 60999"\n'
1127 <        txt += '        echo "JobExitCode=60999" | tee -a $RUNTIME_AREA/$repo\n'
1128 <        txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
1125 >        txt += '    echo "SET_EXE 60999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after cleanup of WN"\n'
1126 >        txt += '    echo "JOB_EXIT_STATUS = 60999"\n'
1127 >        txt += '    echo "JobExitCode=60999" | tee -a $RUNTIME_AREA/$repo\n'
1128 >        txt += '    dumpStatus $RUNTIME_AREA/$repo\n'
1129 >        txt += '        rm -f $RUNTIME_AREA/$repo \n'
1130 >        txt += '        echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1131 >        txt += '        echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1132          txt += '    fi\n'
1133          txt += 'fi\n'
1134          txt += '\n'
1135 +
1136 +        file_list = ''
1137 +        ## Add to filelist only files to be possibly copied to SE
1138 +        for fileWithSuffix in self.output_file:
1139 +            output_file_num = self.numberFile_(fileWithSuffix, '$NJob')
1140 +            file_list=file_list+output_file_num+' '
1141 +        file_list=file_list[:-1]
1142 +        txt += 'file_list="'+file_list+'"\n'
1143 +
1144          return txt
1145  
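file_list ends up as a space-separated string of the per-job numbered output names, restricted to the files meant for copy to the SE (the sandbox-only files are excluded). A compact sketch of the same construction (file names are examples):

    def build_file_list(output_files, njob_tag='$NJob'):
        # Mirror of the loop above: numberFile_ puts the tag before the last
        # extension, and the names are joined into the exported file_list.
        names = []
        for f in output_files:
            base, dot, ext = f.rpartition('.')
            names.append((base + '_' + njob_tag + dot + ext) if dot else (f + '_' + njob_tag))
        return ' '.join(names)

    # build_file_list(['ntuple.root', 'histos.root'])
    #   -> 'ntuple_$NJob.root histos_$NJob.root'
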
1146      def numberFile_(self, file, txt):
# Line 755 | Line 1151 | class Cmssw(JobType):
1151          # take away last extension
1152          name = p[0]
1153          for x in p[1:-1]:
1154 <           name=name+"."+x
1154 >            name=name+"."+x
1155          # add "_txt"
1156          if len(p)>1:
1157 <          ext = p[len(p)-1]
1158 <          #result = name + '_' + str(txt) + "." + ext
763 <          result = name + '_' + txt + "." + ext
1157 >            ext = p[len(p)-1]
1158 >            result = name + '_' + txt + "." + ext
1159          else:
1160 <          #result = name + '_' + str(txt)
766 <          result = name + '_' + txt
1160 >            result = name + '_' + txt
1161          
1162          return result
1163  
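numberFile_ inserts the job index (or the literal '$NJob' placeholder) just before the last extension; a standalone sketch of the same rule, with example results:

    def number_file(name, tag):
        # Same renaming rule as numberFile_: put '_<tag>' before the last extension.
        parts = name.split('.')
        if len(parts) > 1:
            return '.'.join(parts[:-1]) + '_' + tag + '.' + parts[-1]
        return name + '_' + tag

    # number_file('ntuple.root', '3')      -> 'ntuple_3.root'
    # number_file('my.file.txt', '$NJob')  -> 'my.file_$NJob.txt'
    # number_file('logfile', '3')          -> 'logfile_3'
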
1164 <    def getRequirements(self):
1164 >    def getRequirements(self, nj=[]):
1165          """
1166          return job requirements to add to jdl files
1167          """
1168          req = ''
1169 <        if common.analisys_common_info['sites']:
1170 <            if common.analisys_common_info['sw_version']:
1171 <                req='Member("VO-cms-' + \
1172 <                     common.analisys_common_info['sw_version'] + \
1173 <                     '", other.GlueHostApplicationSoftwareRunTimeEnvironment)'
1174 <            if len(common.analisys_common_info['sites'])>0:
1175 <                req = req + ' && ('
1176 <                for i in range(len(common.analisys_common_info['sites'])):
1177 <                    req = req + 'other.GlueCEInfoHostName == "' \
1178 <                         + common.analisys_common_info['sites'][i] + '"'
1179 <                    if ( i < (int(len(common.analisys_common_info['sites']) - 1)) ):
786 <                        req = req + ' || '
787 <            req = req + ')'
788 <        #print "req = ", req
1169 >        if self.version:
1170 >            req='Member("VO-cms-' + \
1171 >                 self.version + \
1172 >                 '", other.GlueHostApplicationSoftwareRunTimeEnvironment)'
1173 >        # if self.executable_arch:
1174 >        #     req='Member("VO-cms-' + \
1175 >        #          self.executable_arch + \
1176 >        #          '", other.GlueHostApplicationSoftwareRunTimeEnvironment)'
1177 >
1178 >        req = req + ' && (other.GlueHostNetworkAdapterOutboundIP)'
1179 >
1180          return req
1181  
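With the new logic the requirement string depends only on the installed-CMSSW tag plus an outbound-IP check; for a hypothetical version the JDL fragment built here would read:

    # Illustrative result of getRequirements() (the version string is an example):
    req = ('Member("VO-cms-CMSSW_1_3_1",'
           ' other.GlueHostApplicationSoftwareRunTimeEnvironment)'
           ' && (other.GlueHostNetworkAdapterOutboundIP)')
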
1182      def configFilename(self):
# Line 802 | Line 1193 | class Cmssw(JobType):
1193          txt += '   echo "### SETUP CMS OSG  ENVIRONMENT ###"\n'
1194          txt += '   if [ -f $GRID3_APP_DIR/cmssoft/cmsset_default.sh ] ;then\n'
1195          txt += '      # Use $GRID3_APP_DIR/cmssoft/cmsset_default.sh to setup cms software\n'
1196 +        txt += '       export SCRAM_ARCH='+self.executable_arch+'\n'
1197          txt += '       source $GRID3_APP_DIR/cmssoft/cmsset_default.sh '+self.version+'\n'
1198 <        txt += '   elif [ -f $OSG_APP/cmssoft/cmsset_default.sh ] ;then\n'
1199 <        txt += '      # Use $OSG_APP/cmssoft/cmsset_default.sh to setup cms software\n'
1200 <        txt += '       source $OSG_APP/cmssoft/cmsset_default.sh '+self.version+'\n'
1198 >        txt += '   elif [ -f $OSG_APP/cmssoft/cms/cmsset_default.sh ] ;then\n'
1199 >        txt += '      # Use $OSG_APP/cmssoft/cms/cmsset_default.sh to setup cms software\n'
1200 >        txt += '       export SCRAM_ARCH='+self.executable_arch+'\n'
1201 >        txt += '       source $OSG_APP/cmssoft/cms/cmsset_default.sh '+self.version+'\n'
1202          txt += '   else\n'
1203 <        txt += '       echo "SET_CMS_ENV 10020 ==> ERROR $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cmsset_default.sh file not found"\n'
1203 >        txt += '       echo "SET_CMS_ENV 10020 ==> ERROR $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cms/cmsset_default.sh file not found"\n'
1204          txt += '       echo "JOB_EXIT_STATUS = 10020"\n'
1205          txt += '       echo "JobExitCode=10020" | tee -a $RUNTIME_AREA/$repo\n'
1206          txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
1207 +        txt += '       rm -f $RUNTIME_AREA/$repo \n'
1208 +        txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1209 +        txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1210          txt += '       exit 1\n'
1211          txt += '\n'
1212          txt += '       echo "Remove working directory: $WORKING_DIR"\n'
1213          txt += '       cd $RUNTIME_AREA\n'
1214          txt += '       /bin/rm -rf $WORKING_DIR\n'
1215          txt += '       if [ -d $WORKING_DIR ] ;then\n'
1216 <        txt += '            echo "SET_CMS_ENV 10017 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cmsset_default.sh file not found"\n'
1217 <        txt += '            echo "JOB_EXIT_STATUS = 10017"\n'
1218 <        txt += '            echo "JobExitCode=10017" | tee -a $RUNTIME_AREA/$repo\n'
1219 <        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
1216 >        txt += '        echo "SET_CMS_ENV 10017 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cms/cmsset_default.sh file not found"\n'
1217 >        txt += '        echo "JOB_EXIT_STATUS = 10017"\n'
1218 >        txt += '        echo "JobExitCode=10017" | tee -a $RUNTIME_AREA/$repo\n'
1219 >        txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
1220 >        txt += '            rm -f $RUNTIME_AREA/$repo \n'
1221 >        txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1222 >        txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1223          txt += '       fi\n'
1224          txt += '\n'
1225          txt += '       exit 1\n'
# Line 844 | Line 1243 | class Cmssw(JobType):
1243          txt += '       echo "JOB_EXIT_STATUS = 10031" \n'
1244          txt += '       echo "JobExitCode=10031" | tee -a $RUNTIME_AREA/$repo\n'
1245          txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
1246 +        txt += '       rm -f $RUNTIME_AREA/$repo \n'
1247 +        txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1248 +        txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1249          txt += '       exit 1\n'
1250          txt += '   else\n'
1251          txt += '       echo "Sourcing environment... "\n'
# Line 852 | Line 1254 | class Cmssw(JobType):
1254          txt += '           echo "JOB_EXIT_STATUS = 10020"\n'
1255          txt += '           echo "JobExitCode=10020" | tee -a $RUNTIME_AREA/$repo\n'
1256          txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
1257 +        txt += '           rm -f $RUNTIME_AREA/$repo \n'
1258 +        txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1259 +        txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1260          txt += '           exit 1\n'
1261          txt += '       fi\n'
1262          txt += '       echo "sourcing $VO_CMS_SW_DIR/cmsset_default.sh"\n'
# Line 862 | Line 1267 | class Cmssw(JobType):
1267          txt += '           echo "JOB_EXIT_STATUS = 10032"\n'
1268          txt += '           echo "JobExitCode=10032" | tee -a $RUNTIME_AREA/$repo\n'
1269          txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
1270 +        txt += '           rm -f $RUNTIME_AREA/$repo \n'
1271 +        txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1272 +        txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1273          txt += '           exit 1\n'
1274          txt += '       fi\n'
1275          txt += '   fi\n'
1276          txt += '   \n'
869        txt += '   string=`cat /etc/redhat-release`\n'
870        txt += '   echo $string\n'
871        txt += '   if [[ $string = *alhalla* ]]; then\n'
872        txt += '       echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
873        txt += '   elif [[ $string = *Enterprise* ]] || [[ $string = *cientific* ]]; then\n'
874        txt += '       export SCRAM_ARCH=slc3_ia32_gcc323\n'
875        txt += '       echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
876        txt += '   else\n'
877        txt += '       echo "SET_CMS_ENV 10033 ==> ERROR OS unknown, LCG environment not initialized"\n'
878        txt += '       echo "JOB_EXIT_STATUS = 10033"\n'
879        txt += '       echo "JobExitCode=10033" | tee -a $RUNTIME_AREA/$repo\n'
880        txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
881        txt += '       exit 1\n'
882        txt += '   fi\n'
1277          txt += '   echo "SET_CMS_ENV 0 ==> setup cms environment ok"\n'
1278          txt += '   echo "### END SETUP CMS LCG ENVIRONMENT ###"\n'
1279          return txt
# Line 889 | Line 1283 | class Cmssw(JobType):
1283  
1284      def getParams(self):
1285          return self._params
1286 +
1287 +    def setTaskid_(self):
1288 +        self._taskId = self.cfg_params['taskId']
1289 +        
1290 +    def getTaskid(self):
1291 +        return self._taskId
1292 +
1293 + #######################################################################
1294 +    def uniquelist(self, old):
1295 +        """
1296 +        remove duplicates from a list
1297 +        """
1298 +        nd={}
1299 +        for e in old:
1300 +            nd[e]=0
1301 +        return nd.keys()

Diff Legend

Removed lines
+ Added lines
< Changed lines (older revision)
> Changed lines (newer revision)