ViewVC Help
View File | Revision Log | Show Annotations | Root Listing
root/cvsroot/COMP/CRAB/python/cms_cmssw.py
(Generate patch)

Comparing COMP/CRAB/python/cms_cmssw.py (file contents):
Revision 1.13 by gutsche, Tue Jun 27 02:31:31 2006 UTC vs.
Revision 1.111 by farinafa, Tue Aug 7 15:06:49 2007 UTC

# Line 3 | Line 3 | from crab_logger import Logger
3   from crab_exceptions import *
4   from crab_util import *
5   import common
6 import PsetManipulator  
7
8 import DBSInfo_EDM
9 import DataDiscovery_EDM
10 import DataLocation_EDM
6   import Scram
7  
8 < import os, string, re
8 > import os, string, glob
9  
10   class Cmssw(JobType):
11 <    def __init__(self, cfg_params):
11 >    def __init__(self, cfg_params, ncjobs):
12          JobType.__init__(self, 'CMSSW')
13          common.logger.debug(3,'CMSSW::__init__')
14  
20        self.analisys_common_info = {}
21        # Marco.
15          self._params = {}
16          self.cfg_params = cfg_params
17 +
18 +        try:
19 +            self.MaxTarBallSize = float(self.cfg_params['EDG.maxtarballsize'])
20 +        except KeyError:
21 +            self.MaxTarBallSize = 9.5
22 +
23 +        # number of jobs requested to be created, limit obj splitting
24 +        self.ncjobs = ncjobs
25 +
26          log = common.logger
27          
28          self.scram = Scram.Scram(cfg_params)
27        scramArea = ''
29          self.additional_inbox_files = []
30          self.scriptExe = ''
31          self.executable = ''
32 +        self.executable_arch = self.scram.getArch()
33          self.tgz_name = 'default.tgz'
34 +        self.additional_tgz_name = 'additional.tgz'
35 +        self.scriptName = 'CMSSW.sh'
36 +        self.pset = ''      #script use case Da  
37 +        self.datasetPath = '' #script use case Da
38  
39 +        # set FJR file name
40 +        self.fjrFileName = 'crab_fjr.xml'
41  
42          self.version = self.scram.getSWVersion()
43 +        common.taskDB.setDict('codeVersion',self.version)
44          self.setParam_('application', self.version)
36        common.analisys_common_info['sw_version'] = self.version
37        ### FEDE
38        common.analisys_common_info['copy_input_data'] = 0
39        common.analisys_common_info['events_management'] = 1
45  
46          ### collect Data cards
47 +
48 +        ## get DBS mode
49 +        try:
50 +            self.use_dbs_1 = int(self.cfg_params['CMSSW.use_dbs_1'])
51 +        except KeyError:
52 +            self.use_dbs_1 = 0
53 +            
54          try:
55              tmp =  cfg_params['CMSSW.datasetpath']
56              log.debug(6, "CMSSW::CMSSW(): datasetPath = "+tmp)
57              if string.lower(tmp)=='none':
58                  self.datasetPath = None
59 +                self.selectNoInput = 1
60              else:
61                  self.datasetPath = tmp
62 +                self.selectNoInput = 0
63          except KeyError:
64              msg = "Error: datasetpath not defined "  
65              raise CrabException(msg)
# Line 56 | Line 70 | class Cmssw(JobType):
70              self.setParam_('dataset', 'None')
71              self.setParam_('owner', 'None')
72          else:
73 <            datasetpath_split = self.datasetPath.split("/")
74 <            self.setParam_('dataset', datasetpath_split[1])
75 <            self.setParam_('owner', datasetpath_split[-1])
76 <
73 >            try:
74 >                datasetpath_split = self.datasetPath.split("/")
75 >                # standard style
76 >                if self.use_dbs_1 == 1 :
77 >                    self.setParam_('dataset', datasetpath_split[1])
78 >                    self.setParam_('owner', datasetpath_split[-1])
79 >                else:
80 >                    self.setParam_('dataset', datasetpath_split[1])
81 >                    self.setParam_('owner', datasetpath_split[2])
82 >            except:
83 >                self.setParam_('dataset', self.datasetPath)
84 >                self.setParam_('owner', self.datasetPath)
85 >                
86          self.setTaskid_()
87          self.setParam_('taskId', self.cfg_params['taskId'])
88  
# Line 82 | Line 105 | class Cmssw(JobType):
105          try:
106              self.pset = cfg_params['CMSSW.pset']
107              log.debug(6, "Cmssw::Cmssw(): PSet file = "+self.pset)
108 <            if (not os.path.exists(self.pset)):
109 <                raise CrabException("User defined PSet file "+self.pset+" does not exist")
108 >            if self.pset.lower() != 'none' :
109 >                if (not os.path.exists(self.pset)):
110 >                    raise CrabException("User defined PSet file "+self.pset+" does not exist")
111 >            else:
112 >                self.pset = None
113          except KeyError:
114              raise CrabException("PSet file missing. Cannot run cmsRun ")
115  
116          # output files
117 +        ## stuff which must be returned always via sandbox
118 +        self.output_file_sandbox = []
119 +
120 +        # add fjr report by default via sandbox
121 +        self.output_file_sandbox.append(self.fjrFileName)
122 +
123 +        # other output files to be returned via sandbox or copied to SE
124          try:
125              self.output_file = []
93
126              tmp = cfg_params['CMSSW.output_file']
127              if tmp != '':
128                  tmpOutFiles = string.split(cfg_params['CMSSW.output_file'],',')
# Line 100 | Line 132 | class Cmssw(JobType):
132                      self.output_file.append(tmp)
133                      pass
134              else:
135 <                log.message("No output file defined: only stdout/err will be available")
135 >                log.message("No output file defined: only stdout/err and the CRAB Framework Job Report will be available\n")
136                  pass
137              pass
138          except KeyError:
139 <            log.message("No output file defined: only stdout/err will be available")
139 >            log.message("No output file defined: only stdout/err and the CRAB Framework Job Report will be available\n")
140              pass
141  
142          # script_exe file as additional file in inputSandbox
143          try:
144              self.scriptExe = cfg_params['USER.script_exe']
113            self.additional_inbox_files.append(self.scriptExe)
145              if self.scriptExe != '':
146                 if not os.path.isfile(self.scriptExe):
147 <                  msg ="WARNING. file "+self.scriptExe+" not found"
147 >                  msg ="ERROR. file "+self.scriptExe+" not found"
148                    raise CrabException(msg)
149 +               self.additional_inbox_files.append(string.strip(self.scriptExe))
150          except KeyError:
151 <           pass
152 <                  
151 >            self.scriptExe = ''
152 >
153 >        #CarlosDaniele
154 >        if self.datasetPath == None and self.pset == None and self.scriptExe == '' :
155 >           msg ="Error. script_exe  not defined"
156 >           raise CrabException(msg)
157 >
158          ## additional input files
159          try:
160 <            tmpAddFiles = string.split(cfg_params['CMSSW.additional_input_files'],',')
160 >            tmpAddFiles = string.split(cfg_params['USER.additional_input_files'],',')
161              for tmp in tmpAddFiles:
162 <                if not os.path.exists(tmp):
163 <                    raise CrabException("Additional input file not found: "+tmp)
164 <                tmp=string.strip(tmp)
165 <                self.additional_inbox_files.append(tmp)
162 >                tmp = string.strip(tmp)
163 >                dirname = ''
164 >                if not tmp[0]=="/": dirname = "."
165 >                files = []
166 >                if string.find(tmp,"*")>-1:
167 >                    files = glob.glob(os.path.join(dirname, tmp))
168 >                    if len(files)==0:
169 >                        raise CrabException("No additional input file found with this pattern: "+tmp)
170 >                else:
171 >                    files.append(tmp)
172 >                for file in files:
173 >                    if not os.path.exists(file):
174 >                        raise CrabException("Additional input file not found: "+file)
175 >                    pass
176 >                    # fname = string.split(file, '/')[-1]
177 >                    # storedFile = common.work_space.pathForTgz()+'share/'+fname
178 >                    # shutil.copyfile(file, storedFile)
179 >                    self.additional_inbox_files.append(string.strip(file))
180                  pass
181              pass
182 +            common.logger.debug(5,"Additional input files: "+str(self.additional_inbox_files))
183          except KeyError:
184              pass
185  
186          # files per job
187          try:
188 <            self.filesPerJob = int(cfg_params['CMSSW.files_per_jobs']) #Daniele
189 <            self.selectFilesPerJob = 1
188 >            if (cfg_params['CMSSW.files_per_jobs']):
189 >                raise CrabException("files_per_jobs no longer supported.  Quitting.")
190          except KeyError:
191 <            self.filesPerJob = 0
140 <            self.selectFilesPerJob = 0
191 >            pass
192  
193          ## Events per job
194          try:
# Line 147 | Line 198 | class Cmssw(JobType):
198              self.eventsPerJob = -1
199              self.selectEventsPerJob = 0
200      
201 <        # To be implemented
202 <        # ## number of jobs
203 <        # try:
204 <        #     self.numberOfJobs =int( cfg_params['CMSSW.number_of_job'])
205 <        #     self.selectNumberOfJobs = 1
206 <        # except KeyError:
207 <        #     self.selectNumberOfJobs = 0
157 <
158 <        if (self.selectFilesPerJob == self.selectEventsPerJob):
159 <            msg = 'Must define either files_per_jobs or events_per_job'
160 <            raise CrabException(msg)
201 >        ## number of jobs
202 >        try:
203 >            self.theNumberOfJobs =int( cfg_params['CMSSW.number_of_jobs'])
204 >            self.selectNumberOfJobs = 1
205 >        except KeyError:
206 >            self.theNumberOfJobs = 0
207 >            self.selectNumberOfJobs = 0
208  
162        if (self.selectEventsPerJob  and not self.datasetPath == None):
163            msg = 'Splitting according to events_per_job available only with None as datasetpath'
164            raise CrabException(msg)
165    
209          try:
210              self.total_number_of_events = int(cfg_params['CMSSW.total_number_of_events'])
211 +            self.selectTotalNumberEvents = 1
212          except KeyError:
213 <            msg = 'Must define total_number_of_events'
214 <            raise CrabException(msg)
215 <        
216 <        CEBlackList = []
213 >            self.total_number_of_events = 0
214 >            self.selectTotalNumberEvents = 0
215 >
216 >        if self.pset != None: #CarlosDaniele
217 >             if ( (self.selectTotalNumberEvents + self.selectEventsPerJob + self.selectNumberOfJobs) != 2 ):
218 >                 msg = 'Must define exactly two of total_number_of_events, events_per_job, or number_of_jobs.'
219 >                 raise CrabException(msg)
220 >        else:
221 >             if (self.selectNumberOfJobs == 0):
222 >                 msg = 'Must specify  number_of_jobs.'
223 >                 raise CrabException(msg)
224 >
225 >        ## source seed for pythia
226          try:
227 <            tmpBad = string.split(cfg_params['EDG.ce_black_list'],',')
175 <            for tmp in tmpBad:
176 <                tmp=string.strip(tmp)
177 <                CEBlackList.append(tmp)
227 >            self.sourceSeed = int(cfg_params['CMSSW.pythia_seed'])
228          except KeyError:
229 <            pass
229 >            self.sourceSeed = None
230 >            common.logger.debug(5,"No seed given")
231  
232 <        self.reCEBlackList=[]
233 <        for bad in CEBlackList:
183 <            self.reCEBlackList.append(re.compile( bad ))
184 <
185 <        common.logger.debug(5,'CEBlackList: '+str(CEBlackList))
186 <
187 <        CEWhiteList = []
188 <        try:
189 <            tmpGood = string.split(cfg_params['EDG.ce_white_list'],',')
190 <            for tmp in tmpGood:
191 <                tmp=string.strip(tmp)
192 <                CEWhiteList.append(tmp)
232 >        try:
233 >            self.sourceSeedVtx = int(cfg_params['CMSSW.vtx_seed'])
234          except KeyError:
235 <            pass
235 >            self.sourceSeedVtx = None
236 >            common.logger.debug(5,"No vertex seed given")
237  
238 <        #print 'CEWhiteList: ',CEWhiteList
239 <        self.reCEWhiteList=[]
240 <        for Good in CEWhiteList:
241 <            self.reCEWhiteList.append(re.compile( Good ))
238 >        try:
239 >            self.sourceSeedG4 = int(cfg_params['CMSSW.g4_seed'])
240 >        except KeyError:
241 >            self.sourceSeedG4 = None
242 >            common.logger.debug(5,"No g4 sim hits seed given")
243  
244 <        common.logger.debug(5,'CEWhiteList: '+str(CEWhiteList))
244 >        try:
245 >            self.sourceSeedMix = int(cfg_params['CMSSW.mix_seed'])
246 >        except KeyError:
247 >            self.sourceSeedMix = None
248 >            common.logger.debug(5,"No mix seed given")
249  
250 <        self.PsetEdit = PsetManipulator.PsetManipulator(self.pset) #Daniele Pset
250 >        try:
251 >            self.firstRun = int(cfg_params['CMSSW.first_run'])
252 >        except KeyError:
253 >            self.firstRun = None
254 >            common.logger.debug(5,"No first run given")
255 >        if self.pset != None: #CarlosDaniele
256 >            ver = string.split(self.version,"_")
257 >            if (int(ver[1])>=1 and int(ver[2])>=5):
258 >                import PsetManipulator150 as pp
259 >            else:
260 >                import PsetManipulator as pp
261 >            PsetEdit = pp.PsetManipulator(self.pset) #Daniele Pset
262  
263          #DBSDLS-start
264          ## Initialize the variables that are extracted from DBS/DLS and needed in other places of the code
265          self.maxEvents=0  # max events available   ( --> check the requested nb. of evts in Creator.py)
266          self.DBSPaths={}  # all dbs paths requested ( --> input to the site local discovery script)
267 +        self.jobDestination=[]  # Site destination(s) for each job (list of lists)
268          ## Perform the data location and discovery (based on DBS/DLS)
269          ## SL: Don't if NONE is specified as input (pythia use case)
270 <        common.analisys_common_info['sites']=None
270 >        blockSites = {}
271          if self.datasetPath:
272 <            self.DataDiscoveryAndLocation(cfg_params)
272 >            blockSites = self.DataDiscoveryAndLocation(cfg_params)
273          #DBSDLS-end          
274  
275          self.tgzNameWithPath = self.getTarBall(self.executable)
217
218        # modify Pset
219        if (self.datasetPath): # standard job
220            self.PsetEdit.maxEvent(self.eventsPerJob) #Daniele  
221            self.PsetEdit.inputModule("INPUT") #Daniele
222
223        else:  # pythia like job
224            self.PsetEdit.maxEvent(self.eventsPerJob) #Daniele  
225            self.PsetEdit.pythiaSeed("INPUT") #Daniele
226            try:
227                self.sourceSeed = int(cfg_params['CMSSW.pythia_seed'])
228            except KeyError:
229                self.sourceSeed = 123456
230                common.logger.message("No seed given, will use "+str(self.sourceSeed))
231        
232        self.PsetEdit.psetWriter(self.configFilename())
276      
277          ## Select Splitting
278 <        if self.selectFilesPerJob: self.jobSplittingPerFiles()
279 <        elif self.selectEventsPerJob: self.jobSplittingPerEvents()
278 >        if self.selectNoInput:
279 >            if self.pset == None: #CarlosDaniele
280 >                self.jobSplittingForScript()
281 >            else:
282 >                self.jobSplittingNoInput()
283          else:
284 <            msg = 'Don\'t know how to split...'
239 <            raise CrabException(msg)
284 >            self.jobSplittingByBlocks(blockSites)
285  
286 +        # modify Pset
287 +        if self.pset != None: #CarlosDaniele
288 +            try:
289 +                if (self.datasetPath): # standard job
290 >                    # allow to process a fraction of events in a file
291 +                    PsetEdit.inputModule("INPUT")
292 +                    PsetEdit.maxEvent("INPUTMAXEVENTS")
293 +                    PsetEdit.skipEvent("INPUTSKIPEVENTS")
294 +                else:  # pythia like job
295 +                    PsetEdit.maxEvent(self.eventsPerJob)
296 +                    if (self.firstRun):
297 +                        PsetEdit.pythiaFirstRun("INPUTFIRSTRUN")  #First Run
298 +                    if (self.sourceSeed) :
299 +                        PsetEdit.pythiaSeed("INPUT")
300 +                        if (self.sourceSeedVtx) :
301 +                            PsetEdit.vtxSeed("INPUTVTX")
302 +                        if (self.sourceSeedG4) :
303 +                            self.PsetEdit.g4Seed("INPUTG4")
304 +                        if (self.sourceSeedMix) :
305 +                            self.PsetEdit.mixSeed("INPUTMIX")
306 +                # add FrameworkJobReport to parameter-set
307 +                PsetEdit.addCrabFJR(self.fjrFileName)
308 +                PsetEdit.psetWriter(self.configFilename())
309 +            except:
310 +                msg='Error while manipuliating ParameterSet: exiting...'
311 +                raise CrabException(msg)
312  
313      def DataDiscoveryAndLocation(self, cfg_params):
314  
315 +        import DataDiscovery
316 +        import DataDiscovery_DBS2
317 +        import DataLocation
318          common.logger.debug(10,"CMSSW::DataDiscoveryAndLocation()")
319  
320          datasetPath=self.datasetPath
321  
248        ## TODO
249        dataTiersList = ""
250        dataTiers = dataTiersList.split(',')
251
322          ## Contact the DBS
323 +        common.logger.message("Contacting Data Discovery Services ...")
324          try:
325 <            self.pubdata=DataDiscovery_EDM.DataDiscovery_EDM(datasetPath, dataTiers, cfg_params)
325 >
326 >            if self.use_dbs_1 == 1 :
327 >                self.pubdata=DataDiscovery.DataDiscovery(datasetPath, cfg_params)
328 >            else :
329 >                self.pubdata=DataDiscovery_DBS2.DataDiscovery_DBS2(datasetPath, cfg_params)
330              self.pubdata.fetchDBSInfo()
331  
332 <        except DataDiscovery_EDM.NotExistingDatasetError, ex :
332 >        except DataDiscovery.NotExistingDatasetError, ex :
333              msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
334              raise CrabException(msg)
335 <
261 <        except DataDiscovery_EDM.NoDataTierinProvenanceError, ex :
335 >        except DataDiscovery.NoDataTierinProvenanceError, ex :
336              msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
337              raise CrabException(msg)
338 <        except DataDiscovery_EDM.DataDiscoveryError, ex:
339 <            msg = 'ERROR ***: failed Data Discovery in DBS  %s'%ex.getErrorMessage()
338 >        except DataDiscovery.DataDiscoveryError, ex:
339 >            msg = 'ERROR ***: failed Data Discovery in DBS :  %s'%ex.getErrorMessage()
340 >            raise CrabException(msg)
341 >        except DataDiscovery_DBS2.NotExistingDatasetError_DBS2, ex :
342 >            msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
343 >            raise CrabException(msg)
344 >        except DataDiscovery_DBS2.NoDataTierinProvenanceError_DBS2, ex :
345 >            msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
346 >            raise CrabException(msg)
347 >        except DataDiscovery_DBS2.DataDiscoveryError_DBS2, ex:
348 >            msg = 'ERROR ***: failed Data Discovery in DBS :  %s'%ex.getErrorMessage()
349              raise CrabException(msg)
350  
351 <        ## get list of all required data in the form of dbs paths  (dbs path = /dataset/datatier/owner)
352 <        ## self.DBSPaths=self.pubdata.getDBSPaths()
353 <        common.logger.message("Required data are :"+self.datasetPath)
271 <
272 <        filesbyblock=self.pubdata.getFiles()
273 <        self.AllInputFiles=filesbyblock.values()
274 <        self.files = self.AllInputFiles        
275 <
276 <        ## TEMP
277 <    #    self.filesTmp = filesbyblock.values()
278 <    #    self.files = []
279 <    #    locPath='rfio:cmsbose2.bo.infn.it:/flatfiles/SE00/cms/fanfani/ProdTest/'
280 <    #    locPath=''
281 <    #    tmp = []
282 <    #    for file in self.filesTmp[0]:
283 <    #        tmp.append(locPath+file)
284 <    #    self.files.append(tmp)
285 <        ## END TEMP
351 >        self.filesbyblock=self.pubdata.getFiles()
352 >        self.eventsbyblock=self.pubdata.getEventsPerBlock()
353 >        self.eventsbyfile=self.pubdata.getEventsPerFile()
354  
355          ## get max number of events
288        #common.logger.debug(10,"number of events for primary fileblocks %i"%self.pubdata.getMaxEvents())
356          self.maxEvents=self.pubdata.getMaxEvents() ##  self.maxEvents used in Creator.py
290        common.logger.message("\nThe number of available events is %s"%self.maxEvents)
357  
358          ## Contact the DLS and build a list of sites hosting the fileblocks
359          try:
360 <            dataloc=DataLocation_EDM.DataLocation_EDM(filesbyblock.keys(),cfg_params)
360 >            dataloc=DataLocation.DataLocation(self.filesbyblock.keys(),cfg_params)
361              dataloc.fetchDLSInfo()
362 <        except DataLocation_EDM.DataLocationError , ex:
362 >        except DataLocation.DataLocationError , ex:
363              msg = 'ERROR ***: failed Data Location in DLS \n %s '%ex.getErrorMessage()
364              raise CrabException(msg)
365          
300        allsites=dataloc.getSites()
301        common.logger.debug(5,"sites are %s"%allsites)
302        sites=self.checkBlackList(allsites)
303        common.logger.debug(5,"sites are (after black list) %s"%sites)
304        sites=self.checkWhiteList(sites)
305        common.logger.debug(5,"sites are (after white list) %s"%sites)
366  
367 <        if len(sites)==0:
368 <            msg = 'No sites hosting all the needed data! Exiting... '
369 <            raise CrabException(msg)
367 >        sites = dataloc.getSites()
368 >        allSites = []
369 >        listSites = sites.values()
370 >        for listSite in listSites:
371 >            for oneSite in listSite:
372 >                allSites.append(oneSite)
373 >        allSites = self.uniquelist(allSites)
374  
375 <        common.logger.message("List of Sites hosting the data : "+str(sites))
376 <        common.logger.debug(6, "List of Sites: "+str(sites))
377 <        common.analisys_common_info['sites']=sites    ## used in SchedulerEdg.py in createSchScript
378 <        self.setParam_('TargetCE', ','.join(sites))
315 <        return
375 >        # screen output
376 >        common.logger.message("Requested dataset: " + datasetPath + " has " + str(self.maxEvents) + " events in " + str(len(self.filesbyblock.keys())) + " blocks.\n")
377 >
378 >        return sites
379      
380 <    def jobSplittingPerFiles(self):
380 >    def jobSplittingByBlocks(self, blockSites):
381          """
382 <        Perform job splitting based on number of files to be accessed per job
383 <        """
384 <        common.logger.debug(5,'Splitting per input files')
385 <        common.logger.message('Required '+str(self.filesPerJob)+' files per job ')
386 <        common.logger.message('Required '+str(self.total_number_of_events)+' events in total ')
387 <
388 <        ## TODO: SL need to have (from DBS) a detailed list of how many events per each file
389 <        n_tot_files = (len(self.files[0]))
390 <        #print "n_tot_files = ", n_tot_files
391 <        ## SL: this is wrong if the files have different number of events
392 <        #print "self.maxEvents = ", self.maxEvents
393 <        evPerFile = int(self.maxEvents)/n_tot_files
394 <        #print "evPerFile = int(self.maxEvents)/n_tot_files =  ", evPerFile
395 <
396 <        common.logger.debug(5,'Events per File '+str(evPerFile))
397 <
398 <        ## if asked to process all events, do it
399 <        if self.total_number_of_events == -1:
400 <            self.total_number_of_events=self.maxEvents
401 <            self.total_number_of_jobs = int(n_tot_files)*1/int(self.filesPerJob)
402 <            common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for all available events '+str(self.total_number_of_events)+' events')
403 <        
382 >        Perform job splitting. Jobs run over an integer number of files
383 >        and no more than one block.
384 >        ARGUMENT: blockSites: dictionary with blocks as keys and list of host sites as values
385 >        REQUIRES: self.selectTotalNumberEvents, self.selectEventsPerJob, self.selectNumberofJobs,
386 >                  self.total_number_of_events, self.eventsPerJob, self.theNumberOfJobs,
387 >                  self.maxEvents, self.filesbyblock
388 >        SETS: self.jobDestination - Site destination(s) for each job (a list of lists)
389 >              self.total_number_of_jobs - Total # of jobs
390 >              self.list_of_args - File(s) job will run on (a list of lists)
391 >        """
392 >
393 >        # ---- Handle the possible job splitting configurations ---- #
394 >        if (self.selectTotalNumberEvents):
395 >            totalEventsRequested = self.total_number_of_events
396 >        if (self.selectEventsPerJob):
397 >            eventsPerJobRequested = self.eventsPerJob
398 >            if (self.selectNumberOfJobs):
399 >                totalEventsRequested = self.theNumberOfJobs * self.eventsPerJob
400 >
401 >        # If user requested all the events in the dataset
402 >        if (totalEventsRequested == -1):
403 >            eventsRemaining=self.maxEvents
404 >        # If user requested more events than are in the dataset
405 >        elif (totalEventsRequested > self.maxEvents):
406 >            eventsRemaining = self.maxEvents
407 >            common.logger.message("Requested "+str(self.total_number_of_events)+ " events, but only "+str(self.maxEvents)+" events are available.")
408 >        # If user requested less events than are in the dataset
409          else:
410 <            #print "self.total_number_of_events = ", self.total_number_of_events
411 <            #print "evPerFile = ", evPerFile
412 <            self.total_number_of_files = int(self.total_number_of_events/evPerFile)
413 <            #print "self.total_number_of_files = int(self.total_number_of_events/evPerFile) = " , self.total_number_of_files
414 <            ## SL: if ask for less event than what is computed to be available on a
347 <            ##     file, process the first file anyhow.
348 <            if self.total_number_of_files == 0:
349 <                self.total_number_of_files = self.total_number_of_files + 1
350 <                
410 >            eventsRemaining = totalEventsRequested
411 >
412 >        # If user requested more events per job than are in the dataset
413 >        if (self.selectEventsPerJob and eventsPerJobRequested > self.maxEvents):
414 >            eventsPerJobRequested = self.maxEvents
415  
416 <            common.logger.debug(5,'N files  '+str(self.total_number_of_files))
416 >        # For user info at end
417 >        totalEventCount = 0
418  
419 <            check = 0
419 >        if (self.selectTotalNumberEvents and self.selectNumberOfJobs):
420 >            eventsPerJobRequested = int(eventsRemaining/self.theNumberOfJobs)
421 >
422 >        if (self.selectNumberOfJobs):
423 >            common.logger.message("May not create the exact number_of_jobs requested.")
424 >
425 >        if ( self.ncjobs == 'all' ) :
426 >            totalNumberOfJobs = 999999999
427 >        else :
428 >            totalNumberOfJobs = self.ncjobs
429              
356            ## Compute the number of jobs
357            #self.total_number_of_jobs = int(n_tot_files)*1/int(self.filesPerJob)
358            #print "self.total_number_of_files = ", self.total_number_of_files
359            #print "self.filesPerJob = ", self.filesPerJob
360            self.total_number_of_jobs = int(self.total_number_of_files/self.filesPerJob)
361            #print "self.total_number_of_jobs = ", self.total_number_of_jobs
362            common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
363
364            ## is there any remainder?
365            check = int(self.total_number_of_files) - (int(self.total_number_of_jobs)*self.filesPerJob)
366
367            common.logger.debug(5,'Check  '+str(check))
368
369            if check > 0:
370                self.total_number_of_jobs =  self.total_number_of_jobs + 1
371                common.logger.message('Warning: last job will be created with '+str(check)+' files')
430  
431 <            #common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for a total of '+str((self.total_number_of_jobs-1)*self.filesPerJob*evPerFile + check*evPerFile)+' events')
432 <            common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for a total of '+str((self.total_number_of_jobs)*self.filesPerJob*evPerFile + check*evPerFile)+' events')
433 <            pass
431 >        blocks = blockSites.keys()
432 >        blockCount = 0
433 >        # Backup variable in case self.maxEvents counted events in a non-included block
434 >        numBlocksInDataset = len(blocks)
435  
436 +        jobCount = 0
437          list_of_lists = []
438 <        for i in xrange(0, int(n_tot_files), self.filesPerJob):
439 <            parString = "\\{"
438 >
439 >        # list tracking which jobs belong to which block
440 >        jobsOfBlock = {}
441 >
442 >        # ---- Iterate over the blocks in the dataset until ---- #
443 >        # ---- we've met the requested total # of events    ---- #
444 >        while ( (eventsRemaining > 0) and (blockCount < numBlocksInDataset) and (jobCount < totalNumberOfJobs)):
445 >            block = blocks[blockCount]
446 >            blockCount += 1
447 >            if block not in jobsOfBlock.keys() :
448 >                jobsOfBlock[block] = []
449              
450 <            params = self.files[0][i: i+self.filesPerJob]
451 <            for i in range(len(params) - 1):
452 <                parString += '\\\"' + params[i] + '\\\"\,'
450 >            if self.eventsbyblock.has_key(block) :
451 >                numEventsInBlock = self.eventsbyblock[block]
452 >                common.logger.debug(5,'Events in Block File '+str(numEventsInBlock))
453              
454 <            parString += '\\\"' + params[len(params) - 1] + '\\\"\\}'
455 <            list_of_lists.append(parString)
456 <            pass
454 >                files = self.filesbyblock[block]
455 >                numFilesInBlock = len(files)
456 >                if (numFilesInBlock <= 0):
457 >                    continue
458 >                fileCount = 0
459 >
460 >                # ---- New block => New job ---- #
461 >                parString = "\\{"
462 >                # counter for number of events in files currently worked on
463 >                filesEventCount = 0
464 >                # flag if next while loop should touch new file
465 >                newFile = 1
466 >                # job event counter
467 >                jobSkipEventCount = 0
468 >            
469 >                # ---- Iterate over the files in the block until we've met the requested ---- #
470 >                # ---- total # of events or we've gone over all the files in this block  ---- #
471 >                while ( (eventsRemaining > 0) and (fileCount < numFilesInBlock) and (jobCount < totalNumberOfJobs) ):
472 >                    file = files[fileCount]
473 >                    if newFile :
474 >                        try:
475 >                            numEventsInFile = self.eventsbyfile[file]
476 >                            common.logger.debug(6, "File "+str(file)+" has "+str(numEventsInFile)+" events")
477 >                            # increase filesEventCount
478 >                            filesEventCount += numEventsInFile
479 >                            # Add file to current job
480 >                            parString += '\\\"' + file + '\\\"\,'
481 >                            newFile = 0
482 >                        except KeyError:
483 >                            common.logger.message("File "+str(file)+" has unknown number of events: skipping")
484 >                        
485 >
486 >                    # if less events in file remain than eventsPerJobRequested
487 >                    if ( filesEventCount - jobSkipEventCount < eventsPerJobRequested ) :
488 >                        # if last file in block
489 >                        if ( fileCount == numFilesInBlock-1 ) :
490 >                            # end job using last file, use remaining events in block
491 >                            # close job and touch new file
492 >                            fullString = parString[:-2]
493 >                            fullString += '\\}'
494 >                            list_of_lists.append([fullString,str(-1),str(jobSkipEventCount)])
495 >                            common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(filesEventCount - jobSkipEventCount)+" events (last file in block).")
496 >                            self.jobDestination.append(blockSites[block])
497 >                            common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
498 >                            # fill jobs of block dictionary
499 >                            jobsOfBlock[block].append(jobCount+1)
500 >                            # reset counter
501 >                            jobCount = jobCount + 1
502 >                            totalEventCount = totalEventCount + filesEventCount - jobSkipEventCount
503 >                            eventsRemaining = eventsRemaining - filesEventCount + jobSkipEventCount
504 >                            jobSkipEventCount = 0
505 >                            # reset file
506 >                            parString = "\\{"
507 >                            filesEventCount = 0
508 >                            newFile = 1
509 >                            fileCount += 1
510 >                        else :
511 >                            # go to next file
512 >                            newFile = 1
513 >                            fileCount += 1
514 >                    # if events in file equal to eventsPerJobRequested
515 >                    elif ( filesEventCount - jobSkipEventCount == eventsPerJobRequested ) :
516 >                        # close job and touch new file
517 >                        fullString = parString[:-2]
518 >                        fullString += '\\}'
519 >                        list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
520 >                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
521 >                        self.jobDestination.append(blockSites[block])
522 >                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
523 >                        jobsOfBlock[block].append(jobCount+1)
524 >                        # reset counter
525 >                        jobCount = jobCount + 1
526 >                        totalEventCount = totalEventCount + eventsPerJobRequested
527 >                        eventsRemaining = eventsRemaining - eventsPerJobRequested
528 >                        jobSkipEventCount = 0
529 >                        # reset file
530 >                        parString = "\\{"
531 >                        filesEventCount = 0
532 >                        newFile = 1
533 >                        fileCount += 1
534 >                        
535 >                    # if more events in file remain than eventsPerJobRequested
536 >                    else :
537 >                        # close job but don't touch new file
538 >                        fullString = parString[:-2]
539 >                        fullString += '\\}'
540 >                        list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
541 >                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
542 >                        self.jobDestination.append(blockSites[block])
543 >                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
544 >                        jobsOfBlock[block].append(jobCount+1)
545 >                        # increase counter
546 >                        jobCount = jobCount + 1
547 >                        totalEventCount = totalEventCount + eventsPerJobRequested
548 >                        eventsRemaining = eventsRemaining - eventsPerJobRequested
549 >                        # calculate skip events for last file
550 >                        # use filesEventCount (contains several files), jobSkipEventCount and eventsPerJobRequest
551 >                        jobSkipEventCount = eventsPerJobRequested - (filesEventCount - jobSkipEventCount - self.eventsbyfile[file])
552 >                        # remove all but the last file
553 >                        filesEventCount = self.eventsbyfile[file]
554 >                        parString = "\\{"
555 >                        parString += '\\\"' + file + '\\\"\,'
556 >                    pass # END if
557 >                pass # END while (iterate over files in the block)
558 >        pass # END while (iterate over blocks in the dataset)
559 >        self.ncjobs = self.total_number_of_jobs = jobCount
560 >        if (eventsRemaining > 0 and jobCount < totalNumberOfJobs ):
561 >            common.logger.message("Could not run on all requested events because some blocks not hosted at allowed sites.")
562 >        common.logger.message(str(jobCount)+" job(s) can run on "+str(totalEventCount)+" events.\n")
563 >        
564 >        # screen output
565 >        screenOutput = "List of jobs and available destination sites:\n\n"
566 >
567 >        blockCounter = 0
568 >        for block in blocks:
569 >            if block in jobsOfBlock.keys() :
570 >                blockCounter += 1
571 >                screenOutput += "Block %5i: jobs %20s: sites: %s\n" % (blockCounter,spanRanges(jobsOfBlock[block]),','.join(blockSites[block]))
572 >
573 >       # common.logger.message(screenOutput)
574  
575          self.list_of_args = list_of_lists
390        #print self.list_of_args
576          return
577  
578 <    def jobSplittingPerEvents(self):
578 >    def jobSplittingNoInput(self):
579          """
580          Perform job splitting based on number of event per job
581          """
582          common.logger.debug(5,'Splitting per events')
583          common.logger.message('Required '+str(self.eventsPerJob)+' events per job ')
584 +        common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
585          common.logger.message('Required '+str(self.total_number_of_events)+' events in total ')
586  
587          if (self.total_number_of_events < 0):
588              msg='Cannot split jobs per Events with "-1" as total number of events'
589              raise CrabException(msg)
590  
591 <        self.total_number_of_jobs = int(self.total_number_of_events/self.eventsPerJob)
592 <
593 <        print "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"
594 <        print "self.total_number_of_events = ", self.total_number_of_events
595 <        print "self.eventsPerJob = ", self.eventsPerJob
596 <        print "self.total_number_of_jobs = ", self.total_number_of_jobs
597 <        print "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"
598 <        
591 >        if (self.selectEventsPerJob):
592 >            if (self.selectTotalNumberEvents):
593 >                self.total_number_of_jobs = int(self.total_number_of_events/self.eventsPerJob)
594 >            elif(self.selectNumberOfJobs) :  
595 >                self.total_number_of_jobs =self.theNumberOfJobs
596 >                self.total_number_of_events =int(self.theNumberOfJobs*self.eventsPerJob)
597 >
598 >        elif (self.selectNumberOfJobs) :
599 >            self.total_number_of_jobs = self.theNumberOfJobs
600 >            self.eventsPerJob = int(self.total_number_of_events/self.total_number_of_jobs)
601 >
602          common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
603  
604          # is there any remainder?
# Line 417 | Line 606 | class Cmssw(JobType):
606  
607          common.logger.debug(5,'Check  '+str(check))
608  
609 +        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created, each for '+str(self.eventsPerJob)+' for a total of '+str(self.total_number_of_jobs*self.eventsPerJob)+' events')
610          if check > 0:
611 <            common.logger.message('Warning: asked '+self.total_number_of_events+' but will do only '+(int(self.total_number_of_jobs)*self.eventsPerJob))
422 <
423 <        common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for a total of '+str(self.total_number_of_jobs*self.eventsPerJob)+' events')
611 >            common.logger.message('Warning: asked '+str(self.total_number_of_events)+' but can do only '+str(int(self.total_number_of_jobs)*self.eventsPerJob))
612  
613          # argument is seed number.$i
614          self.list_of_args = []
615          for i in range(self.total_number_of_jobs):
616 <            self.list_of_args.append(int(str(self.sourceSeed)+str(i)))
617 <        print self.list_of_args
616 >            ## Since there is no input, any site is good
617 >           # self.jobDestination.append(["Any"])
618 >            self.jobDestination.append([""]) #must be empty to write correctly the xml
619 >            args=[]
620 >            if (self.firstRun):
621 >                    ## pythia first run
622 >                #self.list_of_args.append([(str(self.firstRun)+str(i))])
623 >                args.append(str(self.firstRun)+str(i))
624 >            else:
625 >                ## no first run
626 >                #self.list_of_args.append([str(i)])
627 >                args.append(str(i))
628 >            if (self.sourceSeed):
629 >                args.append(str(self.sourceSeed)+str(i))
630 >                if (self.sourceSeedVtx):
631 >                    ## + vtx random seed
632 >                    args.append(str(self.sourceSeedVtx)+str(i))
633 >                if (self.sourceSeedG4):
634 >                    ## + G4 random seed
635 >                    args.append(str(self.sourceSeedG4)+str(i))
636 >                if (self.sourceSeedMix):    
637 >                    ## + Mix random seed
638 >                    args.append(str(self.sourceSeedMix)+str(i))
639 >                pass
640 >            pass
641 >            self.list_of_args.append(args)
642 >        pass
643 >            
644 >        # print self.list_of_args
645 >
646 >        return
647 >
648 >
649 >    def jobSplittingForScript(self):#CarlosDaniele
650 >        """
651 >        Perform job splitting based on number of job
652 >        """
653 >        common.logger.debug(5,'Splitting per job')
654 >        common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
655 >
656 >        self.total_number_of_jobs = self.theNumberOfJobs
657 >
658 >        common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
659 >
660 >        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created')
661  
662 +        # argument is seed number.$i
663 +        self.list_of_args = []
664 +        for i in range(self.total_number_of_jobs):
665 +            ## Since there is no input, any site is good
666 +           # self.jobDestination.append(["Any"])
667 +            self.jobDestination.append([""])
668 +            ## no random seed
669 +            self.list_of_args.append([str(i)])
670          return
671  
672      def split(self, jobParams):
# Line 441 | Line 680 | class Cmssw(JobType):
680              jobParams.append("")
681          
682          for job in range(njobs):
683 <            jobParams[job] = str(arglist[job])
683 >            jobParams[job] = arglist[job]
684 >            # print str(arglist[job])
685 >            # print jobParams[job]
686              common.jobDB.setArguments(job, jobParams[job])
687 +            common.logger.debug(5,"Job "+str(job)+" Destination: "+str(self.jobDestination[job]))
688 +            common.jobDB.setDestination(job, self.jobDestination[job])
689  
690          common.jobDB.save()
691          return
692      
693      def getJobTypeArguments(self, nj, sched):
694 <        return common.jobDB.arguments(nj)
694 >        result = ''
695 >        for i in common.jobDB.arguments(nj):
696 >            result=result+str(i)+" "
697 >        return result
698    
699      def numberOfJobs(self):
700          # Fabio
701          return self.total_number_of_jobs
702  
457    def checkBlackList(self, allSites):
458        if len(self.reCEBlackList)==0: return allSites
459        sites = []
460        for site in allSites:
461            common.logger.debug(10,'Site '+site)
462            good=1
463            for re in self.reCEBlackList:
464                if re.search(site):
465                    common.logger.message('CE in black list, skipping site '+site)
466                    good=0
467                pass
468            if good: sites.append(site)
469        if len(sites) == 0:
470            common.logger.debug(3,"No sites found after BlackList")
471        return sites
472
473    def checkWhiteList(self, allSites):
474
475        if len(self.reCEWhiteList)==0: return allSites
476        sites = []
477        for site in allSites:
478            good=0
479            for re in self.reCEWhiteList:
480                if re.search(site):
481                    common.logger.debug(5,'CE in white list, adding site '+site)
482                    good=1
483                if not good: continue
484                sites.append(site)
485        if len(sites) == 0:
486            common.logger.message("No sites found after WhiteList\n")
487        else:
488            common.logger.debug(5,"Selected sites via WhiteList are "+str(sites)+"\n")
489        return sites
490
703      def getTarBall(self, exe):
704          """
705          Return the TarBall with lib and exe
706          """
707          
708          # if it exist, just return it
709 <        self.tgzNameWithPath = common.work_space.shareDir()+self.tgz_name
709 >        #
710 >        # Marco. Let's start to use relative path for Boss XML files
711 >        #
712 >        self.tgzNameWithPath = common.work_space.pathForTgz()+'share/'+self.tgz_name
713          if os.path.exists(self.tgzNameWithPath):
714              return self.tgzNameWithPath
715  
# Line 508 | Line 723 | class Cmssw(JobType):
723          # First of all declare the user Scram area
724          swArea = self.scram.getSWArea_()
725          #print "swArea = ", swArea
726 <        swVersion = self.scram.getSWVersion()
727 <        #print "swVersion = ", swVersion
726 >        # swVersion = self.scram.getSWVersion()
727 >        # print "swVersion = ", swVersion
728          swReleaseTop = self.scram.getReleaseTop_()
729          #print "swReleaseTop = ", swReleaseTop
730          
# Line 517 | Line 732 | class Cmssw(JobType):
732          if swReleaseTop == '' or swArea == swReleaseTop:
733              return
734  
735 <        filesToBeTarred = []
736 <        ## First find the executable
737 <        if (self.executable != ''):
738 <            exeWithPath = self.scram.findFile_(executable)
739 < #           print exeWithPath
740 <            if ( not exeWithPath ):
741 <                raise CrabException('User executable '+executable+' not found')
742 <
743 <            ## then check if it's private or not
744 <            if exeWithPath.find(swReleaseTop) == -1:
745 <                # the exe is private, so we must ship
746 <                common.logger.debug(5,"Exe "+exeWithPath+" to be tarred")
747 <                path = swArea+'/'
748 <                exe = string.replace(exeWithPath, path,'')
749 <                filesToBeTarred.append(exe)
750 <                pass
751 <            else:
752 <                # the exe is from release, we'll find it on WN
753 <                pass
754 <
755 <        ## Now get the libraries: only those in local working area
756 <        libDir = 'lib'
757 <        lib = swArea+'/' +libDir
758 <        common.logger.debug(5,"lib "+lib+" to be tarred")
759 <        if os.path.exists(lib):
760 <            filesToBeTarred.append(libDir)
761 <
762 <        ## Now check if module dir is present
763 <        moduleDir = 'module'
764 <        if os.path.isdir(swArea+'/'+moduleDir):
765 <            filesToBeTarred.append(moduleDir)
766 <
767 <        ## Now check if the Data dir is present
768 <        dataDir = 'src/Data/'
769 <        if os.path.isdir(swArea+'/'+dataDir):
770 <            filesToBeTarred.append(dataDir)
771 <
772 <        ## Create the tar-ball
773 <        if len(filesToBeTarred)>0:
774 <            cwd = os.getcwd()
775 <            os.chdir(swArea)
776 <            tarcmd = 'tar zcvf ' + self.tgzNameWithPath + ' '
777 <            for line in filesToBeTarred:
778 <                tarcmd = tarcmd + line + ' '
779 <            cout = runCommand(tarcmd)
780 <            if not cout:
781 <                raise CrabException('Could not create tar-ball')
782 <            os.chdir(cwd)
783 <        else:
784 <            common.logger.debug(5,"No files to be to be tarred")
735 >        import tarfile
736 >        try: # create tar ball
737 >            tar = tarfile.open(self.tgzNameWithPath, "w:gz")
738 >            ## First find the executable
739 >            if (self.executable != ''):
740 >                exeWithPath = self.scram.findFile_(executable)
741 >                if ( not exeWithPath ):
742 >                    raise CrabException('User executable '+executable+' not found')
743 >    
744 >                ## then check if it's private or not
745 >                if exeWithPath.find(swReleaseTop) == -1:
746 >                    # the exe is private, so we must ship
747 >                    common.logger.debug(5,"Exe "+exeWithPath+" to be tarred")
748 >                    path = swArea+'/'
749 >                    # distinguish case when script is in user project area or given by full path somewhere else
750 >                    if exeWithPath.find(path) >= 0 :
751 >                        exe = string.replace(exeWithPath, path,'')
752 >                        tar.add(path+exe,os.path.basename(executable))
753 >                    else :
754 >                        tar.add(exeWithPath,os.path.basename(executable))
755 >                    pass
756 >                else:
757 >                    # the exe is from release, we'll find it on WN
758 >                    pass
759 >    
760 >            ## Now get the libraries: only those in local working area
761 >            libDir = 'lib'
762 >            lib = swArea+'/' +libDir
763 >            common.logger.debug(5,"lib "+lib+" to be tarred")
764 >            if os.path.exists(lib):
765 >                tar.add(lib,libDir)
766 >    
767 >            ## Now check if module dir is present
768 >            moduleDir = 'module'
769 >            module = swArea + '/' + moduleDir
770 >            if os.path.isdir(module):
771 >                tar.add(module,moduleDir)
772 >
773 >            ## Now check if any data dir(s) is present
774 >            swAreaLen=len(swArea)
775 >            for root, dirs, files in os.walk(swArea):
776 >                if "data" in dirs:
777 >                    common.logger.debug(5,"data "+root+"/data"+" to be tarred")
778 >                    tar.add(root+"/data",root[swAreaLen:]+"/data")
779 >
780 >            ## Add ProdAgent dir to tar
781 >            paDir = 'ProdAgentApi'
782 >            pa = os.environ['CRABDIR'] + '/' + 'ProdAgentApi'
783 >            if os.path.isdir(pa):
784 >                tar.add(pa,paDir)
785 >
786 >            ### FEDE FOR DBS PUBLICATION
787 >            ## Add PRODCOMMON dir to tar
788 >            prodcommonDir = 'ProdCommon'
789 >            prodcommonPath = os.environ['CRABDIR'] + '/' + 'ProdCommon'
790 >            if os.path.isdir(prodcommonPath):
791 >                tar.add(prodcommonPath,prodcommonDir)
792 >            #############################    
793 >        
794 >            common.logger.debug(5,"Files added to "+self.tgzNameWithPath+" : "+str(tar.getnames()))
795 >            tar.close()
796 >        except :
797 >            raise CrabException('Could not create tar-ball')
798 >
799 >        ## check for tarball size
800 >        tarballinfo = os.stat(self.tgzNameWithPath)
801 >        if ( tarballinfo.st_size > self.MaxTarBallSize*1024*1024 ) :
802 >            raise CrabException('Input sandbox size of ' + str(float(tarballinfo.st_size)/1024.0/1024.0) + ' MB is larger than the allowed ' + str(self.MaxTarBallSize) + ' MB input sandbox limit and not supported by the used GRID submission system. Please make sure that no unnecessary files are in all data directories in your local CMSSW project area as they are automatically packed into the input sandbox.')
803 >
804 >        ## create tar-ball with ML stuff
805 >        self.MLtgzfile =  common.work_space.pathForTgz()+'share/MLfiles.tgz'
806 >        try:
807 >            tar = tarfile.open(self.MLtgzfile, "w:gz")
808 >            path=os.environ['CRABDIR'] + '/python/'
809 >            for file in ['report.py', 'DashboardAPI.py', 'Logger.py', 'ProcInfo.py', 'apmon.py', 'parseCrabFjr.py']:
810 >                tar.add(path+file,file)
811 >            common.logger.debug(5,"Files added to "+self.MLtgzfile+" : "+str(tar.getnames()))
812 >            tar.close()
813 >        except :
814 >            raise CrabException('Could not create ML files tar-ball')
815          
816          return
817          
818 +    def additionalInputFileTgz(self):
819 +        """
820 +        Put all additional files into a tar ball and return its name
821 +        """
822 +        import tarfile
823 +        tarName=  common.work_space.pathForTgz()+'share/'+self.additional_tgz_name
824 +        tar = tarfile.open(tarName, "w:gz")
825 +        for file in self.additional_inbox_files:
826 +            tar.add(file,string.split(file,'/')[-1])
827 +        common.logger.debug(5,"Files added to "+self.additional_tgz_name+" : "+str(tar.getnames()))
828 +        tar.close()
829 +        return tarName
830 +
831      def wsSetupEnvironment(self, nj):
832          """
833          Returns part of a job script which prepares
# Line 580 | Line 838 | class Cmssw(JobType):
838    
839          ## OLI_Daniele at this level  middleware already known
840  
841 +        txt += 'echo "### Firtst set SCRAM ARCH and BUILD_ARCH ###"\n'
842 +        txt += 'echo "Setting SCRAM_ARCH='+self.executable_arch+'"\n'
843 +        txt += 'export SCRAM_ARCH='+self.executable_arch+'\n'
844 +        txt += 'export BUILD_ARCH='+self.executable_arch+'\n'
845          txt += 'if [ $middleware == LCG ]; then \n'
846          txt += self.wsSetupCMSLCGEnvironment_()
847          txt += 'elif [ $middleware == OSG ]; then\n'
848 <        txt += '    time=`date -u +"%s"`\n'
849 <        txt += '    WORKING_DIR=$OSG_WN_TMP/cms_$time\n'
588 <        txt += '    echo "Creating working directory: $WORKING_DIR"\n'
589 <        txt += '    /bin/mkdir -p $WORKING_DIR\n'
848 >        txt += '    WORKING_DIR=`/bin/mktemp  -d $OSG_WN_TMP/cms_XXXXXXXXXXXX`\n'
849 >        txt += '    echo "Created working directory: $WORKING_DIR"\n'
850          txt += '    if [ ! -d $WORKING_DIR ] ;then\n'
851          txt += '        echo "SET_CMS_ENV 10016 ==> OSG $WORKING_DIR could not be created on WN `hostname`"\n'
852 <        txt += '        echo "JOB_EXIT_STATUS = 10016"\n'
853 <        txt += '        echo "JobExitCode=10016" | tee -a $RUNTIME_AREA/$repo\n'
854 <        txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
852 >        txt += '    echo "JOB_EXIT_STATUS = 10016"\n'
853 >        txt += '    echo "JobExitCode=10016" | tee -a $RUNTIME_AREA/$repo\n'
854 >        txt += '    dumpStatus $RUNTIME_AREA/$repo\n'
855          txt += '        rm -f $RUNTIME_AREA/$repo \n'
856          txt += '        echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
597        txt += '        echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
857          txt += '        echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
858          txt += '        exit 1\n'
859          txt += '    fi\n'
# Line 617 | Line 876 | class Cmssw(JobType):
876          txt += '   dumpStatus $RUNTIME_AREA/$repo\n'
877          txt += '   rm -f $RUNTIME_AREA/$repo \n'
878          txt += '   echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
620        txt += '   echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
879          txt += '   echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
880          ## OLI_Daniele
881          txt += '    if [ $middleware == OSG ]; then \n'
# Line 625 | Line 883 | class Cmssw(JobType):
883          txt += '        cd $RUNTIME_AREA\n'
884          txt += '        /bin/rm -rf $WORKING_DIR\n'
885          txt += '        if [ -d $WORKING_DIR ] ;then\n'
886 <        txt += '            echo "SET_CMS_ENV 10018 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after CMSSW CMSSW_0_6_1 not found on `hostname`"\n'
887 <        txt += '            echo "JOB_EXIT_STATUS = 10018"\n'
888 <        txt += '            echo "JobExitCode=10018" | tee -a $RUNTIME_AREA/$repo\n'
889 <        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
886 >        txt += '            echo "SET_CMS_ENV 10018 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after CMSSW CMSSW_0_6_1 not found on `hostname`"\n'
887 >        txt += '            echo "JOB_EXIT_STATUS = 10018"\n'
888 >        txt += '            echo "JobExitCode=10018" | tee -a $RUNTIME_AREA/$repo\n'
889 >        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
890          txt += '            rm -f $RUNTIME_AREA/$repo \n'
891          txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
634        txt += '            echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
892          txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
893          txt += '        fi\n'
894          txt += '    fi \n'
# Line 639 | Line 896 | class Cmssw(JobType):
896          txt += 'fi \n'
897          txt += 'echo "CMSSW_VERSION =  '+self.version+'"\n'
898          txt += 'cd '+self.version+'\n'
899 +        ########## FEDE FOR DBS2 ######################
900 +        txt += 'SOFTWARE_DIR=`pwd`\n'
901 +        txt += 'echo SOFTWARE_DIR=$SOFTWARE_DIR \n'
902 +        ###############################################
903          ### needed grep for bug in scramv1 ###
904 +        txt += scram+' runtime -sh\n'
905          txt += 'eval `'+scram+' runtime -sh | grep -v SCRAMRT_LSB_JOBNAME`\n'
906 +        txt += 'echo $PATH\n'
907  
908          # Handle the arguments:
909          txt += "\n"
910          txt += "## number of arguments (first argument always jobnumber)\n"
911          txt += "\n"
912 <        txt += "narg=$#\n"
913 <        txt += "if [ $narg -lt 2 ]\n"
912 > #        txt += "narg=$#\n"
913 >        txt += "if [ $nargs -lt 2 ]\n"
914          txt += "then\n"
915 <        txt += "    echo 'SET_EXE_ENV 1 ==> ERROR Too few arguments' +$narg+ \n"
915 >        txt += "    echo 'SET_EXE_ENV 1 ==> ERROR Too few arguments' +$nargs+ \n"
916          txt += '    echo "JOB_EXIT_STATUS = 50113"\n'
917          txt += '    echo "JobExitCode=50113" | tee -a $RUNTIME_AREA/$repo\n'
918          txt += '    dumpStatus $RUNTIME_AREA/$repo\n'
919          txt += '    rm -f $RUNTIME_AREA/$repo \n'
920          txt += '    echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
658        txt += '    echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
921          txt += '    echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
922          ## OLI_Daniele
923          txt += '    if [ $middleware == OSG ]; then \n'
# Line 663 | Line 925 | class Cmssw(JobType):
925          txt += '        cd $RUNTIME_AREA\n'
926          txt += '        /bin/rm -rf $WORKING_DIR\n'
927          txt += '        if [ -d $WORKING_DIR ] ;then\n'
928 <        txt += '            echo "SET_EXE_ENV 50114 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after Too few arguments for CRAB job wrapper"\n'
929 <        txt += '            echo "JOB_EXIT_STATUS = 50114"\n'
930 <        txt += '            echo "JobExitCode=50114" | tee -a $RUNTIME_AREA/$repo\n'
931 <        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
928 >        txt += '            echo "SET_EXE_ENV 50114 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after Too few arguments for CRAB job wrapper"\n'
929 >        txt += '            echo "JOB_EXIT_STATUS = 50114"\n'
930 >        txt += '            echo "JobExitCode=50114" | tee -a $RUNTIME_AREA/$repo\n'
931 >        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
932          txt += '            rm -f $RUNTIME_AREA/$repo \n'
933          txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
672        txt += '            echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
934          txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
935          txt += '        fi\n'
936          txt += '    fi \n'
# Line 679 | Line 940 | class Cmssw(JobType):
940  
941          # Prepare job-specific part
942          job = common.job_list[nj]
943 <        pset = os.path.basename(job.configFilename())
944 <        txt += '\n'
945 <        if (self.datasetPath): # standard job
946 <            txt += 'InputFiles=$2\n'
947 <            txt += 'echo "Inputfiles:<$InputFiles>"\n'
948 <            txt += 'sed "s#{\'INPUT\'}#$InputFiles#" $RUNTIME_AREA/'+pset+' > pset.cfg\n'
949 <        else:  # pythia like job
950 <            txt += 'Seed=$2\n'
951 <            txt += 'echo "Seed: <$Seed>"\n'
952 <            txt += 'sed "s#INPUT#$Seed#" $RUNTIME_AREA/'+pset+' > pset.cfg\n'
943 >        ### FEDE FOR DBS OUTPUT PUBLICATION
944 >        if (self.datasetPath):
945 >            txt += '\n'
946 >            txt += 'DatasetPath='+self.datasetPath+'\n'
947 >
948 >            datasetpath_split = self.datasetPath.split("/")
949 >            
950 >            txt += 'PrimaryDataset='+datasetpath_split[1]+'\n'
951 >            txt += 'DataTier='+datasetpath_split[2]+'\n'
952 >            #txt += 'ProcessedDataset='+datasetpath_split[3]+'\n'
953 >            txt += 'ApplicationFamily=cmsRun\n'
954 >
955 >        else:
956 >            txt += 'DatasetPath=MCDataTier\n'
957 >            txt += 'PrimaryDataset=null\n'
958 >            txt += 'DataTier=null\n'
959 >            #txt += 'ProcessedDataset=null\n'
960 >            txt += 'ApplicationFamily=MCDataTier\n'
961 >        if self.pset != None: #CarlosDaniele
962 >            pset = os.path.basename(job.configFilename())
963 >            txt += '\n'
964 >            txt += 'cp  $RUNTIME_AREA/'+pset+' .\n'
965 >            if (self.datasetPath): # standard job
966 >                #txt += 'InputFiles=$2\n'
967 >                txt += 'InputFiles=${args[1]}\n'
968 >                txt += 'MaxEvents=${args[2]}\n'
969 >                txt += 'SkipEvents=${args[3]}\n'
970 >                txt += 'echo "Inputfiles:<$InputFiles>"\n'
971 >                txt += 'sed "s#{\'INPUT\'}#$InputFiles#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
972 >                txt += 'echo "MaxEvents:<$MaxEvents>"\n'
973 >                txt += 'sed "s#INPUTMAXEVENTS#$MaxEvents#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
974 >                txt += 'echo "SkipEvents:<$SkipEvents>"\n'
975 >                txt += 'sed "s#INPUTSKIPEVENTS#$SkipEvents#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
976 >            else:  # pythia like job
977 >                seedIndex=1
978 >                if (self.firstRun):
979 >                    txt += 'FirstRun=${args['+str(seedIndex)+']}\n'
980 >                    txt += 'echo "FirstRun: <$FirstRun>"\n'
981 >                    txt += 'sed "s#\<INPUTFIRSTRUN\>#$FirstRun#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
982 >                    seedIndex=seedIndex+1
983 >
984 >                if (self.sourceSeed):
985 >                    txt += 'Seed=${args['+str(seedIndex)+']}\n'
986 >                    txt += 'sed "s#\<INPUT\>#$Seed#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
987 >                    seedIndex=seedIndex+1
988 >                    ## the following seeds are not always present
989 >                    if (self.sourceSeedVtx):
990 >                        txt += 'VtxSeed=${args['+str(seedIndex)+']}\n'
991 >                        txt += 'echo "VtxSeed: <$VtxSeed>"\n'
992 >                        txt += 'sed "s#\<INPUTVTX\>#$VtxSeed#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
993 >                        seedIndex += 1
994 >                    if (self.sourceSeedG4):
995 >                        txt += 'G4Seed=${args['+str(seedIndex)+']}\n'
996 >                        txt += 'echo "G4Seed: <$G4Seed>"\n'
997 >                        txt += 'sed "s#\<INPUTG4\>#$G4Seed#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
998 >                        seedIndex += 1
999 >                    if (self.sourceSeedMix):
1000 >                        txt += 'mixSeed=${args['+str(seedIndex)+']}\n'
1001 >                        txt += 'echo "MixSeed: <$mixSeed>"\n'
1002 >                        txt += 'sed "s#\<INPUTMIX\>#$mixSeed#" '+pset+' > tmp && mv -f tmp '+pset+'\n'
1003 >                        seedIndex += 1
1004 >                    pass
1005 >                pass
1006 >            txt += 'mv -f '+pset+' pset.cfg\n'
1007  
1008          if len(self.additional_inbox_files) > 0:
1009 <            for file in self.additional_inbox_files:
1010 <                txt += 'if [ -e $RUNTIME_AREA/'+file+' ] ; then\n'
1011 <                txt += '   cp $RUNTIME_AREA/'+file+' .\n'
697 <                txt += '   chmod +x '+file+'\n'
698 <                txt += 'fi\n'
1009 >            txt += 'if [ -e $RUNTIME_AREA/'+self.additional_tgz_name+' ] ; then\n'
1010 >            txt += '  tar xzvf $RUNTIME_AREA/'+self.additional_tgz_name+'\n'
1011 >            txt += 'fi\n'
1012              pass
1013  
1014 <        txt += 'echo "### END JOB SETUP ENVIRONMENT ###"\n\n'
1015 <
1016 <        txt += '\n'
1017 <        txt += 'echo "***** cat pset.cfg *********"\n'
1018 <        txt += 'cat pset.cfg\n'
1019 <        txt += 'echo "****** end pset.cfg ********"\n'
1020 <        txt += '\n'
1021 <        # txt += 'echo "***** cat pset1.cfg *********"\n'
1022 <        # txt += 'cat pset1.cfg\n'
1023 <        # txt += 'echo "****** end pset1.cfg ********"\n'
1014 >        if self.pset != None: #CarlosDaniele
1015 >            txt += 'echo "### END JOB SETUP ENVIRONMENT ###"\n\n'
1016 >        
1017 >            txt += '\n'
1018 >            txt += 'echo "***** cat pset.cfg *********"\n'
1019 >            txt += 'cat pset.cfg\n'
1020 >            txt += 'echo "****** end pset.cfg ********"\n'
1021 >            txt += '\n'
1022 >            ### FEDE FOR DBS OUTPUT PUBLICATION
1023 >            txt += 'PSETHASH=`EdmConfigHash < pset.cfg` \n'
1024 >            txt += 'echo "PSETHASH = $PSETHASH" \n'
1025 >            ##############
1026 >            txt += '\n'
1027 >            # txt += 'echo "***** cat pset1.cfg *********"\n'
1028 >            # txt += 'cat pset1.cfg\n'
1029 >            # txt += 'echo "****** end pset1.cfg ********"\n'
1030          return txt
1031  
1032 <    def wsBuildExe(self, nj):
1032 >    def wsBuildExe(self, nj=0):
1033          """
1034          Put in the script the commands to build an executable
1035          or a library.
# Line 737 | Line 1056 | class Cmssw(JobType):
1056              txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
1057              txt += '           rm -f $RUNTIME_AREA/$repo \n'
1058              txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
740            txt += '           echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1059              txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1060              txt += '       fi\n'
1061              txt += '   fi \n'
# Line 746 | Line 1064 | class Cmssw(JobType):
1064              txt += 'else \n'
1065              txt += '   echo "Successful untar" \n'
1066              txt += 'fi \n'
1067 +            txt += '\n'
1068 +            txt += 'echo "Include ProdAgentApi and PRODCOMMON in PYTHONPATH"\n'
1069 +            txt += 'if [ -z "$PYTHONPATH" ]; then\n'
1070 +            #### FEDE FOR DBS OUTPUT PUBLICATION
1071 +            txt += '   export PYTHONPATH=$SOFTWARE_DIR/ProdAgentApi:$SOFTWARE_DIR/ProdCommon\n'
1072 +            #txt += '   export PYTHONPATH=`pwd`/ProdAgentApi:`pwd`/ProdCommon\n'
1073 +            #txt += '   export PYTHONPATH=ProdAgentApi\n'
1074 +            txt += 'else\n'
1075 +            txt += '   export PYTHONPATH=$SOFTWARE_DIR/ProdAgentApi:$SOFTWARE_DIR/ProdCommon:${PYTHONPATH}\n'
1076 +            #txt += '   export PYTHONPATH=`pwd`/ProdAgentApi:`pwd`/ProdCommon:${PYTHONPATH}\n'
1077 +            #txt += '   export PYTHONPATH=ProdAgentApi:${PYTHONPATH}\n'
1078 +            txt += 'echo "PYTHONPATH=$PYTHONPATH"\n'
1079 +            ###################  
1080 +            txt += 'fi\n'
1081 +            txt += '\n'
1082 +
1083              pass
1084          
1085          return txt
# Line 757 | Line 1091 | class Cmssw(JobType):
1091          """
1092          
def executableName(self):
    """Name of the command to launch on the WN: a shell for user-script jobs, cmsRun otherwise."""
    return "sh " if self.scriptExe else self.executable
1098  
def executableArgs(self):
    """Arguments for the WN executable: the user script gets the job number, cmsRun gets the pset."""
    if not self.scriptExe:
        return " -p pset.cfg"
    return self.scriptExe + " $NJob"
1104  
def inputSandbox(self, nj):
    """
    Returns a list of filenames to be put in JDL input sandbox.
    """
    sandbox = []
    # code tarball, only if it was actually created
    if os.path.isfile(self.tgzNameWithPath):
        sandbox.append(self.tgzNameWithPath)
    # monitoring (ML) tarball, only if present
    if os.path.isfile(self.MLtgzfile):
        sandbox.append(self.MLtgzfile)
    # job configuration, only when a pset was supplied
    if self.pset is not None:
        sandbox.append(common.work_space.pathForTgz() + 'job/' + self.configFilename())
    # tarball holding any additional user input files
    sandbox.append(self.additionalInputFileTgz())
    return sandbox
1124  
1125      def outputSandbox(self, nj):
# Line 785 | Line 1128 | class Cmssw(JobType):
1128          """
1129          out_box = []
1130  
788        stdout=common.job_list[nj].stdout()
789        stderr=common.job_list[nj].stderr()
790
1131          ## User Declared output files
1132 <        for out in self.output_file:
1132 >        for out in (self.output_file+self.output_file_sandbox):
1133              n_out = nj + 1
1134              out_box.append(self.numberFile_(out,str(n_out)))
1135          return out_box
796        return []
1136  
1137      def prepareSteeringCards(self):
1138          """
# Line 809 | Line 1148 | class Cmssw(JobType):
1148          txt = '\n'
1149          txt += '# directory content\n'
1150          txt += 'ls \n'
1151 <        file_list = ''
1152 <        for fileWithSuffix in self.output_file:
1151 >
1152 >        for fileWithSuffix in (self.output_file+self.output_file_sandbox):
1153              output_file_num = self.numberFile_(fileWithSuffix, '$NJob')
815            file_list=file_list+output_file_num+' '
1154              txt += '\n'
1155              txt += '# check output file\n'
1156 <            txt += 'ls '+fileWithSuffix+'\n'
1157 <            txt += 'exe_result=$?\n'
1158 <            txt += 'if [ $exe_result -ne 0 ] ; then\n'
1159 <            txt += '   echo "ERROR: No output file to manage"\n'
1160 <            txt += '   echo "JOB_EXIT_STATUS = $exe_result"\n'
1161 <            txt += '   echo "JobExitCode=60302" | tee -a $RUNTIME_AREA/$repo\n'
1162 <            txt += '   dumpStatus $RUNTIME_AREA/$repo\n'
825 <            txt += '   rm -f $RUNTIME_AREA/$repo \n'
826 <            txt += '   echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
827 <            txt += '   echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
828 <            txt += '   echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
829 <            ### OLI_DANIELE
1156 >            # txt += 'ls '+fileWithSuffix+'\n'
1157 >            # txt += 'ls_result=$?\n'
1158 >            txt += 'if [ -e ./'+fileWithSuffix+' ] ; then\n'
1159 >            txt += '   mv '+fileWithSuffix+' $RUNTIME_AREA/'+output_file_num+'\n'
1160 >            txt += 'else\n'
1161 >            txt += '   exit_status=60302\n'
1162 >            txt += '   echo "ERROR: Problem with output file '+fileWithSuffix+'"\n'
1163              if common.scheduler.boss_scheduler_name == 'condor_g':
1164                  txt += '    if [ $middleware == OSG ]; then \n'
1165                  txt += '        echo "prepare dummy output file"\n'
1166                  txt += '        echo "Processing of job output failed" > $RUNTIME_AREA/'+output_file_num+'\n'
1167                  txt += '    fi \n'
835            txt += 'else\n'
836            txt += '   cp '+fileWithSuffix+' $RUNTIME_AREA/'+output_file_num+'\n'
1168              txt += 'fi\n'
1169 +        file_list = []
1170 +        for fileWithSuffix in (self.output_file):
1171 +             file_list.append(self.numberFile_(fileWithSuffix, '$NJob'))
1172 +        txt += 'file_list="'+string.join(file_list,' ')+'"\n'
1173        
1174          txt += 'cd $RUNTIME_AREA\n'
1175 <        file_list=file_list[:-1]
841 <        txt += 'file_list="'+file_list+'"\n'
1175 >        #### FEDE this is the cleanEnv function
1176          ### OLI_DANIELE
1177 <        txt += 'if [ $middleware == OSG ]; then\n'  
1178 <        txt += '    cd $RUNTIME_AREA\n'
1179 <        txt += '    echo "Remove working directory: $WORKING_DIR"\n'
1180 <        txt += '    /bin/rm -rf $WORKING_DIR\n'
1181 <        txt += '    if [ -d $WORKING_DIR ] ;then\n'
1182 <        txt += '        echo "SET_EXE 60999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after cleanup of WN"\n'
1183 <        txt += '        echo "JOB_EXIT_STATUS = 60999"\n'
1184 <        txt += '        echo "JobExitCode=60999" | tee -a $RUNTIME_AREA/$repo\n'
1185 <        txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
1186 <        txt += '        rm -f $RUNTIME_AREA/$repo \n'
1187 <        txt += '        echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1188 <        txt += '        echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1189 <        txt += '        echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1190 <        txt += '    fi\n'
1191 <        txt += 'fi\n'
1192 <        txt += '\n'
1177 >        #txt += 'if [ $middleware == OSG ]; then\n'  
1178 >        #txt += '    cd $RUNTIME_AREA\n'
1179 >        #txt += '    echo "Remove working directory: $WORKING_DIR"\n'
1180 >        #txt += '    /bin/rm -rf $WORKING_DIR\n'
1181 >        #txt += '    if [ -d $WORKING_DIR ] ;then\n'
1182 >        #txt += '        echo "SET_EXE 60999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after cleanup of WN"\n'
1183 >        #txt += '        echo "JOB_EXIT_STATUS = 60999"\n'
1184 >        #txt += '        echo "JobExitCode=60999" | tee -a $RUNTIME_AREA/$repo\n'
1185 >        #txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
1186 >        #txt += '        rm -f $RUNTIME_AREA/$repo \n'
1187 >        #txt += '        echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1188 >        #txt += '        echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1189 >        #txt += '    fi\n'
1190 >        #txt += 'fi\n'
1191 >        #txt += '\n'
1192 >
1193 >
1194          return txt
1195  
1196      def numberFile_(self, file, txt):
# Line 866 | Line 1201 | class Cmssw(JobType):
1201          # take away last extension
1202          name = p[0]
1203          for x in p[1:-1]:
1204 <           name=name+"."+x
1204 >            name=name+"."+x
1205          # add "_txt"
1206          if len(p)>1:
1207 <          ext = p[len(p)-1]
1208 <          #result = name + '_' + str(txt) + "." + ext
874 <          result = name + '_' + txt + "." + ext
1207 >            ext = p[len(p)-1]
1208 >            result = name + '_' + txt + "." + ext
1209          else:
1210 <          #result = name + '_' + str(txt)
877 <          result = name + '_' + txt
1210 >            result = name + '_' + txt
1211          
1212          return result
1213  
def getRequirements(self, nj=None):
    """
    Return the Requirements expression to be added to the JDL files.

    The expression asks for sites advertising the CMSSW version tag and,
    when known, the SCRAM architecture tag in the Glue software runtime
    environment, plus outbound network connectivity on the worker node.

    nj : accepted for call compatibility; not used in this method.
    """
    # NOTE(review): the previous default was a mutable list (nj=[]), a
    # Python anti-pattern; the parameter is unused, so None is equivalent.
    req = ''
    if self.version:
        req = ('Member("VO-cms-' + self.version +
               '", other.GlueHostApplicationSoftwareRunTimeEnvironment)')
    if self.executable_arch:
        req += (' && Member("VO-cms-' + self.executable_arch +
                '", other.GlueHostApplicationSoftwareRunTimeEnvironment)')
    req = req + ' && (other.GlueHostNetworkAdapterOutboundIP)'
    return req
1233  
1234      def configFilename(self):
# Line 913 | Line 1245 | class Cmssw(JobType):
1245          txt += '   echo "### SETUP CMS OSG  ENVIRONMENT ###"\n'
1246          txt += '   if [ -f $GRID3_APP_DIR/cmssoft/cmsset_default.sh ] ;then\n'
1247          txt += '      # Use $GRID3_APP_DIR/cmssoft/cmsset_default.sh to setup cms software\n'
1248 +        txt += '       export SCRAM_ARCH='+self.executable_arch+'\n'
1249          txt += '       source $GRID3_APP_DIR/cmssoft/cmsset_default.sh '+self.version+'\n'
1250 <        txt += '   elif [ -f $OSG_APP/cmssoft/cmsset_default.sh ] ;then\n'
1251 <        txt += '      # Use $OSG_APP/cmssoft/cmsset_default.sh to setup cms software\n'
1252 <        txt += '       source $OSG_APP/cmssoft/cmsset_default.sh '+self.version+'\n'
1250 >        txt += '   elif [ -f $OSG_APP/cmssoft/cms/cmsset_default.sh ] ;then\n'
1251 >        txt += '      # Use $OSG_APP/cmssoft/cms/cmsset_default.sh to setup cms software\n'
1252 >        txt += '       export SCRAM_ARCH='+self.executable_arch+'\n'
1253 >        txt += '       source $OSG_APP/cmssoft/cms/cmsset_default.sh '+self.version+'\n'
1254          txt += '   else\n'
1255 <        txt += '       echo "SET_CMS_ENV 10020 ==> ERROR $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cmsset_default.sh file not found"\n'
1255 >        txt += '       echo "SET_CMS_ENV 10020 ==> ERROR $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cms/cmsset_default.sh file not found"\n'
1256          txt += '       echo "JOB_EXIT_STATUS = 10020"\n'
1257          txt += '       echo "JobExitCode=10020" | tee -a $RUNTIME_AREA/$repo\n'
1258          txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
1259          txt += '       rm -f $RUNTIME_AREA/$repo \n'
1260          txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
927        txt += '       echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1261          txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1262          txt += '       exit 1\n'
1263          txt += '\n'
# Line 932 | Line 1265 | class Cmssw(JobType):
1265          txt += '       cd $RUNTIME_AREA\n'
1266          txt += '       /bin/rm -rf $WORKING_DIR\n'
1267          txt += '       if [ -d $WORKING_DIR ] ;then\n'
1268 <        txt += '            echo "SET_CMS_ENV 10017 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cmsset_default.sh file not found"\n'
1269 <        txt += '            echo "JOB_EXIT_STATUS = 10017"\n'
1270 <        txt += '            echo "JobExitCode=10017" | tee -a $RUNTIME_AREA/$repo\n'
1271 <        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
1272 <        txt += '            rm -f $RUNTIME_AREA/$repo \n'
1273 <        txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1274 <        txt += '            echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
942 <        txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1268 >        txt += '           echo "SET_CMS_ENV 10017 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cms/cmsset_default.sh file not found"\n'
1269 >        txt += '           echo "JOB_EXIT_STATUS = 10017"\n'
1270 >        txt += '           echo "JobExitCode=10017" | tee -a $RUNTIME_AREA/$repo\n'
1271 >        txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
1272 >        txt += '           rm -f $RUNTIME_AREA/$repo \n'
1273 >        txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1274 >        txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1275          txt += '       fi\n'
1276          txt += '\n'
1277          txt += '       exit 1\n'
# Line 965 | Line 1297 | class Cmssw(JobType):
1297          txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
1298          txt += '       rm -f $RUNTIME_AREA/$repo \n'
1299          txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
968        txt += '       echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1300          txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1301          txt += '       exit 1\n'
1302          txt += '   else\n'
# Line 977 | Line 1308 | class Cmssw(JobType):
1308          txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
1309          txt += '           rm -f $RUNTIME_AREA/$repo \n'
1310          txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
980        txt += '           echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1311          txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1312          txt += '           exit 1\n'
1313          txt += '       fi\n'
# Line 991 | Line 1321 | class Cmssw(JobType):
1321          txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
1322          txt += '           rm -f $RUNTIME_AREA/$repo \n'
1323          txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
994        txt += '           echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1324          txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1325          txt += '           exit 1\n'
1326          txt += '       fi\n'
1327          txt += '   fi\n'
1328          txt += '   \n'
1000        txt += '   string=`cat /etc/redhat-release`\n'
1001        txt += '   echo $string\n'
1002        txt += '   if [[ $string = *alhalla* ]]; then\n'
1003        txt += '       echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
1004        txt += '   elif [[ $string = *Enterprise* ]] || [[ $string = *cientific* ]]; then\n'
1005        txt += '       export SCRAM_ARCH=slc3_ia32_gcc323\n'
1006        txt += '       echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
1007        txt += '   else\n'
1008        txt += '       echo "SET_CMS_ENV 10033 ==> ERROR OS unknown, LCG environment not initialized"\n'
1009        txt += '       echo "JOB_EXIT_STATUS = 10033"\n'
1010        txt += '       echo "JobExitCode=10033" | tee -a $RUNTIME_AREA/$repo\n'
1011        txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
1012        txt += '       rm -f $RUNTIME_AREA/$repo \n'
1013        txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1014        txt += '       echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1015        txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1016        txt += '       exit 1\n'
1017        txt += '   fi\n'
1329          txt += '   echo "SET_CMS_ENV 0 ==> setup cms environment ok"\n'
1330          txt += '   echo "### END SETUP CMS LCG ENVIRONMENT ###"\n'
1331          return txt
1332  
1333 +    ### FEDE FOR DBS OUTPUT PUBLICATION
def modifyReport(self, nj):
    """
    Return the shell fragment that post-processes the FrameworkJobReport.

    When USER.publish_data == 1 in the crab configuration, the fragment
    runs ModifyJobReport.py to patch the FJR with publication information
    (processed dataset name, LFN derived from the SE path, storage
    element); otherwise it only records that there is nothing to publish.

    nj : job number (not used by this method; kept for interface
         compatibility with the other ws* builders).
    """
    txt = ''
    txt += 'echo "Modify Job Report" \n'
    txt += 'chmod a+x $SOFTWARE_DIR/ProdAgentApi/FwkJobRep/ModifyJobReport.py\n'
    # dict.get() instead of try/except KeyError: a missing key means "do not publish"
    publish_data = int(self.cfg_params.get('USER.publish_data', 0))

    # make sure $SE / $SE_PATH are at least defined (empty) for the checks below
    txt += 'if [ -z "$SE" ]; then\n'
    txt += '    SE="" \n'
    txt += 'fi \n'
    txt += 'if [ -z "$SE_PATH" ]; then\n'
    txt += '    SE_PATH="" \n'
    txt += 'fi \n'
    txt += 'echo "SE = $SE"\n'
    txt += 'echo "SE_PATH = $SE_PATH"\n'

    if (publish_data == 1):
        processedDataset = self.cfg_params['USER.publish_data_name']
        txt += 'ProcessedDataset='+processedDataset+'\n'
        # derive the LFN from the SE path; an empty SE_PATH signals a copy problem
        txt += 'if [ "$SE_PATH" == "" ]; then\n'
        txt += '    FOR_LFN=/copy_problems/ \n'
        txt += 'else \n'
        txt += '    tmp=`echo $SE_PATH | awk -F \'store\' \'{print$2}\'` \n'
        # NOTE(review): 'store' is hardcoded as the awk split token here
        txt += '    FOR_LFN=/store$tmp \n'
        txt += 'fi \n'
        txt += 'echo "ProcessedDataset = $ProcessedDataset"\n'
        txt += 'echo "FOR_LFN = $FOR_LFN" \n'
        txt += 'echo "CMSSW_VERSION = $CMSSW_VERSION"\n\n'
        txt += 'echo "$SOFTWARE_DIR/ProdAgentApi/FwkJobRep/ModifyJobReport.py crab_fjr_$NJob.xml $NJob $FOR_LFN $PrimaryDataset $DataTier $ProcessedDataset $ApplicationFamily $executable $CMSSW_VERSION $PSETHASH $SE $SE_PATH"\n'
        txt += '$SOFTWARE_DIR/ProdAgentApi/FwkJobRep/ModifyJobReport.py crab_fjr_$NJob.xml $NJob $FOR_LFN $PrimaryDataset $DataTier $ProcessedDataset $ApplicationFamily $executable $CMSSW_VERSION $PSETHASH $SE $SE_PATH\n'
        # propagate the patching result: on success the new FJR replaces the old one
        txt += 'modifyReport_result=$?\n'
        txt += 'echo modifyReport_result = $modifyReport_result\n'
        txt += 'if [ $modifyReport_result -ne 0 ]; then\n'
        txt += '    exit_status=1\n'
        txt += '    echo "ERROR: Problem with ModifyJobReport"\n'
        txt += 'else\n'
        txt += '    mv NewFrameworkJobReport.xml crab_fjr_$NJob.xml\n'
        txt += 'fi\n'
    else:
        txt += 'ProcessedDataset=no_data_to_publish \n'
        txt += 'FOR_LFN=/local/ \n'
        txt += 'echo "ProcessedDataset = $ProcessedDataset"\n'
        txt += 'echo "FOR_LFN = $FOR_LFN" \n'
    return txt
1395 +
def cleanEnv(self):
    """
    Return the shell fragment that, on OSG, removes the per-job
    $WORKING_DIR and reports error 60999 if the removal fails.
    """
    script = [
        'if [ $middleware == OSG ]; then\n',
        '    cd $RUNTIME_AREA\n',
        '    echo "Remove working directory: $WORKING_DIR"\n',
        '    /bin/rm -rf $WORKING_DIR\n',
        '    if [ -d $WORKING_DIR ] ;then\n',
        '              echo "SET_EXE 60999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after cleanup of WN"\n',
        '              echo "JOB_EXIT_STATUS = 60999"\n',
        '              echo "JobExitCode=60999" | tee -a $RUNTIME_AREA/$repo\n',
        '              dumpStatus $RUNTIME_AREA/$repo\n',
        '        rm -f $RUNTIME_AREA/$repo \n',
        '        echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n',
        '        echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n',
        '    fi\n',
        'fi\n',
        '\n',
    ]
    return ''.join(script)
1415 +
def setParam_(self, param, value):
    """Store *value* under key *param* in the internal parameter map."""
    self._params.update({param: value})
1418  
# Line 1030 | Line 1424 | class Cmssw(JobType):
1424          
def getTaskid(self):
    """Return the task identifier previously stored on this object."""
    current_id = self._taskId
    return current_id
1427 +
def uniquelist(self, old):
    """
    remove duplicates from a list
    """
    # dict keys are unique and keep first-insertion order, so this
    # deduplicates exactly like the manual loop it replaces
    return dict.fromkeys(old, 0).keys()

Diff Legend

Removed lines
+ Added lines
< Changed lines
> Changed lines