
Comparing COMP/CRAB/python/cms_cmssw.py (file contents):
Revision 1.6 by gutsche, Sun Jun 4 16:53:36 2006 UTC vs.
Revision 1.73 by gutsche, Sun Apr 8 18:39:51 2007 UTC

# Line 4 | Line 4 | from crab_exceptions import *
4   from crab_util import *
5   import common
6   import PsetManipulator  
7 <
8 < import DBSInfo_EDM
9 < import DataDiscovery_EDM
10 < import DataLocation_EDM
7 > import DataDiscovery
8 > import DataDiscovery_DBS2
9 > import DataLocation
10   import Scram
11  
12 < import os, string, re
12 > import os, string, re, shutil, glob
13  
14   class Cmssw(JobType):
15 <    def __init__(self, cfg_params):
15 >    def __init__(self, cfg_params, ncjobs):
16          JobType.__init__(self, 'CMSSW')
17          common.logger.debug(3,'CMSSW::__init__')
18  
20        self.analisys_common_info = {}
19          # Marco.
20          self._params = {}
21          self.cfg_params = cfg_params
22  
23 +        try:
24 +            self.MaxTarBallSize = float(self.cfg_params['EDG.maxtarballsize'])
25 +        except KeyError:
26 +            self.MaxTarBallSize = 100.0
27 +
28 +        # number of jobs requested to be created; limits job splitting
29 +        self.ncjobs = ncjobs
30 +
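A minimal sketch (not part of the CRAB sources, in present-day Python) of the try/except KeyError pattern used above to read an optional configuration key with a default; the config dict and values are hypothetical:

    def get_float(cfg, key, default):
        # Return cfg[key] as a float, or the default when the key is absent.
        try:
            return float(cfg[key])
        except KeyError:
            return default

    cfg = {'EDG.maxtarballsize': '50'}                  # hypothetical config dict
    print(get_float(cfg, 'EDG.maxtarballsize', 100.0))  # 50.0
    print(get_float(cfg, 'CMSSW.events_per_job', -1))   # -1 (key missing)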
31          log = common.logger
32          
33          self.scram = Scram.Scram(cfg_params)
28        scramArea = ''
34          self.additional_inbox_files = []
35          self.scriptExe = ''
36          self.executable = ''
37 +        self.executable_arch = self.scram.getArch()
38          self.tgz_name = 'default.tgz'
39 +        self.scriptName = 'CMSSW.sh'
40 +        self.pset = ''      #script use case Da
41 +        self.datasetPath = '' #script use case Da
42  
43 +        # set FJR file name
44 +        self.fjrFileName = 'crab_fjr.xml'
45  
46          self.version = self.scram.getSWVersion()
47 +        common.taskDB.setDict('codeVersion',self.version)
48          self.setParam_('application', self.version)
37        common.analisys_common_info['sw_version'] = self.version
38        ### FEDE
39        common.analisys_common_info['copy_input_data'] = 0
40        common.analisys_common_info['events_management'] = 1
49  
50          ### collect Data cards
51 +
52 +        ## get DBS mode
53          try:
54 <         #   self.owner = cfg_params['CMSSW.owner']
55 <         #   log.debug(6, "CMSSW::CMSSW(): owner = "+self.owner)
56 <         #   self.dataset = cfg_params['CMSSW.dataset']
57 <            self.datasetPath = cfg_params['CMSSW.datasetpath']
58 <            log.debug(6, "CMSSW::CMSSW(): datasetPath = "+self.datasetPath)
54 >            self.use_dbs_2 = int(self.cfg_params['CMSSW.use_dbs_2'])
55 >        except KeyError:
56 >            self.use_dbs_2 = 0
57 >            
58 >        try:
59 >            tmp =  cfg_params['CMSSW.datasetpath']
60 >            log.debug(6, "CMSSW::CMSSW(): datasetPath = "+tmp)
61 >            if string.lower(tmp)=='none':
62 >                self.datasetPath = None
63 >                self.selectNoInput = 1
64 >            else:
65 >                self.datasetPath = tmp
66 >                self.selectNoInput = 0
67          except KeyError:
50        #    msg = "Error: owner and/or dataset not defined "
68              msg = "Error: datasetpath not defined "  
69              raise CrabException(msg)
70  
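The new datasetpath handling above treats the literal string 'none' (any letter case) as "run without an input dataset" and records that choice in selectNoInput. A self-contained sketch of the same decision, assuming a plain dict in place of cfg_params:

    def parse_datasetpath(cfg):
        # Returns (datasetPath, selectNoInput) following the logic above.
        try:
            tmp = cfg['CMSSW.datasetpath']
        except KeyError:
            raise ValueError("Error: datasetpath not defined")
        if tmp.lower() == 'none':
            return None, 1      # no input data, e.g. a generator-only job
        return tmp, 0

    print(parse_datasetpath({'CMSSW.datasetpath': 'None'}))
    # (None, 1)
    print(parse_datasetpath({'CMSSW.datasetpath': '/PreProdR3Minbias/SIM/GEN-SIM'}))
    # ('/PreProdR3Minbias/SIM/GEN-SIM', 0)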
71          # ML monitoring
72          # split dataset path style: /PreProdR3Minbias/SIM/GEN-SIM
73 <        datasetpath_split = self.datasetPath.split("/")
74 <        self.setParam_('dataset', datasetpath_split[1])
75 <        self.setParam_('owner', datasetpath_split[-1])
76 <
77 <
73 >        if not self.datasetPath:
74 >            self.setParam_('dataset', 'None')
75 >            self.setParam_('owner', 'None')
76 >        else:
77 >            datasetpath_split = self.datasetPath.split("/")
78 >            self.setParam_('dataset', datasetpath_split[1])
79 >            self.setParam_('owner', datasetpath_split[-1])
80  
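For monitoring, the dataset path is split on "/": element [1] is reported as 'dataset' and the last element as 'owner'. A worked example with the path from the comment above:

    path = "/PreProdR3Minbias/SIM/GEN-SIM"
    parts = path.split("/")        # ['', 'PreProdR3Minbias', 'SIM', 'GEN-SIM']
    dataset, owner = parts[1], parts[-1]
    print(dataset, owner)          # PreProdR3Minbias GEN-SIM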
81 +        self.setTaskid_()
82 +        self.setParam_('taskId', self.cfg_params['taskId'])
83  
84          self.dataTiers = []
64 #       try:
65 #           tmpDataTiers = string.split(cfg_params['CMSSW.data_tier'],',')
66 #           for tmp in tmpDataTiers:
67 #               tmp=string.strip(tmp)
68 #               self.dataTiers.append(tmp)
69 #               pass
70 #           pass
71 #       except KeyError:
72 #           pass
73 #       log.debug(6, "Cmssw::Cmssw(): dataTiers = "+str(self.dataTiers))
85  
86          ## now the application
87          try:
# Line 89 | Line 100 | class Cmssw(JobType):
100          try:
101              self.pset = cfg_params['CMSSW.pset']
102              log.debug(6, "Cmssw::Cmssw(): PSet file = "+self.pset)
103 <            if (not os.path.exists(self.pset)):
104 <                raise CrabException("User defined PSet file "+self.pset+" does not exist")
103 >            if self.pset.lower() != 'none' :
104 >                if (not os.path.exists(self.pset)):
105 >                    raise CrabException("User defined PSet file "+self.pset+" does not exist")
106 >            else:
107 >                self.pset = None
108          except KeyError:
109              raise CrabException("PSet file missing. Cannot run cmsRun ")
110  
111          # output files
112 +        ## stuff which must be returned always via sandbox
113 +        self.output_file_sandbox = []
114 +
115 +        # add fjr report by default via sandbox
116 +        self.output_file_sandbox.append(self.fjrFileName)
117 +
118 +        # other output files to be returned via sandbox or copied to SE
119          try:
120              self.output_file = []
100
121              tmp = cfg_params['CMSSW.output_file']
122              if tmp != '':
123                  tmpOutFiles = string.split(cfg_params['CMSSW.output_file'],',')
# Line 107 | Line 127 | class Cmssw(JobType):
127                      self.output_file.append(tmp)
128                      pass
129              else:
130 <                log.message("No output file defined: only stdout/err will be available")
130 >                log.message("No output file defined: only stdout/err and the CRAB Framework Job Report will be available")
131                  pass
132              pass
133          except KeyError:
134 <            log.message("No output file defined: only stdout/err will be available")
134 >            log.message("No output file defined: only stdout/err and the CRAB Framework Job Report will be available")
135              pass
136  
137          # script_exe file as additional file in inputSandbox
138          try:
139 <           self.scriptExe = cfg_params['USER.script_exe']
140 <           self.additional_inbox_files.append(self.scriptExe)
139 >            self.scriptExe = cfg_params['USER.script_exe']
140 >            if self.scriptExe != '':
141 >               if not os.path.isfile(self.scriptExe):
142 >                  msg ="ERROR. file "+self.scriptExe+" not found"
143 >                  raise CrabException(msg)
144 >               self.additional_inbox_files.append(string.strip(self.scriptExe))
145          except KeyError:
146 <           pass
147 <        if self.scriptExe != '':
148 <           if os.path.isfile(self.scriptExe):
149 <              pass
150 <           else:
151 <              log.message("WARNING. file "+self.scriptExe+" not found")
152 <              sys.exit()
129 <                  
146 >            self.scriptExe = ''
147 >
148 >        #CarlosDaniele
149 >        if self.datasetPath == None and self.pset == None and self.scriptExe == '' :
150 >           msg ="Error. script_exe  not defined"
151 >           raise CrabException(msg)
152 >
153          ## additional input files
154          try:
155 <            tmpAddFiles = string.split(cfg_params['CMSSW.additional_input_files'],',')
155 >            tmpAddFiles = string.split(cfg_params['USER.additional_input_files'],',')
156              for tmp in tmpAddFiles:
157 <                if not os.path.exists(tmp):
158 <                    raise CrabException("Additional input file not found: "+tmp)
159 <                tmp=string.strip(tmp)
160 <                self.additional_inbox_files.append(tmp)
157 >                tmp = string.strip(tmp)
158 >                dirname = ''
159 >                if not tmp[0]=="/": dirname = "."
160 >                files = glob.glob(os.path.join(dirname, tmp))
161 >                for file in files:
162 >                    if not os.path.exists(file):
163 >                        raise CrabException("Additional input file not found: "+file)
164 >                    pass
165 >                    storedFile = common.work_space.shareDir()+file
166 >                    shutil.copyfile(file, storedFile)
167 >                    self.additional_inbox_files.append(string.strip(storedFile))
168                  pass
169              pass
170 +            common.logger.debug(5,"Additional input files: "+str(self.additional_inbox_files))
171          except KeyError:
172              pass
173  
174 +        # files per job
175          try:
176 <            self.filesPerJob = int(cfg_params['CMSSW.files_per_jobs']) #Daniele
176 >            if (cfg_params['CMSSW.files_per_jobs']):
177 >                raise CrabException("files_per_jobs no longer supported.  Quitting.")
178          except KeyError:
179 <            self.filesPerJob = 1
179 >            pass
180  
181 <        ## Max event   will be total_number_of_events ???  Daniele
181 >        ## Events per job
182          try:
183 <            self.maxEv = cfg_params['CMSSW.event_per_job']
183 >            self.eventsPerJob =int( cfg_params['CMSSW.events_per_job'])
184 >            self.selectEventsPerJob = 1
185          except KeyError:
186 <            self.maxEv = "-1"
187 <        ##  
188 <        try:
189 <            self.total_number_of_events = int(cfg_params['CMSSW.total_number_of_events'])
156 <        except KeyError:
157 <            msg = 'Must define total_number_of_events'
158 <            raise CrabException(msg)
159 <        
160 <        CEBlackList = []
186 >            self.eventsPerJob = -1
187 >            self.selectEventsPerJob = 0
188 >    
189 >        ## number of jobs
190          try:
191 <            tmpBad = string.split(cfg_params['EDG.ce_black_list'],',')
192 <            for tmp in tmpBad:
164 <                tmp=string.strip(tmp)
165 <                CEBlackList.append(tmp)
191 >            self.theNumberOfJobs =int( cfg_params['CMSSW.number_of_jobs'])
192 >            self.selectNumberOfJobs = 1
193          except KeyError:
194 <            pass
194 >            self.theNumberOfJobs = 0
195 >            self.selectNumberOfJobs = 0
196  
197 <        self.reCEBlackList=[]
198 <        for bad in CEBlackList:
199 <            self.reCEBlackList.append(re.compile( bad ))
172 <
173 <        common.logger.debug(5,'CEBlackList: '+str(CEBlackList))
174 <
175 <        CEWhiteList = []
176 <        try:
177 <            tmpGood = string.split(cfg_params['EDG.ce_white_list'],',')
178 <            for tmp in tmpGood:
179 <                tmp=string.strip(tmp)
180 <                CEWhiteList.append(tmp)
197 >        try:
198 >            self.total_number_of_events = int(cfg_params['CMSSW.total_number_of_events'])
199 >            self.selectTotalNumberEvents = 1
200          except KeyError:
201 <            pass
201 >            self.total_number_of_events = 0
202 >            self.selectTotalNumberEvents = 0
203  
204 <        #print 'CEWhiteList: ',CEWhiteList
205 <        self.reCEWhiteList=[]
206 <        for Good in CEWhiteList:
207 <            self.reCEWhiteList.append(re.compile( Good ))
204 >        if self.pset != None: #CarlosDaniele
205 >             if ( (self.selectTotalNumberEvents + self.selectEventsPerJob + self.selectNumberOfJobs) != 2 ):
206 >                 msg = 'Must define exactly two of total_number_of_events, events_per_job, or number_of_jobs.'
207 >                 raise CrabException(msg)
208 >        else:
209 >             if (self.selectNumberOfJobs == 0):
210 >                 msg = 'Must specify number_of_jobs.'
211 >                 raise CrabException(msg)
212  
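When a PSet is given, exactly two of the three splitting parameters must be supplied (the third follows from the other two); without a PSet only number_of_jobs is required. A compact sketch of that rule, using 0/1 flags like the select* members above (not CRAB code):

    def check_splitting(select_total, select_per_job, select_njobs, has_pset=True):
        # Each flag is 1 if the corresponding cfg key was given, else 0.
        if has_pset:
            if select_total + select_per_job + select_njobs != 2:
                raise ValueError("Must define exactly two of total_number_of_events,"
                                 " events_per_job, or number_of_jobs.")
        elif not select_njobs:
            raise ValueError("Must specify number_of_jobs.")

    check_splitting(1, 0, 1)    # ok: total_number_of_events + number_of_jobs
    # check_splitting(1, 1, 1)  # would raise: all three supplied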
213 <        common.logger.debug(5,'CEWhiteList: '+str(CEWhiteList))
213 >        ## source seed for pythia
214 >        try:
215 >            self.sourceSeed = int(cfg_params['CMSSW.pythia_seed'])
216 >        except KeyError:
217 >            self.sourceSeed = None
218 >            common.logger.debug(5,"No seed given")
219  
220 <        self.PsetEdit = PsetManipulator.PsetManipulator(self.pset) #Daniele Pset
220 >        try:
221 >            self.sourceSeedVtx = int(cfg_params['CMSSW.vtx_seed'])
222 >        except KeyError:
223 >            self.sourceSeedVtx = None
224 >            common.logger.debug(5,"No vertex seed given")
225 >        try:
226 >            self.firstRun = int(cfg_params['CMSSW.first_run'])
227 >        except KeyError:
228 >            self.firstRun = None
229 >            common.logger.debug(5,"No first run given")
230 >        if self.pset != None: #CarlosDaniele
231 >            self.PsetEdit = PsetManipulator.PsetManipulator(self.pset) #Daniele Pset
232  
233          #DBSDLS-start
234          ## Initialize the variables that are extracted from DBS/DLS and needed in other places of the code
235          self.maxEvents=0  # max events available   ( --> check the requested nb. of evts in Creator.py)
236          self.DBSPaths={}  # all dbs paths requested ( --> input to the site local discovery script)
237 +        self.jobDestination=[]  # Site destination(s) for each job (list of lists)
238          ## Perform the data location and discovery (based on DBS/DLS)
239 <        self.DataDiscoveryAndLocation(cfg_params)
239 >        ## SL: Don't if NONE is specified as input (pythia use case)
240 >        blockSites = {}
241 >        if self.datasetPath:
242 >            blockSites = self.DataDiscoveryAndLocation(cfg_params)
243          #DBSDLS-end          
244  
245          self.tgzNameWithPath = self.getTarBall(self.executable)
246 <
247 <        self.jobSplitting()  #Daniele job Splitting
248 <        self.PsetEdit.maxEvent(self.maxEv) #Daniele  
249 <        self.PsetEdit.inputModule("INPUT") #Daniele  
250 <        self.PsetEdit.psetWriter(self.configFilename())
251 <        
252 <
246 >    
247 >        ## Select Splitting
248 >        if self.selectNoInput:
249 >            if self.pset == None: #CarlosDaniele
250 >                self.jobSplittingForScript()
251 >            else:
252 >                self.jobSplittingNoInput()
253 >        else:
254 >            self.jobSplittingByBlocks(blockSites)
255 >
256 >        # modify Pset
257 >        if self.pset != None: #CarlosDaniele
258 >            try:
259 >                if (self.datasetPath): # standard job
260 >                    # allow processing a fraction of events in a file
261 >                    self.PsetEdit.inputModule("INPUT")
262 >                    self.PsetEdit.maxEvent("INPUTMAXEVENTS")
263 >                    self.PsetEdit.skipEvent("INPUTSKIPEVENTS")
264 >                else:  # pythia like job
265 >                    self.PsetEdit.maxEvent(self.eventsPerJob)
266 >                    if (self.firstRun):
267 >                        self.PsetEdit.pythiaFirstRun("INPUTFIRSTRUN")  #First Run
268 >                    if (self.sourceSeed) :
269 >                        self.PsetEdit.pythiaSeed("INPUT")
270 >                        if (self.sourceSeedVtx) :
271 >                            self.PsetEdit.pythiaSeedVtx("INPUTVTX")
272 >                # add FrameworkJobReport to parameter-set
273 >                self.PsetEdit.addCrabFJR(self.fjrFileName)
274 >                self.PsetEdit.psetWriter(self.configFilename())
275 >            except:
276 >                msg='Error while manipulating ParameterSet: exiting...'
277 >                raise CrabException(msg)
278  
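For dataset jobs the PSet is edited once with placeholder tokens ("INPUT", "INPUTMAXEVENTS", "INPUTSKIPEVENTS"); the per-job values are substituted later by the sed commands in the job wrapper (see the script-generation part further down in this diff). The sketch below mimics that substitution with plain string replacement on an illustrative, not literal, .cfg fragment:

    template = """source = PoolSource {
        untracked vstring fileNames = {'INPUT'}
        untracked int32 maxEvents   = INPUTMAXEVENTS
        untracked uint32 skipEvents = INPUTSKIPEVENTS
    }"""
    # Per-job values that the wrapper injects with sed at run time.
    job_cfg = (template
               .replace("{'INPUT'}", '{"file_a.root", "file_b.root"}')
               .replace("INPUTMAXEVENTS", "500")
               .replace("INPUTSKIPEVENTS", "0"))
    print(job_cfg)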
279      def DataDiscoveryAndLocation(self, cfg_params):
280  
281          common.logger.debug(10,"CMSSW::DataDiscoveryAndLocation()")
282  
214        #datasetPath = "/"+self.owner+"/"+self.dataTiers[0]+"/"+self.dataset
215        
283          datasetPath=self.datasetPath
284  
218        ## TODO
219        dataTiersList = ""
220        dataTiers = dataTiersList.split(',')
221
285          ## Contact the DBS
286 +        common.logger.message("Contacting DBS...")
287          try:
288 <            self.pubdata=DataDiscovery_EDM.DataDiscovery_EDM(datasetPath, dataTiers, cfg_params)
288 >
289 >            if self.use_dbs_2 == 1 :
290 >                self.pubdata=DataDiscovery_DBS2.DataDiscovery_DBS2(datasetPath, cfg_params)
291 >            else :
292 >                self.pubdata=DataDiscovery.DataDiscovery(datasetPath, cfg_params)
293              self.pubdata.fetchDBSInfo()
294  
295 <        except DataDiscovery_EDM.NotExistingDatasetError, ex :
295 >        except DataDiscovery.NotExistingDatasetError, ex :
296              msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
297              raise CrabException(msg)
298 <
231 <        except DataDiscovery_EDM.NoDataTierinProvenanceError, ex :
298 >        except DataDiscovery.NoDataTierinProvenanceError, ex :
299              msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
300              raise CrabException(msg)
301 <        except DataDiscovery_EDM.DataDiscoveryError, ex:
302 <            msg = 'ERROR ***: failed Data Discovery in DBS  %s'%ex.getErrorMessage()
301 >        except DataDiscovery.DataDiscoveryError, ex:
302 >            msg = 'ERROR ***: failed Data Discovery in DBS :  %s'%ex.getErrorMessage()
303 >            raise CrabException(msg)
304 >        except DataDiscovery_DBS2.NotExistingDatasetError_DBS2, ex :
305 >            msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
306 >            raise CrabException(msg)
307 >        except DataDiscovery_DBS2.NoDataTierinProvenanceError_DBS2, ex :
308 >            msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
309 >            raise CrabException(msg)
310 >        except DataDiscovery_DBS2.DataDiscoveryError_DBS2, ex:
311 >            msg = 'ERROR ***: failed Data Discovery in DBS :  %s'%ex.getErrorMessage()
312              raise CrabException(msg)
313  
314          ## get list of all required data in the form of dbs paths  (dbs path = /dataset/datatier/owner)
239        ## self.DBSPaths=self.pubdata.getDBSPaths()
315          common.logger.message("Required data are :"+self.datasetPath)
316  
317 <        filesbyblock=self.pubdata.getFiles()
318 <        self.AllInputFiles=filesbyblock.values()
319 <        self.files = self.AllInputFiles        
245 <
246 <        ## TEMP
247 <    #    self.filesTmp = filesbyblock.values()
248 <    #    self.files = []
249 <    #    locPath='rfio:cmsbose2.bo.infn.it:/flatfiles/SE00/cms/fanfani/ProdTest/'
250 <    #    locPath=''
251 <    #    tmp = []
252 <    #    for file in self.filesTmp[0]:
253 <    #        tmp.append(locPath+file)
254 <    #    self.files.append(tmp)
255 <        ## END TEMP
317 >        self.filesbyblock=self.pubdata.getFiles()
318 >        self.eventsbyblock=self.pubdata.getEventsPerBlock()
319 >        self.eventsbyfile=self.pubdata.getEventsPerFile()
320  
321          ## get max number of events
258        #common.logger.debug(10,"number of events for primary fileblocks %i"%self.pubdata.getMaxEvents())
322          self.maxEvents=self.pubdata.getMaxEvents() ##  self.maxEvents used in Creator.py
323 <        common.logger.message("\nThe number of available events is %s"%self.maxEvents)
323 >        common.logger.message("The number of available events is %s\n"%self.maxEvents)
324  
325 +        common.logger.message("Contacting DLS...")
326          ## Contact the DLS and build a list of sites hosting the fileblocks
327          try:
328 <            dataloc=DataLocation_EDM.DataLocation_EDM(filesbyblock.keys(),cfg_params)
328 >            dataloc=DataLocation.DataLocation(self.filesbyblock.keys(),cfg_params)
329              dataloc.fetchDLSInfo()
330 <        except DataLocation_EDM.DataLocationError , ex:
330 >        except DataLocation.DataLocationError , ex:
331              msg = 'ERROR ***: failed Data Location in DLS \n %s '%ex.getErrorMessage()
332              raise CrabException(msg)
333          
270        allsites=dataloc.getSites()
271        common.logger.debug(5,"sites are %s"%allsites)
272        sites=self.checkBlackList(allsites)
273        common.logger.debug(5,"sites are (after black list) %s"%sites)
274        sites=self.checkWhiteList(sites)
275        common.logger.debug(5,"sites are (after white list) %s"%sites)
334  
335 <        if len(sites)==0:
336 <            msg = 'No sites hosting all the needed data! Exiting... '
337 <            raise CrabException(msg)
335 >        sites = dataloc.getSites()
336 >        allSites = []
337 >        listSites = sites.values()
338 >        for listSite in listSites:
339 >            for oneSite in listSite:
340 >                allSites.append(oneSite)
341 >        allSites = self.uniquelist(allSites)
342  
343 <        common.logger.message("List of Sites hosting the data : "+str(sites))
344 <        common.logger.debug(6, "List of Sites: "+str(sites))
345 <        common.analisys_common_info['sites']=sites    ## used in SchedulerEdg.py in createSchScript
284 <        self.setParam_('TargetCE', ','.join(sites))
285 <        return
343 >        common.logger.message("Sites ("+str(len(allSites))+") hosting part/all of dataset: "+str(allSites))
344 >        common.logger.debug(6, "List of Sites: "+str(allSites))
345 >        return sites
346      
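DataDiscoveryAndLocation now returns the block-to-sites mapping itself; the flat, de-duplicated site list is built only for the log message. A short sketch of that flattening step with hypothetical block and site names:

    sites = {
        "blockA#0001": ["T2_IT_Bari", "T2_US_UCSD"],   # hypothetical blocks/sites
        "blockB#0002": ["T2_US_UCSD"],
    }
    allSites = []
    for siteList in sites.values():
        for site in siteList:
            if site not in allSites:       # de-duplicate, as uniquelist() does above
                allSites.append(site)
    print(len(allSites), allSites)         # 2 ['T2_IT_Bari', 'T2_US_UCSD']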
347 <    def jobSplitting(self):
347 >    def jobSplittingByBlocks(self, blockSites):
348          """
349 <        first implementation of job splitting
350 <        """    
351 <      #  print 'total events '+str(self.maxEvents)
352 <      #  print 'total events requested by the user '+str(self.total_number_of_events)
353 <        #print 'files per job '+str(self.filesPerJob)
354 <        common.logger.message('Required '+str(self.filesPerJob)+' files per job ')
355 <        common.logger.message('Required '+str(self.total_number_of_events)+' events in total ')
349 >        Perform job splitting. Jobs run over an integer number of files
350 >        and no more than one block.
351 >        ARGUMENT: blockSites: dictionary with blocks as keys and list of host sites as values
352 >        REQUIRES: self.selectTotalNumberEvents, self.selectEventsPerJob, self.selectNumberOfJobs,
353 >                  self.total_number_of_events, self.eventsPerJob, self.theNumberOfJobs,
354 >                  self.maxEvents, self.filesbyblock
355 >        SETS: self.jobDestination - Site destination(s) for each job (a list of lists)
356 >              self.total_number_of_jobs - Total # of jobs
357 >              self.list_of_args - File(s) job will run on (a list of lists)
358 >        """
359 >
360 >        # ---- Handle the possible job splitting configurations ---- #
361 >        if (self.selectTotalNumberEvents):
362 >            totalEventsRequested = self.total_number_of_events
363 >        if (self.selectEventsPerJob):
364 >            eventsPerJobRequested = self.eventsPerJob
365 >            if (self.selectNumberOfJobs):
366 >                totalEventsRequested = self.theNumberOfJobs * self.eventsPerJob
367 >
368 >        # If user requested all the events in the dataset
369 >        if (totalEventsRequested == -1):
370 >            eventsRemaining=self.maxEvents
371 >        # If user requested more events than are in the dataset
372 >        elif (totalEventsRequested > self.maxEvents):
373 >            eventsRemaining = self.maxEvents
374 >            common.logger.message("Requested "+str(self.total_number_of_events)+ " events, but only "+str(self.maxEvents)+" events are available.")
375 >        # If user requested less events than are in the dataset
376 >        else:
377 >            eventsRemaining = totalEventsRequested
378 >
379 >        # If user requested more events per job than are in the dataset
380 >        if (self.selectEventsPerJob and eventsPerJobRequested > self.maxEvents):
381 >            eventsPerJobRequested = self.maxEvents
382 >
383 >        # For user info at end
384 >        totalEventCount = 0
385 >
386 >        if (self.selectTotalNumberEvents and self.selectNumberOfJobs):
387 >            eventsPerJobRequested = int(eventsRemaining/self.theNumberOfJobs)
388 >
389 >        if (self.selectNumberOfJobs):
390 >            common.logger.message("May not create the exact number_of_jobs requested.")
391 >
392 >        if ( self.ncjobs == 'all' ) :
393 >            totalNumberOfJobs = 999999999
394 >        else :
395 >            totalNumberOfJobs = self.ncjobs
396 >            
397  
398 <        ## TODO: SL need to have (from DBS) a detailed list of how many events per each file
399 <        n_tot_files = (len(self.files[0]))
400 <        ## SL: this is wrong if the files have different number of events
401 <        evPerFile = int(self.maxEvents)/n_tot_files
402 <        
403 <        common.logger.debug(5,'Events per File '+str(evPerFile))
404 <
405 <        ## if asked to process all events, do it
406 <        if self.total_number_of_events == -1:
407 <            self.total_number_of_events=self.maxEvents
408 <            self.total_number_of_jobs = int(n_tot_files)*1/int(self.filesPerJob)
409 <            common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for all available events '+str(self.total_number_of_events)+' events')
398 >        blocks = blockSites.keys()
399 >        blockCount = 0
400 >        # Backup variable in case self.maxEvents counted events in a non-included block
401 >        numBlocksInDataset = len(blocks)
402 >
403 >        jobCount = 0
404 >        list_of_lists = []
405 >
406 >        # ---- Iterate over the blocks in the dataset until ---- #
407 >        # ---- we've met the requested total # of events    ---- #
408 >        while ( (eventsRemaining > 0) and (blockCount < numBlocksInDataset) and (jobCount < totalNumberOfJobs)):
409 >            block = blocks[blockCount]
410 >            blockCount += 1
411 >            
412 >            if self.eventsbyblock.has_key(block) :
413 >                numEventsInBlock = self.eventsbyblock[block]
414 >                common.logger.debug(5,'Events in Block File '+str(numEventsInBlock))
415 >            
416 >                files = self.filesbyblock[block]
417 >                numFilesInBlock = len(files)
418 >                if (numFilesInBlock <= 0):
419 >                    continue
420 >                fileCount = 0
421 >
422 >                # ---- New block => New job ---- #
423 >                parString = "\\{"
424 >                # counter for number of events in files currently worked on
425 >                filesEventCount = 0
426 >                # flag if next while loop should touch new file
427 >                newFile = 1
428 >                # job event counter
429 >                jobSkipEventCount = 0
430 >            
431 >                # ---- Iterate over the files in the block until we've met the requested ---- #
432 >                # ---- total # of events or we've gone over all the files in this block  ---- #
433 >                while ( (eventsRemaining > 0) and (fileCount < numFilesInBlock) and (jobCount < totalNumberOfJobs) ):
434 >                    file = files[fileCount]
435 >                    if newFile :
436 >                        try:
437 >                            numEventsInFile = self.eventsbyfile[file]
438 >                            common.logger.debug(6, "File "+str(file)+" has "+str(numEventsInFile)+" events")
439 >                            # increase filesEventCount
440 >                            filesEventCount += numEventsInFile
441 >                            # Add file to current job
442 >                            parString += '\\\"' + file + '\\\"\,'
443 >                            newFile = 0
444 >                        except KeyError:
445 >                            common.logger.message("File "+str(file)+" has unknown number of events: skipping")
446 >                        
447 >
448 >                    # if less events in file remain than eventsPerJobRequested
449 >                    if ( filesEventCount - jobSkipEventCount < eventsPerJobRequested ) :
450 >                        # if last file in block
451 >                        if ( fileCount == numFilesInBlock-1 ) :
452 >                            # end job using last file, use remaining events in block
453 >                            # close job and touch new file
454 >                            fullString = parString[:-2]
455 >                            fullString += '\\}'
456 >                            list_of_lists.append([fullString,str(-1),str(jobSkipEventCount)])
457 >                            common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(filesEventCount - jobSkipEventCount)+" events (last file in block).")
458 >                            self.jobDestination.append(blockSites[block])
459 >                            common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
460 >                            # reset counter
461 >                            jobCount = jobCount + 1
462 >                            totalEventCount = totalEventCount + filesEventCount - jobSkipEventCount
463 >                            eventsRemaining = eventsRemaining - filesEventCount + jobSkipEventCount
464 >                            jobSkipEventCount = 0
465 >                            # reset file
466 >                            parString = "\\{"
467 >                            filesEventCount = 0
468 >                            newFile = 1
469 >                            fileCount += 1
470 >                        else :
471 >                            # go to next file
472 >                            newFile = 1
473 >                            fileCount += 1
474 >                    # if events in file equal to eventsPerJobRequested
475 >                    elif ( filesEventCount - jobSkipEventCount == eventsPerJobRequested ) :
476 >                        # close job and touch new file
477 >                        fullString = parString[:-2]
478 >                        fullString += '\\}'
479 >                        list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
480 >                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
481 >                        self.jobDestination.append(blockSites[block])
482 >                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
483 >                        # reset counter
484 >                        jobCount = jobCount + 1
485 >                        totalEventCount = totalEventCount + eventsPerJobRequested
486 >                        eventsRemaining = eventsRemaining - eventsPerJobRequested
487 >                        jobSkipEventCount = 0
488 >                        # reset file
489 >                        parString = "\\{"
490 >                        filesEventCount = 0
491 >                        newFile = 1
492 >                        fileCount += 1
493 >                        
494 >                    # if more events in file remain than eventsPerJobRequested
495 >                    else :
496 >                        # close job but don't touch new file
497 >                        fullString = parString[:-2]
498 >                        fullString += '\\}'
499 >                        list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
500 >                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
501 >                        self.jobDestination.append(blockSites[block])
502 >                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
503 >                        # increase counter
504 >                        jobCount = jobCount + 1
505 >                        totalEventCount = totalEventCount + eventsPerJobRequested
506 >                        eventsRemaining = eventsRemaining - eventsPerJobRequested
507 >                        # calculate skip events for last file
508 >                        # use filesEventCount (contains several files), jobSkipEventCount and eventsPerJobRequest
509 >                        jobSkipEventCount = eventsPerJobRequested - (filesEventCount - jobSkipEventCount - self.eventsbyfile[file])
510 >                        # remove all but the last file
511 >                        filesEventCount = self.eventsbyfile[file]
512 >                        parString = "\\{"
513 >                        parString += '\\\"' + file + '\\\"\,'
514 >                    pass # END if
515 >                pass # END while (iterate over files in the block)
516 >        pass # END while (iterate over blocks in the dataset)
517 >        self.ncjobs = self.total_number_of_jobs = jobCount
518 >        if (eventsRemaining > 0 and jobCount < totalNumberOfJobs ):
519 >            common.logger.message("Could not run on all requested events because some blocks not hosted at allowed sites.")
520 >        common.logger.message("\n"+str(jobCount)+" job(s) can run on "+str(totalEventCount)+" events.\n")
521          
522 <        else:
523 <            self.total_number_of_files = int(self.total_number_of_events/evPerFile)
524 <            ## SL: if ask for less event than what is computed to be available on a
525 <            ##     file, process the first file anyhow.
526 <            if self.total_number_of_files == 0:
527 <                self.total_number_of_files = self.total_number_of_files + 1
522 >        self.list_of_args = list_of_lists
523 >        return
524 >
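The loop above packs the files of each block into jobs of roughly eventsPerJobRequested events, carrying the surplus of a split file into the next job as a skip-event offset. A condensed, self-contained sketch of just that packing step (block/site bookkeeping, the global event and job limits, and the shell-escaped file-list string are left out):

    def pack_block(files_with_events, events_per_job):
        # files_with_events: list of (fileName, nEvents) for one block.
        # Returns one (files, maxEvents, skipEvents) triple per job.
        jobs = []
        window, window_events, skip = [], 0, 0
        for i, (name, nev) in enumerate(files_with_events):
            window.append(name)
            window_events += nev
            # close full jobs as long as the current file still has surplus events
            while window_events - skip > events_per_job:
                jobs.append((list(window), events_per_job, skip))
                skip = events_per_job - (window_events - skip - nev)
                window, window_events = [name], nev
            if window_events - skip == events_per_job:
                jobs.append((list(window), events_per_job, skip))
                window, window_events, skip = [], 0, 0
            elif i == len(files_with_events) - 1 and window_events - skip > 0:
                jobs.append((list(window), -1, skip))   # last job takes what is left
        return jobs

    print(pack_block([("f1.root", 300), ("f2.root", 300)], 250))
    # [(['f1.root'], 250, 0), (['f1.root', 'f2.root'], 250, 250), (['f2.root'], -1, 200)]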
525 >    def jobSplittingNoInput(self):
526 >        """
527 >        Perform job splitting based on number of event per job
528 >        """
529 >        common.logger.debug(5,'Splitting per events')
530 >        common.logger.message('Required '+str(self.eventsPerJob)+' events per job ')
531 >        common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
532 >        common.logger.message('Required '+str(self.total_number_of_events)+' events in total ')
533 >
534 >        if (self.total_number_of_events < 0):
535 >            msg='Cannot split jobs per Events with "-1" as total number of events'
536 >            raise CrabException(msg)
537 >
538 >        if (self.selectEventsPerJob):
539 >            if (self.selectTotalNumberEvents):
540 >                self.total_number_of_jobs = int(self.total_number_of_events/self.eventsPerJob)
541 >            elif(self.selectNumberOfJobs) :  
542 >                self.total_number_of_jobs =self.theNumberOfJobs
543 >                self.total_number_of_events =int(self.theNumberOfJobs*self.eventsPerJob)
544 >
545 >        elif (self.selectNumberOfJobs) :
546 >            self.total_number_of_jobs = self.theNumberOfJobs
547 >            self.eventsPerJob = int(self.total_number_of_events/self.total_number_of_jobs)
548 >
549 >        common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
550 >
551 >        # is there any remainder?
552 >        check = int(self.total_number_of_events) - (int(self.total_number_of_jobs)*self.eventsPerJob)
553  
554 <            common.logger.debug(5,'N files  '+str(self.total_number_of_files))
554 >        common.logger.debug(5,'Check  '+str(check))
555  
556 <            check = 0
556 >        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created, each for '+str(self.eventsPerJob)+' events, for a total of '+str(self.total_number_of_jobs*self.eventsPerJob)+' events')
557 >        if check > 0:
558 >            common.logger.message('Warning: asked '+str(self.total_number_of_events)+' but can do only '+str(int(self.total_number_of_jobs)*self.eventsPerJob))
559 >
560 >        # argument is seed number.$i
561 >        self.list_of_args = []
562 >        for i in range(self.total_number_of_jobs):
563 >            ## Since there is no input, any site is good
564 >           # self.jobDestination.append(["Any"])
565 >            self.jobDestination.append([""]) #must be empty to write correctly the xml
566 >            args=''
567 >            if (self.firstRun):
568 >                    ## pythia first run
569 >                #self.list_of_args.append([(str(self.firstRun)+str(i))])
570 >                args=args+(str(self.firstRun)+str(i))
571 >            else:
572 >                ## no first run
573 >                #self.list_of_args.append([str(i)])
574 >                args=args+str(i)
575 >            if (self.sourceSeed):
576 >                if (self.sourceSeedVtx):
577 >                    ## pythia + vtx random seed
578 >                    #self.list_of_args.append([
579 >                    #                          str(self.sourceSeed)+str(i),
580 >                    #                          str(self.sourceSeedVtx)+str(i)
581 >                    #                          ])
582 >                    args=args+str(',')+str(self.sourceSeed)+str(i)+str(',')+str(self.sourceSeedVtx)+str(i)
583 >                else:
584 >                    ## only pythia random seed
585 >                    #self.list_of_args.append([(str(self.sourceSeed)+str(i))])
586 >                    args=args +str(',')+str(self.sourceSeed)+str(i)
587 >            else:
588 >                ## no random seed
589 >                if str(args)=='': args=args+(str(self.firstRun)+str(i))
590 >            arguments=args.split(',')
591 >            if len(arguments)==3:self.list_of_args.append([str(arguments[0]),str(arguments[1]),str(arguments[2])])
592 >            elif len(arguments)==2:self.list_of_args.append([str(arguments[0]),str(arguments[1])])
593 >            else :self.list_of_args.append([str(arguments[0])])
594              
595 <            ## Compute the number of jobs
322 <            #self.total_number_of_jobs = int(n_tot_files)*1/int(self.filesPerJob)
323 <            self.total_number_of_jobs = int(self.total_number_of_files/self.filesPerJob)
324 <            common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
595 >     #   print self.list_of_args
596  
597 <            ## is there any remainder?
327 <            check = int(self.total_number_of_files) - (int(self.total_number_of_jobs)*self.filesPerJob)
597 >        return
598  
329            common.logger.debug(5,'Check  '+str(check))
599  
600 <            if check > 0:
601 <                self.total_number_of_jobs =  self.total_number_of_jobs + 1
602 <                common.logger.message('Warning: last job will be created with '+str(check)+' files')
600 >    def jobSplittingForScript(self):#CarlosDaniele
601 >        """
602 >        Perform job splitting based on number of job
603 >        """
604 >        common.logger.debug(5,'Splitting per job')
605 >        common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
606  
607 <            common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for a total of '+str((self.total_number_of_jobs-1)*self.filesPerJob*evPerFile + check*evPerFile)+' events')
336 <            pass
607 >        self.total_number_of_jobs = self.theNumberOfJobs
608  
609 <        list_of_lists = []
610 <        for i in xrange(0, int(n_tot_files), self.filesPerJob):
611 <            list_of_lists.append(self.files[0][i: i+self.filesPerJob])
609 >        common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
610 >
611 >        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created')
612  
613 <        self.list_of_files = list_of_lists
614 <      
613 >        # argument is seed number.$i
614 >        self.list_of_args = []
615 >        for i in range(self.total_number_of_jobs):
616 >            ## Since there is no input, any site is good
617 >           # self.jobDestination.append(["Any"])
618 >            self.jobDestination.append([""])
619 >            ## no random seed
620 >            self.list_of_args.append([str(i)])
621          return
622  
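The argument lists built in jobSplittingNoInput above use string concatenation, so the job index i is appended to the configured first-run and seed values rather than added to them (firstRun=1000 and i=3 give "10003"). A self-contained sketch of that construction with hypothetical seed values:

    def no_input_args(njobs, first_run=None, seed=None, vtx_seed=None):
        # One argument list per job: [run], [run, seed] or [run, seed, vtx_seed].
        args_per_job = []
        for i in range(njobs):
            args = [str(first_run) + str(i) if first_run is not None else str(i)]
            if seed is not None:
                args.append(str(seed) + str(i))
                if vtx_seed is not None:
                    args.append(str(vtx_seed) + str(i))
            args_per_job.append(args)
        return args_per_job

    print(no_input_args(2, first_run=1000, seed=12345, vtx_seed=98765))
    # [['10000', '123450', '987650'], ['10001', '123451', '987651']]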
623      def split(self, jobParams):
# Line 348 | Line 625 | class Cmssw(JobType):
625          common.jobDB.load()
626          #### Fabio
627          njobs = self.total_number_of_jobs
628 <        filelist = self.list_of_files
628 >        arglist = self.list_of_args
629          # create the empty structure
630          for i in range(njobs):
631              jobParams.append("")
632          
633          for job in range(njobs):
634 <            jobParams[job] = filelist[job]
634 >            jobParams[job] = arglist[job]
635 >            # print str(arglist[job])
636 >            # print jobParams[job]
637              common.jobDB.setArguments(job, jobParams[job])
638 +            common.logger.debug(5,"Job "+str(job)+" Destination: "+str(self.jobDestination[job]))
639 +            common.jobDB.setDestination(job, self.jobDestination[job])
640  
641          common.jobDB.save()
642          return
643      
644      def getJobTypeArguments(self, nj, sched):
645 <        params = common.jobDB.arguments(nj)
646 <        #print params
647 <        parString = "\\{"
648 <        
368 <        for i in range(len(params) - 1):
369 <            parString += '\\\"' + params[i] + '\\\"\,'
370 <        
371 <        parString += '\\\"' + params[len(params) - 1] + '\\\"\\}'
372 <        return parString
645 >        result = ''
646 >        for i in common.jobDB.arguments(nj):
647 >            result=result+str(i)+" "
648 >        return result
649    
650      def numberOfJobs(self):
651          # Fabio
376
652          return self.total_number_of_jobs
378
379
380
381    def checkBlackList(self, allSites):
382        if len(self.reCEBlackList)==0: return allSites
383        sites = []
384        for site in allSites:
385            common.logger.debug(10,'Site '+site)
386            good=1
387            for re in self.reCEBlackList:
388                if re.search(site):
389                    common.logger.message('CE in black list, skipping site '+site)
390                    good=0
391                pass
392            if good: sites.append(site)
393        if len(sites) == 0:
394            common.logger.debug(3,"No sites found after BlackList")
395        return sites
396
397    def checkWhiteList(self, allSites):
398
399        if len(self.reCEWhiteList)==0: return allSites
400        sites = []
401        for site in allSites:
402            good=0
403            for re in self.reCEWhiteList:
404                if re.search(site):
405                    common.logger.debug(5,'CE in white list, adding site '+site)
406                    good=1
407                if not good: continue
408                sites.append(site)
409        if len(sites) == 0:
410            common.logger.message("No sites found after WhiteList\n")
411        else:
412            common.logger.debug(5,"Selected sites via WhiteList are "+str(sites)+"\n")
413        return sites
653  
654      def getTarBall(self, exe):
655          """
# Line 418 | Line 657 | class Cmssw(JobType):
657          """
658          
659          # if it exist, just return it
660 <        self.tgzNameWithPath = common.work_space.shareDir()+self.tgz_name
660 >        #
661 >        # Marco. Let's start to use relative path for Boss XML files
662 >        #
663 >        self.tgzNameWithPath = common.work_space.pathForTgz()+'share/'+self.tgz_name
664          if os.path.exists(self.tgzNameWithPath):
665              return self.tgzNameWithPath
666  
# Line 432 | Line 674 | class Cmssw(JobType):
674          # First of all declare the user Scram area
675          swArea = self.scram.getSWArea_()
676          #print "swArea = ", swArea
677 <        swVersion = self.scram.getSWVersion()
678 <        #print "swVersion = ", swVersion
677 >        # swVersion = self.scram.getSWVersion()
678 >        # print "swVersion = ", swVersion
679          swReleaseTop = self.scram.getReleaseTop_()
680          #print "swReleaseTop = ", swReleaseTop
681          
# Line 441 | Line 683 | class Cmssw(JobType):
683          if swReleaseTop == '' or swArea == swReleaseTop:
684              return
685  
686 <        filesToBeTarred = []
687 <        ## First find the executable
688 <        if (self.executable != ''):
689 <            exeWithPath = self.scram.findFile_(executable)
690 < #           print exeWithPath
691 <            if ( not exeWithPath ):
692 <                raise CrabException('User executable '+executable+' not found')
693 <
694 <            ## then check if it's private or not
695 <            if exeWithPath.find(swReleaseTop) == -1:
696 <                # the exe is private, so we must ship
697 <                common.logger.debug(5,"Exe "+exeWithPath+" to be tarred")
698 <                path = swArea+'/'
699 <                exe = string.replace(exeWithPath, path,'')
700 <                filesToBeTarred.append(exe)
701 <                pass
702 <            else:
703 <                # the exe is from release, we'll find it on WN
704 <                pass
705 <
706 <        ## Now get the libraries: only those in local working area
707 <        libDir = 'lib'
708 <        lib = swArea+'/' +libDir
709 <        common.logger.debug(5,"lib "+lib+" to be tarred")
710 <        if os.path.exists(lib):
711 <            filesToBeTarred.append(libDir)
712 <
713 <        ## Now check if module dir is present
714 <        moduleDir = 'module'
715 <        if os.path.isdir(swArea+'/'+moduleDir):
716 <            filesToBeTarred.append(moduleDir)
717 <
718 <        ## Now check if the Data dir is present
719 <        dataDir = 'src/Data/'
720 <        if os.path.isdir(swArea+'/'+dataDir):
721 <            filesToBeTarred.append(dataDir)
722 <
723 <        ## Create the tar-ball
724 <        if len(filesToBeTarred)>0:
725 <            cwd = os.getcwd()
726 <            os.chdir(swArea)
727 <            tarcmd = 'tar zcvf ' + self.tgzNameWithPath + ' '
728 <            for line in filesToBeTarred:
729 <                tarcmd = tarcmd + line + ' '
730 <            cout = runCommand(tarcmd)
731 <            if not cout:
732 <                raise CrabException('Could not create tar-ball')
733 <            os.chdir(cwd)
734 <        else:
735 <            common.logger.debug(5,"No files to be to be tarred")
686 >        import tarfile
687 >        try: # create tar ball
688 >            tar = tarfile.open(self.tgzNameWithPath, "w:gz")
689 >            ## First find the executable
690 >            if (executable != ''):
691 >                exeWithPath = self.scram.findFile_(executable)
692 >                if ( not exeWithPath ):
693 >                    raise CrabException('User executable '+executable+' not found')
694 >    
695 >                ## then check if it's private or not
696 >                if exeWithPath.find(swReleaseTop) == -1:
697 >                    # the exe is private, so we must ship
698 >                    common.logger.debug(5,"Exe "+exeWithPath+" to be tarred")
699 >                    path = swArea+'/'
700 >                    exe = string.replace(exeWithPath, path,'')
701 >                    tar.add(path+exe,executable)
702 >                    pass
703 >                else:
704 >                    # the exe is from release, we'll find it on WN
705 >                    pass
706 >    
707 >            ## Now get the libraries: only those in local working area
708 >            libDir = 'lib'
709 >            lib = swArea+'/' +libDir
710 >            common.logger.debug(5,"lib "+lib+" to be tarred")
711 >            if os.path.exists(lib):
712 >                tar.add(lib,libDir)
713 >    
714 >            ## Now check if module dir is present
715 >            moduleDir = 'module'
716 >            module = swArea + '/' + moduleDir
717 >            if os.path.isdir(module):
718 >                tar.add(module,moduleDir)
719 >
720 >            ## Now check if any data dir(s) is present
721 >            swAreaLen=len(swArea)
722 >            for root, dirs, files in os.walk(swArea):
723 >                if "data" in dirs:
724 >                    common.logger.debug(5,"data "+root+"/data"+" to be tarred")
725 >                    tar.add(root+"/data",root[swAreaLen:]+"/data")
726 >
727 >            ## Add ProdAgent dir to tar
728 >            paDir = 'ProdAgentApi'
729 >            pa = os.environ['CRABDIR'] + '/' + 'ProdAgentApi'
730 >            if os.path.isdir(pa):
731 >                tar.add(pa,paDir)
732 >        
733 >            common.logger.debug(5,"Files added to "+self.tgzNameWithPath+" : "+str(tar.getnames()))
734 >            tar.close()
735 >        except :
736 >            raise CrabException('Could not create tar-ball')
737 >
738 >        ## check for tarball size
739 >        tarballinfo = os.stat(self.tgzNameWithPath)
740 >        if ( tarballinfo.st_size > self.MaxTarBallSize*1024*1024 ) :
741 >            raise CrabException('Input sandbox size of ' + str(float(tarballinfo.st_size)/1024.0/1024.0) + ' MB is larger than the allowed ' + str(self.MaxTarBallSize) + ' MB input sandbox limit and not supported by the used GRID submission system. Please make sure that no unnecessary files are in all data directories in your local CMSSW project area as they are automatically packed into the input sandbox.')
742 >
743 >        ## create tar-ball with ML stuff
744 >        self.MLtgzfile =  common.work_space.pathForTgz()+'share/MLfiles.tgz'
745 >        try:
746 >            tar = tarfile.open(self.MLtgzfile, "w:gz")
747 >            path=os.environ['CRABDIR'] + '/python/'
748 >            for file in ['report.py', 'DashboardAPI.py', 'Logger.py', 'ProcInfo.py', 'apmon.py', 'parseCrabFjr.py']:
749 >                tar.add(path+file,file)
750 >            common.logger.debug(5,"Files added to "+self.MLtgzfile+" : "+str(tar.getnames()))
751 >            tar.close()
752 >        except :
753 >            raise CrabException('Could not create ML files tar-ball')
754          
755          return
756          
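getTarBall now builds the sandbox with Python's tarfile module and enforces the EDG.maxtarballsize limit by checking the size of the resulting file. A trimmed sketch of that pack-and-check step (paths, entries and the 100 MB default are illustrative):

    import os
    import tarfile

    def make_sandbox(tgz_path, entries, max_mb=100.0):
        # entries: (source_path, name_inside_tarball) pairs, as in tar.add(src, arcname)
        tar = tarfile.open(tgz_path, "w:gz")
        try:
            for src, arcname in entries:
                tar.add(src, arcname)
        finally:
            tar.close()
        size_mb = os.stat(tgz_path).st_size / 1024.0 / 1024.0
        if size_mb > max_mb:
            raise RuntimeError("Input sandbox of %.1f MB exceeds the %.1f MB limit"
                               % (size_mb, max_mb))
        return tgz_path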
# Line 502 | Line 762 | class Cmssw(JobType):
762          # Prepare JobType-independent part
763          txt = ''
764    
505        ### OLI: moved to front to enable header reported before any error code is submitted
506        txt += "NJob=$1\n"
507        txt += "InputFiles=$2\n"
508        txt += "echo \"<$InputFiles>\"\n"
509        # txt += "Args = ` cat $2 |  sed -e \'s/\\\\//g\' -e \'s/\"/\\x27/g\' `"
510
511        ### OLI_DANIELE
512        txt += 'if [ $middleware == LCG ]; then \n'
513        txt += '    echo "MonitorJobID=`echo ${NJob}_$EDG_WL_JOBID`" | tee -a $RUNTIME_AREA/$repo\n'
514        txt += '    echo "SyncGridJobId=`echo $EDG_WL_JOBID`" | tee -a $RUNTIME_AREA/$repo\n'
515        txt += '    echo "SyncCE=`edg-brokerinfo getCE`" | tee -a $RUNTIME_AREA/$repo\n'
516        txt += 'elif [ $middleware == OSG ]; then\n'
517
518        # OLI: added monitoring for dashbord, use hash of crab.cfg
519        if common.scheduler.boss_scheduler_name == 'condor_g':
520            # create hash of cfg file
521            hash = makeCksum(common.work_space.cfgFileName())
522            txt += '    echo "MonitorJobID=`echo ${NJob}_'+hash+'_$GLOBUS_GRAM_JOB_CONTACT`" | tee -a $RUNTIME_AREA/$repo\n'
523            txt += '    echo "SyncGridJobId=`echo $GLOBUS_GRAM_JOB_CONTACT`" | tee -a $RUNTIME_AREA/$repo\n'
524            txt += '    echo "SyncCE=`echo $hostname`" | tee -a $RUNTIME_AREA/$repo\n'
525        else :
526            txt += '    echo "MonitorJobID=`echo ${NJob}_$EDG_WL_JOBID`" | tee -a $RUNTIME_AREA/$repo\n'
527            txt += '    echo "SyncGridJobId=`echo $EDG_WL_JOBID`" | tee -a $RUNTIME_AREA/$repo\n'
528            txt += '    echo "SyncCE=`$EDG_WL_LOG_DESTINATION`" | tee -a $RUNTIME_AREA/$repo\n'
529
530        txt += 'fi\n'
531        txt += 'dumpStatus $RUNTIME_AREA/$repo\n'
532        txt += '\n'
533
765          ## OLI_Daniele at this level  middleware already known
766  
767          txt += 'if [ $middleware == LCG ]; then \n'
768          txt += self.wsSetupCMSLCGEnvironment_()
769          txt += 'elif [ $middleware == OSG ]; then\n'
770 <        txt += '    time=`date -u +"%s"`\n'
771 <        txt += '    WORKING_DIR=$OSG_WN_TMP/cms_$time\n'
541 <        txt += '    echo "Creating working directory: $WORKING_DIR"\n'
542 <        txt += '    /bin/mkdir -p $WORKING_DIR\n'
770 >        txt += '    WORKING_DIR=`/bin/mktemp  -d $OSG_WN_TMP/cms_XXXXXXXXXXXX`\n'
771 >        txt += '    echo "Created working directory: $WORKING_DIR"\n'
772          txt += '    if [ ! -d $WORKING_DIR ] ;then\n'
773 <        txt += '        echo "OSG WORKING DIR ==> $WORKING_DIR could not be created on on WN `hostname`"\n'
774 <    
775 <        txt += '        echo "JOB_EXIT_STATUS = 1"\n'
773 >        txt += '        echo "SET_CMS_ENV 10016 ==> OSG $WORKING_DIR could not be created on WN `hostname`"\n'
774 >        txt += '        echo "JOB_EXIT_STATUS = 10016"\n'
775 >        txt += '        echo "JobExitCode=10016" | tee -a $RUNTIME_AREA/$repo\n'
776 >        txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
777 >        txt += '        rm -f $RUNTIME_AREA/$repo \n'
778 >        txt += '        echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
779 >        txt += '        echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
780          txt += '        exit 1\n'
781          txt += '    fi\n'
782          txt += '\n'
# Line 559 | Line 792 | class Cmssw(JobType):
792          txt += scram+' project CMSSW '+self.version+'\n'
793          txt += 'status=$?\n'
794          txt += 'if [ $status != 0 ] ; then\n'
795 <        txt += '   echo "SET_EXE_ENV 1 ==>ERROR CMSSW '+self.version+' not found on `hostname`" \n'
795 >        txt += '   echo "SET_EXE_ENV 10034 ==>ERROR CMSSW '+self.version+' not found on `hostname`" \n'
796          txt += '   echo "JOB_EXIT_STATUS = 10034"\n'
797 <        txt += '   echo "SanityCheckCode = 10034" | tee -a $RUNTIME_AREA/$repo\n'
797 >        txt += '   echo "JobExitCode=10034" | tee -a $RUNTIME_AREA/$repo\n'
798          txt += '   dumpStatus $RUNTIME_AREA/$repo\n'
799 +        txt += '   rm -f $RUNTIME_AREA/$repo \n'
800 +        txt += '   echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
801 +        txt += '   echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
802          ## OLI_Daniele
803          txt += '    if [ $middleware == OSG ]; then \n'
804          txt += '        echo "Remove working directory: $WORKING_DIR"\n'
805          txt += '        cd $RUNTIME_AREA\n'
806          txt += '        /bin/rm -rf $WORKING_DIR\n'
807          txt += '        if [ -d $WORKING_DIR ] ;then\n'
808 <        txt += '            echo "OSG WORKING DIR ==> $WORKING_DIR could not be deleted on on WN `hostname`"\n'
808 >        txt += '            echo "SET_CMS_ENV 10018 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after CMSSW CMSSW_0_6_1 not found on `hostname`"\n'
809 >        txt += '            echo "JOB_EXIT_STATUS = 10018"\n'
810 >        txt += '            echo "JobExitCode=10018" | tee -a $RUNTIME_AREA/$repo\n'
811 >        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
812 >        txt += '            rm -f $RUNTIME_AREA/$repo \n'
813 >        txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
814 >        txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
815          txt += '        fi\n'
816          txt += '    fi \n'
817          txt += '   exit 1 \n'
818          txt += 'fi \n'
819          txt += 'echo "CMSSW_VERSION =  '+self.version+'"\n'
820 +        txt += 'export SCRAM_ARCH='+self.executable_arch+'\n'
821          txt += 'cd '+self.version+'\n'
822          ### needed grep for bug in scramv1 ###
823 +        txt += scram+' runtime -sh\n'
824          txt += 'eval `'+scram+' runtime -sh | grep -v SCRAMRT_LSB_JOBNAME`\n'
825 +        txt += 'echo $PATH\n'
826  
827          # Handle the arguments:
828          txt += "\n"
829 <        txt += "## ARGUMNETS: $1 Job Number\n"
585 <        # txt += "## ARGUMNETS: $2 First Event for this job\n"
586 <        # txt += "## ARGUMNETS: $3 Max Event for this job\n"
829 >        txt += "## number of arguments (first argument always jobnumber)\n"
830          txt += "\n"
831 <        txt += "narg=$#\n"
832 <        txt += "if [ $narg -lt 2 ]\n"
831 > #        txt += "narg=$#\n"
832 >        txt += "if [ $nargs -lt 2 ]\n"
833          txt += "then\n"
834 <        txt += "    echo 'SET_EXE_ENV 1 ==> ERROR Too few arguments' +$narg+ \n"
834 >        txt += "    echo 'SET_EXE_ENV 1 ==> ERROR Too few arguments' +$nargs+ \n"
835          txt += '    echo "JOB_EXIT_STATUS = 50113"\n'
836 <        txt += '    echo "SanityCheckCode = 50113" | tee -a $RUNTIME_AREA/$repo\n'
836 >        txt += '    echo "JobExitCode=50113" | tee -a $RUNTIME_AREA/$repo\n'
837          txt += '    dumpStatus $RUNTIME_AREA/$repo\n'
838 +        txt += '    rm -f $RUNTIME_AREA/$repo \n'
839 +        txt += '    echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
840 +        txt += '    echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
841          ## OLI_Daniele
842          txt += '    if [ $middleware == OSG ]; then \n'
843          txt += '        echo "Remove working directory: $WORKING_DIR"\n'
844          txt += '        cd $RUNTIME_AREA\n'
845          txt += '        /bin/rm -rf $WORKING_DIR\n'
846          txt += '        if [ -d $WORKING_DIR ] ;then\n'
847 <        txt += '            echo "OSG WORKING DIR ==> $WORKING_DIR could not be deleted on on WN `hostname`"\n'
847 >        txt += '            echo "SET_EXE_ENV 50114 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after Too few arguments for CRAB job wrapper"\n'
848 >        txt += '            echo "JOB_EXIT_STATUS = 50114"\n'
849 >        txt += '            echo "JobExitCode=50114" | tee -a $RUNTIME_AREA/$repo\n'
850 >        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
851 >        txt += '            rm -f $RUNTIME_AREA/$repo \n'
852 >        txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
853 >        txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
854          txt += '        fi\n'
855          txt += '    fi \n'
856          txt += "    exit 1\n"
# Line 607 | Line 859 | class Cmssw(JobType):
859  
860          # Prepare job-specific part
861          job = common.job_list[nj]
862 <        pset = os.path.basename(job.configFilename())
863 <        txt += '\n'
864 <        #txt += 'echo sed "s#{\'INPUT\'}#$InputFiles#" $RUNTIME_AREA/'+pset+' \n'
865 <        txt += 'sed "s#{\'INPUT\'}#$InputFiles#" $RUNTIME_AREA/'+pset+' > pset.cfg\n'
866 <        #txt += 'sed "s#{\'INPUT\'}#${InputFiles}#" $RUNTIME_AREA/'+pset+' > pset1.cfg\n'
862 >        if self.pset != None: #CarlosDaniele
863 >            pset = os.path.basename(job.configFilename())
864 >            txt += '\n'
865 >            if (self.datasetPath): # standard job
866 >                #txt += 'InputFiles=$2\n'
867 >                txt += 'InputFiles=${args[1]}\n'
868 >                txt += 'MaxEvents=${args[2]}\n'
869 >                txt += 'SkipEvents=${args[3]}\n'
870 >                txt += 'echo "Inputfiles:<$InputFiles>"\n'
871 >                txt += 'sed "s#{\'INPUT\'}#$InputFiles#" $RUNTIME_AREA/'+pset+' > pset_tmp_1.cfg\n'
872 >                txt += 'echo "MaxEvents:<$MaxEvents>"\n'
873 >                txt += 'sed "s#INPUTMAXEVENTS#$MaxEvents#" pset_tmp_1.cfg > pset_tmp_2.cfg\n'
874 >                txt += 'echo "SkipEvents:<$SkipEvents>"\n'
875 >                txt += 'sed "s#INPUTSKIPEVENTS#$SkipEvents#" pset_tmp_2.cfg > pset.cfg\n'
876 >            else:  # pythia like job
877 >                if (self.sourceSeed):
878 >                    txt += 'FirstRun=${args[1]}\n'
879 >                    txt += 'echo "FirstRun: <$FirstRun>"\n'
880 >                    txt += 'sed "s#\<INPUTFIRSTRUN\>#$FirstRun#" $RUNTIME_AREA/'+pset+' > tmp_1.cfg\n'
881 >                else:
882 >                    txt += '# Copy untouched pset\n'
883 >                    txt += 'cp $RUNTIME_AREA/'+pset+' tmp_1.cfg\n'
884 >                if (self.sourceSeed):
885 > #                    txt += 'Seed=$2\n'
886 >                    txt += 'Seed=${args[2]}\n'
887 >                    txt += 'echo "Seed: <$Seed>"\n'
888 >                    txt += 'sed "s#\<INPUT\>#$Seed#" tmp_1.cfg > tmp_2.cfg\n'
889 >                    if (self.sourceSeedVtx):
890 > #                        txt += 'VtxSeed=$3\n'
891 >                        txt += 'VtxSeed=${args[3]}\n'
892 >                        txt += 'echo "VtxSeed: <$VtxSeed>"\n'
893 >                        txt += 'sed "s#INPUTVTX#$VtxSeed#" tmp_2.cfg > pset.cfg\n'
894 >                    else:
895 >                        txt += 'mv tmp_2.cfg pset.cfg\n'
896 >                else:
897 >                    txt += 'mv tmp_1.cfg pset.cfg\n'
898 >                   # txt += '# Copy untouched pset\n'
899 >                   # txt += 'cp $RUNTIME_AREA/'+pset+' pset.cfg\n'
900 >
901  
902          if len(self.additional_inbox_files) > 0:
903              for file in self.additional_inbox_files:
904 <                txt += 'if [ -e $RUNTIME_AREA/'+file+' ] ; then\n'
905 <                txt += '   cp $RUNTIME_AREA/'+file+' .\n'
906 <                txt += '   chmod +x '+file+'\n'
904 >                relFile = file.split("/")[-1]
905 >                txt += 'if [ -e $RUNTIME_AREA/'+relFile+' ] ; then\n'
906 >                txt += '   cp $RUNTIME_AREA/'+relFile+' .\n'
907 >                txt += '   chmod +x '+relFile+'\n'
908                  txt += 'fi\n'
909              pass
910  
911 <        txt += 'echo "### END JOB SETUP ENVIRONMENT ###"\n\n'
912 <
913 <        txt += '\n'
914 <        txt += 'echo "***** cat pset.cfg *********"\n'
915 <        txt += 'cat pset.cfg\n'
916 <        txt += 'echo "****** end pset.cfg ********"\n'
917 <        txt += '\n'
918 <        # txt += 'echo "***** cat pset1.cfg *********"\n'
919 <        # txt += 'cat pset1.cfg\n'
920 <        # txt += 'echo "****** end pset1.cfg ********"\n'
911 >        if self.pset != None: #CarlosDaniele
912 >            txt += 'echo "### END JOB SETUP ENVIRONMENT ###"\n\n'
913 >        
914 >            txt += '\n'
915 >            txt += 'echo "***** cat pset.cfg *********"\n'
916 >            txt += 'cat pset.cfg\n'
917 >            txt += 'echo "****** end pset.cfg ********"\n'
918 >            txt += '\n'
919 >            # txt += 'echo "***** cat pset1.cfg *********"\n'
920 >            # txt += 'cat pset1.cfg\n'
921 >            # txt += 'echo "****** end pset1.cfg ********"\n'
922          return txt
923  
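The job-specific block above fills the CMSSW configuration at run time with plain sed substitutions: for a dataset job the wrapper receives the input file list, the maximum number of events and the number of events to skip as ${args[1..3]} and rewrites the {'INPUT'}, INPUTMAXEVENTS and INPUTSKIPEVENTS placeholders; for a generation-style job the run number and seeds are substituted instead. A minimal Python sketch of the same substitution; only the placeholder tokens are taken from the sed commands above, the surrounding cfg text is invented for illustration:

    template = "fileNames = { {'INPUT'} } maxEvents = INPUTMAXEVENTS skipEvents = INPUTSKIPEVENTS"

    input_files = '"/store/file_1.root","/store/file_2.root"'   # ${args[1]}
    max_events  = '500'                                          # ${args[2]}
    skip_events = '1000'                                         # ${args[3]}

    pset = template.replace("{'INPUT'}", input_files)
    pset = pset.replace("INPUTMAXEVENTS", max_events)
    pset = pset.replace("INPUTSKIPEVENTS", skip_events)
    print pset
    # fileNames = { "/store/file_1.root","/store/file_2.root" } maxEvents = 500 skipEvents = 1000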
924 <    def wsBuildExe(self, nj):
924 >    def wsBuildExe(self, nj=0):
925          """
926          Put in the script the commands to build an executable
927          or a library.
# Line 648 | Line 936 | class Cmssw(JobType):
936              txt += 'if [ $untar_status -ne 0 ]; then \n'
937              txt += '   echo "SET_EXE 1 ==> ERROR Untarring .tgz file failed"\n'
938              txt += '   echo "JOB_EXIT_STATUS = $untar_status" \n'
939 <            txt += '   echo "SanityCheckCode = $untar_status" | tee -a $repo\n'
939 >            txt += '   echo "JobExitCode=$untar_status" | tee -a $RUNTIME_AREA/$repo\n'
940              txt += '   if [ $middleware == OSG ]; then \n'
941              txt += '       echo "Remove working directory: $WORKING_DIR"\n'
942              txt += '       cd $RUNTIME_AREA\n'
943              txt += '       /bin/rm -rf $WORKING_DIR\n'
944              txt += '       if [ -d $WORKING_DIR ] ;then\n'
945 <            txt += '           echo "OSG WORKING DIR ==> $WORKING_DIR could not be deleted on on WN `hostname`"\n'
945 >            txt += '           echo "SET_EXE 50999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after Untarring .tgz file failed"\n'
946 >            txt += '           echo "JOB_EXIT_STATUS = 50999"\n'
947 >            txt += '           echo "JobExitCode=50999" | tee -a $RUNTIME_AREA/$repo\n'
948 >            txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
949 >            txt += '           rm -f $RUNTIME_AREA/$repo \n'
950 >            txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
951 >            txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
952              txt += '       fi\n'
953              txt += '   fi \n'
954              txt += '   \n'
955 <            txt += '   exit $untar_status \n'
955 >            txt += '   exit 1 \n'
956              txt += 'else \n'
957              txt += '   echo "Successful untar" \n'
958              txt += 'fi \n'
959 +            txt += '\n'
960 +            txt += 'echo "Include ProdAgentApi in PYTHONPATH"\n'
961 +            txt += 'if [ -z "$PYTHONPATH" ]; then\n'
962 +            txt += '   export PYTHONPATH=ProdAgentApi\n'
963 +            txt += 'else\n'
964 +            txt += '   export PYTHONPATH=ProdAgentApi:${PYTHONPATH}\n'
965 +            txt += 'fi\n'
966 +            txt += '\n'
967 +
968              pass
969          
970          return txt
# Line 673 | Line 976 | class Cmssw(JobType):
976          """
977          
978      def executableName(self):
979 <        return self.executable
979 >        if self.scriptExe: #CarlosDaniele
980 >            return "sh "
981 >        else:
982 >            return self.executable
983  
984      def executableArgs(self):
985 <        return " -p pset.cfg"
985 >        if self.scriptExe:#CarlosDaniele
986 >            return   self.scriptExe + " $NJob"
987 >        else:
988 >            return " -p pset.cfg"
989  
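executableName() and executableArgs() together define what the wrapper finally runs: a user script is executed through sh with the job number as its argument, otherwise the CMSSW executable is run on the generated pset.cfg. A hedged sketch of how the two values combine; the helper, the script name and the cmsRun value are examples only, not taken from the revision:

    def command_line(jobtype):
        # 'jobtype' stands for an instance of this class.
        return jobtype.executableName() + jobtype.executableArgs()

    # scriptExe set          -> "sh " + "myScript.sh $NJob"  = "sh myScript.sh $NJob"
    # plain CMSSW-style job  -> self.executable + " -p pset.cfg"  (e.g. "cmsRun -p pset.cfg")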
990      def inputSandbox(self, nj):
991          """
992          Returns a list of filenames to be put in JDL input sandbox.
993          """
994          inp_box = []
995 <        # dict added to delete duplicate from input sandbox file list
996 <        seen = {}
995 >        # # dict added to delete duplicate from input sandbox file list
996 >        # seen = {}
997          ## code
998          if os.path.isfile(self.tgzNameWithPath):
999              inp_box.append(self.tgzNameWithPath)
1000 +        if os.path.isfile(self.MLtgzfile):
1001 +            inp_box.append(self.MLtgzfile)
1002          ## config
1003 <        inp_box.append(common.job_list[nj].configFilename())
1003 >        if not self.pset is None:
1004 >            inp_box.append(common.work_space.pathForTgz() + 'job/' + self.configFilename())
1005          ## additional input files
1006 <        #for file in self.additional_inbox_files:
1007 <        #    inp_box.append(common.work_space.cwdDir()+file)
1006 >        for file in self.additional_inbox_files:
1007 >            inp_box.append(file)
1008          return inp_box
1009  
1010      def outputSandbox(self, nj):
# Line 701 | Line 1013 | class Cmssw(JobType):
1013          """
1014          out_box = []
1015  
704        stdout=common.job_list[nj].stdout()
705        stderr=common.job_list[nj].stderr()
706
1016          ## User Declared output files
1017 <        for out in self.output_file:
1017 >        for out in (self.output_file+self.output_file_sandbox):
1018              n_out = nj + 1
1019              out_box.append(self.numberFile_(out,str(n_out)))
1020          return out_box
712        return []
1021  
1022      def prepareSteeringCards(self):
1023          """
# Line 723 | Line 1031 | class Cmssw(JobType):
1031          """
1032  
1033          txt = '\n'
1034 <        file_list = ''
1035 <        check = len(self.output_file)
1036 <        i = 0
1037 <        for fileWithSuffix in self.output_file:
730 <            i= i + 1
1034 >        txt += '# directory content\n'
1035 >        txt += 'ls \n'
1036 >
1037 >        for fileWithSuffix in (self.output_file+self.output_file_sandbox):
1038              output_file_num = self.numberFile_(fileWithSuffix, '$NJob')
732            file_list=file_list+output_file_num+''
733            txt += '\n'
734            txt += 'ls \n'
1039              txt += '\n'
1040 +            txt += '# check output file\n'
1041              txt += 'ls '+fileWithSuffix+'\n'
1042 <            txt += 'exe_result=$?\n'
1043 <            txt += 'if [ $exe_result -ne 0 ] ; then\n'
1044 <            txt += '   echo "ERROR: No output file to manage"\n'
1045 <            ### OLI_DANIELE
1046 <            txt += '    if [ $middleware == OSG ]; then \n'
1047 <            txt += '        echo "prepare dummy output file"\n'
1048 <            txt += '        cp '+fileWithSuffix+' $RUNTIME_AREA/'+output_file_num+'\n'
1049 <            txt += '    fi \n'
1042 >            txt += 'ls_result=$?\n'
1043 >            txt += 'if [ $ls_result -ne 0 ] ; then\n'
1044 >            txt += '   echo "ERROR: Problem with output file"\n'
1045 >            if common.scheduler.boss_scheduler_name == 'condor_g':
1046 >                txt += '    if [ $middleware == OSG ]; then \n'
1047 >                txt += '        echo "prepare dummy output file"\n'
1048 >                txt += '        echo "Processing of job output failed" > $RUNTIME_AREA/'+output_file_num+'\n'
1049 >                txt += '    fi \n'
1050              txt += 'else\n'
1051              txt += '   cp '+fileWithSuffix+' $RUNTIME_AREA/'+output_file_num+'\n'
1052              txt += 'fi\n'
748            if i == check:
749                txt += 'cd $RUNTIME_AREA\n'
750                pass      
751            pass
1053        
1054 <        file_list=file_list[:-1]
1055 <        txt += 'file_list="'+file_list+'"\n'
1054 >        txt += 'cd $RUNTIME_AREA\n'
1055 >        txt += 'cd $RUNTIME_AREA\n'
1056          ### OLI_DANIELE
1057          txt += 'if [ $middleware == OSG ]; then\n'  
1058          txt += '    cd $RUNTIME_AREA\n'
1059          txt += '    echo "Remove working directory: $WORKING_DIR"\n'
1060          txt += '    /bin/rm -rf $WORKING_DIR\n'
1061          txt += '    if [ -d $WORKING_DIR ] ;then\n'
1062 <        txt += '        echo "OSG WORKING DIR ==> $WORKING_DIR could not be deleted on on WN `hostname`"\n'
1062 >        txt += '        echo "SET_EXE 60999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after cleanup of WN"\n'
1063 >        txt += '        echo "JOB_EXIT_STATUS = 60999"\n'
1064 >        txt += '        echo "JobExitCode=60999" | tee -a $RUNTIME_AREA/$repo\n'
1065 >        txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
1066 >        txt += '        rm -f $RUNTIME_AREA/$repo \n'
1067 >        txt += '        echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1068 >        txt += '        echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1069          txt += '    fi\n'
1070          txt += 'fi\n'
1071          txt += '\n'
1072 +
1073 +        file_list = ''
1074 +        ## Add to filelist only files to be possibly copied to SE
1075 +        for fileWithSuffix in self.output_file:
1076 +            output_file_num = self.numberFile_(fileWithSuffix, '$NJob')
1077 +            file_list=file_list+output_file_num+' '
1078 +        file_list=file_list[:-1]
1079 +        txt += 'file_list="'+file_list+'"\n'
1080 +
1081          return txt
1082  
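Only the files that may be copied to a storage element end up in file_list; the sandbox-only files are left out. A small illustration of what the loop above leaves in the wrapper, with invented file names (the real loop calls self.numberFile_, shown next):

    output_file = ['histo.root', 'ntuple.root']
    file_list = ''
    for f in output_file:
        parts = f.split('.')
        file_list = file_list + parts[0] + '_$NJob.' + parts[-1] + ' '
    file_list = file_list[:-1]
    print 'file_list="%s"' % file_list   # file_list="histo_$NJob.root ntuple_$NJob.root"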
1083      def numberFile_(self, file, txt):
# Line 776 | Line 1092 | class Cmssw(JobType):
1092          # add "_txt"
1093          if len(p)>1:
1094            ext = p[len(p)-1]
779          #result = name + '_' + str(txt) + "." + ext
1095            result = name + '_' + txt + "." + ext
1096          else:
782          #result = name + '_' + str(txt)
1097            result = name + '_' + txt
1098          
1099          return result
1100  
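numberFile_ appends the job index to an output file name while keeping its extension, so each job writes distinct files. A standalone sketch of the rule; the assignment of name falls outside the hunk shown above, so treating the stem as everything before the first dot is an assumption here:

    def number_file(filename, tag):
        parts = filename.split(".")
        if len(parts) > 1:
            return parts[0] + "_" + tag + "." + parts[-1]
        return filename + "_" + tag

    print number_file("histo.root", "$NJob")   # histo_$NJob.root
    print number_file("summary", "3")          # summary_3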
1101 <    def getRequirements(self):
1101 >    def getRequirements(self, nj=[]):
1102          """
1103          return job requirements to add to jdl files
1104          """
1105          req = ''
1106 <        if common.analisys_common_info['sites']:
1107 <            if common.analisys_common_info['sw_version']:
1108 <                req='Member("VO-cms-' + \
1109 <                     common.analisys_common_info['sw_version'] + \
1110 <                     '", other.GlueHostApplicationSoftwareRunTimeEnvironment)'
1111 <            if len(common.analisys_common_info['sites'])>0:
1112 <                req = req + ' && ('
799 <                for i in range(len(common.analisys_common_info['sites'])):
800 <                    req = req + 'other.GlueCEInfoHostName == "' \
801 <                         + common.analisys_common_info['sites'][i] + '"'
802 <                    if ( i < (int(len(common.analisys_common_info['sites']) - 1)) ):
803 <                        req = req + ' || '
804 <            req = req + ')'
805 <        #print "req = ", req
1106 >        if self.version:
1107 >            req='Member("VO-cms-' + \
1108 >                 self.version + \
1109 >                 '", other.GlueHostApplicationSoftwareRunTimeEnvironment)'
1110 >
1111 >        req = req + ' && (other.GlueHostNetworkAdapterOutboundIP)'
1112 >
1113          return req
1114  
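The new getRequirements derives the JDL requirement from the CMSSW version plus an outbound-IP check, instead of looping over a site list. For example, with an illustrative version string the code above yields:

    version = "CMSSW_1_2_0"   # example value standing in for self.version
    req = ('Member("VO-cms-' + version +
           '", other.GlueHostApplicationSoftwareRunTimeEnvironment)')
    req = req + ' && (other.GlueHostNetworkAdapterOutboundIP)'
    print req
    # Member("VO-cms-CMSSW_1_2_0", other.GlueHostApplicationSoftwareRunTimeEnvironment) && (other.GlueHostNetworkAdapterOutboundIP)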
1115      def configFilename(self):
# Line 820 | Line 1127 | class Cmssw(JobType):
1127          txt += '   if [ -f $GRID3_APP_DIR/cmssoft/cmsset_default.sh ] ;then\n'
1128          txt += '      # Use $GRID3_APP_DIR/cmssoft/cmsset_default.sh to setup cms software\n'
1129          txt += '       source $GRID3_APP_DIR/cmssoft/cmsset_default.sh '+self.version+'\n'
1130 <        txt += '   elif [ -f $OSG_APP/cmssoft/cmsset_default.sh ] ;then\n'
1131 <        txt += '      # Use $OSG_APP/cmssoft/cmsset_default.sh to setup cms software\n'
1132 <        txt += '       source $OSG_APP/cmssoft/cmsset_default.sh '+self.version+'\n'
1130 >        txt += '   elif [ -f $OSG_APP/cmssoft/cms/cmsset_default.sh ] ;then\n'
1131 >        txt += '      # Use $OSG_APP/cmssoft/cms/cmsset_default.sh to setup cms software\n'
1132 >        txt += '       source $OSG_APP/cmssoft/cms/cmsset_default.sh '+self.version+'\n'
1133          txt += '   else\n'
1134 <        txt += '       echo "SET_CMS_ENV 10020 ==> ERROR $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cmsset_default.sh file not found"\n'
1134 >        txt += '       echo "SET_CMS_ENV 10020 ==> ERROR $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cms/cmsset_default.sh file not found"\n'
1135          txt += '       echo "JOB_EXIT_STATUS = 10020"\n'
1136          txt += '       echo "JobExitCode=10020" | tee -a $RUNTIME_AREA/$repo\n'
1137          txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
1138 <        txt += '       exit\n'
1138 >        txt += '       rm -f $RUNTIME_AREA/$repo \n'
1139 >        txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1140 >        txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1141 >        txt += '       exit 1\n'
1142          txt += '\n'
1143          txt += '       echo "Remove working directory: $WORKING_DIR"\n'
1144          txt += '       cd $RUNTIME_AREA\n'
1145          txt += '       /bin/rm -rf $WORKING_DIR\n'
1146          txt += '       if [ -d $WORKING_DIR ] ;then\n'
1147 <        txt += '           echo "OSG WORKING DIR ==> $WORKING_DIR could not be deleted on on WN `hostname`"\n'
1147 >        txt += '            echo "SET_CMS_ENV 10017 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cms/cmsset_default.sh file not found"\n'
1148 >        txt += '            echo "JOB_EXIT_STATUS = 10017"\n'
1149 >        txt += '            echo "JobExitCode=10017" | tee -a $RUNTIME_AREA/$repo\n'
1150 >        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
1151 >        txt += '            rm -f $RUNTIME_AREA/$repo \n'
1152 >        txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1153 >        txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1154          txt += '       fi\n'
1155          txt += '\n'
1156 <        txt += '       exit\n'
1156 >        txt += '       exit 1\n'
1157          txt += '   fi\n'
1158          txt += '\n'
1159          txt += '   echo "SET_CMS_ENV 0 ==> setup cms environment ok"\n'
# Line 853 | Line 1169 | class Cmssw(JobType):
1169          """
1170          txt  = '   \n'
1171          txt += '   echo " ### SETUP CMS LCG  ENVIRONMENT ### "\n'
856        txt += '      echo "JOB_EXIT_STATUS = 0"\n'
1172          txt += '   if [ ! $VO_CMS_SW_DIR ] ;then\n'
1173          txt += '       echo "SET_CMS_ENV 10031 ==> ERROR CMS software dir not found on WN `hostname`"\n'
1174          txt += '       echo "JOB_EXIT_STATUS = 10031" \n'
1175          txt += '       echo "JobExitCode=10031" | tee -a $RUNTIME_AREA/$repo\n'
1176          txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
1177 <        txt += '       exit\n'
1177 >        txt += '       rm -f $RUNTIME_AREA/$repo \n'
1178 >        txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1179 >        txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1180 >        txt += '       exit 1\n'
1181          txt += '   else\n'
1182          txt += '       echo "Sourcing environment... "\n'
1183          txt += '       if [ ! -s $VO_CMS_SW_DIR/cmsset_default.sh ] ;then\n'
# Line 867 | Line 1185 | class Cmssw(JobType):
1185          txt += '           echo "JOB_EXIT_STATUS = 10020"\n'
1186          txt += '           echo "JobExitCode=10020" | tee -a $RUNTIME_AREA/$repo\n'
1187          txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
1188 <        txt += '           exit\n'
1188 >        txt += '           rm -f $RUNTIME_AREA/$repo \n'
1189 >        txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1190 >        txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1191 >        txt += '           exit 1\n'
1192          txt += '       fi\n'
1193          txt += '       echo "sourcing $VO_CMS_SW_DIR/cmsset_default.sh"\n'
1194          txt += '       source $VO_CMS_SW_DIR/cmsset_default.sh\n'
# Line 877 | Line 1198 | class Cmssw(JobType):
1198          txt += '           echo "JOB_EXIT_STATUS = 10032"\n'
1199          txt += '           echo "JobExitCode=10032" | tee -a $RUNTIME_AREA/$repo\n'
1200          txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
1201 <        txt += '           exit\n'
1201 >        txt += '           rm -f $RUNTIME_AREA/$repo \n'
1202 >        txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1203 >        txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1204 >        txt += '           exit 1\n'
1205          txt += '       fi\n'
1206          txt += '   fi\n'
1207          txt += '   \n'
884        txt += '   string=`cat /etc/redhat-release`\n'
885        txt += '   echo $string\n'
886        txt += '   if [[ $string = *alhalla* ]]; then\n'
887        txt += '       echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
888        txt += '   elif [[ $string = *Enterprise* ]] || [[ $string = *cientific* ]]; then\n'
889        txt += '       export SCRAM_ARCH=slc3_ia32_gcc323\n'
890        txt += '       echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
891        txt += '   else\n'
892        txt += '       echo "SET_CMS_ENV 1 ==> ERROR OS unknown, LCG environment not initialized"\n'
893        txt += '       echo "JOB_EXIT_STATUS = 10033"\n'
894        txt += '       echo "JobExitCode=10033" | tee -a $RUNTIME_AREA/$repo\n'
895        txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
896        txt += '       exit\n'
897        txt += '   fi\n'
1208          txt += '   echo "SET_CMS_ENV 0 ==> setup cms environment ok"\n'
1209          txt += '   echo "### END SETUP CMS LCG ENVIRONMENT ###"\n'
1210          return txt
# Line 904 | Line 1214 | class Cmssw(JobType):
1214  
1215      def getParams(self):
1216          return self._params
1217 +
1218 +    def setTaskid_(self):
1219 +        self._taskId = self.cfg_params['taskId']
1220 +        
1221 +    def getTaskid(self):
1222 +        return self._taskId
1223 +
1224 + #######################################################################
1225 +    def uniquelist(self, old):
1226 +        """
1227 +        remove duplicates from a list
1228 +        """
1229 +        nd={}
1230 +        for e in old:
1231 +            nd[e]=0
1232 +        return nd.keys()
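uniquelist() de-duplicates by using the entries as dictionary keys, so the ordering of the result is not guaranteed. A standalone usage sketch of the same idea, with invented file names:

    def uniquelist(old):
        nd = {}
        for e in old:
            nd[e] = 0
        return nd.keys()

    print uniquelist(['a.root', 'b.root', 'a.root'])   # e.g. ['a.root', 'b.root']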

Diff Legend

Removed lines (shown without a marker)
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)