root/cvsroot/COMP/CRAB/python/cms_cmssw.py

Comparing COMP/CRAB/python/cms_cmssw.py (file contents):
Revision 1.13 by gutsche, Tue Jun 27 02:31:31 2006 UTC vs.
Revision 1.75 by gutsche, Sun Apr 8 23:50:33 2007 UTC

# Line 4 | Line 4 | from crab_exceptions import *
4   from crab_util import *
5   import common
6   import PsetManipulator  
7 <
8 < import DBSInfo_EDM
9 < import DataDiscovery_EDM
10 < import DataLocation_EDM
7 > import DataDiscovery
8 > import DataDiscovery_DBS2
9 > import DataLocation
10   import Scram
11  
12 < import os, string, re
12 > import os, string, re, shutil, glob
13  
14   class Cmssw(JobType):
15 <    def __init__(self, cfg_params):
15 >    def __init__(self, cfg_params, ncjobs):
16          JobType.__init__(self, 'CMSSW')
17          common.logger.debug(3,'CMSSW::__init__')
18  
20        self.analisys_common_info = {}
19          # Marco.
20          self._params = {}
21          self.cfg_params = cfg_params
22 +
23 +        try:
24 +            self.MaxTarBallSize = float(self.cfg_params['EDG.maxtarballsize'])
25 +        except KeyError:
26 +            self.MaxTarBallSize = 100.0
27 +
28 +        # number of jobs requested to be created; limits job splitting
29 +        self.ncjobs = ncjobs
30 +
31          log = common.logger
32          
33          self.scram = Scram.Scram(cfg_params)
27        scramArea = ''
34          self.additional_inbox_files = []
35          self.scriptExe = ''
36          self.executable = ''
37 +        self.executable_arch = self.scram.getArch()
38          self.tgz_name = 'default.tgz'
39 +        self.scriptName = 'CMSSW.sh'
40 +        self.pset = ''      #script use case Da
41 +        self.datasetPath = '' #script use case Da
42  
43 +        # set FJR file name
44 +        self.fjrFileName = 'crab_fjr.xml'
45  
46          self.version = self.scram.getSWVersion()
47 +        common.taskDB.setDict('codeVersion',self.version)
48          self.setParam_('application', self.version)
36        common.analisys_common_info['sw_version'] = self.version
37        ### FEDE
38        common.analisys_common_info['copy_input_data'] = 0
39        common.analisys_common_info['events_management'] = 1
49  
50          ### collect Data cards
51 +
52 +        ## get DBS mode
53 +        try:
54 +            self.use_dbs_2 = int(self.cfg_params['CMSSW.use_dbs_2'])
55 +        except KeyError:
56 +            self.use_dbs_2 = 0
57 +            
58          try:
59              tmp =  cfg_params['CMSSW.datasetpath']
60              log.debug(6, "CMSSW::CMSSW(): datasetPath = "+tmp)
61              if string.lower(tmp)=='none':
62                  self.datasetPath = None
63 +                self.selectNoInput = 1
64              else:
65                  self.datasetPath = tmp
66 +                self.selectNoInput = 0
67          except KeyError:
68              msg = "Error: datasetpath not defined "  
69              raise CrabException(msg)
# Line 57 | Line 75 | class Cmssw(JobType):
75              self.setParam_('owner', 'None')
76          else:
77              datasetpath_split = self.datasetPath.split("/")
78 <            self.setParam_('dataset', datasetpath_split[1])
79 <            self.setParam_('owner', datasetpath_split[-1])
78 >            if self.use_dbs_2 == 1 :
79 >                self.setParam_('dataset', datasetpath_split[1])
80 >                self.setParam_('owner', datasetpath_split[2])
81 >            else :
82 >                self.setParam_('dataset', datasetpath_split[1])
83 >                self.setParam_('owner', datasetpath_split[-1])
84  
85          self.setTaskid_()
86          self.setParam_('taskId', self.cfg_params['taskId'])
# Line 82 | Line 104 | class Cmssw(JobType):
104          try:
105              self.pset = cfg_params['CMSSW.pset']
106              log.debug(6, "Cmssw::Cmssw(): PSet file = "+self.pset)
107 <            if (not os.path.exists(self.pset)):
108 <                raise CrabException("User defined PSet file "+self.pset+" does not exist")
107 >            if self.pset.lower() != 'none' :
108 >                if (not os.path.exists(self.pset)):
109 >                    raise CrabException("User defined PSet file "+self.pset+" does not exist")
110 >            else:
111 >                self.pset = None
112          except KeyError:
113              raise CrabException("PSet file missing. Cannot run cmsRun ")
114  
115          # output files
116 +        ## stuff which must be returned always via sandbox
117 +        self.output_file_sandbox = []
118 +
119 +        # add fjr report by default via sandbox
120 +        self.output_file_sandbox.append(self.fjrFileName)
121 +
122 +        # other output files to be returned via sandbox or copied to SE
123          try:
124              self.output_file = []
93
125              tmp = cfg_params['CMSSW.output_file']
126              if tmp != '':
127                  tmpOutFiles = string.split(cfg_params['CMSSW.output_file'],',')
# Line 100 | Line 131 | class Cmssw(JobType):
131                      self.output_file.append(tmp)
132                      pass
133              else:
134 <                log.message("No output file defined: only stdout/err will be available")
134 >                log.message("No output file defined: only stdout/err and the CRAB Framework Job Report will be available")
135                  pass
136              pass
137          except KeyError:
138 <            log.message("No output file defined: only stdout/err will be available")
138 >            log.message("No output file defined: only stdout/err and the CRAB Framework Job Report will be available")
139              pass
140  
141          # script_exe file as additional file in inputSandbox
142          try:
143              self.scriptExe = cfg_params['USER.script_exe']
113            self.additional_inbox_files.append(self.scriptExe)
144              if self.scriptExe != '':
145                 if not os.path.isfile(self.scriptExe):
146 <                  msg ="WARNING. file "+self.scriptExe+" not found"
146 >                  msg ="ERROR. file "+self.scriptExe+" not found"
147                    raise CrabException(msg)
148 +               self.additional_inbox_files.append(string.strip(self.scriptExe))
149          except KeyError:
150 <           pass
151 <                  
150 >            self.scriptExe = ''
151 >
152 >        #CarlosDaniele
153 >        if self.datasetPath == None and self.pset == None and self.scriptExe == '' :
154 >           msg ="Error. script_exe  not defined"
155 >           raise CrabException(msg)
156 >
157          ## additional input files
158          try:
159 <            tmpAddFiles = string.split(cfg_params['CMSSW.additional_input_files'],',')
159 >            tmpAddFiles = string.split(cfg_params['USER.additional_input_files'],',')
160              for tmp in tmpAddFiles:
161 <                if not os.path.exists(tmp):
162 <                    raise CrabException("Additional input file not found: "+tmp)
163 <                tmp=string.strip(tmp)
164 <                self.additional_inbox_files.append(tmp)
161 >                tmp = string.strip(tmp)
162 >                dirname = ''
163 >                if not tmp[0]=="/": dirname = "."
164 >                files = glob.glob(os.path.join(dirname, tmp))
165 >                for file in files:
166 >                    if not os.path.exists(file):
167 >                        raise CrabException("Additional input file not found: "+file)
168 >                    pass
169 >                    storedFile = common.work_space.shareDir()+file
170 >                    shutil.copyfile(file, storedFile)
171 >                    self.additional_inbox_files.append(string.strip(storedFile))
172                  pass
173              pass
174 +            common.logger.debug(5,"Additional input files: "+str(self.additional_inbox_files))
175          except KeyError:
176              pass
177  
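A rough sketch (not the CRAB code itself) of how the USER.additional_input_files value handled above is expanded: each comma-separated entry is stripped, relative patterns are globbed against the working directory, and every match is copied into the task share area before joining the input sandbox. The cfg value and paths below are invented.

    import glob, os

    cfg_value = "data/*.txt, extra_card.dat"   # hypothetical USER.additional_input_files
    inbox = []
    for entry in cfg_value.split(','):
        entry = entry.strip()
        # relative patterns are resolved against the current directory, as above
        pattern = entry
        if not entry.startswith('/'):
            pattern = os.path.join('.', entry)
        for match in glob.glob(pattern):
            # each match is then copied into common.work_space.shareDir()
            # and the stored copy is appended to the inbox list
            inbox.append(match)
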
178          # files per job
179          try:
180 <            self.filesPerJob = int(cfg_params['CMSSW.files_per_jobs']) #Daniele
181 <            self.selectFilesPerJob = 1
180 >            if (cfg_params['CMSSW.files_per_jobs']):
181 >                raise CrabException("files_per_jobs no longer supported.  Quitting.")
182          except KeyError:
183 <            self.filesPerJob = 0
140 <            self.selectFilesPerJob = 0
183 >            pass
184  
185          ## Events per job
186          try:
# Line 147 | Line 190 | class Cmssw(JobType):
190              self.eventsPerJob = -1
191              self.selectEventsPerJob = 0
192      
193 <        # To be implemented
151 <        # ## number of jobs
152 <        # try:
153 <        #     self.numberOfJobs =int( cfg_params['CMSSW.number_of_job'])
154 <        #     self.selectNumberOfJobs = 1
155 <        # except KeyError:
156 <        #     self.selectNumberOfJobs = 0
157 <
158 <        if (self.selectFilesPerJob == self.selectEventsPerJob):
159 <            msg = 'Must define either files_per_jobs or events_per_job'
160 <            raise CrabException(msg)
161 <
162 <        if (self.selectEventsPerJob  and not self.datasetPath == None):
163 <            msg = 'Splitting according to events_per_job available only with None as datasetpath'
164 <            raise CrabException(msg)
165 <    
193 >        ## number of jobs
194          try:
195 <            self.total_number_of_events = int(cfg_params['CMSSW.total_number_of_events'])
195 >            self.theNumberOfJobs =int( cfg_params['CMSSW.number_of_jobs'])
196 >            self.selectNumberOfJobs = 1
197          except KeyError:
198 <            msg = 'Must define total_number_of_events'
199 <            raise CrabException(msg)
200 <        
172 <        CEBlackList = []
198 >            self.theNumberOfJobs = 0
199 >            self.selectNumberOfJobs = 0
200 >
201          try:
202 <            tmpBad = string.split(cfg_params['EDG.ce_black_list'],',')
203 <            for tmp in tmpBad:
176 <                tmp=string.strip(tmp)
177 <                CEBlackList.append(tmp)
202 >            self.total_number_of_events = int(cfg_params['CMSSW.total_number_of_events'])
203 >            self.selectTotalNumberEvents = 1
204          except KeyError:
205 <            pass
206 <
181 <        self.reCEBlackList=[]
182 <        for bad in CEBlackList:
183 <            self.reCEBlackList.append(re.compile( bad ))
205 >            self.total_number_of_events = 0
206 >            self.selectTotalNumberEvents = 0
207  
208 <        common.logger.debug(5,'CEBlackList: '+str(CEBlackList))
208 >        if self.pset != None: #CarlosDaniele
209 >             if ( (self.selectTotalNumberEvents + self.selectEventsPerJob + self.selectNumberOfJobs) != 2 ):
210 >                 msg = 'Must define exactly two of total_number_of_events, events_per_job, or number_of_jobs.'
211 >                 raise CrabException(msg)
212 >        else:
213 >             if (self.selectNumberOfJobs == 0):
214 >                 msg = 'Must specify  number_of_jobs.'
215 >                 raise CrabException(msg)
216  
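A minimal sketch of the rule enforced just above, treating cfg_params as a plain dict of [CMSSW] options (parameter names from the code, values invented): when a pset is given, exactly two of the three splitting parameters must be set and the third follows from them.

    def check_splitting(cfg_params):
        # Mirrors the two-of-three check above.
        keys = ['CMSSW.total_number_of_events',
                'CMSSW.events_per_job',
                'CMSSW.number_of_jobs']
        given = [k for k in keys if k in cfg_params]
        if len(given) != 2:
            raise ValueError('Must define exactly two of total_number_of_events, '
                             'events_per_job, or number_of_jobs.')

    # e.g. 10000 total events in jobs of 500 events each -> about 20 jobs
    check_splitting({'CMSSW.total_number_of_events': 10000,
                     'CMSSW.events_per_job': 500})
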
217 <        CEWhiteList = []
217 >        ## source seed for pythia
218          try:
219 <            tmpGood = string.split(cfg_params['EDG.ce_white_list'],',')
190 <            for tmp in tmpGood:
191 <                tmp=string.strip(tmp)
192 <                CEWhiteList.append(tmp)
219 >            self.sourceSeed = int(cfg_params['CMSSW.pythia_seed'])
220          except KeyError:
221 <            pass
222 <
196 <        #print 'CEWhiteList: ',CEWhiteList
197 <        self.reCEWhiteList=[]
198 <        for Good in CEWhiteList:
199 <            self.reCEWhiteList.append(re.compile( Good ))
221 >            self.sourceSeed = None
222 >            common.logger.debug(5,"No seed given")
223  
224 <        common.logger.debug(5,'CEWhiteList: '+str(CEWhiteList))
225 <
226 <        self.PsetEdit = PsetManipulator.PsetManipulator(self.pset) #Daniele Pset
224 >        try:
225 >            self.sourceSeedVtx = int(cfg_params['CMSSW.vtx_seed'])
226 >        except KeyError:
227 >            self.sourceSeedVtx = None
228 >            common.logger.debug(5,"No vertex seed given")
229 >        try:
230 >            self.firstRun = int(cfg_params['CMSSW.first_run'])
231 >        except KeyError:
232 >            self.firstRun = None
233 >            common.logger.debug(5,"No first run given")
234 >        if self.pset != None: #CarlosDaniele
235 >            self.PsetEdit = PsetManipulator.PsetManipulator(self.pset) #Daniele Pset
236  
237          #DBSDLS-start
238          ## Initialize the variables that are extracted from DBS/DLS and needed in other places of the code
239          self.maxEvents=0  # max events available   ( --> check the requested nb. of evts in Creator.py)
240          self.DBSPaths={}  # all dbs paths requested ( --> input to the site local discovery script)
241 +        self.jobDestination=[]  # Site destination(s) for each job (list of lists)
242          ## Perform the data location and discovery (based on DBS/DLS)
243          ## SL: Don't if NONE is specified as input (pythia use case)
244 <        common.analisys_common_info['sites']=None
244 >        blockSites = {}
245          if self.datasetPath:
246 <            self.DataDiscoveryAndLocation(cfg_params)
246 >            blockSites = self.DataDiscoveryAndLocation(cfg_params)
247          #DBSDLS-end          
248  
249          self.tgzNameWithPath = self.getTarBall(self.executable)
217
218        # modify Pset
219        if (self.datasetPath): # standard job
220            self.PsetEdit.maxEvent(self.eventsPerJob) #Daniele  
221            self.PsetEdit.inputModule("INPUT") #Daniele
222
223        else:  # pythia like job
224            self.PsetEdit.maxEvent(self.eventsPerJob) #Daniele  
225            self.PsetEdit.pythiaSeed("INPUT") #Daniele
226            try:
227                self.sourceSeed = int(cfg_params['CMSSW.pythia_seed'])
228            except KeyError:
229                self.sourceSeed = 123456
230                common.logger.message("No seed given, will use "+str(self.sourceSeed))
231        
232        self.PsetEdit.psetWriter(self.configFilename())
250      
251          ## Select Splitting
252 <        if self.selectFilesPerJob: self.jobSplittingPerFiles()
253 <        elif self.selectEventsPerJob: self.jobSplittingPerEvents()
254 <        else:
255 <            msg = 'Don\'t know how to split...'
256 <            raise CrabException(msg)
252 >        if self.selectNoInput:
253 >            if self.pset == None: #CarlosDaniele
254 >                self.jobSplittingForScript()
255 >            else:
256 >                self.jobSplittingNoInput()
257 >        else:
258 >            self.jobSplittingByBlocks(blockSites)
259  
260 +        # modify Pset
261 +        if self.pset != None: #CarlosDaniele
262 +            try:
263 +                if (self.datasetPath): # standard job
264 +                    # allow to process a fraction of events in a file
265 +                    self.PsetEdit.inputModule("INPUT")
266 +                    self.PsetEdit.maxEvent("INPUTMAXEVENTS")
267 +                    self.PsetEdit.skipEvent("INPUTSKIPEVENTS")
268 +                else:  # pythia like job
269 +                    self.PsetEdit.maxEvent(self.eventsPerJob)
270 +                    if (self.firstRun):
271 +                        self.PsetEdit.pythiaFirstRun("INPUTFIRSTRUN")  #First Run
272 +                    if (self.sourceSeed) :
273 +                        self.PsetEdit.pythiaSeed("INPUT")
274 +                        if (self.sourceSeedVtx) :
275 +                            self.PsetEdit.pythiaSeedVtx("INPUTVTX")
276 +                # add FrameworkJobReport to parameter-set
277 +                self.PsetEdit.addCrabFJR(self.fjrFileName)
278 +                self.PsetEdit.psetWriter(self.configFilename())
279 +            except:
280 +                msg='Error while manipulating ParameterSet: exiting...'
281 +                raise CrabException(msg)
282  
283      def DataDiscoveryAndLocation(self, cfg_params):
284  
# Line 245 | Line 286 | class Cmssw(JobType):
286  
287          datasetPath=self.datasetPath
288  
248        ## TODO
249        dataTiersList = ""
250        dataTiers = dataTiersList.split(',')
251
289          ## Contact the DBS
290 +        common.logger.message("Contacting DBS...")
291          try:
292 <            self.pubdata=DataDiscovery_EDM.DataDiscovery_EDM(datasetPath, dataTiers, cfg_params)
292 >
293 >            if self.use_dbs_2 == 1 :
294 >                self.pubdata=DataDiscovery_DBS2.DataDiscovery_DBS2(datasetPath, cfg_params)
295 >            else :
296 >                self.pubdata=DataDiscovery.DataDiscovery(datasetPath, cfg_params)
297              self.pubdata.fetchDBSInfo()
298  
299 <        except DataDiscovery_EDM.NotExistingDatasetError, ex :
299 >        except DataDiscovery.NotExistingDatasetError, ex :
300              msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
301              raise CrabException(msg)
302 <
303 <        except DataDiscovery_EDM.NoDataTierinProvenanceError, ex :
302 >        except DataDiscovery.NoDataTierinProvenanceError, ex :
303 >            msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
304 >            raise CrabException(msg)
305 >        except DataDiscovery.DataDiscoveryError, ex:
306 >            msg = 'ERROR ***: failed Data Discovery in DBS :  %s'%ex.getErrorMessage()
307 >            raise CrabException(msg)
308 >        except DataDiscovery_DBS2.NotExistingDatasetError_DBS2, ex :
309 >            msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
310 >            raise CrabException(msg)
311 >        except DataDiscovery_DBS2.NoDataTierinProvenanceError_DBS2, ex :
312              msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
313              raise CrabException(msg)
314 <        except DataDiscovery_EDM.DataDiscoveryError, ex:
315 <            msg = 'ERROR ***: failed Data Discovery in DBS  %s'%ex.getErrorMessage()
314 >        except DataDiscovery_DBS2.DataDiscoveryError_DBS2, ex:
315 >            msg = 'ERROR ***: failed Data Discovery in DBS :  %s'%ex.getErrorMessage()
316              raise CrabException(msg)
317  
318          ## get list of all required data in the form of dbs paths  (dbs path = /dataset/datatier/owner)
269        ## self.DBSPaths=self.pubdata.getDBSPaths()
319          common.logger.message("Required data are :"+self.datasetPath)
320  
321 <        filesbyblock=self.pubdata.getFiles()
322 <        self.AllInputFiles=filesbyblock.values()
323 <        self.files = self.AllInputFiles        
275 <
276 <        ## TEMP
277 <    #    self.filesTmp = filesbyblock.values()
278 <    #    self.files = []
279 <    #    locPath='rfio:cmsbose2.bo.infn.it:/flatfiles/SE00/cms/fanfani/ProdTest/'
280 <    #    locPath=''
281 <    #    tmp = []
282 <    #    for file in self.filesTmp[0]:
283 <    #        tmp.append(locPath+file)
284 <    #    self.files.append(tmp)
285 <        ## END TEMP
321 >        self.filesbyblock=self.pubdata.getFiles()
322 >        self.eventsbyblock=self.pubdata.getEventsPerBlock()
323 >        self.eventsbyfile=self.pubdata.getEventsPerFile()
324  
325          ## get max number of events
288        #common.logger.debug(10,"number of events for primary fileblocks %i"%self.pubdata.getMaxEvents())
326          self.maxEvents=self.pubdata.getMaxEvents() ##  self.maxEvents used in Creator.py
327 <        common.logger.message("\nThe number of available events is %s"%self.maxEvents)
327 >        common.logger.message("The number of available events is %s\n"%self.maxEvents)
328  
329 +        common.logger.message("Contacting DLS...")
330          ## Contact the DLS and build a list of sites hosting the fileblocks
331          try:
332 <            dataloc=DataLocation_EDM.DataLocation_EDM(filesbyblock.keys(),cfg_params)
332 >            dataloc=DataLocation.DataLocation(self.filesbyblock.keys(),cfg_params)
333              dataloc.fetchDLSInfo()
334 <        except DataLocation_EDM.DataLocationError , ex:
334 >        except DataLocation.DataLocationError , ex:
335              msg = 'ERROR ***: failed Data Location in DLS \n %s '%ex.getErrorMessage()
336              raise CrabException(msg)
337          
300        allsites=dataloc.getSites()
301        common.logger.debug(5,"sites are %s"%allsites)
302        sites=self.checkBlackList(allsites)
303        common.logger.debug(5,"sites are (after black list) %s"%sites)
304        sites=self.checkWhiteList(sites)
305        common.logger.debug(5,"sites are (after white list) %s"%sites)
338  
339 <        if len(sites)==0:
340 <            msg = 'No sites hosting all the needed data! Exiting... '
341 <            raise CrabException(msg)
339 >        sites = dataloc.getSites()
340 >        allSites = []
341 >        listSites = sites.values()
342 >        for listSite in listSites:
343 >            for oneSite in listSite:
344 >                allSites.append(oneSite)
345 >        allSites = self.uniquelist(allSites)
346  
347 <        common.logger.message("List of Sites hosting the data : "+str(sites))
348 <        common.logger.debug(6, "List of Sites: "+str(sites))
349 <        common.analisys_common_info['sites']=sites    ## used in SchedulerEdg.py in createSchScript
314 <        self.setParam_('TargetCE', ','.join(sites))
315 <        return
347 >        common.logger.message("Sites ("+str(len(allSites))+") hosting part/all of dataset: "+str(allSites))
348 >        common.logger.debug(6, "List of Sites: "+str(allSites))
349 >        return sites
350      
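What DataDiscoveryAndLocation hands back to the splitter is a dictionary keyed by fileblock name, whose values are the lists of sites hosting that block; a sketch of the shape, with block and site names invented:

    # Illustrative only -- block and site names are made up.
    blockSites = {
        '/PrimaryDS/SIM/CMSSW_1_2_0-DIGI#0001': ['se01.fnal.gov', 'srm.cern.ch'],
        '/PrimaryDS/SIM/CMSSW_1_2_0-DIGI#0002': ['srm.cern.ch'],
    }
    # jobSplittingByBlocks(blockSites) below walks these blocks in order and
    # records blockSites[block] in self.jobDestination for every job built
    # from that block.
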
351 <    def jobSplittingPerFiles(self):
318 <        """
319 <        Perform job splitting based on number of files to be accessed per job
351 >    def jobSplittingByBlocks(self, blockSites):
352          """
353 <        common.logger.debug(5,'Splitting per input files')
354 <        common.logger.message('Required '+str(self.filesPerJob)+' files per job ')
355 <        common.logger.message('Required '+str(self.total_number_of_events)+' events in total ')
356 <
357 <        ## TODO: SL need to have (from DBS) a detailed list of how many events per each file
358 <        n_tot_files = (len(self.files[0]))
359 <        #print "n_tot_files = ", n_tot_files
360 <        ## SL: this is wrong if the files have different number of events
361 <        #print "self.maxEvents = ", self.maxEvents
362 <        evPerFile = int(self.maxEvents)/n_tot_files
363 <        #print "evPerFile = int(self.maxEvents)/n_tot_files =  ", evPerFile
364 <
365 <        common.logger.debug(5,'Events per File '+str(evPerFile))
366 <
367 <        ## if asked to process all events, do it
368 <        if self.total_number_of_events == -1:
369 <            self.total_number_of_events=self.maxEvents
370 <            self.total_number_of_jobs = int(n_tot_files)*1/int(self.filesPerJob)
371 <            common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for all available events '+str(self.total_number_of_events)+' events')
372 <        
353 >        Perform job splitting. Jobs run over an integer number of files
354 >        and no more than one block.
355 >        ARGUMENT: blockSites: dictionary with blocks as keys and list of host sites as values
356 >        REQUIRES: self.selectTotalNumberEvents, self.selectEventsPerJob, self.selectNumberOfJobs,
357 >                  self.total_number_of_events, self.eventsPerJob, self.theNumberOfJobs,
358 >                  self.maxEvents, self.filesbyblock
359 >        SETS: self.jobDestination - Site destination(s) for each job (a list of lists)
360 >              self.total_number_of_jobs - Total # of jobs
361 >              self.list_of_args - File(s) job will run on (a list of lists)
362 >        """
363 >
364 >        # ---- Handle the possible job splitting configurations ---- #
365 >        if (self.selectTotalNumberEvents):
366 >            totalEventsRequested = self.total_number_of_events
367 >        if (self.selectEventsPerJob):
368 >            eventsPerJobRequested = self.eventsPerJob
369 >            if (self.selectNumberOfJobs):
370 >                totalEventsRequested = self.theNumberOfJobs * self.eventsPerJob
371 >
372 >        # If user requested all the events in the dataset
373 >        if (totalEventsRequested == -1):
374 >            eventsRemaining=self.maxEvents
375 >        # If user requested more events than are in the dataset
376 >        elif (totalEventsRequested > self.maxEvents):
377 >            eventsRemaining = self.maxEvents
378 >            common.logger.message("Requested "+str(self.total_number_of_events)+ " events, but only "+str(self.maxEvents)+" events are available.")
379 >        # If user requested less events than are in the dataset
380          else:
381 <            #print "self.total_number_of_events = ", self.total_number_of_events
382 <            #print "evPerFile = ", evPerFile
383 <            self.total_number_of_files = int(self.total_number_of_events/evPerFile)
384 <            #print "self.total_number_of_files = int(self.total_number_of_events/evPerFile) = " , self.total_number_of_files
385 <            ## SL: if ask for less event than what is computed to be available on a
386 <            ##     file, process the first file anyhow.
387 <            if self.total_number_of_files == 0:
388 <                self.total_number_of_files = self.total_number_of_files + 1
350 <                
381 >            eventsRemaining = totalEventsRequested
382 >
383 >        # If user requested more events per job than are in the dataset
384 >        if (self.selectEventsPerJob and eventsPerJobRequested > self.maxEvents):
385 >            eventsPerJobRequested = self.maxEvents
386 >
387 >        # For user info at end
388 >        totalEventCount = 0
389  
390 <            common.logger.debug(5,'N files  '+str(self.total_number_of_files))
390 >        if (self.selectTotalNumberEvents and self.selectNumberOfJobs):
391 >            eventsPerJobRequested = int(eventsRemaining/self.theNumberOfJobs)
392  
393 <            check = 0
393 >        if (self.selectNumberOfJobs):
394 >            common.logger.message("May not create the exact number_of_jobs requested.")
395 >
396 >        if ( self.ncjobs == 'all' ) :
397 >            totalNumberOfJobs = 999999999
398 >        else :
399 >            totalNumberOfJobs = self.ncjobs
400              
356            ## Compute the number of jobs
357            #self.total_number_of_jobs = int(n_tot_files)*1/int(self.filesPerJob)
358            #print "self.total_number_of_files = ", self.total_number_of_files
359            #print "self.filesPerJob = ", self.filesPerJob
360            self.total_number_of_jobs = int(self.total_number_of_files/self.filesPerJob)
361            #print "self.total_number_of_jobs = ", self.total_number_of_jobs
362            common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
363
364            ## is there any remainder?
365            check = int(self.total_number_of_files) - (int(self.total_number_of_jobs)*self.filesPerJob)
366
367            common.logger.debug(5,'Check  '+str(check))
368
369            if check > 0:
370                self.total_number_of_jobs =  self.total_number_of_jobs + 1
371                common.logger.message('Warning: last job will be created with '+str(check)+' files')
401  
402 <            #common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for a total of '+str((self.total_number_of_jobs-1)*self.filesPerJob*evPerFile + check*evPerFile)+' events')
403 <            common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for a total of '+str((self.total_number_of_jobs)*self.filesPerJob*evPerFile + check*evPerFile)+' events')
404 <            pass
402 >        blocks = blockSites.keys()
403 >        blockCount = 0
404 >        # Backup variable in case self.maxEvents counted events in a non-included block
405 >        numBlocksInDataset = len(blocks)
406  
407 +        jobCount = 0
408          list_of_lists = []
409 <        for i in xrange(0, int(n_tot_files), self.filesPerJob):
410 <            parString = "\\{"
409 >
410 >        # ---- Iterate over the blocks in the dataset until ---- #
411 >        # ---- we've met the requested total # of events    ---- #
412 >        while ( (eventsRemaining > 0) and (blockCount < numBlocksInDataset) and (jobCount < totalNumberOfJobs)):
413 >            block = blocks[blockCount]
414 >            blockCount += 1
415              
416 <            params = self.files[0][i: i+self.filesPerJob]
417 <            for i in range(len(params) - 1):
418 <                parString += '\\\"' + params[i] + '\\\"\,'
416 >            if self.eventsbyblock.has_key(block) :
417 >                numEventsInBlock = self.eventsbyblock[block]
418 >                common.logger.debug(5,'Events in Block File '+str(numEventsInBlock))
419              
420 <            parString += '\\\"' + params[len(params) - 1] + '\\\"\\}'
421 <            list_of_lists.append(parString)
422 <            pass
423 <
420 >                files = self.filesbyblock[block]
421 >                numFilesInBlock = len(files)
422 >                if (numFilesInBlock <= 0):
423 >                    continue
424 >                fileCount = 0
425 >
426 >                # ---- New block => New job ---- #
427 >                parString = "\\{"
428 >                # counter for number of events in files currently worked on
429 >                filesEventCount = 0
430 >                # flag if next while loop should touch new file
431 >                newFile = 1
432 >                # job event counter
433 >                jobSkipEventCount = 0
434 >            
435 >                # ---- Iterate over the files in the block until we've met the requested ---- #
436 >                # ---- total # of events or we've gone over all the files in this block  ---- #
437 >                while ( (eventsRemaining > 0) and (fileCount < numFilesInBlock) and (jobCount < totalNumberOfJobs) ):
438 >                    file = files[fileCount]
439 >                    if newFile :
440 >                        try:
441 >                            numEventsInFile = self.eventsbyfile[file]
442 >                            common.logger.debug(6, "File "+str(file)+" has "+str(numEventsInFile)+" events")
443 >                            # increase filesEventCount
444 >                            filesEventCount += numEventsInFile
445 >                            # Add file to current job
446 >                            parString += '\\\"' + file + '\\\"\,'
447 >                            newFile = 0
448 >                        except KeyError:
449 >                            common.logger.message("File "+str(file)+" has unknown number of events: skipping")
450 >                        
451 >
452 >                    # if less events in file remain than eventsPerJobRequested
453 >                    if ( filesEventCount - jobSkipEventCount < eventsPerJobRequested ) :
454 >                        # if last file in block
455 >                        if ( fileCount == numFilesInBlock-1 ) :
456 >                            # end job using last file, use remaining events in block
457 >                            # close job and touch new file
458 >                            fullString = parString[:-2]
459 >                            fullString += '\\}'
460 >                            list_of_lists.append([fullString,str(-1),str(jobSkipEventCount)])
461 >                            common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(filesEventCount - jobSkipEventCount)+" events (last file in block).")
462 >                            self.jobDestination.append(blockSites[block])
463 >                            common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
464 >                            # reset counter
465 >                            jobCount = jobCount + 1
466 >                            totalEventCount = totalEventCount + filesEventCount - jobSkipEventCount
467 >                            eventsRemaining = eventsRemaining - filesEventCount + jobSkipEventCount
468 >                            jobSkipEventCount = 0
469 >                            # reset file
470 >                            parString = "\\{"
471 >                            filesEventCount = 0
472 >                            newFile = 1
473 >                            fileCount += 1
474 >                        else :
475 >                            # go to next file
476 >                            newFile = 1
477 >                            fileCount += 1
478 >                    # if events in file equal to eventsPerJobRequested
479 >                    elif ( filesEventCount - jobSkipEventCount == eventsPerJobRequested ) :
480 >                        # close job and touch new file
481 >                        fullString = parString[:-2]
482 >                        fullString += '\\}'
483 >                        list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
484 >                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
485 >                        self.jobDestination.append(blockSites[block])
486 >                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
487 >                        # reset counter
488 >                        jobCount = jobCount + 1
489 >                        totalEventCount = totalEventCount + eventsPerJobRequested
490 >                        eventsRemaining = eventsRemaining - eventsPerJobRequested
491 >                        jobSkipEventCount = 0
492 >                        # reset file
493 >                        parString = "\\{"
494 >                        filesEventCount = 0
495 >                        newFile = 1
496 >                        fileCount += 1
497 >                        
498 >                    # if more events in file remain than eventsPerJobRequested
499 >                    else :
500 >                        # close job but don't touch new file
501 >                        fullString = parString[:-2]
502 >                        fullString += '\\}'
503 >                        list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
504 >                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
505 >                        self.jobDestination.append(blockSites[block])
506 >                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
507 >                        # increase counter
508 >                        jobCount = jobCount + 1
509 >                        totalEventCount = totalEventCount + eventsPerJobRequested
510 >                        eventsRemaining = eventsRemaining - eventsPerJobRequested
511 >                        # calculate skip events for last file
512 >                        # use filesEventCount (contains several files), jobSkipEventCount and eventsPerJobRequested
513 >                        jobSkipEventCount = eventsPerJobRequested - (filesEventCount - jobSkipEventCount - self.eventsbyfile[file])
514 >                        # remove all but the last file
515 >                        filesEventCount = self.eventsbyfile[file]
516 >                        parString = "\\{"
517 >                        parString += '\\\"' + file + '\\\"\,'
518 >                    pass # END if
519 >                pass # END while (iterate over files in the block)
520 >        pass # END while (iterate over blocks in the dataset)
521 >        self.ncjobs = self.total_number_of_jobs = jobCount
522 >        if (eventsRemaining > 0 and jobCount < totalNumberOfJobs ):
523 >            common.logger.message("Could not run on all requested events because some blocks are not hosted at allowed sites.")
524 >        common.logger.message("\n"+str(jobCount)+" job(s) can run on "+str(totalEventCount)+" events.\n")
525 >        
526          self.list_of_args = list_of_lists
390        #print self.list_of_args
527          return
528  
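Each entry of self.list_of_args built above holds three strings: the escaped list of input files, the maximum number of events for the job ('-1' meaning run to the end of the block), and the number of events to skip in the first file. One possible entry, with invented file names and counts:

    # The backslash-escaped braces and quotes survive shell quoting in the job
    # wrapper, where sed later turns them into {"..."} inside pset.cfg.
    job_args = [r'\{\"/store/RelVal/file1.root\"\,\"/store/RelVal/file2.root\"\}',
                '500',   # MaxEvents for this job
                '250']   # SkipEvents in the first file
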
529 <    def jobSplittingPerEvents(self):
529 >    def jobSplittingNoInput(self):
530          """
531          Perform job splitting based on number of event per job
532          """
533          common.logger.debug(5,'Splitting per events')
534          common.logger.message('Required '+str(self.eventsPerJob)+' events per job ')
535 +        common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
536          common.logger.message('Required '+str(self.total_number_of_events)+' events in total ')
537  
538          if (self.total_number_of_events < 0):
539              msg='Cannot split jobs per Events with "-1" as total number of events'
540              raise CrabException(msg)
541  
542 <        self.total_number_of_jobs = int(self.total_number_of_events/self.eventsPerJob)
543 <
544 <        print "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"
545 <        print "self.total_number_of_events = ", self.total_number_of_events
546 <        print "self.eventsPerJob = ", self.eventsPerJob
547 <        print "self.total_number_of_jobs = ", self.total_number_of_jobs
548 <        print "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"
549 <        
542 >        if (self.selectEventsPerJob):
543 >            if (self.selectTotalNumberEvents):
544 >                self.total_number_of_jobs = int(self.total_number_of_events/self.eventsPerJob)
545 >            elif(self.selectNumberOfJobs) :  
546 >                self.total_number_of_jobs =self.theNumberOfJobs
547 >                self.total_number_of_events =int(self.theNumberOfJobs*self.eventsPerJob)
548 >
549 >        elif (self.selectNumberOfJobs) :
550 >            self.total_number_of_jobs = self.theNumberOfJobs
551 >            self.eventsPerJob = int(self.total_number_of_events/self.total_number_of_jobs)
552 >
553          common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
554  
555          # is there any remainder?
# Line 417 | Line 557 | class Cmssw(JobType):
557  
558          common.logger.debug(5,'Check  '+str(check))
559  
560 +        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created, each for '+str(self.eventsPerJob)+' events, for a total of '+str(self.total_number_of_jobs*self.eventsPerJob)+' events')
561          if check > 0:
562 <            common.logger.message('Warning: asked '+self.total_number_of_events+' but will do only '+(int(self.total_number_of_jobs)*self.eventsPerJob))
422 <
423 <        common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for a total of '+str(self.total_number_of_jobs*self.eventsPerJob)+' events')
562 >            common.logger.message('Warning: asked '+str(self.total_number_of_events)+' but can do only '+str(int(self.total_number_of_jobs)*self.eventsPerJob))
563  
564          # argument is seed number.$i
565          self.list_of_args = []
566          for i in range(self.total_number_of_jobs):
567 <            self.list_of_args.append(int(str(self.sourceSeed)+str(i)))
568 <        print self.list_of_args
567 >            ## Since there is no input, any site is good
568 >           # self.jobDestination.append(["Any"])
569 >            self.jobDestination.append([""]) #must be empty to write correctly the xml
570 >            args=''
571 >            if (self.firstRun):
572 >                    ## pythia first run
573 >                #self.list_of_args.append([(str(self.firstRun)+str(i))])
574 >                args=args+(str(self.firstRun)+str(i))
575 >            else:
576 >                ## no first run
577 >                #self.list_of_args.append([str(i)])
578 >                args=args+str(i)
579 >            if (self.sourceSeed):
580 >                if (self.sourceSeedVtx):
581 >                    ## pythia + vtx random seed
582 >                    #self.list_of_args.append([
583 >                    #                          str(self.sourceSeed)+str(i),
584 >                    #                          str(self.sourceSeedVtx)+str(i)
585 >                    #                          ])
586 >                    args=args+str(',')+str(self.sourceSeed)+str(i)+str(',')+str(self.sourceSeedVtx)+str(i)
587 >                else:
588 >                    ## only pythia random seed
589 >                    #self.list_of_args.append([(str(self.sourceSeed)+str(i))])
590 >                    args=args +str(',')+str(self.sourceSeed)+str(i)
591 >            else:
592 >                ## no random seed
593 >                if str(args)=='': args=args+(str(self.firstRun)+str(i))
594 >            arguments=args.split(',')
595 >            if len(arguments)==3:self.list_of_args.append([str(arguments[0]),str(arguments[1]),str(arguments[2])])
596 >            elif len(arguments)==2:self.list_of_args.append([str(arguments[0]),str(arguments[1])])
597 >            else :self.list_of_args.append([str(arguments[0])])
598 >            
599 >     #   print self.list_of_args
600  
601          return
602  
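In the no-input case the per-job arguments are plain string concatenations: the job index (prefixed with first_run when given), then pythia_seed and vtx_seed each with the job index appended. A sketch with invented values for job i = 3:

    first_run, seed, vtx_seed, i = 10, 12345, 98765, 3
    args = [str(first_run) + str(i)]             # '103' (just str(i) without first_run)
    if seed is not None:
        args.append(str(seed) + str(i))          # '123453'
        if vtx_seed is not None:
            args.append(str(vtx_seed) + str(i))  # '987653'
    # -> one self.list_of_args entry: ['103', '123453', '987653']
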
603 +
604 +    def jobSplittingForScript(self):#CarlosDaniele
605 +        """
606 +        Perform job splitting based on number of job
607 +        """
608 +        common.logger.debug(5,'Splitting per job')
609 +        common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
610 +
611 +        self.total_number_of_jobs = self.theNumberOfJobs
612 +
613 +        common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
614 +
615 +        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created')
616 +
617 +        # argument is seed number.$i
618 +        self.list_of_args = []
619 +        for i in range(self.total_number_of_jobs):
620 +            ## Since there is no input, any site is good
621 +           # self.jobDestination.append(["Any"])
622 +            self.jobDestination.append([""])
623 +            ## no random seed
624 +            self.list_of_args.append([str(i)])
625 +        return
626 +
627      def split(self, jobParams):
628  
629          common.jobDB.load()
# Line 441 | Line 635 | class Cmssw(JobType):
635              jobParams.append("")
636          
637          for job in range(njobs):
638 <            jobParams[job] = str(arglist[job])
638 >            jobParams[job] = arglist[job]
639 >            # print str(arglist[job])
640 >            # print jobParams[job]
641              common.jobDB.setArguments(job, jobParams[job])
642 +            common.logger.debug(5,"Job "+str(job)+" Destination: "+str(self.jobDestination[job]))
643 +            common.jobDB.setDestination(job, self.jobDestination[job])
644  
645          common.jobDB.save()
646          return
647      
648      def getJobTypeArguments(self, nj, sched):
649 <        return common.jobDB.arguments(nj)
649 >        result = ''
650 >        for i in common.jobDB.arguments(nj):
651 >            result=result+str(i)+" "
652 >        return result
653    
654      def numberOfJobs(self):
655          # Fabio
656          return self.total_number_of_jobs
657  
457    def checkBlackList(self, allSites):
458        if len(self.reCEBlackList)==0: return allSites
459        sites = []
460        for site in allSites:
461            common.logger.debug(10,'Site '+site)
462            good=1
463            for re in self.reCEBlackList:
464                if re.search(site):
465                    common.logger.message('CE in black list, skipping site '+site)
466                    good=0
467                pass
468            if good: sites.append(site)
469        if len(sites) == 0:
470            common.logger.debug(3,"No sites found after BlackList")
471        return sites
472
473    def checkWhiteList(self, allSites):
474
475        if len(self.reCEWhiteList)==0: return allSites
476        sites = []
477        for site in allSites:
478            good=0
479            for re in self.reCEWhiteList:
480                if re.search(site):
481                    common.logger.debug(5,'CE in white list, adding site '+site)
482                    good=1
483                if not good: continue
484                sites.append(site)
485        if len(sites) == 0:
486            common.logger.message("No sites found after WhiteList\n")
487        else:
488            common.logger.debug(5,"Selected sites via WhiteList are "+str(sites)+"\n")
489        return sites
490
658      def getTarBall(self, exe):
659          """
660          Return the TarBall with lib and exe
661          """
662          
663          # if it exist, just return it
664 <        self.tgzNameWithPath = common.work_space.shareDir()+self.tgz_name
664 >        #
665 >        # Marco. Let's start to use relative path for Boss XML files
666 >        #
667 >        self.tgzNameWithPath = common.work_space.pathForTgz()+'share/'+self.tgz_name
668          if os.path.exists(self.tgzNameWithPath):
669              return self.tgzNameWithPath
670  
# Line 508 | Line 678 | class Cmssw(JobType):
678          # First of all declare the user Scram area
679          swArea = self.scram.getSWArea_()
680          #print "swArea = ", swArea
681 <        swVersion = self.scram.getSWVersion()
682 <        #print "swVersion = ", swVersion
681 >        # swVersion = self.scram.getSWVersion()
682 >        # print "swVersion = ", swVersion
683          swReleaseTop = self.scram.getReleaseTop_()
684          #print "swReleaseTop = ", swReleaseTop
685          
# Line 517 | Line 687 | class Cmssw(JobType):
687          if swReleaseTop == '' or swArea == swReleaseTop:
688              return
689  
690 <        filesToBeTarred = []
691 <        ## First find the executable
692 <        if (self.executable != ''):
693 <            exeWithPath = self.scram.findFile_(executable)
694 < #           print exeWithPath
695 <            if ( not exeWithPath ):
696 <                raise CrabException('User executable '+executable+' not found')
697 <
698 <            ## then check if it's private or not
699 <            if exeWithPath.find(swReleaseTop) == -1:
700 <                # the exe is private, so we must ship
701 <                common.logger.debug(5,"Exe "+exeWithPath+" to be tarred")
702 <                path = swArea+'/'
703 <                exe = string.replace(exeWithPath, path,'')
704 <                filesToBeTarred.append(exe)
705 <                pass
706 <            else:
707 <                # the exe is from release, we'll find it on WN
708 <                pass
709 <
710 <        ## Now get the libraries: only those in local working area
711 <        libDir = 'lib'
712 <        lib = swArea+'/' +libDir
713 <        common.logger.debug(5,"lib "+lib+" to be tarred")
714 <        if os.path.exists(lib):
715 <            filesToBeTarred.append(libDir)
716 <
717 <        ## Now check if module dir is present
718 <        moduleDir = 'module'
719 <        if os.path.isdir(swArea+'/'+moduleDir):
720 <            filesToBeTarred.append(moduleDir)
721 <
722 <        ## Now check if the Data dir is present
723 <        dataDir = 'src/Data/'
724 <        if os.path.isdir(swArea+'/'+dataDir):
725 <            filesToBeTarred.append(dataDir)
726 <
727 <        ## Create the tar-ball
728 <        if len(filesToBeTarred)>0:
729 <            cwd = os.getcwd()
730 <            os.chdir(swArea)
731 <            tarcmd = 'tar zcvf ' + self.tgzNameWithPath + ' '
732 <            for line in filesToBeTarred:
733 <                tarcmd = tarcmd + line + ' '
734 <            cout = runCommand(tarcmd)
735 <            if not cout:
736 <                raise CrabException('Could not create tar-ball')
737 <            os.chdir(cwd)
738 <        else:
739 <            common.logger.debug(5,"No files to be to be tarred")
690 >        import tarfile
691 >        try: # create tar ball
692 >            tar = tarfile.open(self.tgzNameWithPath, "w:gz")
693 >            ## First find the executable
694 >            if (executable != ''):
695 >                exeWithPath = self.scram.findFile_(executable)
696 >                if ( not exeWithPath ):
697 >                    raise CrabException('User executable '+executable+' not found')
698 >    
699 >                ## then check if it's private or not
700 >                if exeWithPath.find(swReleaseTop) == -1:
701 >                    # the exe is private, so we must ship
702 >                    common.logger.debug(5,"Exe "+exeWithPath+" to be tarred")
703 >                    path = swArea+'/'
704 >                    # distinguish case when script is in user project area or given by full path somewhere else
705 >                    if exeWithPath.find(path) >= 0 :
706 >                        exe = string.replace(exeWithPath, path,'')
707 >                        tar.add(path+exe,os.path.basename(executable))
708 >                    else :
709 >                        tar.add(exeWithPath,os.path.basename(executable))
710 >                    pass
711 >                else:
712 >                    # the exe is from release, we'll find it on WN
713 >                    pass
714 >    
715 >            ## Now get the libraries: only those in local working area
716 >            libDir = 'lib'
717 >            lib = swArea+'/' +libDir
718 >            common.logger.debug(5,"lib "+lib+" to be tarred")
719 >            if os.path.exists(lib):
720 >                tar.add(lib,libDir)
721 >    
722 >            ## Now check if module dir is present
723 >            moduleDir = 'module'
724 >            module = swArea + '/' + moduleDir
725 >            if os.path.isdir(module):
726 >                tar.add(module,moduleDir)
727 >
728 >            ## Now check if any data dir(s) is present
729 >            swAreaLen=len(swArea)
730 >            for root, dirs, files in os.walk(swArea):
731 >                if "data" in dirs:
732 >                    common.logger.debug(5,"data "+root+"/data"+" to be tarred")
733 >                    tar.add(root+"/data",root[swAreaLen:]+"/data")
734 >
735 >            ## Add ProdAgent dir to tar
736 >            paDir = 'ProdAgentApi'
737 >            pa = os.environ['CRABDIR'] + '/' + 'ProdAgentApi'
738 >            if os.path.isdir(pa):
739 >                tar.add(pa,paDir)
740 >        
741 >            common.logger.debug(5,"Files added to "+self.tgzNameWithPath+" : "+str(tar.getnames()))
742 >            tar.close()
743 >        except :
744 >            raise CrabException('Could not create tar-ball')
745 >
746 >        ## check for tarball size
747 >        tarballinfo = os.stat(self.tgzNameWithPath)
748 >        if ( tarballinfo.st_size > self.MaxTarBallSize*1024*1024 ) :
749 >            raise CrabException('Input sandbox size of ' + str(float(tarballinfo.st_size)/1024.0/1024.0) + ' MB is larger than the allowed ' + str(self.MaxTarBallSize) + ' MB input sandbox limit and not supported by the used GRID submission system. Please make sure that no unnecessary files are in all data directories in your local CMSSW project area as they are automatically packed into the input sandbox.')
750 >
751 >        ## create tar-ball with ML stuff
752 >        self.MLtgzfile =  common.work_space.pathForTgz()+'share/MLfiles.tgz'
753 >        try:
754 >            tar = tarfile.open(self.MLtgzfile, "w:gz")
755 >            path=os.environ['CRABDIR'] + '/python/'
756 >            for file in ['report.py', 'DashboardAPI.py', 'Logger.py', 'ProcInfo.py', 'apmon.py', 'parseCrabFjr.py']:
757 >                tar.add(path+file,file)
758 >            common.logger.debug(5,"Files added to "+self.MLtgzfile+" : "+str(tar.getnames()))
759 >            tar.close()
760 >        except :
761 >            raise CrabException('Could not create ML files tar-ball')
762          
763          return
764          
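A minimal sketch of the sandbox-size guard applied above, assuming the limit comes from EDG.maxtarballsize in MB (default 100.0, as set in the constructor):

    import os

    def check_sandbox_size(tgz_path, max_mb=100.0):
        # Mirrors the os.stat() check on the created tarball above.
        size_mb = os.stat(tgz_path).st_size / 1024.0 / 1024.0
        if size_mb > max_mb:
            raise RuntimeError('Input sandbox of %.1f MB exceeds the %.1f MB limit'
                               % (size_mb, max_mb))
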
# Line 583 | Line 775 | class Cmssw(JobType):
775          txt += 'if [ $middleware == LCG ]; then \n'
776          txt += self.wsSetupCMSLCGEnvironment_()
777          txt += 'elif [ $middleware == OSG ]; then\n'
778 <        txt += '    time=`date -u +"%s"`\n'
779 <        txt += '    WORKING_DIR=$OSG_WN_TMP/cms_$time\n'
588 <        txt += '    echo "Creating working directory: $WORKING_DIR"\n'
589 <        txt += '    /bin/mkdir -p $WORKING_DIR\n'
778 >        txt += '    WORKING_DIR=`/bin/mktemp  -d $OSG_WN_TMP/cms_XXXXXXXXXXXX`\n'
779 >        txt += '    echo "Created working directory: $WORKING_DIR"\n'
780          txt += '    if [ ! -d $WORKING_DIR ] ;then\n'
781          txt += '        echo "SET_CMS_ENV 10016 ==> OSG $WORKING_DIR could not be created on WN `hostname`"\n'
782          txt += '        echo "JOB_EXIT_STATUS = 10016"\n'
# Line 594 | Line 784 | class Cmssw(JobType):
784          txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
785          txt += '        rm -f $RUNTIME_AREA/$repo \n'
786          txt += '        echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
597        txt += '        echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
787          txt += '        echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
788          txt += '        exit 1\n'
789          txt += '    fi\n'
# Line 617 | Line 806 | class Cmssw(JobType):
806          txt += '   dumpStatus $RUNTIME_AREA/$repo\n'
807          txt += '   rm -f $RUNTIME_AREA/$repo \n'
808          txt += '   echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
620        txt += '   echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
809          txt += '   echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
810          ## OLI_Daniele
811          txt += '    if [ $middleware == OSG ]; then \n'
# Line 631 | Line 819 | class Cmssw(JobType):
819          txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
820          txt += '            rm -f $RUNTIME_AREA/$repo \n'
821          txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
634        txt += '            echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
822          txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
823          txt += '        fi\n'
824          txt += '    fi \n'
825          txt += '   exit 1 \n'
826          txt += 'fi \n'
827          txt += 'echo "CMSSW_VERSION =  '+self.version+'"\n'
828 +        txt += 'export SCRAM_ARCH='+self.executable_arch+'\n'
829          txt += 'cd '+self.version+'\n'
830          ### needed grep for bug in scramv1 ###
831 +        txt += scram+' runtime -sh\n'
832          txt += 'eval `'+scram+' runtime -sh | grep -v SCRAMRT_LSB_JOBNAME`\n'
833 +        txt += 'echo $PATH\n'
834  
835          # Handle the arguments:
836          txt += "\n"
837          txt += "## number of arguments (first argument always jobnumber)\n"
838          txt += "\n"
839 <        txt += "narg=$#\n"
840 <        txt += "if [ $narg -lt 2 ]\n"
839 > #        txt += "narg=$#\n"
840 >        txt += "if [ $nargs -lt 2 ]\n"
841          txt += "then\n"
842 <        txt += "    echo 'SET_EXE_ENV 1 ==> ERROR Too few arguments' +$narg+ \n"
842 >        txt += "    echo 'SET_EXE_ENV 1 ==> ERROR Too few arguments' +$nargs+ \n"
843          txt += '    echo "JOB_EXIT_STATUS = 50113"\n'
844          txt += '    echo "JobExitCode=50113" | tee -a $RUNTIME_AREA/$repo\n'
845          txt += '    dumpStatus $RUNTIME_AREA/$repo\n'
846          txt += '    rm -f $RUNTIME_AREA/$repo \n'
847          txt += '    echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
658        txt += '    echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
848          txt += '    echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
849          ## OLI_Daniele
850          txt += '    if [ $middleware == OSG ]; then \n'
# Line 669 | Line 858 | class Cmssw(JobType):
858          txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
859          txt += '            rm -f $RUNTIME_AREA/$repo \n'
860          txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
672        txt += '            echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
861          txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
862          txt += '        fi\n'
863          txt += '    fi \n'
# Line 679 | Line 867 | class Cmssw(JobType):
867  
868          # Prepare job-specific part
869          job = common.job_list[nj]
870 <        pset = os.path.basename(job.configFilename())
871 <        txt += '\n'
872 <        if (self.datasetPath): # standard job
873 <            txt += 'InputFiles=$2\n'
874 <            txt += 'echo "Inputfiles:<$InputFiles>"\n'
875 <            txt += 'sed "s#{\'INPUT\'}#$InputFiles#" $RUNTIME_AREA/'+pset+' > pset.cfg\n'
876 <        else:  # pythia like job
877 <            txt += 'Seed=$2\n'
878 <            txt += 'echo "Seed: <$Seed>"\n'
879 <            txt += 'sed "s#INPUT#$Seed#" $RUNTIME_AREA/'+pset+' > pset.cfg\n'
870 >        if self.pset != None: #CarlosDaniele
871 >            pset = os.path.basename(job.configFilename())
872 >            txt += '\n'
873 >            if (self.datasetPath): # standard job
874 >                #txt += 'InputFiles=$2\n'
875 >                txt += 'InputFiles=${args[1]}\n'
876 >                txt += 'MaxEvents=${args[2]}\n'
877 >                txt += 'SkipEvents=${args[3]}\n'
878 >                txt += 'echo "Inputfiles:<$InputFiles>"\n'
879 >                txt += 'sed "s#{\'INPUT\'}#$InputFiles#" $RUNTIME_AREA/'+pset+' > pset_tmp_1.cfg\n'
880 >                txt += 'echo "MaxEvents:<$MaxEvents>"\n'
881 >                txt += 'sed "s#INPUTMAXEVENTS#$MaxEvents#" pset_tmp_1.cfg > pset_tmp_2.cfg\n'
882 >                txt += 'echo "SkipEvents:<$SkipEvents>"\n'
883 >                txt += 'sed "s#INPUTSKIPEVENTS#$SkipEvents#" pset_tmp_2.cfg > pset.cfg\n'
884 >            else:  # pythia like job
885 >                if (self.sourceSeed):
886 >                    txt += 'FirstRun=${args[1]}\n'
887 >                    txt += 'echo "FirstRun: <$FirstRun>"\n'
888 >                    txt += 'sed "s#\<INPUTFIRSTRUN\>#$FirstRun#" $RUNTIME_AREA/'+pset+' > tmp_1.cfg\n'
889 >                else:
890 >                    txt += '# Copy untouched pset\n'
891 >                    txt += 'cp $RUNTIME_AREA/'+pset+' tmp_1.cfg\n'
892 >                if (self.sourceSeed):
893 > #                    txt += 'Seed=$2\n'
894 >                    txt += 'Seed=${args[2]}\n'
895 >                    txt += 'echo "Seed: <$Seed>"\n'
896 >                    txt += 'sed "s#\<INPUT\>#$Seed#" tmp_1.cfg > tmp_2.cfg\n'
897 >                    if (self.sourceSeedVtx):
898 > #                        txt += 'VtxSeed=$3\n'
899 >                        txt += 'VtxSeed=${args[3]}\n'
900 >                        txt += 'echo "VtxSeed: <$VtxSeed>"\n'
901 >                        txt += 'sed "s#INPUTVTX#$VtxSeed#" tmp_2.cfg > pset.cfg\n'
902 >                    else:
903 >                        txt += 'mv tmp_2.cfg pset.cfg\n'
904 >                else:
905 >                    txt += 'mv tmp_1.cfg pset.cfg\n'
906 >                   # txt += '# Copy untouched pset\n'
907 >                   # txt += 'cp $RUNTIME_AREA/'+pset+' pset.cfg\n'
908 >
909  
910          if len(self.additional_inbox_files) > 0:
911              for file in self.additional_inbox_files:
912 <                txt += 'if [ -e $RUNTIME_AREA/'+file+' ] ; then\n'
913 <                txt += '   cp $RUNTIME_AREA/'+file+' .\n'
914 <                txt += '   chmod +x '+file+'\n'
912 >                relFile = file.split("/")[-1]
913 >                txt += 'if [ -e $RUNTIME_AREA/'+relFile+' ] ; then\n'
914 >                txt += '   cp $RUNTIME_AREA/'+relFile+' .\n'
915 >                txt += '   chmod +x '+relFile+'\n'
916                  txt += 'fi\n'
917              pass
918  
919 <        txt += 'echo "### END JOB SETUP ENVIRONMENT ###"\n\n'
920 <
921 <        txt += '\n'
922 <        txt += 'echo "***** cat pset.cfg *********"\n'
923 <        txt += 'cat pset.cfg\n'
924 <        txt += 'echo "****** end pset.cfg ********"\n'
925 <        txt += '\n'
926 <        # txt += 'echo "***** cat pset1.cfg *********"\n'
927 <        # txt += 'cat pset1.cfg\n'
928 <        # txt += 'echo "****** end pset1.cfg ********"\n'
919 >        if self.pset != None: #CarlosDaniele
920 >            txt += 'echo "### END JOB SETUP ENVIRONMENT ###"\n\n'
921 >        
922 >            txt += '\n'
923 >            txt += 'echo "***** cat pset.cfg *********"\n'
924 >            txt += 'cat pset.cfg\n'
925 >            txt += 'echo "****** end pset.cfg ********"\n'
926 >            txt += '\n'
927 >            # txt += 'echo "***** cat pset1.cfg *********"\n'
928 >            # txt += 'cat pset1.cfg\n'
929 >            # txt += 'echo "****** end pset1.cfg ********"\n'
930          return txt
931  
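
In the new revision the wrapper reads its per-job parameters from the ${args[...]} array (input files, maximum events, events to skip, or generator seeds) and builds pset.cfg by sed-substituting the placeholders {'INPUT'}, INPUTMAXEVENTS, INPUTSKIPEVENTS, INPUTFIRSTRUN, INPUT and INPUTVTX into the user's parameter set. Below is a minimal Python sketch of the same substitution, handy for checking a template offline; the function name and the template text are invented for this illustration and are not part of cms_cmssw.py.

    # Hypothetical helper: does in Python what the generated wrapper
    # does with sed on the worker node (dataset case only).
    def fill_pset(template_text, input_files, max_events, skip_events):
        out = template_text.replace("{'INPUT'}", input_files)        # file list
        out = out.replace("INPUTMAXEVENTS", str(max_events))         # events per job
        out = out.replace("INPUTSKIPEVENTS", str(skip_events))       # offset in the block
        return out

    # Example with an invented three-line template:
    template = ("untracked vstring fileNames = { {'INPUT'} }\n"
                "untracked int32 maxEvents   = INPUTMAXEVENTS\n"
                "untracked uint32 skipEvents = INPUTSKIPEVENTS\n")
    print fill_pset(template, '"file:a.root","file:b.root"', 100, 0)
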
932 <    def wsBuildExe(self, nj):
932 >    def wsBuildExe(self, nj=0):
933          """
934          Put in the script the commands to build an executable
935          or a library.
# Line 737 | Line 956 | class Cmssw(JobType):
956              txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
957              txt += '           rm -f $RUNTIME_AREA/$repo \n'
958              txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
740            txt += '           echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
959              txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
960              txt += '       fi\n'
961              txt += '   fi \n'
# Line 746 | Line 964 | class Cmssw(JobType):
964              txt += 'else \n'
965              txt += '   echo "Successful untar" \n'
966              txt += 'fi \n'
967 +            txt += '\n'
968 +            txt += 'echo "Include ProdAgentApi in PYTHONPATH"\n'
969 +            txt += 'if [ -z "$PYTHONPATH" ]; then\n'
970 +            txt += '   export PYTHONPATH=ProdAgentApi\n'
971 +            txt += 'else\n'
972 +            txt += '   export PYTHONPATH=ProdAgentApi:${PYTHONPATH}\n'
973 +            txt += 'fi\n'
974 +            txt += '\n'
975 +
976              pass
977          
978          return txt
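
The lines added to wsBuildExe() extend PYTHONPATH in the generated script so that ProdAgentApi (shipped with the job) is importable, taking care not to produce a stray leading ':' when the variable is unset. A small Python equivalent of that guard, for illustration only; the directory name comes from the diff, the helper function is invented.

    import os

    # Hypothetical sketch: prepend a directory to PYTHONPATH, coping with
    # an unset or empty variable, as the generated shell snippet does.
    def prepend_to_pythonpath(directory):
        current = os.environ.get('PYTHONPATH', '')
        if current:
            os.environ['PYTHONPATH'] = directory + ':' + current
        else:
            os.environ['PYTHONPATH'] = directory

    prepend_to_pythonpath('ProdAgentApi')
    print os.environ['PYTHONPATH']
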
# Line 757 | Line 984 | class Cmssw(JobType):
984          """
985          
986      def executableName(self):
987 <        return self.executable
987 >        if self.scriptExe: #CarlosDaniele
988 >            return "sh "
989 >        else:
990 >            return self.executable
991  
992      def executableArgs(self):
993 <        return " -p pset.cfg"
993 >        if self.scriptExe:#CarlosDaniele
994 >            return   self.scriptExe + " $NJob"
995 >        else:
996 >            return " -p pset.cfg"
997  
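
With the script use case, executableName() and executableArgs() now return "sh " plus "<scriptExe> $NJob", while the default remains the CMSSW executable with "-p pset.cfg". A hypothetical sketch of how the two pieces combine into a command line; here the job number is passed explicitly instead of being expanded from $NJob by the shell, and the function name is invented.

    # Invented helper mirroring executableName() + executableArgs().
    def command_line(executable, script_exe, njob):
        if script_exe:                        # user-supplied script case
            return "sh " + script_exe + " " + str(njob)
        else:                                 # standard cmsRun case
            return executable + " -p pset.cfg"

    print command_line("cmsRun", "", 1)              # cmsRun -p pset.cfg
    print command_line("cmsRun", "myScript.sh", 1)   # sh myScript.sh 1
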
998      def inputSandbox(self, nj):
999          """
1000          Returns a list of filenames to be put in JDL input sandbox.
1001          """
1002          inp_box = []
1003 <        # dict added to delete duplicate from input sandbox file list
1004 <        seen = {}
1003 >        # # dict added to delete duplicate from input sandbox file list
1004 >        # seen = {}
1005          ## code
1006          if os.path.isfile(self.tgzNameWithPath):
1007              inp_box.append(self.tgzNameWithPath)
1008 +        if os.path.isfile(self.MLtgzfile):
1009 +            inp_box.append(self.MLtgzfile)
1010          ## config
1011 <        inp_box.append(common.job_list[nj].configFilename())
1011 >        if not self.pset is None:
1012 >            inp_box.append(common.work_space.pathForTgz() + 'job/' + self.configFilename())
1013          ## additional input files
1014 <        #for file in self.additional_inbox_files:
1015 <        #    inp_box.append(common.work_space.cwdDir()+file)
1014 >        for file in self.additional_inbox_files:
1015 >            inp_box.append(file)
1016          return inp_box
1017  
1018      def outputSandbox(self, nj):
# Line 785 | Line 1021 | class Cmssw(JobType):
1021          """
1022          out_box = []
1023  
788        stdout=common.job_list[nj].stdout()
789        stderr=common.job_list[nj].stderr()
790
1024          ## User Declared output files
1025 <        for out in self.output_file:
1025 >        for out in (self.output_file+self.output_file_sandbox):
1026              n_out = nj + 1
1027              out_box.append(self.numberFile_(out,str(n_out)))
1028          return out_box
796        return []
1029  
1030      def prepareSteeringCards(self):
1031          """
# Line 809 | Line 1041 | class Cmssw(JobType):
1041          txt = '\n'
1042          txt += '# directory content\n'
1043          txt += 'ls \n'
1044 <        file_list = ''
1045 <        for fileWithSuffix in self.output_file:
1044 >
1045 >        for fileWithSuffix in (self.output_file+self.output_file_sandbox):
1046              output_file_num = self.numberFile_(fileWithSuffix, '$NJob')
815            file_list=file_list+output_file_num+' '
1047              txt += '\n'
1048              txt += '# check output file\n'
1049              txt += 'ls '+fileWithSuffix+'\n'
1050 <            txt += 'exe_result=$?\n'
1051 <            txt += 'if [ $exe_result -ne 0 ] ; then\n'
1052 <            txt += '   echo "ERROR: No output file to manage"\n'
822 <            txt += '   echo "JOB_EXIT_STATUS = $exe_result"\n'
823 <            txt += '   echo "JobExitCode=60302" | tee -a $RUNTIME_AREA/$repo\n'
824 <            txt += '   dumpStatus $RUNTIME_AREA/$repo\n'
825 <            txt += '   rm -f $RUNTIME_AREA/$repo \n'
826 <            txt += '   echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
827 <            txt += '   echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
828 <            txt += '   echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
829 <            ### OLI_DANIELE
1050 >            txt += 'ls_result=$?\n'
1051 >            txt += 'if [ $ls_result -ne 0 ] ; then\n'
1052 >            txt += '   echo "ERROR: Problem with output file"\n'
1053              if common.scheduler.boss_scheduler_name == 'condor_g':
1054                  txt += '    if [ $middleware == OSG ]; then \n'
1055                  txt += '        echo "prepare dummy output file"\n'
# Line 837 | Line 1060 | class Cmssw(JobType):
1060              txt += 'fi\n'
1061        
1062          txt += 'cd $RUNTIME_AREA\n'
1063 <        file_list=file_list[:-1]
841 <        txt += 'file_list="'+file_list+'"\n'
1063 >        txt += 'cd $RUNTIME_AREA\n'
1064          ### OLI_DANIELE
1065          txt += 'if [ $middleware == OSG ]; then\n'  
1066          txt += '    cd $RUNTIME_AREA\n'
# Line 851 | Line 1073 | class Cmssw(JobType):
1073          txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
1074          txt += '        rm -f $RUNTIME_AREA/$repo \n'
1075          txt += '        echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
854        txt += '        echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1076          txt += '        echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1077          txt += '    fi\n'
1078          txt += 'fi\n'
1079          txt += '\n'
1080 +
1081 +        file_list = ''
1082 +        ## Add to filelist only files to be possibly copied to SE
1083 +        for fileWithSuffix in self.output_file:
1084 +            output_file_num = self.numberFile_(fileWithSuffix, '$NJob')
1085 +            file_list=file_list+output_file_num+' '
1086 +        file_list=file_list[:-1]
1087 +        txt += 'file_list="'+file_list+'"\n'
1088 +
1089          return txt
1090  
1091      def numberFile_(self, file, txt):
# Line 870 | Line 1100 | class Cmssw(JobType):
1100          # add "_txt"
1101          if len(p)>1:
1102            ext = p[len(p)-1]
873          #result = name + '_' + str(txt) + "." + ext
1103            result = name + '_' + txt + "." + ext
1104          else:
876          #result = name + '_' + str(txt)
1105            result = name + '_' + txt
1106          
1107          return result
1108  
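
numberFile_() inserts a per-job tag in front of the last extension so that output files from different jobs do not collide. The standalone sketch below follows the same rule; the handling of the name part is a plausible reconstruction, since the top of the method falls outside this hunk, and the function and file names are invented.

    # Illustrative reimplementation of the renaming rule used by numberFile_().
    def number_file(filename, tag):
        parts = filename.split(".")
        if len(parts) > 1:
            name = ".".join(parts[:-1])       # everything before the last extension
            return name + "_" + tag + "." + parts[-1]
        return filename + "_" + tag           # no extension: just append the tag

    print number_file("histo.root", "3")      # histo_3.root
    print number_file("outputlog", "3")       # outputlog_3
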
1109 <    def getRequirements(self):
1109 >    def getRequirements(self, nj=[]):
1110          """
1111          return job requirements to add to jdl files
1112          """
1113          req = ''
1114 <        if common.analisys_common_info['sw_version']:
1114 >        if self.version:
1115              req='Member("VO-cms-' + \
1116 <                 common.analisys_common_info['sw_version'] + \
1116 >                 self.version + \
1117                   '", other.GlueHostApplicationSoftwareRunTimeEnvironment)'
1118 <        if common.analisys_common_info['sites']:
1119 <            if len(common.analisys_common_info['sites'])>0:
1120 <                req = req + ' && ('
893 <                for i in range(len(common.analisys_common_info['sites'])):
894 <                    req = req + 'other.GlueCEInfoHostName == "' \
895 <                         + common.analisys_common_info['sites'][i] + '"'
896 <                    if ( i < (int(len(common.analisys_common_info['sites']) - 1)) ):
897 <                        req = req + ' || '
898 <            req = req + ')'
899 <        #print "req = ", req
1118 >
1119 >        req = req + ' && (other.GlueHostNetworkAdapterOutboundIP)'
1120 >
1121          return req
1122  
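
In the new revision the JDL Requirements no longer enumerate GlueCEInfoHostName entries per site; they only demand the CMSSW version tag and outbound IP connectivity. For a hypothetical CMSSW_1_2_0 installation the string returned by getRequirements() would be built like this:

    # Illustration only: reproduce the concatenation for a made-up version.
    version = "CMSSW_1_2_0"
    req = ('Member("VO-cms-' + version +
           '", other.GlueHostApplicationSoftwareRunTimeEnvironment)')
    req = req + ' && (other.GlueHostNetworkAdapterOutboundIP)'
    print req
    # Member("VO-cms-CMSSW_1_2_0", other.GlueHostApplicationSoftwareRunTimeEnvironment) && (other.GlueHostNetworkAdapterOutboundIP)
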
1123      def configFilename(self):
# Line 914 | Line 1135 | class Cmssw(JobType):
1135          txt += '   if [ -f $GRID3_APP_DIR/cmssoft/cmsset_default.sh ] ;then\n'
1136          txt += '      # Use $GRID3_APP_DIR/cmssoft/cmsset_default.sh to setup cms software\n'
1137          txt += '       source $GRID3_APP_DIR/cmssoft/cmsset_default.sh '+self.version+'\n'
1138 <        txt += '   elif [ -f $OSG_APP/cmssoft/cmsset_default.sh ] ;then\n'
1139 <        txt += '      # Use $OSG_APP/cmssoft/cmsset_default.sh to setup cms software\n'
1140 <        txt += '       source $OSG_APP/cmssoft/cmsset_default.sh '+self.version+'\n'
1138 >        txt += '   elif [ -f $OSG_APP/cmssoft/cms/cmsset_default.sh ] ;then\n'
1139 >        txt += '      # Use $OSG_APP/cmssoft/cms/cmsset_default.sh to setup cms software\n'
1140 >        txt += '       source $OSG_APP/cmssoft/cms/cmsset_default.sh '+self.version+'\n'
1141          txt += '   else\n'
1142 <        txt += '       echo "SET_CMS_ENV 10020 ==> ERROR $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cmsset_default.sh file not found"\n'
1142 >        txt += '       echo "SET_CMS_ENV 10020 ==> ERROR $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cms/cmsset_default.sh file not found"\n'
1143          txt += '       echo "JOB_EXIT_STATUS = 10020"\n'
1144          txt += '       echo "JobExitCode=10020" | tee -a $RUNTIME_AREA/$repo\n'
1145          txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
1146          txt += '       rm -f $RUNTIME_AREA/$repo \n'
1147          txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
927        txt += '       echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1148          txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1149          txt += '       exit 1\n'
1150          txt += '\n'
# Line 932 | Line 1152 | class Cmssw(JobType):
1152          txt += '       cd $RUNTIME_AREA\n'
1153          txt += '       /bin/rm -rf $WORKING_DIR\n'
1154          txt += '       if [ -d $WORKING_DIR ] ;then\n'
1155 <        txt += '            echo "SET_CMS_ENV 10017 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cmsset_default.sh file not found"\n'
1155 >        txt += '            echo "SET_CMS_ENV 10017 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cms/cmsset_default.sh file not found"\n'
1156          txt += '            echo "JOB_EXIT_STATUS = 10017"\n'
1157          txt += '            echo "JobExitCode=10017" | tee -a $RUNTIME_AREA/$repo\n'
1158          txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
1159          txt += '            rm -f $RUNTIME_AREA/$repo \n'
1160          txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
941        txt += '            echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1161          txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1162          txt += '       fi\n'
1163          txt += '\n'
# Line 965 | Line 1184 | class Cmssw(JobType):
1184          txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
1185          txt += '       rm -f $RUNTIME_AREA/$repo \n'
1186          txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
968        txt += '       echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1187          txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1188          txt += '       exit 1\n'
1189          txt += '   else\n'
# Line 977 | Line 1195 | class Cmssw(JobType):
1195          txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
1196          txt += '           rm -f $RUNTIME_AREA/$repo \n'
1197          txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
980        txt += '           echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1198          txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1199          txt += '           exit 1\n'
1200          txt += '       fi\n'
# Line 991 | Line 1208 | class Cmssw(JobType):
1208          txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
1209          txt += '           rm -f $RUNTIME_AREA/$repo \n'
1210          txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
994        txt += '           echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1211          txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1212          txt += '           exit 1\n'
1213          txt += '       fi\n'
1214          txt += '   fi\n'
1215          txt += '   \n'
1000        txt += '   string=`cat /etc/redhat-release`\n'
1001        txt += '   echo $string\n'
1002        txt += '   if [[ $string = *alhalla* ]]; then\n'
1003        txt += '       echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
1004        txt += '   elif [[ $string = *Enterprise* ]] || [[ $string = *cientific* ]]; then\n'
1005        txt += '       export SCRAM_ARCH=slc3_ia32_gcc323\n'
1006        txt += '       echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
1007        txt += '   else\n'
1008        txt += '       echo "SET_CMS_ENV 10033 ==> ERROR OS unknown, LCG environment not initialized"\n'
1009        txt += '       echo "JOB_EXIT_STATUS = 10033"\n'
1010        txt += '       echo "JobExitCode=10033" | tee -a $RUNTIME_AREA/$repo\n'
1011        txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
1012        txt += '       rm -f $RUNTIME_AREA/$repo \n'
1013        txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1014        txt += '       echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1015        txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1016        txt += '       exit 1\n'
1017        txt += '   fi\n'
1216          txt += '   echo "SET_CMS_ENV 0 ==> setup cms environment ok"\n'
1217          txt += '   echo "### END SETUP CMS LCG ENVIRONMENT ###"\n'
1218          return txt
# Line 1030 | Line 1228 | class Cmssw(JobType):
1228          
1229      def getTaskid(self):
1230          return self._taskId
1231 +
1232 + #######################################################################
1233 +    def uniquelist(self, old):
1234 +        """
1235 +        remove duplicates from a list
1236 +        """
1237 +        nd={}
1238 +        for e in old:
1239 +            nd[e]=0
1240 +        return nd.keys()
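
uniquelist() relies on dictionary keys being unique: loading the list into a dict and returning keys() drops duplicates, at the cost of losing the original order. A short usage sketch with made-up values:

    # Usage example for the helper above; the input values are invented.
    def uniquelist(old):
        nd = {}
        for e in old:
            nd[e] = 0
        return nd.keys()

    print uniquelist(["siteA", "siteB", "siteA", "siteC"])
    # e.g. ['siteA', 'siteC', 'siteB'] -- duplicates removed, order not guaranteed
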

Diff Legend

(no marker) Removed lines -- shown with the old line number only
+           Added lines
<           Changed lines, old revision
>           Changed lines, new revision