
Comparing COMP/CRAB/python/cms_cmssw.py (file contents):
Revision 1.41 by slacapra, Wed Sep 20 17:29:52 2006 UTC vs.
Revision 1.73 by gutsche, Sun Apr 8 18:39:51 2007 UTC

# Line 2 | Line 2 | from JobType import JobType
2   from crab_logger import Logger
3   from crab_exceptions import *
4   from crab_util import *
5 import math
5   import common
6   import PsetManipulator  
8
9 import DBSInfo
7   import DataDiscovery
8 + import DataDiscovery_DBS2
9   import DataLocation
10   import Scram
11  
12 < import os, string, re
12 > import os, string, re, shutil, glob
13  
14   class Cmssw(JobType):
15      def __init__(self, cfg_params, ncjobs):
16          JobType.__init__(self, 'CMSSW')
17          common.logger.debug(3,'CMSSW::__init__')
18  
21        self.analisys_common_info = {}
19          # Marco.
20          self._params = {}
21          self.cfg_params = cfg_params
22  
23 <        # number of jobs requested to be created, limit ojb splitting
23 >        try:
24 >            self.MaxTarBallSize = float(self.cfg_params['EDG.maxtarballsize'])
25 >        except KeyError:
26 >            self.MaxTarBallSize = 100.0
27 >
28 >        # number of jobs requested to be created, limit job splitting
29          self.ncjobs = ncjobs
30  
31          log = common.logger
32          
33          self.scram = Scram.Scram(cfg_params)
32        scramArea = ''
34          self.additional_inbox_files = []
35          self.scriptExe = ''
36          self.executable = ''
37 +        self.executable_arch = self.scram.getArch()
38          self.tgz_name = 'default.tgz'
39 +        self.scriptName = 'CMSSW.sh'
40 +        self.pset = ''      #script use case Da  
41 +        self.datasetPath = '' #script use case Da
42  
43 +        # set FJR file name
44 +        self.fjrFileName = 'crab_fjr.xml'
45  
46          self.version = self.scram.getSWVersion()
47 +        common.taskDB.setDict('codeVersion',self.version)
48          self.setParam_('application', self.version)
41        common.analisys_common_info['sw_version'] = self.version
42        ### FEDE
43        common.analisys_common_info['copy_input_data'] = 0
44        common.analisys_common_info['events_management'] = 1
49  
50          ### collect Data cards
51 +
52 +        ## get DBS mode
53 +        try:
54 +            self.use_dbs_2 = int(self.cfg_params['CMSSW.use_dbs_2'])
55 +        except KeyError:
56 +            self.use_dbs_2 = 0
57 +            
58          try:
59              tmp =  cfg_params['CMSSW.datasetpath']
60              log.debug(6, "CMSSW::CMSSW(): datasetPath = "+tmp)
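
The new configuration keys in this hunk (EDG.maxtarballsize, CMSSW.use_dbs_2) all follow the same try/except KeyError pattern for optional parameters with defaults. A minimal standalone sketch of that pattern, for reference only; the helper name get_param and the sample dictionary are illustrative, not part of the CRAB source:

    def get_param(cfg_params, key, default, cast=str):
        """Return cfg_params[key] converted with cast(), or default if the key is absent."""
        try:
            return cast(cfg_params[key])
        except KeyError:
            return default

    cfg = {'EDG.maxtarballsize': '50'}
    max_tarball_mb = get_param(cfg, 'EDG.maxtarballsize', 100.0, float)  # -> 50.0
    use_dbs_2 = get_param(cfg, 'CMSSW.use_dbs_2', 0, int)                # -> 0 (key missing)
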
# Line 89 | Line 100 | class Cmssw(JobType):
100          try:
101              self.pset = cfg_params['CMSSW.pset']
102              log.debug(6, "Cmssw::Cmssw(): PSet file = "+self.pset)
103 <            if (not os.path.exists(self.pset)):
104 <                raise CrabException("User defined PSet file "+self.pset+" does not exist")
103 >            if self.pset.lower() != 'none' :
104 >                if (not os.path.exists(self.pset)):
105 >                    raise CrabException("User defined PSet file "+self.pset+" does not exist")
106 >            else:
107 >                self.pset = None
108          except KeyError:
109              raise CrabException("PSet file missing. Cannot run cmsRun ")
110  
111          # output files
112 +        ## stuff which must be returned always via sandbox
113 +        self.output_file_sandbox = []
114 +
115 +        # add fjr report by default via sandbox
116 +        self.output_file_sandbox.append(self.fjrFileName)
117 +
118 +        # other output files to be returned via sandbox or copied to SE
119          try:
120              self.output_file = []
100
121              tmp = cfg_params['CMSSW.output_file']
122              if tmp != '':
123                  tmpOutFiles = string.split(cfg_params['CMSSW.output_file'],',')
# Line 107 | Line 127 | class Cmssw(JobType):
127                      self.output_file.append(tmp)
128                      pass
129              else:
130 <                log.message("No output file defined: only stdout/err will be available")
130 >                log.message("No output file defined: only stdout/err and the CRAB Framework Job Report will be available")
131                  pass
132              pass
133          except KeyError:
134 <            log.message("No output file defined: only stdout/err will be available")
134 >            log.message("No output file defined: only stdout/err and the CRAB Framework Job Report will be available")
135              pass
136  
137          # script_exe file as additional file in inputSandbox
138          try:
139              self.scriptExe = cfg_params['USER.script_exe']
120            self.additional_inbox_files.append(self.scriptExe)
140              if self.scriptExe != '':
141                 if not os.path.isfile(self.scriptExe):
142 <                  msg ="WARNING. file "+self.scriptExe+" not found"
142 >                  msg ="ERROR. file "+self.scriptExe+" not found"
143                    raise CrabException(msg)
144 +               self.additional_inbox_files.append(string.strip(self.scriptExe))
145          except KeyError:
146 <           pass
147 <                  
146 >            self.scriptExe = ''
147 >
148 >        #CarlosDaniele
149 >        if self.datasetPath == None and self.pset == None and self.scriptExe == '' :
150 >           msg ="Error. script_exe  not defined"
151 >           raise CrabException(msg)
152 >
153          ## additional input files
154          try:
155              tmpAddFiles = string.split(cfg_params['USER.additional_input_files'],',')
156              for tmp in tmpAddFiles:
157 <                if not os.path.exists(tmp):
158 <                    raise CrabException("Additional input file not found: "+tmp)
159 <                self.additional_inbox_files.append(string.strip(tmp))
157 >                tmp = string.strip(tmp)
158 >                dirname = ''
159 >                if not tmp[0]=="/": dirname = "."
160 >                files = glob.glob(os.path.join(dirname, tmp))
161 >                for file in files:
162 >                    if not os.path.exists(file):
163 >                        raise CrabException("Additional input file not found: "+file)
164 >                    pass
165 >                    storedFile = common.work_space.shareDir()+file
166 >                    shutil.copyfile(file, storedFile)
167 >                    self.additional_inbox_files.append(string.strip(storedFile))
168                  pass
169              pass
170 +            common.logger.debug(5,"Additional input files: "+str(self.additional_inbox_files))
171          except KeyError:
172              pass
173  
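
The reworked USER.additional_input_files handling above glob-expands each comma-separated entry and copies the matches into the task's share directory before they are added to the input sandbox. A simplified standalone sketch of that flow; stage_input_files, its use of basename() for the destination, and the treatment of a pattern with no matches as an error are illustrative assumptions, not the CRAB interface:

    import glob, os, shutil

    def stage_input_files(patterns, share_dir):
        """Expand comma-separated glob patterns and copy every match into share_dir."""
        staged = []
        for pattern in patterns.split(','):
            pattern = pattern.strip()
            if not pattern:
                continue
            if not os.path.isabs(pattern):
                # relative patterns are resolved against the current directory
                pattern = os.path.join('.', pattern)
            matches = glob.glob(pattern)
            if not matches:
                raise IOError("Additional input file not found: " + pattern)
            for path in matches:
                dest = os.path.join(share_dir, os.path.basename(path))
                shutil.copyfile(path, dest)
                staged.append(dest)
        return staged
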
# Line 167 | Line 201 | class Cmssw(JobType):
201              self.total_number_of_events = 0
202              self.selectTotalNumberEvents = 0
203  
204 <        if ( (self.selectTotalNumberEvents + self.selectEventsPerJob + self.selectNumberOfJobs) != 2 ):
205 <            msg = 'Must define exactly two of total_number_of_events, events_per_job, or number_of_jobs.'
206 <            raise CrabException(msg)
204 >        if self.pset != None: #CarlosDaniele
205 >             if ( (self.selectTotalNumberEvents + self.selectEventsPerJob + self.selectNumberOfJobs) != 2 ):
206 >                 msg = 'Must define exactly two of total_number_of_events, events_per_job, or number_of_jobs.'
207 >                 raise CrabException(msg)
208 >        else:
209 >             if (self.selectNumberOfJobs == 0):
210 >                 msg = 'Must specify  number_of_jobs.'
211 >                 raise CrabException(msg)
212  
213          ## source seed for pythia
214          try:
# Line 183 | Line 222 | class Cmssw(JobType):
222          except KeyError:
223              self.sourceSeedVtx = None
224              common.logger.debug(5,"No vertex seed given")
225 <
226 <        self.PsetEdit = PsetManipulator.PsetManipulator(self.pset) #Daniele Pset
225 >        try:
226 >            self.firstRun = int(cfg_params['CMSSW.first_run'])
227 >        except KeyError:
228 >            self.firstRun = None
229 >            common.logger.debug(5,"No first run given")
230 >        if self.pset != None: #CarlosDaniele
231 >            self.PsetEdit = PsetManipulator.PsetManipulator(self.pset) #Daniele Pset
232  
233          #DBSDLS-start
234          ## Initialize the variables that are extracted from DBS/DLS and needed in other places of the code
# Line 201 | Line 245 | class Cmssw(JobType):
245          self.tgzNameWithPath = self.getTarBall(self.executable)
246      
247          ## Select Splitting
248 <        if self.selectNoInput: self.jobSplittingNoInput()
249 <        else: self.jobSplittingByBlocks(blockSites)
248 >        if self.selectNoInput:
249 >            if self.pset == None: #CarlosDaniele
250 >                self.jobSplittingForScript()
251 >            else:
252 >                self.jobSplittingNoInput()
253 >        else:
254 >            self.jobSplittingByBlocks(blockSites)
255  
256          # modify Pset
257 <        try:
258 <            if (self.datasetPath): # standard job
259 <                # allow to processa a fraction of events in a file
260 <                self.PsetEdit.inputModule("INPUT")
261 <                self.PsetEdit.maxEvent("INPUTMAXEVENTS")
262 <                self.PsetEdit.skipEvent("INPUTSKIPEVENTS")
263 <
264 <            else:  # pythia like job
265 <                self.PsetEdit.maxEvent(self.eventsPerJob)
266 <                if (self.sourceSeed) :
267 <                    self.PsetEdit.pythiaSeed("INPUT")
268 <                    if (self.sourceSeedVtx) :
269 <                        self.PsetEdit.pythiaSeedVtx("INPUTVTX")
270 <            self.PsetEdit.psetWriter(self.configFilename())
271 <        except:
272 <            msg='Error while manipuliating ParameterSet: exiting...'
273 <            raise CrabException(msg)
257 >        if self.pset != None: #CarlosDaniele
258 >            try:
259 >                if (self.datasetPath): # standard job
260 >                    # allow processing a fraction of events in a file
261 >                    self.PsetEdit.inputModule("INPUT")
262 >                    self.PsetEdit.maxEvent("INPUTMAXEVENTS")
263 >                    self.PsetEdit.skipEvent("INPUTSKIPEVENTS")
264 >                else:  # pythia like job
265 >                    self.PsetEdit.maxEvent(self.eventsPerJob)
266 >                    if (self.firstRun):
267 >                        self.PsetEdit.pythiaFirstRun("INPUTFIRSTRUN")  #First Run
268 >                    if (self.sourceSeed) :
269 >                        self.PsetEdit.pythiaSeed("INPUT")
270 >                        if (self.sourceSeedVtx) :
271 >                            self.PsetEdit.pythiaSeedVtx("INPUTVTX")
272 >                # add FrameworkJobReport to parameter-set
273 >                self.PsetEdit.addCrabFJR(self.fjrFileName)
274 >                self.PsetEdit.psetWriter(self.configFilename())
275 >            except:
276 >                msg='Error while manipulating ParameterSet: exiting...'
277 >                raise CrabException(msg)
278  
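
The parameter-set handling above is a two-stage design: at task-creation time PsetManipulator writes placeholder tokens (INPUT, INPUTMAXEVENTS, INPUTSKIPEVENTS, INPUTFIRSTRUN, INPUTVTX) into the job configuration, and the per-job wrapper script substitutes them with real values at runtime (see the sed commands in wsSetupEnvironment further down). A minimal sketch of the idea; the template string and fill_template are stand-ins, not the CMSSW .cfg syntax or the CRAB code:

    # Stage 1 (creation time): the stored parameter-set carries tokens in place
    # of the per-job values.
    template = "maxEvents = INPUTMAXEVENTS\nskipEvents = INPUTSKIPEVENTS\nfiles = {'INPUT'}\n"

    # Stage 2 (runtime): each job substitutes its own values; the real wrapper
    # does this with sed, here the same substitution is shown in Python.
    def fill_template(text, files, max_events, skip_events):
        text = text.replace("{'INPUT'}", files)
        text = text.replace('INPUTMAXEVENTS', str(max_events))
        text = text.replace('INPUTSKIPEVENTS', str(skip_events))
        return text

    # fill_template(template, '{"file:A.root","file:B.root"}', 500, 1500)
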
279      def DataDiscoveryAndLocation(self, cfg_params):
280  
# Line 229 | Line 282 | class Cmssw(JobType):
282  
283          datasetPath=self.datasetPath
284  
232        ## TODO
233        dataTiersList = ""
234        dataTiers = dataTiersList.split(',')
235
285          ## Contact the DBS
286          common.logger.message("Contacting DBS...")
287          try:
288 <            self.pubdata=DataDiscovery.DataDiscovery(datasetPath, dataTiers, cfg_params)
288 >
289 >            if self.use_dbs_2 == 1 :
290 >                self.pubdata=DataDiscovery_DBS2.DataDiscovery_DBS2(datasetPath, cfg_params)
291 >            else :
292 >                self.pubdata=DataDiscovery.DataDiscovery(datasetPath, cfg_params)
293              self.pubdata.fetchDBSInfo()
294  
295          except DataDiscovery.NotExistingDatasetError, ex :
296              msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
297              raise CrabException(msg)
245
298          except DataDiscovery.NoDataTierinProvenanceError, ex :
299              msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
300              raise CrabException(msg)
301          except DataDiscovery.DataDiscoveryError, ex:
302 <            msg = 'ERROR ***: failed Data Discovery in DBS  %s'%ex.getErrorMessage()
302 >            msg = 'ERROR ***: failed Data Discovery in DBS :  %s'%ex.getErrorMessage()
303 >            raise CrabException(msg)
304 >        except DataDiscovery_DBS2.NotExistingDatasetError_DBS2, ex :
305 >            msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
306 >            raise CrabException(msg)
307 >        except DataDiscovery_DBS2.NoDataTierinProvenanceError_DBS2, ex :
308 >            msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
309 >            raise CrabException(msg)
310 >        except DataDiscovery_DBS2.DataDiscoveryError_DBS2, ex:
311 >            msg = 'ERROR ***: failed Data Discovery in DBS :  %s'%ex.getErrorMessage()
312              raise CrabException(msg)
313  
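
Data discovery now dispatches between the DBS-1 and DBS-2 clients on the CMSSW.use_dbs_2 flag, and each backend-specific error class is converted into a CrabException with the same message format. A condensed sketch of that dispatch; the wrapper function and DiscoveryFailed are hypothetical, and it assumes the CRAB modules DataDiscovery and DataDiscovery_DBS2 are importable:

    class DiscoveryFailed(Exception):
        pass

    def discover(dataset_path, cfg_params, use_dbs_2=0):
        """Run data discovery with the selected DBS backend."""
        if use_dbs_2:
            import DataDiscovery_DBS2
            factory = DataDiscovery_DBS2.DataDiscovery_DBS2
        else:
            import DataDiscovery
            factory = DataDiscovery.DataDiscovery
        try:
            pubdata = factory(dataset_path, cfg_params)
            pubdata.fetchDBSInfo()
        except Exception, ex:
            # the real code lists every backend-specific error class explicitly
            raise DiscoveryFailed('failed Data Discovery in DBS: %s' % str(ex))
        return pubdata
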
314          ## get list of all required data in the form of dbs paths  (dbs path = /dataset/datatier/owner)
254        ## self.DBSPaths=self.pubdata.getDBSPaths()
315          common.logger.message("Required data are :"+self.datasetPath)
316  
317          self.filesbyblock=self.pubdata.getFiles()
318          self.eventsbyblock=self.pubdata.getEventsPerBlock()
319          self.eventsbyfile=self.pubdata.getEventsPerFile()
260        # print str(self.filesbyblock)
261        # print 'self.eventsbyfile',len(self.eventsbyfile)
262        # print str(self.eventsbyfile)
320  
321          ## get max number of events
322          self.maxEvents=self.pubdata.getMaxEvents() ##  self.maxEvents used in Creator.py
323 <        common.logger.message("\nThe number of available events is %s"%self.maxEvents)
323 >        common.logger.message("The number of available events is %s\n"%self.maxEvents)
324  
325          common.logger.message("Contacting DLS...")
326          ## Contact the DLS and build a list of sites hosting the fileblocks
# Line 278 | Line 335 | class Cmssw(JobType):
335          sites = dataloc.getSites()
336          allSites = []
337          listSites = sites.values()
338 <        for list in listSites:
339 <            for oneSite in list:
338 >        for listSite in listSites:
339 >            for oneSite in listSite:
340                  allSites.append(oneSite)
341          allSites = self.uniquelist(allSites)
342  
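
Renaming the loop variable above avoids shadowing the list builtin while the DLS reply (a dict mapping fileblock to site list) is flattened into one de-duplicated site list. An equivalent standalone sketch; uniquelist is a CRAB helper, so an order-preserving de-duplication is inlined here, and the block and site names are made up:

    def unique_sites(sites_by_block):
        """Flatten {fileblock: [site, ...]} into a list of distinct site names."""
        all_sites = []
        for site_list in sites_by_block.values():
            for site in site_list:
                if site not in all_sites:   # keep only the first occurrence
                    all_sites.append(site)
        return all_sites

    # unique_sites({'/A#1': ['T2_IT_Bari', 'T2_DE_DESY'], '/A#2': ['T2_DE_DESY']})
    # -> ['T2_IT_Bari', 'T2_DE_DESY']   (order follows dict iteration order)
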
# Line 350 | Line 407 | class Cmssw(JobType):
407          # ---- we've met the requested total # of events    ---- #
408          while ( (eventsRemaining > 0) and (blockCount < numBlocksInDataset) and (jobCount < totalNumberOfJobs)):
409              block = blocks[blockCount]
410 <
354 <
355 <            evInBlock = self.eventsbyblock[block]
356 <            common.logger.debug(5,'Events in Block File '+str(evInBlock))
357 <
358 <            #Correct - switch to this when DBS up
359 <            #numEventsInBlock = self.eventsbyblock[block]
360 <            numEventsInBlock = evInBlock
410 >            blockCount += 1
411              
412 <            files = self.filesbyblock[block]
413 <            numFilesInBlock = len(files)
414 <            if (numFilesInBlock <= 0):
365 <                continue
366 <            fileCount = 0
367 <
368 <            # ---- New block => New job ---- #
369 <            parString = "\\{"
370 <            # counter for number of events in files currently worked on
371 <            filesEventCount = 0
372 <            # flag if next while loop should touch new file
373 <            newFile = 1
374 <            # job event counter
375 <            jobSkipEventCount = 0
412 >            if self.eventsbyblock.has_key(block) :
413 >                numEventsInBlock = self.eventsbyblock[block]
414 >                common.logger.debug(5,'Events in Block File '+str(numEventsInBlock))
415              
416 <            # ---- Iterate over the files in the block until we've met the requested ---- #
417 <            # ---- total # of events or we've gone over all the files in this block  ---- #
418 <            while ( (eventsRemaining > 0) and (fileCount < numFilesInBlock) and (jobCount < totalNumberOfJobs) ):
419 <                file = files[fileCount]
420 <                if newFile :
421 <                    try:
422 <                        numEventsInFile = self.eventsbyfile[file]
423 <                        common.logger.debug(6, "File "+str(file)+" has "+str(numEventsInFile)+" events")
424 <                        # increase filesEventCount
425 <                        filesEventCount += numEventsInFile
426 <                        # Add file to current job
427 <                        parString += '\\\"' + file + '\\\"\,'
428 <                        newFile = 0
429 <                    except KeyError:
430 <                        common.logger.message("File "+str(file)+" has unknown numbe of events: skipping")
416 >                files = self.filesbyblock[block]
417 >                numFilesInBlock = len(files)
418 >                if (numFilesInBlock <= 0):
419 >                    continue
420 >                fileCount = 0
421 >
422 >                # ---- New block => New job ---- #
423 >                parString = "\\{"
424 >                # counter for number of events in files currently worked on
425 >                filesEventCount = 0
426 >                # flag if next while loop should touch new file
427 >                newFile = 1
428 >                # job event counter
429 >                jobSkipEventCount = 0
430 >            
431 >                # ---- Iterate over the files in the block until we've met the requested ---- #
432 >                # ---- total # of events or we've gone over all the files in this block  ---- #
433 >                while ( (eventsRemaining > 0) and (fileCount < numFilesInBlock) and (jobCount < totalNumberOfJobs) ):
434 >                    file = files[fileCount]
435 >                    if newFile :
436 >                        try:
437 >                            numEventsInFile = self.eventsbyfile[file]
438 >                            common.logger.debug(6, "File "+str(file)+" has "+str(numEventsInFile)+" events")
439 >                            # increase filesEventCount
440 >                            filesEventCount += numEventsInFile
441 >                            # Add file to current job
442 >                            parString += '\\\"' + file + '\\\"\,'
443 >                            newFile = 0
444 >                        except KeyError:
445 >                            common.logger.message("File "+str(file)+" has unknown number of events: skipping")
446                          
447  
448 <                # if less events in file remain than eventsPerJobRequested
449 <                if ( filesEventCount - jobSkipEventCount < eventsPerJobRequested ) :
450 <                    # if last file in block
451 <                    if ( fileCount == numFilesInBlock ) :
452 <                        # end job using last file, use remaining events in block
448 >                    # if less events in file remain than eventsPerJobRequested
449 >                    if ( filesEventCount - jobSkipEventCount < eventsPerJobRequested ) :
450 >                        # if last file in block
451 >                        if ( fileCount == numFilesInBlock-1 ) :
452 >                            # end job using last file, use remaining events in block
453 >                            # close job and touch new file
454 >                            fullString = parString[:-2]
455 >                            fullString += '\\}'
456 >                            list_of_lists.append([fullString,str(-1),str(jobSkipEventCount)])
457 >                            common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(filesEventCount - jobSkipEventCount)+" events (last file in block).")
458 >                            self.jobDestination.append(blockSites[block])
459 >                            common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
460 >                            # reset counter
461 >                            jobCount = jobCount + 1
462 >                            totalEventCount = totalEventCount + filesEventCount - jobSkipEventCount
463 >                            eventsRemaining = eventsRemaining - filesEventCount + jobSkipEventCount
464 >                            jobSkipEventCount = 0
465 >                            # reset file
466 >                            parString = "\\{"
467 >                            filesEventCount = 0
468 >                            newFile = 1
469 >                            fileCount += 1
470 >                        else :
471 >                            # go to next file
472 >                            newFile = 1
473 >                            fileCount += 1
474 >                    # if events in file equal to eventsPerJobRequested
475 >                    elif ( filesEventCount - jobSkipEventCount == eventsPerJobRequested ) :
476                          # close job and touch new file
477                          fullString = parString[:-2]
478                          fullString += '\\}'
479 <                        list_of_lists.append([fullString,str(-1),str(jobSkipEventCount)])
480 <                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(filesEventCount - jobSkipEventCount)+" events (last file in block).")
479 >                        list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
480 >                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
481                          self.jobDestination.append(blockSites[block])
482                          common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
483                          # reset counter
# Line 413 | Line 490 | class Cmssw(JobType):
490                          filesEventCount = 0
491                          newFile = 1
492                          fileCount += 1
493 +                        
494 +                    # if more events in file remain than eventsPerJobRequested
495                      else :
496 <                        # go to next file
497 <                        newFile = 1
498 <                        fileCount += 1
499 <                # if events in file equal to eventsPerJobRequested
500 <                elif ( filesEventCount - jobSkipEventCount == eventsPerJobRequested ) :
501 <                    # close job and touch new file
502 <                    fullString = parString[:-2]
503 <                    fullString += '\\}'
504 <                    list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
505 <                    common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
506 <                    self.jobDestination.append(blockSites[block])
507 <                    common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
508 <                    # reset counter
509 <                    jobCount = jobCount + 1
510 <                    totalEventCount = totalEventCount + eventsPerJobRequested
511 <                    eventsRemaining = eventsRemaining - eventsPerJobRequested
512 <                    jobSkipEventCount = 0
513 <                    # reset file
514 <                    parString = "\\{"
515 <                    filesEventCount = 0
437 <                    newFile = 1
438 <                    fileCount += 1
439 <                    
440 <                # if more events in file remain than eventsPerJobRequested
441 <                else :
442 <                    # close job but don't touch new file
443 <                    fullString = parString[:-2]
444 <                    fullString += '\\}'
445 <                    list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
446 <                    common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
447 <                    self.jobDestination.append(blockSites[block])
448 <                    common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
449 <                    # increase counter
450 <                    jobCount = jobCount + 1
451 <                    totalEventCount = totalEventCount + eventsPerJobRequested
452 <                    eventsRemaining = eventsRemaining - eventsPerJobRequested
453 <                    # calculate skip events for last file
454 <                    # use filesEventCount (contains several files), jobSkipEventCount and eventsPerJobRequest
455 <                    jobSkipEventCount = eventsPerJobRequested - (filesEventCount - jobSkipEventCount - self.eventsbyfile[file])
456 <                    # remove all but the last file
457 <                    filesEventCount = self.eventsbyfile[file]
458 <                    parString = "\\{"
459 <                    parString += '\\\"' + file + '\\\"\,'
460 <                pass # END if
461 <            pass # END while (iterate over files in the block)
496 >                        # close job but don't touch new file
497 >                        fullString = parString[:-2]
498 >                        fullString += '\\}'
499 >                        list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
500 >                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
501 >                        self.jobDestination.append(blockSites[block])
502 >                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
503 >                        # increase counter
504 >                        jobCount = jobCount + 1
505 >                        totalEventCount = totalEventCount + eventsPerJobRequested
506 >                        eventsRemaining = eventsRemaining - eventsPerJobRequested
507 >                        # calculate skip events for last file
508 >                        # use filesEventCount (contains several files), jobSkipEventCount and eventsPerJobRequest
509 >                        jobSkipEventCount = eventsPerJobRequested - (filesEventCount - jobSkipEventCount - self.eventsbyfile[file])
510 >                        # remove all but the last file
511 >                        filesEventCount = self.eventsbyfile[file]
512 >                        parString = "\\{"
513 >                        parString += '\\\"' + file + '\\\"\,'
514 >                    pass # END if
515 >                pass # END while (iterate over files in the block)
516          pass # END while (iterate over blocks in the dataset)
517          self.ncjobs = self.total_number_of_jobs = jobCount
518          if (eventsRemaining > 0 and jobCount < totalNumberOfJobs ):
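
The re-indented loop above now skips blocks that are missing from eventsbyblock and corrects the last-file test to fileCount == numFilesInBlock-1. Its core bookkeeping, stripped of the requested-event and job-count caps, the site lookup, and the unknown-event-count guard, can be sketched as follows; the function and its return format are illustrative only:

    def split_block(files, events_per_job):
        """files: list of (file_name, n_events) in one fileblock.
        Returns [(file_list, max_events, skip_events)]; max_events == -1 means
        'use all remaining events' (job closed by the end of the block)."""
        jobs = []
        job_files = []   # files assigned to the job being built
        collected = 0    # events contained in job_files
        skip = 0         # events at the start of job_files already used by earlier jobs
        for fname, nev in files:
            job_files.append(fname)
            collected += nev
            while collected - skip >= events_per_job:
                jobs.append((list(job_files), events_per_job, skip))
                if collected - skip == events_per_job:
                    # file boundary and job boundary coincide: start from scratch
                    job_files, collected, skip = [], 0, 0
                    break
                # the current file still has unused events: keep it, raise the skip count
                skip = events_per_job - (collected - skip - nev)
                job_files, collected = [fname], nev
        if job_files and collected - skip > 0:
            jobs.append((list(job_files), -1, skip))   # leftover events in the block
        return jobs

    # split_block([('f1.root', 250), ('f2.root', 250)], 300)
    # -> [(['f1.root', 'f2.root'], 300, 0), (['f2.root'], -1, 50)]
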
# Line 482 | Line 536 | class Cmssw(JobType):
536              raise CrabException(msg)
537  
538          if (self.selectEventsPerJob):
539 <            self.total_number_of_jobs = int(self.total_number_of_events/self.eventsPerJob)
539 >            if (self.selectTotalNumberEvents):
540 >                self.total_number_of_jobs = int(self.total_number_of_events/self.eventsPerJob)
541 >            elif(self.selectNumberOfJobs) :  
542 >                self.total_number_of_jobs =self.theNumberOfJobs
543 >                self.total_number_of_events =int(self.theNumberOfJobs*self.eventsPerJob)
544 >
545          elif (self.selectNumberOfJobs) :
546              self.total_number_of_jobs = self.theNumberOfJobs
547              self.eventsPerJob = int(self.total_number_of_events/self.total_number_of_jobs)
548 <
548 >
549          common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
550  
551          # is there any remainder?
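
This hunk extends the no-input splitting so that the job count and total events can be derived from whichever two of total_number_of_events, events_per_job and number_of_jobs were given. A compact sketch of that arithmetic; the function name and signature are illustrative:

    def derive_splitting(total_events=None, events_per_job=None, number_of_jobs=None):
        """Given two of the three parameters, derive the missing one (integer
        arithmetic as in the original; any remainder is handled separately)."""
        if events_per_job and total_events:
            number_of_jobs = int(total_events / events_per_job)
        elif events_per_job and number_of_jobs:
            total_events = int(number_of_jobs * events_per_job)
        elif number_of_jobs and total_events:
            events_per_job = int(total_events / number_of_jobs)
        else:
            raise ValueError('two of the three parameters must be set')
        return total_events, events_per_job, number_of_jobs

    # derive_splitting(total_events=1000, events_per_job=300) -> (1000, 300, 3)
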
# Line 502 | Line 561 | class Cmssw(JobType):
561          self.list_of_args = []
562          for i in range(self.total_number_of_jobs):
563              ## Since there is no input, any site is good
564 <            self.jobDestination.append(["Any"])
564 >           # self.jobDestination.append(["Any"])
565 >            self.jobDestination.append([""]) #must be empty to write correctly the xml
566 >            args=''
567 >            if (self.firstRun):
568 >                    ## pythia first run
569 >                #self.list_of_args.append([(str(self.firstRun)+str(i))])
570 >                args=args+(str(self.firstRun)+str(i))
571 >            else:
572 >                ## no first run
573 >                #self.list_of_args.append([str(i)])
574 >                args=args+str(i)
575              if (self.sourceSeed):
576                  if (self.sourceSeedVtx):
577                      ## pythia + vtx random seed
578 <                    self.list_of_args.append([
579 <                                              str(self.sourceSeed)+str(i),
580 <                                              str(self.sourceSeedVtx)+str(i)
581 <                                              ])
578 >                    #self.list_of_args.append([
579 >                    #                          str(self.sourceSeed)+str(i),
580 >                    #                          str(self.sourceSeedVtx)+str(i)
581 >                    #                          ])
582 >                    args=args+str(',')+str(self.sourceSeed)+str(i)+str(',')+str(self.sourceSeedVtx)+str(i)
583                  else:
584                      ## only pythia random seed
585 <                    self.list_of_args.append([(str(self.sourceSeed)+str(i))])
585 >                    #self.list_of_args.append([(str(self.sourceSeed)+str(i))])
586 >                    args=args +str(',')+str(self.sourceSeed)+str(i)
587              else:
588                  ## no random seed
589 <                self.list_of_args.append([str(i)])
590 <        #print self.list_of_args
589 >                if str(args)=='': args=args+(str(self.firstRun)+str(i))
590 >            arguments=args.split(',')
591 >            if len(arguments)==3:self.list_of_args.append([str(arguments[0]),str(arguments[1]),str(arguments[2])])
592 >            elif len(arguments)==2:self.list_of_args.append([str(arguments[0]),str(arguments[1])])
593 >            else :self.list_of_args.append([str(arguments[0])])
594 >            
595 >     #   print self.list_of_args
596  
597          return
598  
599 +
600 +    def jobSplittingForScript(self):#CarlosDaniele
601 +        """
602 +        Perform job splitting based on number of job
603 +        """
604 +        common.logger.debug(5,'Splitting per job')
605 +        common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
606 +
607 +        self.total_number_of_jobs = self.theNumberOfJobs
608 +
609 +        common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
610 +
611 +        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created')
612 +
613 +        # argument is seed number.$i
614 +        self.list_of_args = []
615 +        for i in range(self.total_number_of_jobs):
616 +            ## Since there is no input, any site is good
617 +           # self.jobDestination.append(["Any"])
618 +            self.jobDestination.append([""])
619 +            ## no random seed
620 +            self.list_of_args.append([str(i)])
621 +        return
622 +
623      def split(self, jobParams):
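
jobSplittingNoInput() now folds the optional first-run number and the two random seeds into a comma-joined string that is split back into the per-job argument list, while jobSplittingForScript() only needs the job index. The same result can be obtained without the join/split round-trip, as in this illustrative sketch (build_job_args is not a CRAB function):

    def build_job_args(n_jobs, first_run=None, source_seed=None, vtx_seed=None):
        """Job i gets [run_or_index, seed_i, vtx_seed_i]; the optional entries are
        appended only when configured. As in the code above, values are string
        concatenations with the job index (seed 5555, job 1 -> '55551')."""
        list_of_args = []
        for i in range(n_jobs):
            if first_run is not None:
                args = [str(first_run) + str(i)]
            else:
                args = [str(i)]
            if source_seed is not None:
                args.append(str(source_seed) + str(i))
                if vtx_seed is not None:
                    args.append(str(vtx_seed) + str(i))
            list_of_args.append(args)
        return list_of_args

    # build_job_args(2, first_run=100, source_seed=5555, vtx_seed=7777)
    # -> [['1000', '55550', '77770'], ['1001', '55551', '77771']]
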
624  
625          common.jobDB.load()
# Line 557 | Line 657 | class Cmssw(JobType):
657          """
658          
659          # if it exist, just return it
660 <        self.tgzNameWithPath = common.work_space.shareDir()+self.tgz_name
660 >        #
661 >        # Marco. Let's start to use relative path for Boss XML files
662 >        #
663 >        self.tgzNameWithPath = common.work_space.pathForTgz()+'share/'+self.tgz_name
664          if os.path.exists(self.tgzNameWithPath):
665              return self.tgzNameWithPath
666  
# Line 571 | Line 674 | class Cmssw(JobType):
674          # First of all declare the user Scram area
675          swArea = self.scram.getSWArea_()
676          #print "swArea = ", swArea
677 <        swVersion = self.scram.getSWVersion()
678 <        #print "swVersion = ", swVersion
677 >        # swVersion = self.scram.getSWVersion()
678 >        # print "swVersion = ", swVersion
679          swReleaseTop = self.scram.getReleaseTop_()
680          #print "swReleaseTop = ", swReleaseTop
681          
# Line 580 | Line 683 | class Cmssw(JobType):
683          if swReleaseTop == '' or swArea == swReleaseTop:
684              return
685  
686 <        filesToBeTarred = []
687 <        ## First find the executable
688 <        if (self.executable != ''):
689 <            exeWithPath = self.scram.findFile_(executable)
690 < #           print exeWithPath
691 <            if ( not exeWithPath ):
692 <                raise CrabException('User executable '+executable+' not found')
693 <
694 <            ## then check if it's private or not
695 <            if exeWithPath.find(swReleaseTop) == -1:
696 <                # the exe is private, so we must ship
697 <                common.logger.debug(5,"Exe "+exeWithPath+" to be tarred")
698 <                path = swArea+'/'
699 <                exe = string.replace(exeWithPath, path,'')
700 <                filesToBeTarred.append(exe)
701 <                pass
702 <            else:
703 <                # the exe is from release, we'll find it on WN
704 <                pass
705 <
706 <        ## Now get the libraries: only those in local working area
707 <        libDir = 'lib'
708 <        lib = swArea+'/' +libDir
709 <        common.logger.debug(5,"lib "+lib+" to be tarred")
710 <        if os.path.exists(lib):
711 <            filesToBeTarred.append(libDir)
712 <
713 <        ## Now check if module dir is present
714 <        moduleDir = 'module'
715 <        if os.path.isdir(swArea+'/'+moduleDir):
716 <            filesToBeTarred.append(moduleDir)
717 <
718 <        ## Now check if the Data dir is present
719 <        dataDir = 'src/Data/'
720 <        if os.path.isdir(swArea+'/'+dataDir):
721 <            filesToBeTarred.append(dataDir)
722 <
723 <        ## Create the tar-ball
724 <        if len(filesToBeTarred)>0:
725 <            cwd = os.getcwd()
726 <            os.chdir(swArea)
727 <            tarcmd = 'tar zcvf ' + self.tgzNameWithPath + ' '
728 <            for line in filesToBeTarred:
729 <                tarcmd = tarcmd + line + ' '
730 <            cout = runCommand(tarcmd)
731 <            if not cout:
732 <                raise CrabException('Could not create tar-ball')
733 <            os.chdir(cwd)
734 <        else:
735 <            common.logger.debug(5,"No files to be to be tarred")
686 >        import tarfile
687 >        try: # create tar ball
688 >            tar = tarfile.open(self.tgzNameWithPath, "w:gz")
689 >            ## First find the executable
690 >            if (executable != ''):
691 >                exeWithPath = self.scram.findFile_(executable)
692 >                if ( not exeWithPath ):
693 >                    raise CrabException('User executable '+executable+' not found')
694 >    
695 >                ## then check if it's private or not
696 >                if exeWithPath.find(swReleaseTop) == -1:
697 >                    # the exe is private, so we must ship
698 >                    common.logger.debug(5,"Exe "+exeWithPath+" to be tarred")
699 >                    path = swArea+'/'
700 >                    exe = string.replace(exeWithPath, path,'')
701 >                    tar.add(path+exe,executable)
702 >                    pass
703 >                else:
704 >                    # the exe is from release, we'll find it on WN
705 >                    pass
706 >    
707 >            ## Now get the libraries: only those in local working area
708 >            libDir = 'lib'
709 >            lib = swArea+'/' +libDir
710 >            common.logger.debug(5,"lib "+lib+" to be tarred")
711 >            if os.path.exists(lib):
712 >                tar.add(lib,libDir)
713 >    
714 >            ## Now check if module dir is present
715 >            moduleDir = 'module'
716 >            module = swArea + '/' + moduleDir
717 >            if os.path.isdir(module):
718 >                tar.add(module,moduleDir)
719 >
720 >            ## Now check if any data dir(s) is present
721 >            swAreaLen=len(swArea)
722 >            for root, dirs, files in os.walk(swArea):
723 >                if "data" in dirs:
724 >                    common.logger.debug(5,"data "+root+"/data"+" to be tarred")
725 >                    tar.add(root+"/data",root[swAreaLen:]+"/data")
726 >
727 >            ## Add ProdAgent dir to tar
728 >            paDir = 'ProdAgentApi'
729 >            pa = os.environ['CRABDIR'] + '/' + 'ProdAgentApi'
730 >            if os.path.isdir(pa):
731 >                tar.add(pa,paDir)
732 >        
733 >            common.logger.debug(5,"Files added to "+self.tgzNameWithPath+" : "+str(tar.getnames()))
734 >            tar.close()
735 >        except :
736 >            raise CrabException('Could not create tar-ball')
737 >
738 >        ## check for tarball size
739 >        tarballinfo = os.stat(self.tgzNameWithPath)
740 >        if ( tarballinfo.st_size > self.MaxTarBallSize*1024*1024 ) :
741 >            raise CrabException('Input sandbox size of ' + str(float(tarballinfo.st_size)/1024.0/1024.0) + ' MB is larger than the allowed ' + str(self.MaxTarBallSize) + ' MB input sandbox limit and not supported by the used GRID submission system. Please make sure that no unnecessary files are in all data directories in your local CMSSW project area as they are automatically packed into the input sandbox.')
742 >
743 >        ## create tar-ball with ML stuff
744 >        self.MLtgzfile =  common.work_space.pathForTgz()+'share/MLfiles.tgz'
745 >        try:
746 >            tar = tarfile.open(self.MLtgzfile, "w:gz")
747 >            path=os.environ['CRABDIR'] + '/python/'
748 >            for file in ['report.py', 'DashboardAPI.py', 'Logger.py', 'ProcInfo.py', 'apmon.py', 'parseCrabFjr.py']:
749 >                tar.add(path+file,file)
750 >            common.logger.debug(5,"Files added to "+self.MLtgzfile+" : "+str(tar.getnames()))
751 >            tar.close()
752 >        except :
753 >            raise CrabException('Could not create ML files tar-ball')
754          
755          return
756          
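
getTarBall() now packs the sandbox with the tarfile module instead of shelling out to tar, walks the project area for data directories, adds the ProdAgentApi directory, and enforces the EDG.maxtarballsize limit. A condensed standalone sketch of the pack-and-check step; make_tarball and its (source, archive name) entry list are illustrative, not the CRAB interface:

    import os, tarfile

    def make_tarball(tgz_path, entries, max_mb=100.0):
        """entries: list of (source_path, name_inside_tarball) pairs.
        Create a gzipped tarball and raise if it exceeds max_mb."""
        tar = tarfile.open(tgz_path, "w:gz")
        try:
            for src, arcname in entries:
                if os.path.exists(src):
                    tar.add(src, arcname)
        finally:
            tar.close()
        size_mb = os.stat(tgz_path).st_size / 1024.0 / 1024.0
        if size_mb > max_mb:
            raise RuntimeError('input sandbox is %.1f MB, above the %.1f MB limit'
                               % (size_mb, max_mb))
        return tgz_path

    # make_tarball('share/default.tgz', [('lib', 'lib'), ('module', 'module')])
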
# Line 646 | Line 767 | class Cmssw(JobType):
767          txt += 'if [ $middleware == LCG ]; then \n'
768          txt += self.wsSetupCMSLCGEnvironment_()
769          txt += 'elif [ $middleware == OSG ]; then\n'
770 <        txt += '    time=`date -u +"%s"`\n'
771 <        txt += '    WORKING_DIR=$OSG_WN_TMP/cms_$time\n'
651 <        txt += '    echo "Creating working directory: $WORKING_DIR"\n'
652 <        txt += '    /bin/mkdir -p $WORKING_DIR\n'
770 >        txt += '    WORKING_DIR=`/bin/mktemp  -d $OSG_WN_TMP/cms_XXXXXXXXXXXX`\n'
771 >        txt += '    echo "Created working directory: $WORKING_DIR"\n'
772          txt += '    if [ ! -d $WORKING_DIR ] ;then\n'
773          txt += '        echo "SET_CMS_ENV 10016 ==> OSG $WORKING_DIR could not be created on WN `hostname`"\n'
774          txt += '        echo "JOB_EXIT_STATUS = 10016"\n'
# Line 698 | Line 817 | class Cmssw(JobType):
817          txt += '   exit 1 \n'
818          txt += 'fi \n'
819          txt += 'echo "CMSSW_VERSION =  '+self.version+'"\n'
820 +        txt += 'export SCRAM_ARCH='+self.executable_arch+'\n'
821          txt += 'cd '+self.version+'\n'
822          ### needed grep for bug in scramv1 ###
823 +        txt += scram+' runtime -sh\n'
824          txt += 'eval `'+scram+' runtime -sh | grep -v SCRAMRT_LSB_JOBNAME`\n'
825 +        txt += 'echo $PATH\n'
826  
827          # Handle the arguments:
828          txt += "\n"
# Line 737 | Line 859 | class Cmssw(JobType):
859  
860          # Prepare job-specific part
861          job = common.job_list[nj]
862 <        pset = os.path.basename(job.configFilename())
863 <        txt += '\n'
864 <        if (self.datasetPath): # standard job
865 <            #txt += 'InputFiles=$2\n'
866 <            txt += 'InputFiles=${args[1]}\n'
867 <            txt += 'MaxEvents=${args[2]}\n'
868 <            txt += 'SkipEvents=${args[3]}\n'
869 <            txt += 'echo "Inputfiles:<$InputFiles>"\n'
870 <            txt += 'sed "s#{\'INPUT\'}#$InputFiles#" $RUNTIME_AREA/'+pset+' > pset_tmp_1.cfg\n'
871 <            txt += 'echo "MaxEvents:<$MaxEvents>"\n'
872 <            txt += 'sed "s#INPUTMAXEVENTS#$MaxEvents#" $RUNTIME_AREA/ pset_tmp_1.cfg > pset_tmp_2.cfg\n'
873 <            txt += 'echo "SkipEvents:<$SkipEvents>"\n'
874 <            txt += 'sed "s#INPUTSKIPEVENTS#$SkipEvents#" $RUNTIME_AREA/ pset_tmp_2.cfg > pset.cfg\n'
875 <        else:  # pythia like job
876 <            if (self.sourceSeed):
877 < #                txt += 'Seed=$2\n'
878 <                txt += 'Seed=${args[1]}\n'
879 <                txt += 'echo "Seed: <$Seed>"\n'
880 <                txt += 'sed "s#\<INPUT\>#$Seed#" $RUNTIME_AREA/'+pset+' > tmp.cfg\n'
759 <                if (self.sourceSeedVtx):
760 < #                    txt += 'VtxSeed=$3\n'
761 <                    txt += 'VtxSeed=${args[2]}\n'
762 <                    txt += 'echo "VtxSeed: <$VtxSeed>"\n'
763 <                    txt += 'sed "s#INPUTVTX#$VtxSeed#" tmp.cfg > pset.cfg\n'
862 >        if self.pset != None: #CarlosDaniele
863 >            pset = os.path.basename(job.configFilename())
864 >            txt += '\n'
865 >            if (self.datasetPath): # standard job
866 >                #txt += 'InputFiles=$2\n'
867 >                txt += 'InputFiles=${args[1]}\n'
868 >                txt += 'MaxEvents=${args[2]}\n'
869 >                txt += 'SkipEvents=${args[3]}\n'
870 >                txt += 'echo "Inputfiles:<$InputFiles>"\n'
871 >                txt += 'sed "s#{\'INPUT\'}#$InputFiles#" $RUNTIME_AREA/'+pset+' > pset_tmp_1.cfg\n'
872 >                txt += 'echo "MaxEvents:<$MaxEvents>"\n'
873 >                txt += 'sed "s#INPUTMAXEVENTS#$MaxEvents#" pset_tmp_1.cfg > pset_tmp_2.cfg\n'
874 >                txt += 'echo "SkipEvents:<$SkipEvents>"\n'
875 >                txt += 'sed "s#INPUTSKIPEVENTS#$SkipEvents#" pset_tmp_2.cfg > pset.cfg\n'
876 >            else:  # pythia like job
877 >                if (self.sourceSeed):
878 >                    txt += 'FirstRun=${args[1]}\n'
879 >                    txt += 'echo "FirstRun: <$FirstRun>"\n'
880 >                    txt += 'sed "s#\<INPUTFIRSTRUN\>#$FirstRun#" $RUNTIME_AREA/'+pset+' > tmp_1.cfg\n'
881                  else:
882 <                    txt += 'mv tmp.cfg pset.cfg\n'
883 <            else:
884 <                txt += '# Copy untouched pset\n'
885 <                txt += 'cp $RUNTIME_AREA/'+pset+' pset.cfg\n'
882 >                    txt += '# Copy untouched pset\n'
883 >                    txt += 'cp $RUNTIME_AREA/'+pset+' tmp_1.cfg\n'
884 >                if (self.sourceSeed):
885 > #                    txt += 'Seed=$2\n'
886 >                    txt += 'Seed=${args[2]}\n'
887 >                    txt += 'echo "Seed: <$Seed>"\n'
888 >                    txt += 'sed "s#\<INPUT\>#$Seed#" tmp_1.cfg > tmp_2.cfg\n'
889 >                    if (self.sourceSeedVtx):
890 > #                        txt += 'VtxSeed=$3\n'
891 >                        txt += 'VtxSeed=${args[3]}\n'
892 >                        txt += 'echo "VtxSeed: <$VtxSeed>"\n'
893 >                        txt += 'sed "s#INPUTVTX#$VtxSeed#" tmp_2.cfg > pset.cfg\n'
894 >                    else:
895 >                        txt += 'mv tmp_2.cfg pset.cfg\n'
896 >                else:
897 >                    txt += 'mv tmp_1.cfg pset.cfg\n'
898 >                   # txt += '# Copy untouched pset\n'
899 >                   # txt += 'cp $RUNTIME_AREA/'+pset+' pset.cfg\n'
900  
901  
902          if len(self.additional_inbox_files) > 0:
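
For the pythia-like case the wrapper now substitutes up to three tokens in sequence (tmp_1.cfg, then tmp_2.cfg, then pset.cfg), and the sed patterns \<INPUTFIRSTRUN\> and \<INPUT\> use word boundaries so the bare INPUT token cannot clobber INPUTFIRSTRUN or INPUTVTX. The same substitution expressed in Python, as an illustrative aid only (customize_pset is not part of CRAB):

    import re

    def customize_pset(template_text, first_run=None, seed=None, vtx_seed=None):
        """Fill the INPUTFIRSTRUN, INPUT and INPUTVTX tokens left by PsetManipulator.
        \\b mirrors sed's \\< \\> word boundaries, so INPUT matches only the bare token."""
        text = template_text
        if first_run is not None:
            text = re.sub(r'\bINPUTFIRSTRUN\b', str(first_run), text)
        if seed is not None:
            text = re.sub(r'\bINPUT\b', str(seed), text)
            if vtx_seed is not None:
                text = re.sub(r'\bINPUTVTX\b', str(vtx_seed), text)
        return text
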
# Line 777 | Line 908 | class Cmssw(JobType):
908                  txt += 'fi\n'
909              pass
910  
911 <        txt += 'echo "### END JOB SETUP ENVIRONMENT ###"\n\n'
912 <
913 <        txt += '\n'
914 <        txt += 'echo "***** cat pset.cfg *********"\n'
915 <        txt += 'cat pset.cfg\n'
916 <        txt += 'echo "****** end pset.cfg ********"\n'
917 <        txt += '\n'
918 <        # txt += 'echo "***** cat pset1.cfg *********"\n'
919 <        # txt += 'cat pset1.cfg\n'
920 <        # txt += 'echo "****** end pset1.cfg ********"\n'
911 >        if self.pset != None: #CarlosDaniele
912 >            txt += 'echo "### END JOB SETUP ENVIRONMENT ###"\n\n'
913 >        
914 >            txt += '\n'
915 >            txt += 'echo "***** cat pset.cfg *********"\n'
916 >            txt += 'cat pset.cfg\n'
917 >            txt += 'echo "****** end pset.cfg ********"\n'
918 >            txt += '\n'
919 >            # txt += 'echo "***** cat pset1.cfg *********"\n'
920 >            # txt += 'cat pset1.cfg\n'
921 >            # txt += 'echo "****** end pset1.cfg ********"\n'
922          return txt
923  
924 <    def wsBuildExe(self, nj):
924 >    def wsBuildExe(self, nj=0):
925          """
926          Put in the script the commands to build an executable
927          or a library.
# Line 824 | Line 956 | class Cmssw(JobType):
956              txt += 'else \n'
957              txt += '   echo "Successful untar" \n'
958              txt += 'fi \n'
959 +            txt += '\n'
960 +            txt += 'echo "Include ProdAgentApi in PYTHONPATH"\n'
961 +            txt += 'if [ -z "$PYTHONPATH" ]; then\n'
962 +            txt += '   export PYTHONPATH=ProdAgentApi\n'
963 +            txt += 'else\n'
964 +            txt += '   export PYTHONPATH=ProdAgentApi:${PYTHONPATH}\n'
965 +            txt += 'fi\n'
966 +            txt += '\n'
967 +
968              pass
969          
970          return txt
# Line 835 | Line 976 | class Cmssw(JobType):
976          """
977          
978      def executableName(self):
979 <        return self.executable
979 >        if self.scriptExe: #CarlosDaniele
980 >            return "sh "
981 >        else:
982 >            return self.executable
983  
984      def executableArgs(self):
985 <        return " -p pset.cfg"
985 >        if self.scriptExe:#CarlosDaniele
986 >            return   self.scriptExe + " $NJob"
987 >        else:
988 >            return " -p pset.cfg"
989  
990      def inputSandbox(self, nj):
991          """
992          Returns a list of filenames to be put in JDL input sandbox.
993          """
994          inp_box = []
995 <        # dict added to delete duplicate from input sandbox file list
996 <        seen = {}
995 >        # # dict added to delete duplicate from input sandbox file list
996 >        # seen = {}
997          ## code
998          if os.path.isfile(self.tgzNameWithPath):
999              inp_box.append(self.tgzNameWithPath)
1000 +        if os.path.isfile(self.MLtgzfile):
1001 +            inp_box.append(self.MLtgzfile)
1002          ## config
1003 <        inp_box.append(common.job_list[nj].configFilename())
1003 >        if not self.pset is None:
1004 >            inp_box.append(common.work_space.pathForTgz() + 'job/' + self.configFilename())
1005          ## additional input files
1006 <        #for file in self.additional_inbox_files:
1007 <        #    inp_box.append(common.work_space.cwdDir()+file)
1006 >        for file in self.additional_inbox_files:
1007 >            inp_box.append(file)
1008          return inp_box
1009  
1010      def outputSandbox(self, nj):
# Line 863 | Line 1013 | class Cmssw(JobType):
1013          """
1014          out_box = []
1015  
866        stdout=common.job_list[nj].stdout()
867        stderr=common.job_list[nj].stderr()
868
1016          ## User Declared output files
1017 <        for out in self.output_file:
1017 >        for out in (self.output_file+self.output_file_sandbox):
1018              n_out = nj + 1
1019              out_box.append(self.numberFile_(out,str(n_out)))
1020          return out_box
874        return []
1021  
1022      def prepareSteeringCards(self):
1023          """
# Line 887 | Line 1033 | class Cmssw(JobType):
1033          txt = '\n'
1034          txt += '# directory content\n'
1035          txt += 'ls \n'
1036 <        file_list = ''
1037 <        for fileWithSuffix in self.output_file:
1036 >
1037 >        for fileWithSuffix in (self.output_file+self.output_file_sandbox):
1038              output_file_num = self.numberFile_(fileWithSuffix, '$NJob')
893            file_list=file_list+output_file_num+' '
1039              txt += '\n'
1040              txt += '# check output file\n'
1041              txt += 'ls '+fileWithSuffix+'\n'
1042              txt += 'ls_result=$?\n'
898            #txt += 'exe_result=$?\n'
1043              txt += 'if [ $ls_result -ne 0 ] ; then\n'
1044              txt += '   echo "ERROR: Problem with output file"\n'
901            #txt += '   echo "JOB_EXIT_STATUS = $exe_result"\n'
902            #txt += '   echo "JobExitCode=60302" | tee -a $RUNTIME_AREA/$repo\n'
903            #txt += '   dumpStatus $RUNTIME_AREA/$repo\n'
904            ### OLI_DANIELE
1045              if common.scheduler.boss_scheduler_name == 'condor_g':
1046                  txt += '    if [ $middleware == OSG ]; then \n'
1047                  txt += '        echo "prepare dummy output file"\n'
# Line 912 | Line 1052 | class Cmssw(JobType):
1052              txt += 'fi\n'
1053        
1054          txt += 'cd $RUNTIME_AREA\n'
915        file_list=file_list[:-1]
916        txt += 'file_list="'+file_list+'"\n'
1055          txt += 'cd $RUNTIME_AREA\n'
1056          ### OLI_DANIELE
1057          txt += 'if [ $middleware == OSG ]; then\n'  
# Line 931 | Line 1069 | class Cmssw(JobType):
1069          txt += '    fi\n'
1070          txt += 'fi\n'
1071          txt += '\n'
1072 +
1073 +        file_list = ''
1074 +        ## Add to filelist only files to be possibly copied to SE
1075 +        for fileWithSuffix in self.output_file:
1076 +            output_file_num = self.numberFile_(fileWithSuffix, '$NJob')
1077 +            file_list=file_list+output_file_num+' '
1078 +        file_list=file_list[:-1]
1079 +        txt += 'file_list="'+file_list+'"\n'
1080 +
1081          return txt
1082  
1083      def numberFile_(self, file, txt):
# Line 951 | Line 1098 | class Cmssw(JobType):
1098          
1099          return result
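
The body of numberFile_() is largely elided by this diff; judging from its call sites (numberFile_(out, str(n_out)) and numberFile_(fileWithSuffix, '$NJob')), it tags every output file name with the job number before the extension. A hypothetical sketch of that naming convention, not the actual CRAB implementation:

    import os

    def number_file(filename, tag):
        """e.g. ('output.root', '3') -> 'output_3.root'; without an extension the tag is appended."""
        base, ext = os.path.splitext(filename)
        if ext:
            return base + '_' + tag + ext
        return filename + '_' + tag
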
1100  
1101 <    def getRequirements(self):
1101 >    def getRequirements(self, nj=[]):
1102          """
1103          return job requirements to add to jdl files
1104          """
1105          req = ''
1106 <        if common.analisys_common_info['sw_version']:
1106 >        if self.version:
1107              req='Member("VO-cms-' + \
1108 <                 common.analisys_common_info['sw_version'] + \
1108 >                 self.version + \
1109                   '", other.GlueHostApplicationSoftwareRunTimeEnvironment)'
1110  
1111          req = req + ' && (other.GlueHostNetworkAdapterOutboundIP)'
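
getRequirements() now builds the JDL requirements expression from self.version instead of the dropped analisys_common_info dictionary. A standalone sketch of the resulting string (the function name is illustrative):

    def cmssw_requirement(version):
        """Require sites that advertise the needed CMSSW release and outbound connectivity."""
        req = ''
        if version:
            req = ('Member("VO-cms-' + version +
                   '", other.GlueHostApplicationSoftwareRunTimeEnvironment)')
        req += ' && (other.GlueHostNetworkAdapterOutboundIP)'
        return req

    # cmssw_requirement('CMSSW_1_2_0') -> 'Member("VO-cms-CMSSW_1_2_0", ...) &&
    #                                      (other.GlueHostNetworkAdapterOutboundIP)'
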
# Line 1058 | Line 1205 | class Cmssw(JobType):
1205          txt += '       fi\n'
1206          txt += '   fi\n'
1207          txt += '   \n'
1061        txt += '   string=`cat /etc/redhat-release`\n'
1062        txt += '   echo $string\n'
1063        txt += '   if [[ $string = *alhalla* ]]; then\n'
1064        txt += '       echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
1065        txt += '   elif [[ $string = *Enterprise* ]] || [[ $string = *cientific* ]]; then\n'
1066        txt += '       export SCRAM_ARCH=slc3_ia32_gcc323\n'
1067        txt += '       echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
1068        txt += '   else\n'
1069        txt += '       echo "SET_CMS_ENV 10033 ==> ERROR OS unknown, LCG environment not initialized"\n'
1070        txt += '       echo "JOB_EXIT_STATUS = 10033"\n'
1071        txt += '       echo "JobExitCode=10033" | tee -a $RUNTIME_AREA/$repo\n'
1072        txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
1073        txt += '       rm -f $RUNTIME_AREA/$repo \n'
1074        txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1075        txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1076        txt += '       exit 1\n'
1077        txt += '   fi\n'
1208          txt += '   echo "SET_CMS_ENV 0 ==> setup cms environment ok"\n'
1209          txt += '   echo "### END SETUP CMS LCG ENVIRONMENT ###"\n'
1210          return txt

Diff Legend

  (no marker)  Removed lines
  +            Added lines
  <            Changed lines (old revision)
  >            Changed lines (new revision)