ViewVC Help
View File | Revision Log | Show Annotations | Root Listing
root/cvsroot/COMP/CRAB/python/cms_cmssw.py
(Generate patch)

Comparing COMP/CRAB/python/cms_cmssw.py (file contents):
Revision 1.33 by mkirn, Fri Jul 28 18:19:34 2006 UTC vs.
Revision 1.191 by spiga, Tue May 27 22:14:26 2008 UTC

# Line 2 | Line 2 | from JobType import JobType
2   from crab_logger import Logger
3   from crab_exceptions import *
4   from crab_util import *
5 < import math
5 > from BlackWhiteListParser import BlackWhiteListParser
6   import common
7 import PsetManipulator  
8
9 import DBSInfo_EDM
10 import DataDiscovery_EDM
11 import DataLocation_EDM
7   import Scram
8 + from LFNBaseName import *
9  
10 < import os, string, re
10 > import os, string, glob
11  
12   class Cmssw(JobType):
13 <    def __init__(self, cfg_params):
13 >    def __init__(self, cfg_params, ncjobs):
14          JobType.__init__(self, 'CMSSW')
15          common.logger.debug(3,'CMSSW::__init__')
16  
17 <        self.analisys_common_info = {}
18 <        # Marco.
17 >        self.argsList = []
18 >
19          self._params = {}
20          self.cfg_params = cfg_params
21 +        # init BlackWhiteListParser
22 +        self.blackWhiteListParser = BlackWhiteListParser(cfg_params)
23 +
24 +        self.MaxTarBallSize = float(self.cfg_params.get('EDG.maxtarballsize',9.5))
25 +
26 +        # number of jobs requested to be created, limit obj splitting
27 +        self.ncjobs = ncjobs
28 +
29          log = common.logger
30 <        
30 >
31          self.scram = Scram.Scram(cfg_params)
28        scramArea = ''
32          self.additional_inbox_files = []
33          self.scriptExe = ''
34          self.executable = ''
35 +        self.executable_arch = self.scram.getArch()
36          self.tgz_name = 'default.tgz'
37 +        self.scriptName = 'CMSSW.sh'
38 +        self.pset = ''  
39 +        self.datasetPath = ''
40  
41 +        # set FJR file name
42 +        self.fjrFileName = 'crab_fjr.xml'
43  
44          self.version = self.scram.getSWVersion()
45 <        self.setParam_('application', self.version)
46 <        common.analisys_common_info['sw_version'] = self.version
47 <        ### FEDE
48 <        common.analisys_common_info['copy_input_data'] = 0
40 <        common.analisys_common_info['events_management'] = 1
41 <
42 <        ### collect Data cards
45 >        version_array = self.version.split('_')
46 >        self.CMSSW_major = 0
47 >        self.CMSSW_minor = 0
48 >        self.CMSSW_patch = 0
49          try:
50 <            tmp =  cfg_params['CMSSW.datasetpath']
51 <            log.debug(6, "CMSSW::CMSSW(): datasetPath = "+tmp)
52 <            if string.lower(tmp)=='none':
53 <                self.datasetPath = None
54 <                self.selectNoInput = 1
49 <            else:
50 <                self.datasetPath = tmp
51 <                self.selectNoInput = 0
52 <        except KeyError:
53 <            msg = "Error: datasetpath not defined "  
50 >            self.CMSSW_major = int(version_array[1])
51 >            self.CMSSW_minor = int(version_array[2])
52 >            self.CMSSW_patch = int(version_array[3])
53 >        except:
54 >            msg = "Cannot parse CMSSW version string: " + self.version + " for major and minor release number!"
55              raise CrabException(msg)
56  
57 <        # ML monitoring
57 <        # split dataset path style: /PreProdR3Minbias/SIM/GEN-SIM
58 <        if not self.datasetPath:
59 <            self.setParam_('dataset', 'None')
60 <            self.setParam_('owner', 'None')
61 <        else:
62 <            datasetpath_split = self.datasetPath.split("/")
63 <            self.setParam_('dataset', datasetpath_split[1])
64 <            self.setParam_('owner', datasetpath_split[-1])
57 >        ### collect Data cards
58  
59 <        self.setTaskid_()
60 <        self.setParam_('taskId', self.cfg_params['taskId'])
59 >        if not cfg_params.has_key('CMSSW.datasetpath'):
60 >            msg = "Error: datasetpath not defined "
61 >            raise CrabException(msg)
62 >        tmp =  cfg_params['CMSSW.datasetpath']
63 >        log.debug(6, "CMSSW::CMSSW(): datasetPath = "+tmp)
64 >        if string.lower(tmp)=='none':
65 >            self.datasetPath = None
66 >            self.selectNoInput = 1
67 >        else:
68 >            self.datasetPath = tmp
69 >            self.selectNoInput = 0
70  
71          self.dataTiers = []
72 +
73 +        self.debug_pset = cfg_params.get('USER.debug_pset',False)
74  
75          ## now the application
76 <        try:
77 <            self.executable = cfg_params['CMSSW.executable']
74 <            self.setParam_('exe', self.executable)
75 <            log.debug(6, "CMSSW::CMSSW(): executable = "+self.executable)
76 <            msg = "Default executable cmsRun overridden. Switch to " + self.executable
77 <            log.debug(3,msg)
78 <        except KeyError:
79 <            self.executable = 'cmsRun'
80 <            self.setParam_('exe', self.executable)
81 <            msg = "User executable not defined. Use cmsRun"
82 <            log.debug(3,msg)
83 <            pass
76 >        self.executable = cfg_params.get('CMSSW.executable','cmsRun')
77 >        log.debug(6, "CMSSW::CMSSW(): executable = "+self.executable)
78  
79 <        try:
80 <            self.pset = cfg_params['CMSSW.pset']
81 <            log.debug(6, "Cmssw::Cmssw(): PSet file = "+self.pset)
79 >        if not cfg_params.has_key('CMSSW.pset'):
80 >            raise CrabException("PSet file missing. Cannot run cmsRun ")
81 >        self.pset = cfg_params['CMSSW.pset']
82 >        log.debug(6, "Cmssw::Cmssw(): PSet file = "+self.pset)
83 >        if self.pset.lower() != 'none' :
84              if (not os.path.exists(self.pset)):
85                  raise CrabException("User defined PSet file "+self.pset+" does not exist")
86 <        except KeyError:
87 <            raise CrabException("PSet file missing. Cannot run cmsRun ")
86 >        else:
87 >            self.pset = None
88  
89          # output files
90 <        try:
91 <            self.output_file = []
90 >        ## stuff which must be returned always via sandbox
91 >        self.output_file_sandbox = []
92  
93 <            tmp = cfg_params['CMSSW.output_file']
94 <            if tmp != '':
95 <                tmpOutFiles = string.split(cfg_params['CMSSW.output_file'],',')
96 <                log.debug(7, 'cmssw::cmssw(): output files '+str(tmpOutFiles))
97 <                for tmp in tmpOutFiles:
98 <                    tmp=string.strip(tmp)
99 <                    self.output_file.append(tmp)
100 <                    pass
101 <            else:
102 <                log.message("No output file defined: only stdout/err will be available")
93 >        # add fjr report by default via sandbox
94 >        self.output_file_sandbox.append(self.fjrFileName)
95 >
96 >        # other output files to be returned via sandbox or copied to SE
97 >        self.output_file = []
98 >        tmp = cfg_params.get('CMSSW.output_file',None)
99 >        if tmp :
100 >            tmpOutFiles = string.split(tmp,',')
101 >            log.debug(7, 'cmssw::cmssw(): output files '+str(tmpOutFiles))
102 >            for tmp in tmpOutFiles:
103 >                tmp=string.strip(tmp)
104 >                self.output_file.append(tmp)
105                  pass
106 <            pass
107 <        except KeyError:
108 <            log.message("No output file defined: only stdout/err will be available")
111 <            pass
106 >        else:
107 >            log.message("No output file defined: only stdout/err and the CRAB Framework Job Report will be available\n")
108 >        pass
109  
110          # script_exe file as additional file in inputSandbox
111 <        try:
112 <            self.scriptExe = cfg_params['USER.script_exe']
113 <            self.additional_inbox_files.append(self.scriptExe)
114 <            if self.scriptExe != '':
115 <               if not os.path.isfile(self.scriptExe):
116 <                  msg ="WARNING. file "+self.scriptExe+" not found"
117 <                  raise CrabException(msg)
118 <        except KeyError:
119 <           pass
120 <                  
111 >        self.scriptExe = cfg_params.get('USER.script_exe',None)
112 >        if self.scriptExe :
113 >            if not os.path.isfile(self.scriptExe):
114 >                msg ="ERROR. file "+self.scriptExe+" not found"
115 >                raise CrabException(msg)
116 >            self.additional_inbox_files.append(string.strip(self.scriptExe))
117 >
118 >        if self.datasetPath == None and self.pset == None and self.scriptExe == '' :
119 >            msg ="Error. script_exe  not defined"
120 >            raise CrabException(msg)
121 >
122          ## additional input files
123 <        try:
123 >        if cfg_params.has_key('USER.additional_input_files'):
124              tmpAddFiles = string.split(cfg_params['USER.additional_input_files'],',')
125              for tmp in tmpAddFiles:
126 <                if not os.path.exists(tmp):
127 <                    raise CrabException("Additional input file not found: "+tmp)
128 <                self.additional_inbox_files.append(string.strip(tmp))
126 >                tmp = string.strip(tmp)
127 >                dirname = ''
128 >                if not tmp[0]=="/": dirname = "."
129 >                files = []
130 >                if string.find(tmp,"*")>-1:
131 >                    files = glob.glob(os.path.join(dirname, tmp))
132 >                    if len(files)==0:
133 >                        raise CrabException("No additional input file found with this pattern: "+tmp)
134 >                else:
135 >                    files.append(tmp)
136 >                for file in files:
137 >                    if not os.path.exists(file):
138 >                        raise CrabException("Additional input file not found: "+file)
139 >                    pass
140 >                    self.additional_inbox_files.append(string.strip(file))
141                  pass
142              pass
143 <        except KeyError:
144 <            pass
135 <
136 <        # files per job
137 <        try:
138 <            self.filesPerJob = int(cfg_params['CMSSW.files_per_jobs']) #Daniele
139 <            self.selectFilesPerJob = 1
140 <        except KeyError:
141 <            self.filesPerJob = 0
142 <            self.selectFilesPerJob = 0
143 >            common.logger.debug(5,"Additional input files: "+str(self.additional_inbox_files))
144 >        pass
145  
146          ## Events per job
147 <        try:
147 >        if cfg_params.has_key('CMSSW.events_per_job'):
148              self.eventsPerJob =int( cfg_params['CMSSW.events_per_job'])
149              self.selectEventsPerJob = 1
150 <        except KeyError:
150 >        else:
151              self.eventsPerJob = -1
152              self.selectEventsPerJob = 0
153 <    
153 >
154          ## number of jobs
155 <        try:
155 >        if cfg_params.has_key('CMSSW.number_of_jobs'):
156              self.theNumberOfJobs =int( cfg_params['CMSSW.number_of_jobs'])
157              self.selectNumberOfJobs = 1
158 <        except KeyError:
158 >        else:
159              self.theNumberOfJobs = 0
160              self.selectNumberOfJobs = 0
161  
162 <        ## source seed for pythia
161 <        try:
162 <            self.sourceSeed = int(cfg_params['CMSSW.pythia_seed'])
163 <        except KeyError:
164 <            self.sourceSeed = None
165 <            common.logger.debug(5,"No seed given")
166 <
167 <        try:
168 <            self.sourceSeedVtx = int(cfg_params['CMSSW.vtx_seed'])
169 <        except KeyError:
170 <            self.sourceSeedVtx = None
171 <            common.logger.debug(5,"No vertex seed given")
172 <
173 <        if not (self.selectFilesPerJob + self.selectEventsPerJob + self.selectNumberOfJobs == 1 ):
174 <            msg = 'Must define either files_per_jobs or events_per_job or number_of_jobs'
175 <            raise CrabException(msg)
176 <
177 <        try:
162 >        if cfg_params.has_key('CMSSW.total_number_of_events'):
163              self.total_number_of_events = int(cfg_params['CMSSW.total_number_of_events'])
164 <        except KeyError:
165 <            msg = 'Must define total_number_of_events'
166 <            raise CrabException(msg)
167 <        
183 <        CEBlackList = []
184 <        try:
185 <            tmpBad = string.split(cfg_params['EDG.ce_black_list'],',')
186 <            for tmp in tmpBad:
187 <                tmp=string.strip(tmp)
188 <                CEBlackList.append(tmp)
189 <        except KeyError:
190 <            pass
191 <
192 <        self.reCEBlackList=[]
193 <        for bad in CEBlackList:
194 <            self.reCEBlackList.append(re.compile( bad ))
195 <
196 <        common.logger.debug(5,'CEBlackList: '+str(CEBlackList))
197 <
198 <        CEWhiteList = []
199 <        try:
200 <            tmpGood = string.split(cfg_params['EDG.ce_white_list'],',')
201 <            for tmp in tmpGood:
202 <                tmp=string.strip(tmp)
203 <                CEWhiteList.append(tmp)
204 <        except KeyError:
205 <            pass
164 >            self.selectTotalNumberEvents = 1
165 >        else:
166 >            self.total_number_of_events = 0
167 >            self.selectTotalNumberEvents = 0
168  
169 <        #print 'CEWhiteList: ',CEWhiteList
170 <        self.reCEWhiteList=[]
171 <        for Good in CEWhiteList:
172 <            self.reCEWhiteList.append(re.compile( Good ))
169 >        if self.pset != None:
170 >             if ( (self.selectTotalNumberEvents + self.selectEventsPerJob + self.selectNumberOfJobs) != 2 ):
171 >                 msg = 'Must define exactly two of total_number_of_events, events_per_job, or number_of_jobs.'
172 >                 raise CrabException(msg)
173 >        else:
174 >             if (self.selectNumberOfJobs == 0):
175 >                 msg = 'Must specify  number_of_jobs.'
176 >                 raise CrabException(msg)
177 >
178 >        ## New method of dealing with seeds
179 >        self.incrementSeeds = []
180 >        self.preserveSeeds = []
181 >        if cfg_params.has_key('CMSSW.preserve_seeds'):
182 >            tmpList = cfg_params['CMSSW.preserve_seeds'].split(',')
183 >            for tmp in tmpList:
184 >                tmp.strip()
185 >                self.preserveSeeds.append(tmp)
186 >        if cfg_params.has_key('CMSSW.increment_seeds'):
187 >            tmpList = cfg_params['CMSSW.increment_seeds'].split(',')
188 >            for tmp in tmpList:
189 >                tmp.strip()
190 >                self.incrementSeeds.append(tmp)
191 >
192 >        ## Old method of dealing with seeds
193 >        ## FUTURE: This is for old CMSSW and old CRAB. Can throw exceptions after a couple of CRAB releases and then
194 >        ## remove
195 >        self.sourceSeed = cfg_params.get('CMSSW.pythia_seed',None)
196 >        if self.sourceSeed:
197 >            print "pythia_seed is a deprecated parameter. Use preserve_seeds or increment_seeds in the future.\n","Added to increment_seeds."
198 >            self.incrementSeeds.append('sourceSeed')
199 >            self.incrementSeeds.append('theSource')
200 >
201 >        self.sourceSeedVtx = cfg_params.get('CMSSW.vtx_seed',None)
202 >        if self.sourceSeedVtx:
203 >            print "vtx_seed is a deprecated parameter. Use preserve_seeds or increment_seeds in the future.\n","Added to increment_seeds."
204 >            self.incrementSeeds.append('VtxSmeared')
205 >
206 >        self.sourceSeedG4 = cfg_params.get('CMSSW.g4_seed',None)
207 >        if self.sourceSeedG4:
208 >            print "g4_seed is a deprecated parameter. Use preserve_seeds or increment_seeds in the future.\n","Added to increment_seeds."
209 >            self.incrementSeeds.append('g4SimHits')
210 >
211 >        self.sourceSeedMix = cfg_params.get('CMSSW.mix_seed',None)
212 >        if self.sourceSeedMix:
213 >            print "mix_seed is a deprecated parameter. Use preserve_seeds or increment_seeds in the future.\n","Added to increment_seeds."
214 >            self.incrementSeeds.append('mix')
215 >
216 >        self.firstRun = cfg_params.get('CMSSW.first_run',None)
217 >
218 >        if self.pset != None: #CarlosDaniele
219 >            import PsetManipulator as pp
220 >            PsetEdit = pp.PsetManipulator(self.pset) #Daniele Pset
221  
222 <        common.logger.debug(5,'CEWhiteList: '+str(CEWhiteList))
222 >        # Copy/return
223  
224 <        self.PsetEdit = PsetManipulator.PsetManipulator(self.pset) #Daniele Pset
224 >        self.copy_data = int(cfg_params.get('USER.copy_data',0))
225 >        self.return_data = int(cfg_params.get('USER.return_data',0))
226  
227          #DBSDLS-start
228 <        ## Initialize the variables that are extracted from DBS/DLS and needed in other places of the code
228 >        ## Initialize the variables that are extracted from DBS/DLS and needed in other places of the code
229          self.maxEvents=0  # max events available   ( --> check the requested nb. of evts in Creator.py)
230          self.DBSPaths={}  # all dbs paths requested ( --> input to the site local discovery script)
231 +        self.jobDestination=[]  # Site destination(s) for each job (list of lists)
232          ## Perform the data location and discovery (based on DBS/DLS)
233          ## SL: Don't if NONE is specified as input (pythia use case)
234 <        common.analisys_common_info['sites']=None
234 >        blockSites = {}
235          if self.datasetPath:
236 <            self.DataDiscoveryAndLocation(cfg_params)
237 <        #DBSDLS-end          
236 >            blockSites = self.DataDiscoveryAndLocation(cfg_params)
237 >        #DBSDLS-end
238 >
239  
227        self.tgzNameWithPath = self.getTarBall(self.executable)
228    
240          ## Select Splitting
241 <        if self.selectNoInput: self.jobSplittingNoInput()
242 <        elif self.selectFilesPerJob or self.selectEventsPerJob or self.selectNumberOfJobs: self.jobSplittingPerFiles()
241 >        if self.selectNoInput:
242 >            if self.pset == None:
243 >                self.jobSplittingForScript()
244 >            else:
245 >                self.jobSplittingNoInput()
246          else:
247 <            msg = 'Don\'t know how to split...'
234 <            raise CrabException(msg)
247 >            self.jobSplittingByBlocks(blockSites)
248  
249          # modify Pset
250 <        try:
251 <            if (self.datasetPath): # standard job
252 <                #self.PsetEdit.maxEvent(self.eventsPerJob)
253 <                # always process all events in a file
254 <                self.PsetEdit.maxEvent("-1")
255 <                self.PsetEdit.inputModule("INPUT")
256 <
257 <            else:  # pythia like job
258 <                self.PsetEdit.maxEvent(self.eventsPerJob)
259 <                if (self.sourceSeed) :
260 <                    self.PsetEdit.pythiaSeed("INPUT")
248 <                    if (self.sourceSeedVtx) :
249 <                        self.PsetEdit.pythiaSeedVtx("INPUTVTX")
250 <            self.PsetEdit.psetWriter(self.configFilename())
251 <        except:
252 <            msg='Error while manipuliating ParameterSet: exiting...'
253 <            raise CrabException(msg)
250 >        if self.pset != None:
251 >            try:
252 >                # Add FrameworkJobReport to parameter-set, set max events.
253 >                # Reset later for data jobs by writeCFG which does all modifications
254 >                PsetEdit.addCrabFJR(self.fjrFileName) # FUTURE: Job report addition not needed by CMSSW>1.5
255 >                PsetEdit.maxEvent(self.eventsPerJob)
256 >                PsetEdit.psetWriter(self.configFilename())
257 >            except:
258 >                msg='Error while manipulating ParameterSet: exiting...'
259 >                raise CrabException(msg)
260 >        self.tgzNameWithPath = self.getTarBall(self.executable)
261  
262      def DataDiscoveryAndLocation(self, cfg_params):
263  
264 +        import DataDiscovery
265 +        import DataLocation
266          common.logger.debug(10,"CMSSW::DataDiscoveryAndLocation()")
267  
268          datasetPath=self.datasetPath
269  
261        ## TODO
262        dataTiersList = ""
263        dataTiers = dataTiersList.split(',')
264
270          ## Contact the DBS
271 +        common.logger.message("Contacting Data Discovery Services ...")
272          try:
273 <            self.pubdata=DataDiscovery_EDM.DataDiscovery_EDM(datasetPath, dataTiers, cfg_params)
273 >            self.pubdata=DataDiscovery.DataDiscovery(datasetPath, cfg_params)
274              self.pubdata.fetchDBSInfo()
275  
276 <        except DataDiscovery_EDM.NotExistingDatasetError, ex :
276 >        except DataDiscovery.NotExistingDatasetError, ex :
277              msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
278              raise CrabException(msg)
279 <
274 <        except DataDiscovery_EDM.NoDataTierinProvenanceError, ex :
279 >        except DataDiscovery.NoDataTierinProvenanceError, ex :
280              msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
281              raise CrabException(msg)
282 <        except DataDiscovery_EDM.DataDiscoveryError, ex:
283 <            msg = 'ERROR ***: failed Data Discovery in DBS  %s'%ex.getErrorMessage()
282 >        except DataDiscovery.DataDiscoveryError, ex:
283 >            msg = 'ERROR ***: failed Data Discovery in DBS :  %s'%ex.getErrorMessage()
284              raise CrabException(msg)
285  
286 <        ## get list of all required data in the form of dbs paths  (dbs path = /dataset/datatier/owner)
287 <        ## self.DBSPaths=self.pubdata.getDBSPaths()
288 <        common.logger.message("Required data are :"+self.datasetPath)
284 <
285 <        filesbyblock=self.pubdata.getFiles()
286 < #        print filesbyblock
287 <        self.AllInputFiles=filesbyblock.values()
288 <        self.files = self.AllInputFiles        
286 >        self.filesbyblock=self.pubdata.getFiles()
287 >        self.eventsbyblock=self.pubdata.getEventsPerBlock()
288 >        self.eventsbyfile=self.pubdata.getEventsPerFile()
289  
290          ## get max number of events
291 <        #common.logger.debug(10,"number of events for primary fileblocks %i"%self.pubdata.getMaxEvents())
292 <        self.maxEvents=self.pubdata.getMaxEvents() ##  self.maxEvents used in Creator.py
293 <        common.logger.message("\nThe number of available events is %s"%self.maxEvents)
291 >        self.maxEvents=self.pubdata.getMaxEvents()
292  
293          ## Contact the DLS and build a list of sites hosting the fileblocks
294          try:
295 <            dataloc=DataLocation_EDM.DataLocation_EDM(filesbyblock.keys(),cfg_params)
295 >            dataloc=DataLocation.DataLocation(self.filesbyblock.keys(),cfg_params)
296              dataloc.fetchDLSInfo()
297 <        except DataLocation_EDM.DataLocationError , ex:
297 >        except DataLocation.DataLocationError , ex:
298              msg = 'ERROR ***: failed Data Location in DLS \n %s '%ex.getErrorMessage()
299              raise CrabException(msg)
302        
303        allsites=dataloc.getSites()
304        common.logger.debug(5,"sites are %s"%allsites)
305        sites=self.checkBlackList(allsites)
306        common.logger.debug(5,"sites are (after black list) %s"%sites)
307        sites=self.checkWhiteList(sites)
308        common.logger.debug(5,"sites are (after white list) %s"%sites)
300  
310        if len(sites)==0:
311            msg = 'No sites hosting all the needed data! Exiting... '
312            raise CrabException(msg)
301  
302 <        common.logger.message("List of Sites ("+str(len(sites))+") hosting the data : "+str(sites))
303 <        common.logger.debug(6, "List of Sites: "+str(sites))
304 <        common.analisys_common_info['sites']=sites    ## used in SchedulerEdg.py in createSchScript
305 <        self.setParam_('TargetCE', ','.join(sites))
306 <        return
307 <    
308 <    def jobSplittingPerFiles(self):
321 <        """
322 <        Perform job splitting based on number of files to be accessed per job
323 <        """
324 <        common.logger.debug(5,'Splitting per input files')
325 <        common.logger.message('Required '+str(self.total_number_of_events)+' events in total ')
326 <        common.logger.message('Available '+str(self.maxEvents)+' events in total ')
327 <        common.logger.message('Required '+str(self.filesPerJob)+' files per job ')
328 <        common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
329 <        common.logger.message('Required '+str(self.eventsPerJob)+' events per job')
302 >        sites = dataloc.getSites()
303 >        allSites = []
304 >        listSites = sites.values()
305 >        for listSite in listSites:
306 >            for oneSite in listSite:
307 >                allSites.append(oneSite)
308 >        allSites = self.uniquelist(allSites)
309  
310 <        ## if asked to process all events, do it
311 <        if self.total_number_of_events == -1:
312 <            self.total_number_of_events=self.maxEvents
310 >        # screen output
311 >        common.logger.message("Requested dataset: " + datasetPath + " has " + str(self.maxEvents) + " events in " + str(len(self.filesbyblock.keys())) + " blocks.\n")
312 >
313 >        return sites
314 >
315 >    def jobSplittingByBlocks(self, blockSites):
316 >        """
317 >        Perform job splitting. Jobs run over an integer number of files
318 >        and no more than one block.
319 >        ARGUMENT: blockSites: dictionary with blocks as keys and list of host sites as values
320 >        REQUIRES: self.selectTotalNumberEvents, self.selectEventsPerJob, self.selectNumberofJobs,
321 >                  self.total_number_of_events, self.eventsPerJob, self.theNumberOfJobs,
322 >                  self.maxEvents, self.filesbyblock
323 >        SETS: self.jobDestination - Site destination(s) for each job (a list of lists)
324 >              self.total_number_of_jobs - Total # of jobs
325 >              self.list_of_args - File(s) job will run on (a list of lists)
326 >        """
327 >
328 >        # ---- Handle the possible job splitting configurations ---- #
329 >        if (self.selectTotalNumberEvents):
330 >            totalEventsRequested = self.total_number_of_events
331 >        if (self.selectEventsPerJob):
332 >            eventsPerJobRequested = self.eventsPerJob
333 >            if (self.selectNumberOfJobs):
334 >                totalEventsRequested = self.theNumberOfJobs * self.eventsPerJob
335 >
336 >        # If user requested all the events in the dataset
337 >        if (totalEventsRequested == -1):
338 >            eventsRemaining=self.maxEvents
339 >        # If user requested more events than are in the dataset
340 >        elif (totalEventsRequested > self.maxEvents):
341 >            eventsRemaining = self.maxEvents
342 >            common.logger.message("Requested "+str(self.total_number_of_events)+ " events, but only "+str(self.maxEvents)+" events are available.")
343 >        # If user requested less events than are in the dataset
344          else:
345 <            if self.total_number_of_events>self.maxEvents:
336 <                common.logger.message("Asked "+str(self.total_number_of_events)+" but only "+str(self.maxEvents)+" available.")
337 <                self.total_number_of_events=self.maxEvents
338 <            pass
345 >            eventsRemaining = totalEventsRequested
346  
347 <        ## TODO: SL need to have (from DBS) a detailed list of how many events per each file
348 <        n_tot_files = (len(self.files[0]))
349 <        ## SL: this is wrong if the files have different number of events
343 <        evPerFile = int(self.maxEvents)/n_tot_files
344 <
345 <        common.logger.debug(5,'Events per File '+str(evPerFile))
346 <
347 <        ## compute job splitting parameters: filesPerJob, eventsPerJob and theNumberOfJobs
348 <        if self.selectFilesPerJob:
349 <            ## user define files per event.
350 <            filesPerJob = self.filesPerJob
351 <            eventsPerJob = filesPerJob*evPerFile
352 <            theNumberOfJobs = int(self.total_number_of_events*1./eventsPerJob)
353 <            check = int(self.total_number_of_events) - (theNumberOfJobs*eventsPerJob)
354 <            if check > 0:
355 <                theNumberOfJobs +=1
356 <                filesLastJob = int(check*1./evPerFile+0.5)
357 <                common.logger.message('Warning: last job will be created with '+str(check)+' files')
358 <            else:
359 <                filesLastJob = filesPerJob
347 >        # If user requested more events per job than are in the dataset
348 >        if (self.selectEventsPerJob and eventsPerJobRequested > self.maxEvents):
349 >            eventsPerJobRequested = self.maxEvents
350  
351 <        elif self.selectNumberOfJobs:
352 <            ## User select the number of jobs: last might be bigger to match request of events
363 <            theNumberOfJobs =  self.theNumberOfJobs
364 <
365 <            eventsPerJob = self.total_number_of_events/theNumberOfJobs
366 <            filesPerJob = int(eventsPerJob/evPerFile)
367 <            if (filesPerJob==0) : filesPerJob=1
368 <            check = int(self.total_number_of_events) - (int(theNumberOfJobs)*filesPerJob*evPerFile)
369 <            if not check == 0:
370 <                if check<0:
371 <                    missingFiles = int(check/evPerFile)
372 <                    additionalJobs = int(missingFiles/filesPerJob)
373 <                    #print missingFiles, additionalJobs
374 <                    theNumberOfJobs+=additionalJobs
375 <                    common.logger.message('Warning: will create only '+str(theNumberOfJobs)+' jobs')
376 <                    check = int(self.total_number_of_events) - (int(theNumberOfJobs)*filesPerJob*evPerFile)
377 <                    
378 <                if check >0 :
379 <                    filesLastJob = filesPerJob+int(check*1./evPerFile+0.5)
380 <                    common.logger.message('Warning: last job will be created with '+str(filesLastJob*evPerFile)+' events')
381 <                else:
382 <                    filesLastJob = filesPerJob
383 <            else:
384 <                filesLastJob = filesPerJob
385 <        elif self.selectEventsPerJob:
386 <            # SL case if asked events per job
387 <            ## estimate the number of files per job to match the user requirement
388 <            filesPerJob = int(float(self.eventsPerJob)/float(evPerFile))
389 <            if filesPerJob==0: filesPerJob=1
390 <            common.logger.debug(5,"filesPerJob "+str(filesPerJob))
391 <            if (filesPerJob==0): filesPerJob=1
392 <            eventsPerJob=filesPerJob*evPerFile
393 <            theNumberOfJobs = int(self.total_number_of_events)/int(eventsPerJob)
394 <            check = int(self.total_number_of_events) - (int(theNumberOfJobs)*eventsPerJob)
395 <            if not check == 0:
396 <                missingFiles = int(check/evPerFile)
397 <                additionalJobs = int(missingFiles/filesPerJob)
398 <                if ( additionalJobs>0) : theNumberOfJobs+=additionalJobs
399 <                check = int(self.total_number_of_events) - (int(theNumberOfJobs)*eventsPerJob)
400 <                if not check == 0:
401 <                    if (check <0 ):
402 <                        filesLastJob = filesPerJob+int(check*1./evPerFile-0.5)
403 <                    else:
404 <                        theNumberOfJobs+=1
405 <                        filesLastJob = int(check*1./evPerFile+0.5)
351 >        # For user info at end
352 >        totalEventCount = 0
353  
354 <                    common.logger.message('Warning: last job will be created with '+str(filesLastJob*evPerFile)+' events')
355 <                else:
409 <                    filesLastJob = filesPerJob
410 <            else:
411 <                filesLastJob = filesPerJob
412 <        
413 <        self.total_number_of_jobs = theNumberOfJobs
354 >        if (self.selectTotalNumberEvents and self.selectNumberOfJobs):
355 >            eventsPerJobRequested = int(eventsRemaining/self.theNumberOfJobs)
356  
357 <        totalEventsToBeUsed=theNumberOfJobs*filesPerJob*evPerFile
358 <        if not check == 0:
417 <        #    print (theNumberOfJobs-1)*filesPerJob*evPerFile,filesLastJob*evPerFile
418 <            totalEventsToBeUsed=(theNumberOfJobs-1)*filesPerJob*evPerFile+filesLastJob*evPerFile
357 >        if (self.selectNumberOfJobs):
358 >            common.logger.message("May not create the exact number_of_jobs requested.")
359  
360 <        common.logger.message(str(self.total_number_of_jobs)+' jobs will be created, each for '+str(filesPerJob*evPerFile)+' events, for a total of '+str(totalEventsToBeUsed)+' events')
360 >        if ( self.ncjobs == 'all' ) :
361 >            totalNumberOfJobs = 999999999
362 >        else :
363 >            totalNumberOfJobs = self.ncjobs
364  
365 <        totalFilesToBeUsed=filesPerJob*(theNumberOfJobs-1)+filesLastJob
365 >        blocks = blockSites.keys()
366 >        blockCount = 0
367 >        # Backup variable in case self.maxEvents counted events in a non-included block
368 >        numBlocksInDataset = len(blocks)
369  
370 <        ## set job arguments (files)
370 >        jobCount = 0
371          list_of_lists = []
426        lastFile=0
427        for i in range(0, int(totalFilesToBeUsed), filesPerJob)[:-1]:
428            parString = "\\{"
429            
430            lastFile=i+filesPerJob
431            params = self.files[0][i: lastFile]
432            for i in range(len(params) - 1):
433                parString += '\\\"' + params[i] + '\\\"\,'
434            
435            parString += '\\\"' + params[len(params) - 1] + '\\\"\\}'
436            list_of_lists.append([parString])
437            pass
372  
373 <        ## last job
374 <        parString = "\\{"
375 <        
376 <        params = self.files[0][lastFile: lastFile+filesLastJob]
377 <        for i in range(len(params) - 1):
378 <            parString += '\\\"' + params[i] + '\\\"\,'
379 <        
380 <        parString += '\\\"' + params[len(params) - 1] + '\\\"\\}'
381 <        list_of_lists.append([parString])
382 <        pass
373 >        # list tracking which jobs are in which jobs belong to which block
374 >        jobsOfBlock = {}
375 >
376 >        # ---- Iterate over the blocks in the dataset until ---- #
377 >        # ---- we've met the requested total # of events    ---- #
378 >        while ( (eventsRemaining > 0) and (blockCount < numBlocksInDataset) and (jobCount < totalNumberOfJobs)):
379 >            block = blocks[blockCount]
380 >            blockCount += 1
381 >            if block not in jobsOfBlock.keys() :
382 >                jobsOfBlock[block] = []
383 >
384 >            if self.eventsbyblock.has_key(block) :
385 >                numEventsInBlock = self.eventsbyblock[block]
386 >                common.logger.debug(5,'Events in Block File '+str(numEventsInBlock))
387 >
388 >                files = self.filesbyblock[block]
389 >                numFilesInBlock = len(files)
390 >                if (numFilesInBlock <= 0):
391 >                    continue
392 >                fileCount = 0
393 >
394 >                # ---- New block => New job ---- #
395 >                parString = ""
396 >                # counter for number of events in files currently worked on
397 >                filesEventCount = 0
398 >                # flag if next while loop should touch new file
399 >                newFile = 1
400 >                # job event counter
401 >                jobSkipEventCount = 0
402 >
403 >                # ---- Iterate over the files in the block until we've met the requested ---- #
404 >                # ---- total # of events or we've gone over all the files in this block  ---- #
405 >                while ( (eventsRemaining > 0) and (fileCount < numFilesInBlock) and (jobCount < totalNumberOfJobs) ):
406 >                    file = files[fileCount]
407 >                    if newFile :
408 >                        try:
409 >                            numEventsInFile = self.eventsbyfile[file]
410 >                            common.logger.debug(6, "File "+str(file)+" has "+str(numEventsInFile)+" events")
411 >                            # increase filesEventCount
412 >                            filesEventCount += numEventsInFile
413 >                            # Add file to current job
414 >                            parString += '\\\"' + file + '\\\"\,'
415 >                            newFile = 0
416 >                        except KeyError:
417 >                            common.logger.message("File "+str(file)+" has unknown number of events: skipping")
418 >
419 >                    eventsPerJobRequested = min(eventsPerJobRequested, eventsRemaining)
420 >                    # if less events in file remain than eventsPerJobRequested
421 >                    if ( filesEventCount - jobSkipEventCount < eventsPerJobRequested):
422 >                        # if last file in block
423 >                        if ( fileCount == numFilesInBlock-1 ) :
424 >                            # end job using last file, use remaining events in block
425 >                            # close job and touch new file
426 >                            fullString = parString[:-2]
427 >                            list_of_lists.append([fullString,str(-1),str(jobSkipEventCount)])
428 >                            common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(filesEventCount - jobSkipEventCount)+" events (last file in block).")
429 >                            self.jobDestination.append(blockSites[block])
430 >                            common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
431 >                            # fill jobs of block dictionary
432 >                            jobsOfBlock[block].append(jobCount+1)
433 >                            # reset counter
434 >                            jobCount = jobCount + 1
435 >                            totalEventCount = totalEventCount + filesEventCount - jobSkipEventCount
436 >                            eventsRemaining = eventsRemaining - filesEventCount + jobSkipEventCount
437 >                            jobSkipEventCount = 0
438 >                            # reset file
439 >                            parString = ""
440 >                            filesEventCount = 0
441 >                            newFile = 1
442 >                            fileCount += 1
443 >                        else :
444 >                            # go to next file
445 >                            newFile = 1
446 >                            fileCount += 1
447 >                    # if events in file equal to eventsPerJobRequested
448 >                    elif ( filesEventCount - jobSkipEventCount == eventsPerJobRequested ) :
449 >                        # close job and touch new file
450 >                        fullString = parString[:-2]
451 >                        list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
452 >                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
453 >                        self.jobDestination.append(blockSites[block])
454 >                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
455 >                        jobsOfBlock[block].append(jobCount+1)
456 >                        # reset counter
457 >                        jobCount = jobCount + 1
458 >                        totalEventCount = totalEventCount + eventsPerJobRequested
459 >                        eventsRemaining = eventsRemaining - eventsPerJobRequested
460 >                        jobSkipEventCount = 0
461 >                        # reset file
462 >                        parString = ""
463 >                        filesEventCount = 0
464 >                        newFile = 1
465 >                        fileCount += 1
466 >
467 >                    # if more events in file remain than eventsPerJobRequested
468 >                    else :
469 >                        # close job but don't touch new file
470 >                        fullString = parString[:-2]
471 >                        list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
472 >                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
473 >                        self.jobDestination.append(blockSites[block])
474 >                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
475 >                        jobsOfBlock[block].append(jobCount+1)
476 >                        # increase counter
477 >                        jobCount = jobCount + 1
478 >                        totalEventCount = totalEventCount + eventsPerJobRequested
479 >                        eventsRemaining = eventsRemaining - eventsPerJobRequested
480 >                        # calculate skip events for last file
481 >                        # use filesEventCount (contains several files), jobSkipEventCount and eventsPerJobRequest
482 >                        jobSkipEventCount = eventsPerJobRequested - (filesEventCount - jobSkipEventCount - self.eventsbyfile[file])
483 >                        # remove all but the last file
484 >                        filesEventCount = self.eventsbyfile[file]
485 >                        parString = '\\\"' + file + '\\\"\,'
486 >                    pass # END if
487 >                pass # END while (iterate over files in the block)
488 >        pass # END while (iterate over blocks in the dataset)
489 >        self.ncjobs = self.total_number_of_jobs = jobCount
490 >        if (eventsRemaining > 0 and jobCount < totalNumberOfJobs ):
491 >            common.logger.message("Could not run on all requested events because some blocks not hosted at allowed sites.")
492 >        common.logger.message(str(jobCount)+" job(s) can run on "+str(totalEventCount)+" events.\n")
493 >
494 >        # screen output
495 >        screenOutput = "List of jobs and available destination sites:\n\n"
496 >
497 >        # keep trace of block with no sites to print a warning at the end
498 >        noSiteBlock = []
499 >        bloskNoSite = []
500 >
501 >        blockCounter = 0
502 >        for block in blocks:
503 >            if block in jobsOfBlock.keys() :
504 >                blockCounter += 1
505 >                screenOutput += "Block %5i: jobs %20s: sites: %s\n" % (blockCounter,spanRanges(jobsOfBlock[block]),
506 >                    ','.join(self.blackWhiteListParser.checkWhiteList(self.blackWhiteListParser.checkBlackList(blockSites[block],block),block)))
507 >                if len(self.blackWhiteListParser.checkWhiteList(self.blackWhiteListParser.checkBlackList(blockSites[block],block),block)) == 0:
508 >                    noSiteBlock.append( spanRanges(jobsOfBlock[block]) )
509 >                    bloskNoSite.append( blockCounter )
510 >
511 >        common.logger.message(screenOutput)
512 >        if len(noSiteBlock) > 0 and len(bloskNoSite) > 0:
513 >            msg = 'WARNING: No sites are hosting any part of data for block:\n                '
514 >            virgola = ""
515 >            if len(bloskNoSite) > 1:
516 >                virgola = ","
517 >            for block in bloskNoSite:
518 >                msg += ' ' + str(block) + virgola
519 >            msg += '\n               Related jobs:\n                 '
520 >            virgola = ""
521 >            if len(noSiteBlock) > 1:
522 >                virgola = ","
523 >            for range_jobs in noSiteBlock:
524 >                msg += str(range_jobs) + virgola
525 >            msg += '\n               will not be submitted and this block of data can not be analyzed!\n'
526 >            if self.cfg_params.has_key('EDG.se_white_list'):
527 >                msg += 'WARNING: SE White List: '+self.cfg_params['EDG.se_white_list']+'\n'
528 >                msg += '(Hint: By whitelisting you force the job to run at this particular site(s).\n'
529 >                msg += 'Please check if the dataset is available at this site!)\n'
530 >            if self.cfg_params.has_key('EDG.ce_white_list'):
531 >                msg += 'WARNING: CE White List: '+self.cfg_params['EDG.ce_white_list']+'\n'
532 >                msg += '(Hint: By whitelisting you force the job to run at this particular site(s).\n'
533 >                msg += 'Please check if the dataset is available at this site!)\n'
534 >
535 >            common.logger.message(msg)
536  
537          self.list_of_args = list_of_lists
451        # print self.list_of_args[0]
538          return
539  
540      def jobSplittingNoInput(self):
# Line 456 | Line 542 | class Cmssw(JobType):
542          Perform job splitting based on number of event per job
543          """
544          common.logger.debug(5,'Splitting per events')
545 <        common.logger.message('Required '+str(self.eventsPerJob)+' events per job ')
546 <        common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
547 <        common.logger.message('Required '+str(self.total_number_of_events)+' events in total ')
545 >
546 >        if (self.selectEventsPerJob):
547 >            common.logger.message('Required '+str(self.eventsPerJob)+' events per job ')
548 >        if (self.selectNumberOfJobs):
549 >            common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
550 >        if (self.selectTotalNumberEvents):
551 >            common.logger.message('Required '+str(self.total_number_of_events)+' events in total ')
552  
553          if (self.total_number_of_events < 0):
554              msg='Cannot split jobs per Events with "-1" as total number of events'
555              raise CrabException(msg)
556  
557          if (self.selectEventsPerJob):
558 <            self.total_number_of_jobs = int(self.total_number_of_events/self.eventsPerJob)
558 >            if (self.selectTotalNumberEvents):
559 >                self.total_number_of_jobs = int(self.total_number_of_events/self.eventsPerJob)
560 >            elif(self.selectNumberOfJobs) :
561 >                self.total_number_of_jobs =self.theNumberOfJobs
562 >                self.total_number_of_events =int(self.theNumberOfJobs*self.eventsPerJob)
563 >
564          elif (self.selectNumberOfJobs) :
565              self.total_number_of_jobs = self.theNumberOfJobs
566              self.eventsPerJob = int(self.total_number_of_events/self.total_number_of_jobs)
# Line 477 | Line 572 | class Cmssw(JobType):
572  
573          common.logger.debug(5,'Check  '+str(check))
574  
575 <        common.logger.message(str(self.total_number_of_jobs)+' jobs will be created, each for '+str(self.eventsPerJob)+' for a total of '+str(self.total_number_of_jobs*self.eventsPerJob)+' events')
575 >        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created, each for '+str(self.eventsPerJob)+' for a total of '+str(self.total_number_of_jobs*self.eventsPerJob)+' events')
576          if check > 0:
577 <            common.logger.message('Warning: asked '+str(self.total_number_of_events)+' but will do only '+str(int(self.total_number_of_jobs)*self.eventsPerJob))
483 <
577 >            common.logger.message('Warning: asked '+str(self.total_number_of_events)+' but can do only '+str(int(self.total_number_of_jobs)*self.eventsPerJob))
578  
579          # argument is seed number.$i
580          self.list_of_args = []
581          for i in range(self.total_number_of_jobs):
582 <            if (self.sourceSeed):
583 <                if (self.sourceSeedVtx):
584 <                    ## pythia + vtx random seed
585 <                    self.list_of_args.append([
586 <                                              str(self.sourceSeed)+str(i),
587 <                                              str(self.sourceSeedVtx)+str(i)
588 <                                              ])
495 <                else:
496 <                    ## only pythia random seed
497 <                    self.list_of_args.append([(str(self.sourceSeed)+str(i))])
498 <            else:
499 <                ## no random seed
500 <                self.list_of_args.append([str(i)])
501 <        #print self.list_of_args
582 >            ## Since there is no input, any site is good
583 >            self.jobDestination.append([""]) #must be empty to write correctly the xml
584 >            args=[]
585 >            if (self.firstRun):
586 >                ## pythia first run
587 >                args.append(str(self.firstRun)+str(i))
588 >            self.list_of_args.append(args)
589  
590          return
591  
592 +
593 +    def jobSplittingForScript(self):
594 +        """
595 +        Perform job splitting based on number of job
596 +        """
597 +        common.logger.debug(5,'Splitting per job')
598 +        common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
599 +
600 +        self.total_number_of_jobs = self.theNumberOfJobs
601 +
602 +        common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
603 +
604 +        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created')
605 +
606 +        # argument is seed number.$i
607 +        self.list_of_args = []
608 +        for i in range(self.total_number_of_jobs):
609 +            self.jobDestination.append([""])
610 +            self.list_of_args.append([str(i)])
611 +        return
612 +
613      def split(self, jobParams):
614 <
507 <        common.jobDB.load()
508 <        #### Fabio
614 >
615          njobs = self.total_number_of_jobs
616          arglist = self.list_of_args
617          # create the empty structure
618          for i in range(njobs):
619              jobParams.append("")
620 <        
620 >
621 >        listID=[]
622 >        listField=[]
623          for job in range(njobs):
624              jobParams[job] = arglist[job]
625 <            # print str(arglist[job])
626 <            # print jobParams[job]
627 <            common.jobDB.setArguments(job, jobParams[job])
625 >            listID.append(job+1)
626 >            job_ToSave ={}
627 >            concString = ' '
628 >            argu=''
629 >            if len(jobParams[job]):
630 >                argu +=   concString.join(jobParams[job] )
631 >            job_ToSave['arguments']= str(job+1)+' '+argu
632 >            job_ToSave['dlsDestination']= self.jobDestination[job]
633 >            listField.append(job_ToSave)
634 >            msg="Job "+str(job)+" Arguments:   "+str(job+1)+" "+argu+"\n"  \
635 >            +"                     Destination: "+str(self.jobDestination[job])
636 >            common.logger.debug(5,msg)
637 >        common._db.updateJob_(listID,listField)
638 >        self.argsList = (len(jobParams[0])+1)
639  
521        common.jobDB.save()
640          return
641 <    
524 <    def getJobTypeArguments(self, nj, sched):
525 <        result = ''
526 <        for i in common.jobDB.arguments(nj):
527 <            result=result+str(i)+" "
528 <        return result
529 <  
641 >
642      def numberOfJobs(self):
531        # Fabio
643          return self.total_number_of_jobs
644  
534    def checkBlackList(self, allSites):
535        if len(self.reCEBlackList)==0: return allSites
536        sites = []
537        for site in allSites:
538            common.logger.debug(10,'Site '+site)
539            good=1
540            for re in self.reCEBlackList:
541                if re.search(site):
542                    common.logger.message('CE in black list, skipping site '+site)
543                    good=0
544                pass
545            if good: sites.append(site)
546        if len(sites) == 0:
547            common.logger.debug(3,"No sites found after BlackList")
548        return sites
549
550    def checkWhiteList(self, allSites):
551
552        if len(self.reCEWhiteList)==0: return allSites
553        sites = []
554        for site in allSites:
555            good=0
556            for re in self.reCEWhiteList:
557                if re.search(site):
558                    common.logger.debug(5,'CE in white list, adding site '+site)
559                    good=1
560                if not good: continue
561                sites.append(site)
562        if len(sites) == 0:
563            common.logger.message("No sites found after WhiteList\n")
564        else:
565            common.logger.debug(5,"Selected sites via WhiteList are "+str(sites)+"\n")
566        return sites
567
645      def getTarBall(self, exe):
646          """
647          Return the TarBall with lib and exe
648          """
649 <        
573 <        # if it exist, just return it
574 <        self.tgzNameWithPath = common.work_space.shareDir()+self.tgz_name
649 >        self.tgzNameWithPath = common.work_space.pathForTgz()+'share/'+self.tgz_name
650          if os.path.exists(self.tgzNameWithPath):
651              return self.tgzNameWithPath
652  
# Line 584 | Line 659 | class Cmssw(JobType):
659  
660          # First of all declare the user Scram area
661          swArea = self.scram.getSWArea_()
587        #print "swArea = ", swArea
588        swVersion = self.scram.getSWVersion()
589        #print "swVersion = ", swVersion
662          swReleaseTop = self.scram.getReleaseTop_()
663 <        #print "swReleaseTop = ", swReleaseTop
592 <        
663 >
664          ## check if working area is release top
665          if swReleaseTop == '' or swArea == swReleaseTop:
666 +            common.logger.debug(3,"swArea = "+swArea+" swReleaseTop ="+swReleaseTop)
667              return
668  
669 <        filesToBeTarred = []
670 <        ## First find the executable
671 <        if (self.executable != ''):
672 <            exeWithPath = self.scram.findFile_(executable)
673 < #           print exeWithPath
674 <            if ( not exeWithPath ):
675 <                raise CrabException('User executable '+executable+' not found')
676 <
677 <            ## then check if it's private or not
678 <            if exeWithPath.find(swReleaseTop) == -1:
679 <                # the exe is private, so we must ship
680 <                common.logger.debug(5,"Exe "+exeWithPath+" to be tarred")
681 <                path = swArea+'/'
682 <                exe = string.replace(exeWithPath, path,'')
683 <                filesToBeTarred.append(exe)
684 <                pass
685 <            else:
686 <                # the exe is from release, we'll find it on WN
687 <                pass
688 <
689 <        ## Now get the libraries: only those in local working area
690 <        libDir = 'lib'
691 <        lib = swArea+'/' +libDir
692 <        common.logger.debug(5,"lib "+lib+" to be tarred")
693 <        if os.path.exists(lib):
694 <            filesToBeTarred.append(libDir)
695 <
696 <        ## Now check if module dir is present
697 <        moduleDir = 'module'
698 <        if os.path.isdir(swArea+'/'+moduleDir):
699 <            filesToBeTarred.append(moduleDir)
700 <
701 <        ## Now check if the Data dir is present
702 <        dataDir = 'src/Data/'
703 <        if os.path.isdir(swArea+'/'+dataDir):
704 <            filesToBeTarred.append(dataDir)
705 <
706 <        ## Create the tar-ball
707 <        if len(filesToBeTarred)>0:
708 <            cwd = os.getcwd()
709 <            os.chdir(swArea)
710 <            tarcmd = 'tar zcvf ' + self.tgzNameWithPath + ' '
711 <            for line in filesToBeTarred:
712 <                tarcmd = tarcmd + line + ' '
713 <            cout = runCommand(tarcmd)
714 <            if not cout:
715 <                raise CrabException('Could not create tar-ball')
716 <            os.chdir(cwd)
717 <        else:
718 <            common.logger.debug(5,"No files to be to be tarred")
719 <        
720 <        return
721 <        
722 <    def wsSetupEnvironment(self, nj):
669 >        import tarfile
670 >        try: # create tar ball
671 >            tar = tarfile.open(self.tgzNameWithPath, "w:gz")
672 >            ## First find the executable
673 >            if (self.executable != ''):
674 >                exeWithPath = self.scram.findFile_(executable)
675 >                if ( not exeWithPath ):
676 >                    raise CrabException('User executable '+executable+' not found')
677 >
678 >                ## then check if it's private or not
679 >                if exeWithPath.find(swReleaseTop) == -1:
680 >                    # the exe is private, so we must ship
681 >                    common.logger.debug(5,"Exe "+exeWithPath+" to be tarred")
682 >                    path = swArea+'/'
683 >                    # distinguish case when script is in user project area or given by full path somewhere else
684 >                    if exeWithPath.find(path) >= 0 :
685 >                        exe = string.replace(exeWithPath, path,'')
686 >                        tar.add(path+exe,exe)
687 >                    else :
688 >                        tar.add(exeWithPath,os.path.basename(executable))
689 >                    pass
690 >                else:
691 >                    # the exe is from release, we'll find it on WN
692 >                    pass
693 >
694 >            ## Now get the libraries: only those in local working area
695 >            libDir = 'lib'
696 >            lib = swArea+'/' +libDir
697 >            common.logger.debug(5,"lib "+lib+" to be tarred")
698 >            if os.path.exists(lib):
699 >                tar.add(lib,libDir)
700 >
701 >            ## Now check if module dir is present
702 >            moduleDir = 'module'
703 >            module = swArea + '/' + moduleDir
704 >            if os.path.isdir(module):
705 >                tar.add(module,moduleDir)
706 >
707 >            ## Now check if any data dir(s) is present
708 >            swAreaLen=len(swArea)
709 >            self.dataExist = False
710 >            for root, dirs, files in os.walk(swArea):
711 >                if "data" in dirs:
712 >                    self.dataExist=True
713 >                    common.logger.debug(5,"data "+root+"/data"+" to be tarred")
714 >                    tar.add(root+"/data",root[swAreaLen:]+"/data")
715 >
716 >            ### CMSSW ParameterSet
717 >            if not self.pset is None:
718 >                cfg_file = common.work_space.jobDir()+self.configFilename()
719 >                tar.add(cfg_file,self.configFilename())
720 >                common.logger.debug(5,"File added to "+self.tgzNameWithPath+" : "+str(tar.getnames()))
721 >
722 >
723 >            ## Add ProdCommon dir to tar
724 >            prodcommonDir = 'ProdCommon'
725 >            prodcommonPath = os.environ['CRABDIR'] + '/' + 'ProdCommon'
726 >            if os.path.isdir(prodcommonPath):
727 >                tar.add(prodcommonPath,prodcommonDir)
728 >            common.logger.debug(5,"Files added to "+self.tgzNameWithPath+" : "+str(tar.getnames()))
729 >
730 >            ##### ML stuff
731 >            ML_file_list=['report.py', 'DashboardAPI.py', 'Logger.py', 'ProcInfo.py', 'apmon.py']
732 >            path=os.environ['CRABDIR'] + '/python/'
733 >            for file in ML_file_list:
734 >                tar.add(path+file,file)
735 >            common.logger.debug(5,"Files added to "+self.tgzNameWithPath+" : "+str(tar.getnames()))
736 >
737 >            ##### Utils
738 >            Utils_file_list=['parseCrabFjr.py','writeCfg.py', 'JobReportErrorCode.py']
739 >            for file in Utils_file_list:
740 >                tar.add(path+file,file)
741 >            common.logger.debug(5,"Files added to "+self.tgzNameWithPath+" : "+str(tar.getnames()))
742 >
743 >            ##### AdditionalFiles
744 >            for file in self.additional_inbox_files:
745 >                tar.add(file,string.split(file,'/')[-1])
746 >            common.logger.debug(5,"Files added to "+self.tgzNameWithPath+" : "+str(tar.getnames()))
747 >
748 >            tar.close()
749 >        except :
750 >            raise CrabException('Could not create tar-ball')
751 >
752 >        ## check for tarball size
753 >        tarballinfo = os.stat(self.tgzNameWithPath)
754 >        if ( tarballinfo.st_size > self.MaxTarBallSize*1024*1024 ) :
755 >            raise CrabException('Input sandbox size of ' + str(float(tarballinfo.st_size)/1024.0/1024.0) + ' MB is larger than the allowed ' + str(self.MaxTarBallSize) + ' MB input sandbox limit and not supported by the used GRID submission system. Please make sure that no unnecessary files are in all data directories in your local CMSSW project area as they are automatically packed into the input sandbox.')
756 >
757 >        ## create tar-ball with ML stuff
758 >
759 >    def wsSetupEnvironment(self, nj=0):
760          """
761          Returns part of a job script which prepares
762          the execution environment for the job 'nj'.
763          """
764 +        if (self.CMSSW_major >= 2 and self.CMSSW_minor >= 1) or (self.CMSSW_major >= 3):
765 +            psetName = 'pset.py'
766 +        else:
767 +            psetName = 'pset.cfg'
768          # Prepare JobType-independent part
769 <        txt = ''
770 <  
771 <        ## OLI_Daniele at this level  middleware already known
659 <
660 <        txt += 'if [ $middleware == LCG ]; then \n'
769 >        txt = '\n#Written by cms_cmssw::wsSetupEnvironment\n'
770 >        txt += 'echo ">>> setup environment"\n'
771 >        txt += 'if [ $middleware == LCG ]; then \n'
772          txt += self.wsSetupCMSLCGEnvironment_()
773          txt += 'elif [ $middleware == OSG ]; then\n'
774 <        txt += '    time=`date -u +"%s"`\n'
775 <        txt += '    WORKING_DIR=$OSG_WN_TMP/cms_$time\n'
776 <        txt += '    echo "Creating working directory: $WORKING_DIR"\n'
777 <        txt += '    /bin/mkdir -p $WORKING_DIR\n'
778 <        txt += '    if [ ! -d $WORKING_DIR ] ;then\n'
668 <        txt += '        echo "SET_CMS_ENV 10016 ==> OSG $WORKING_DIR could not be created on WN `hostname`"\n'
669 <        txt += '        echo "JOB_EXIT_STATUS = 10016"\n'
670 <        txt += '        echo "JobExitCode=10016" | tee -a $RUNTIME_AREA/$repo\n'
671 <        txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
672 <        txt += '        rm -f $RUNTIME_AREA/$repo \n'
673 <        txt += '        echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
674 <        txt += '        echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
675 <        txt += '        exit 1\n'
774 >        txt += '    WORKING_DIR=`/bin/mktemp  -d $OSG_WN_TMP/cms_XXXXXXXXXXXX`\n'
775 >        txt += '    if [ ! $? == 0 ] ;then\n'
776 >        txt += '        echo "ERROR ==> OSG $WORKING_DIR could not be created on WN `hostname`"\n'
777 >        txt += '        job_exit_code=10016\n'
778 >        txt += '        func_exit\n'
779          txt += '    fi\n'
780 +        txt += '    echo ">>> Created working directory: $WORKING_DIR"\n'
781          txt += '\n'
782          txt += '    echo "Change to working directory: $WORKING_DIR"\n'
783          txt += '    cd $WORKING_DIR\n'
784 <        txt += self.wsSetupCMSOSGEnvironment_()
784 >        txt += '    echo ">>> current directory (WORKING_DIR): $WORKING_DIR"\n'
785 >        txt += self.wsSetupCMSOSGEnvironment_()
786          txt += 'fi\n'
787  
788          # Prepare JobType-specific part
789          scram = self.scram.commandName()
790          txt += '\n\n'
791 <        txt += 'echo "### SPECIFIC JOB SETUP ENVIRONMENT ###"\n'
791 >        txt += 'echo ">>> specific cmssw setup environment:"\n'
792 >        txt += 'echo "CMSSW_VERSION =  '+self.version+'"\n'
793          txt += scram+' project CMSSW '+self.version+'\n'
794          txt += 'status=$?\n'
795          txt += 'if [ $status != 0 ] ; then\n'
796 <        txt += '   echo "SET_EXE_ENV 10034 ==>ERROR CMSSW '+self.version+' not found on `hostname`" \n'
797 <        txt += '   echo "JOB_EXIT_STATUS = 10034"\n'
798 <        txt += '   echo "JobExitCode=10034" | tee -a $RUNTIME_AREA/$repo\n'
693 <        txt += '   dumpStatus $RUNTIME_AREA/$repo\n'
694 <        txt += '   rm -f $RUNTIME_AREA/$repo \n'
695 <        txt += '   echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
696 <        txt += '   echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
697 <        ## OLI_Daniele
698 <        txt += '    if [ $middleware == OSG ]; then \n'
699 <        txt += '        echo "Remove working directory: $WORKING_DIR"\n'
700 <        txt += '        cd $RUNTIME_AREA\n'
701 <        txt += '        /bin/rm -rf $WORKING_DIR\n'
702 <        txt += '        if [ -d $WORKING_DIR ] ;then\n'
703 <        txt += '            echo "SET_CMS_ENV 10018 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after CMSSW CMSSW_0_6_1 not found on `hostname`"\n'
704 <        txt += '            echo "JOB_EXIT_STATUS = 10018"\n'
705 <        txt += '            echo "JobExitCode=10018" | tee -a $RUNTIME_AREA/$repo\n'
706 <        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
707 <        txt += '            rm -f $RUNTIME_AREA/$repo \n'
708 <        txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
709 <        txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
710 <        txt += '        fi\n'
711 <        txt += '    fi \n'
712 <        txt += '   exit 1 \n'
796 >        txt += '    echo "ERROR ==> CMSSW '+self.version+' not found on `hostname`" \n'
797 >        txt += '    job_exit_code=10034\n'
798 >        txt += '    func_exit\n'
799          txt += 'fi \n'
714        txt += 'echo "CMSSW_VERSION =  '+self.version+'"\n'
800          txt += 'cd '+self.version+'\n'
801 <        ### needed grep for bug in scramv1 ###
801 >        txt += 'SOFTWARE_DIR=`pwd`\n'
802 >        txt += 'echo ">>> current directory (SOFTWARE_DIR): $SOFTWARE_DIR" \n'
803          txt += 'eval `'+scram+' runtime -sh | grep -v SCRAMRT_LSB_JOBNAME`\n'
804 <
804 >        txt += 'if [ $? != 0 ] ; then\n'
805 >        txt += '    echo "ERROR ==> Problem with the command: "\n'
806 >        txt += '    echo "eval \`'+scram+' runtime -sh | grep -v SCRAMRT_LSB_JOBNAME \` at `hostname`"\n'
807 >        txt += '    job_exit_code=10034\n'
808 >        txt += '    func_exit\n'
809 >        txt += 'fi \n'
810          # Handle the arguments:
811          txt += "\n"
812          txt += "## number of arguments (first argument always jobnumber)\n"
813          txt += "\n"
814 < #        txt += "narg=$#\n"
724 <        txt += "if [ $nargs -lt 2 ]\n"
814 >        txt += "if [ $nargs -lt "+str(self.argsList)+" ]\n"
815          txt += "then\n"
816 <        txt += "    echo 'SET_EXE_ENV 1 ==> ERROR Too few arguments' +$nargs+ \n"
817 <        txt += '    echo "JOB_EXIT_STATUS = 50113"\n'
818 <        txt += '    echo "JobExitCode=50113" | tee -a $RUNTIME_AREA/$repo\n'
729 <        txt += '    dumpStatus $RUNTIME_AREA/$repo\n'
730 <        txt += '    rm -f $RUNTIME_AREA/$repo \n'
731 <        txt += '    echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
732 <        txt += '    echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
733 <        ## OLI_Daniele
734 <        txt += '    if [ $middleware == OSG ]; then \n'
735 <        txt += '        echo "Remove working directory: $WORKING_DIR"\n'
736 <        txt += '        cd $RUNTIME_AREA\n'
737 <        txt += '        /bin/rm -rf $WORKING_DIR\n'
738 <        txt += '        if [ -d $WORKING_DIR ] ;then\n'
739 <        txt += '            echo "SET_EXE_ENV 50114 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after Too few arguments for CRAB job wrapper"\n'
740 <        txt += '            echo "JOB_EXIT_STATUS = 50114"\n'
741 <        txt += '            echo "JobExitCode=50114" | tee -a $RUNTIME_AREA/$repo\n'
742 <        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
743 <        txt += '            rm -f $RUNTIME_AREA/$repo \n'
744 <        txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
745 <        txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
746 <        txt += '        fi\n'
747 <        txt += '    fi \n'
748 <        txt += "    exit 1\n"
816 >        txt += "    echo 'ERROR ==> Too few arguments' +$nargs+ \n"
817 >        txt += '    job_exit_code=50113\n'
818 >        txt += "    func_exit\n"
819          txt += "fi\n"
820          txt += "\n"
821  
822          # Prepare job-specific part
823          job = common.job_list[nj]
824 <        pset = os.path.basename(job.configFilename())
825 <        txt += '\n'
826 <        if (self.datasetPath): # standard job
827 <            #txt += 'InputFiles=$2\n'
828 <            txt += 'InputFiles=${args[1]}\n'
759 <            txt += 'echo "Inputfiles:<$InputFiles>"\n'
760 <            txt += 'sed "s#{\'INPUT\'}#$InputFiles#" $RUNTIME_AREA/'+pset+' > pset.cfg\n'
761 <        else:  # pythia like job
762 <            if (self.sourceSeed):
763 <                txt += 'Seed=$2\n'
764 <                txt += 'echo "Seed: <$Seed>"\n'
765 <                txt += 'sed "s#\<INPUT\>#$Seed#" $RUNTIME_AREA/'+pset+' > tmp.cfg\n'
766 <                if (self.sourceSeedVtx):
767 <                    txt += 'VtxSeed=$3\n'
768 <                    txt += 'echo "VtxSeed: <$VtxSeed>"\n'
769 <                    txt += 'sed "s#INPUTVTX#$VtxSeed#" tmp.cfg > pset.cfg\n'
770 <                else:
771 <                    txt += 'mv tmp.cfg pset.cfg\n'
772 <            else:
773 <                txt += '# Copy untouched pset\n'
774 <                txt += 'cp $RUNTIME_AREA/'+pset+' pset.cfg\n'
824 >        if (self.datasetPath):
825 >            txt += '\n'
826 >            txt += 'DatasetPath='+self.datasetPath+'\n'
827 >
828 >            datasetpath_split = self.datasetPath.split("/")
829  
830 +            txt += 'PrimaryDataset='+datasetpath_split[1]+'\n'
831 +            txt += 'DataTier='+datasetpath_split[2]+'\n'
832 +            txt += 'ApplicationFamily=cmsRun\n'
833  
834 <        if len(self.additional_inbox_files) > 0:
835 <            for file in self.additional_inbox_files:
836 <                relFile = file.split("/")[-1]
837 <                txt += 'if [ -e $RUNTIME_AREA/'+relFile+' ] ; then\n'
838 <                txt += '   cp $RUNTIME_AREA/'+relFile+' .\n'
839 <                txt += '   chmod +x '+relFile+'\n'
840 <                txt += 'fi\n'
841 <            pass
834 >        else:
835 >            txt += 'DatasetPath=MCDataTier\n'
836 >            txt += 'PrimaryDataset=null\n'
837 >            txt += 'DataTier=null\n'
838 >            txt += 'ApplicationFamily=MCDataTier\n'
839 >        if self.pset != None:
840 >            pset = os.path.basename(job.configFilename())
841 >            txt += '\n'
842 >            txt += 'cp  $RUNTIME_AREA/'+pset+' .\n'
843 >            if (self.datasetPath): # standard job
844 >                txt += 'InputFiles=${args[1]}; export InputFiles\n'
845 >                txt += 'MaxEvents=${args[2]}; export MaxEvents\n'
846 >                txt += 'SkipEvents=${args[3]}; export SkipEvents\n'
847 >                txt += 'echo "Inputfiles:<$InputFiles>"\n'
848 >                txt += 'echo "MaxEvents:<$MaxEvents>"\n'
849 >                txt += 'echo "SkipEvents:<$SkipEvents>"\n'
850 >            else:  # pythia like job
851 >                txt += 'PreserveSeeds='  + ','.join(self.preserveSeeds)  + '; export PreserveSeeds\n'
852 >                txt += 'IncrementSeeds=' + ','.join(self.incrementSeeds) + '; export IncrementSeeds\n'
853 >                txt += 'echo "PreserveSeeds: <$PreserveSeeds>"\n'
854 >                txt += 'echo "IncrementSeeds:<$IncrementSeeds>"\n'
855 >                if (self.firstRun):
856 >                    txt += 'FirstRun=${args[1]}; export FirstRun\n'
857 >                    txt += 'echo "FirstRun: <$FirstRun>"\n'
858  
859 <        txt += 'echo "### END JOB SETUP ENVIRONMENT ###"\n\n'
859 >            txt += 'mv -f ' + pset + ' ' + psetName + '\n'
860  
861 <        txt += '\n'
862 <        txt += 'echo "***** cat pset.cfg *********"\n'
863 <        txt += 'cat pset.cfg\n'
864 <        txt += 'echo "****** end pset.cfg ********"\n'
865 <        txt += '\n'
866 <        # txt += 'echo "***** cat pset1.cfg *********"\n'
867 <        # txt += 'cat pset1.cfg\n'
868 <        # txt += 'echo "****** end pset1.cfg ********"\n'
861 >
862 >        if self.pset != None:
863 >            # FUTURE: Can simply for 2_1_x and higher
864 >            txt += '\n'
865 >            if self.debug_pset==True:  
866 >                txt += 'echo "***** cat ' + psetName + ' *********"\n'
867 >                txt += 'cat ' + psetName + '\n'
868 >                txt += 'echo "****** end ' + psetName + ' ********"\n'
869 >                txt += '\n'
870 >            txt += 'PSETHASH=`edmConfigHash < ' + psetName + '` \n'
871 >            txt += 'echo "PSETHASH = $PSETHASH" \n'
872 >            txt += '\n'
873          return txt
874  
875 <    def wsBuildExe(self, nj):
875 >    def wsUntarSoftware(self, nj=0):
876          """
877          Put in the script the commands to build an executable
878          or a library.
879          """
880  
881 <        txt = ""
881 >        txt = '\n#Written by cms_cmssw::wsUntarSoftware\n'
882  
883          if os.path.isfile(self.tgzNameWithPath):
884 <            txt += 'echo "tar xzvf $RUNTIME_AREA/'+os.path.basename(self.tgzNameWithPath)+'"\n'
884 >            txt += 'echo ">>> tar xzvf $RUNTIME_AREA/'+os.path.basename(self.tgzNameWithPath)+' :" \n'
885              txt += 'tar xzvf $RUNTIME_AREA/'+os.path.basename(self.tgzNameWithPath)+'\n'
886 +            txt += 'ls -Al \n'
887              txt += 'untar_status=$? \n'
888              txt += 'if [ $untar_status -ne 0 ]; then \n'
889 <            txt += '   echo "SET_EXE 1 ==> ERROR Untarring .tgz file failed"\n'
890 <            txt += '   echo "JOB_EXIT_STATUS = $untar_status" \n'
891 <            txt += '   echo "JobExitCode=$untar_status" | tee -a $RUNTIME_AREA/$repo\n'
814 <            txt += '   if [ $middleware == OSG ]; then \n'
815 <            txt += '       echo "Remove working directory: $WORKING_DIR"\n'
816 <            txt += '       cd $RUNTIME_AREA\n'
817 <            txt += '       /bin/rm -rf $WORKING_DIR\n'
818 <            txt += '       if [ -d $WORKING_DIR ] ;then\n'
819 <            txt += '           echo "SET_EXE 50999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after Untarring .tgz file failed"\n'
820 <            txt += '           echo "JOB_EXIT_STATUS = 50999"\n'
821 <            txt += '           echo "JobExitCode=50999" | tee -a $RUNTIME_AREA/$repo\n'
822 <            txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
823 <            txt += '           rm -f $RUNTIME_AREA/$repo \n'
824 <            txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
825 <            txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
826 <            txt += '       fi\n'
827 <            txt += '   fi \n'
828 <            txt += '   \n'
829 <            txt += '   exit 1 \n'
889 >            txt += '   echo "ERROR ==> Untarring .tgz file failed"\n'
890 >            txt += '   job_exit_code=$untar_status\n'
891 >            txt += '   func_exit\n'
892              txt += 'else \n'
893              txt += '   echo "Successful untar" \n'
894              txt += 'fi \n'
895 +            txt += '\n'
896 +            txt += 'echo ">>> Include ProdCommon in PYTHONPATH:"\n'
897 +            txt += 'if [ -z "$PYTHONPATH" ]; then\n'
898 +            txt += '   export PYTHONPATH=$RUNTIME_AREA/ProdCommon\n'
899 +            txt += 'else\n'
900 +            txt += '   export PYTHONPATH=$RUNTIME_AREA/ProdCommon:${PYTHONPATH}\n'
901 +            txt += 'echo "PYTHONPATH=$PYTHONPATH"\n'
902 +            txt += 'fi\n'
903 +            txt += '\n'
904 +
905              pass
906 <        
906 >
907 >        return txt
908 >
909 >    def wsBuildExe(self, nj=0):
910 >        """
911 >        Put in the script the commands to build an executable
912 >        or a library.
913 >        """
914 >
915 >        txt = '\n#Written by cms_cmssw::wsBuildExe\n'
916 >        txt += 'echo ">>> moving CMSSW software directories in `pwd`" \n'
917 >
918 >        txt += 'rm -r lib/ module/ \n'
919 >        txt += 'mv $RUNTIME_AREA/lib/ . \n'
920 >        txt += 'mv $RUNTIME_AREA/module/ . \n'
921 >        if self.dataExist == True:
922 >            txt += 'rm -r src/ \n'
923 >            txt += 'mv $RUNTIME_AREA/src/ . \n'
924 >        if len(self.additional_inbox_files)>0:
925 >            for file in self.additional_inbox_files:
926 >                txt += 'mv $RUNTIME_AREA/'+os.path.basename(file)+' . \n'
927 >        txt += 'mv $RUNTIME_AREA/ProdCommon/ . \n'
928 >
929 >        txt += 'if [ -z "$PYTHONPATH" ]; then\n'
930 >        txt += '   export PYTHONPATH=$SOFTWARE_DIR/ProdCommon\n'
931 >        txt += 'else\n'
932 >        txt += '   export PYTHONPATH=$SOFTWARE_DIR/ProdCommon:${PYTHONPATH}\n'
933 >        txt += 'echo "PYTHONPATH=$PYTHONPATH"\n'
934 >        txt += 'fi\n'
935 >        txt += '\n'
936 >
937          return txt
938  
939      def modifySteeringCards(self, nj):
940          """
941 <        modify the card provided by the user,
941 >        modify the card provided by the user,
942          writing a new card into share dir
943          """
944 <        
944 >
945      def executableName(self):
946 <        return self.executable
946 >        if self.scriptExe:
947 >            return "sh "
948 >        else:
949 >            return self.executable
950  
951      def executableArgs(self):
952 <        return " -p pset.cfg"
952 >        # FUTURE: This function tests the CMSSW version. Can be simplified as we drop support for old versions
953 >        if self.scriptExe:#CarlosDaniele
954 >            return   self.scriptExe + " $NJob"
955 >        else:
956 >            ex_args = ""
957 >            # FUTURE: This tests the CMSSW version. Can remove code as versions deprecated
958 >            # Framework job report
959 >            if (self.CMSSW_major >= 1 and self.CMSSW_minor >= 5) or (self.CMSSW_major >= 2):
960 >                ex_args += " -j $RUNTIME_AREA/crab_fjr_$NJob.xml"
961 >            # Type of config file
962 >            if self.CMSSW_major >= 2 :
963 >                ex_args += " -p pset.py"
964 >            else:
965 >                ex_args += " -p pset.cfg"
966 >            return ex_args
967  
968      def inputSandbox(self, nj):
969          """
970          Returns a list of filenames to be put in JDL input sandbox.
971          """
972          inp_box = []
854        # dict added to delete duplicate from input sandbox file list
855        seen = {}
856        ## code
973          if os.path.isfile(self.tgzNameWithPath):
974              inp_box.append(self.tgzNameWithPath)
975 <        ## config
976 <        inp_box.append(common.job_list[nj].configFilename())
861 <        ## additional input files
862 <        #for file in self.additional_inbox_files:
863 <        #    inp_box.append(common.work_space.cwdDir()+file)
975 >        wrapper = os.path.basename(str(common._db.queryTask('scriptName')))
976 >        inp_box.append(common.work_space.pathForTgz() +'job/'+ wrapper)
977          return inp_box
978  
979      def outputSandbox(self, nj):
# Line 869 | Line 982 | class Cmssw(JobType):
982          """
983          out_box = []
984  
872        stdout=common.job_list[nj].stdout()
873        stderr=common.job_list[nj].stderr()
874
985          ## User Declared output files
986 <        for out in self.output_file:
987 <            n_out = nj + 1
986 >        for out in (self.output_file+self.output_file_sandbox):
987 >            n_out = nj + 1
988              out_box.append(self.numberFile_(out,str(n_out)))
989          return out_box
880        return []
990  
991      def prepareSteeringCards(self):
992          """
# Line 890 | Line 999 | class Cmssw(JobType):
999          Returns part of a job script which renames the produced files.
1000          """
1001  
1002 <        txt = '\n'
1003 <        txt += '# directory content\n'
1002 >        txt = '\n#Written by cms_cmssw::wsRenameOutput\n'
1003 >        txt += 'echo ">>> current directory (SOFTWARE_DIR): $SOFTWARE_DIR" \n'
1004 >        txt += 'echo ">>> current directory content:"\n'
1005          txt += 'ls \n'
1006 <        file_list = ''
1007 <        for fileWithSuffix in self.output_file:
1006 >        txt += '\n'
1007 >
1008 >        for fileWithSuffix in (self.output_file):
1009              output_file_num = self.numberFile_(fileWithSuffix, '$NJob')
899            file_list=file_list+output_file_num+' '
1010              txt += '\n'
1011              txt += '# check output file\n'
1012 <            txt += 'ls '+fileWithSuffix+'\n'
1013 <            txt += 'ls_result=$?\n'
1014 <            #txt += 'exe_result=$?\n'
1015 <            txt += 'if [ $ls_result -ne 0 ] ; then\n'
1016 <            txt += '   echo "ERROR: Problem with output file"\n'
1017 <            #txt += '   echo "JOB_EXIT_STATUS = $exe_result"\n'
1018 <            #txt += '   echo "JobExitCode=60302" | tee -a $RUNTIME_AREA/$repo\n'
1019 <            #txt += '   dumpStatus $RUNTIME_AREA/$repo\n'
1020 <            ### OLI_DANIELE
1021 <            if common.scheduler.boss_scheduler_name == 'condor_g':
1012 >            txt += 'if [ -e ./'+fileWithSuffix+' ] ; then\n'
1013 >            if (self.copy_data == 1):  # For OSG nodes, file is in $WORKING_DIR, should not be moved to $RUNTIME_AREA
1014 >                txt += '    mv '+fileWithSuffix+' '+output_file_num+'\n'
1015 >                txt += '    ln -s `pwd`/'+output_file_num+' $RUNTIME_AREA/'+fileWithSuffix+'\n'
1016 >            else:
1017 >                txt += '    mv '+fileWithSuffix+' $RUNTIME_AREA/'+output_file_num+'\n'
1018 >                txt += '    ln -s $RUNTIME_AREA/'+output_file_num+' $RUNTIME_AREA/'+fileWithSuffix+'\n'
1019 >            txt += 'else\n'
1020 >            txt += '    job_exit_code=60302\n'
1021 >            txt += '    echo "WARNING: Output file '+fileWithSuffix+' not found"\n'
1022 >            if common.scheduler.name().upper() == 'CONDOR_G':
1023                  txt += '    if [ $middleware == OSG ]; then \n'
1024                  txt += '        echo "prepare dummy output file"\n'
1025                  txt += '        echo "Processing of job output failed" > $RUNTIME_AREA/'+output_file_num+'\n'
1026                  txt += '    fi \n'
916            txt += 'else\n'
917            txt += '   cp '+fileWithSuffix+' $RUNTIME_AREA/'+output_file_num+'\n'
1027              txt += 'fi\n'
1028 <      
1029 <        txt += 'cd $RUNTIME_AREA\n'
1030 <        file_list=file_list[:-1]
1031 <        txt += 'file_list="'+file_list+'"\n'
1032 <        txt += 'cd $RUNTIME_AREA\n'
1033 <        ### OLI_DANIELE
1034 <        txt += 'if [ $middleware == OSG ]; then\n'  
1035 <        txt += '    cd $RUNTIME_AREA\n'
1036 <        txt += '    echo "Remove working directory: $WORKING_DIR"\n'
928 <        txt += '    /bin/rm -rf $WORKING_DIR\n'
929 <        txt += '    if [ -d $WORKING_DIR ] ;then\n'
930 <        txt += '        echo "SET_EXE 60999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after cleanup of WN"\n'
931 <        txt += '        echo "JOB_EXIT_STATUS = 60999"\n'
932 <        txt += '        echo "JobExitCode=60999" | tee -a $RUNTIME_AREA/$repo\n'
933 <        txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
934 <        txt += '        rm -f $RUNTIME_AREA/$repo \n'
935 <        txt += '        echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
936 <        txt += '        echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
937 <        txt += '    fi\n'
938 <        txt += 'fi\n'
1028 >        file_list = []
1029 >        for fileWithSuffix in (self.output_file):
1030 >             file_list.append(self.numberFile_(fileWithSuffix, '$NJob'))
1031 >
1032 >        txt += 'file_list="'+string.join(file_list,' ')+'"\n'
1033 >        txt += '\n'
1034 >        txt += 'echo ">>> current directory (SOFTWARE_DIR): $SOFTWARE_DIR" \n'
1035 >        txt += 'echo ">>> current directory content:"\n'
1036 >        txt += 'ls \n'
1037          txt += '\n'
1038 +        txt += 'cd $RUNTIME_AREA\n'
1039 +        txt += 'echo ">>> current directory (RUNTIME_AREA):  $RUNTIME_AREA"\n'
1040          return txt
1041  
1042      def numberFile_(self, file, txt):
# Line 947 | Line 1047 | class Cmssw(JobType):
1047          # take away last extension
1048          name = p[0]
1049          for x in p[1:-1]:
1050 <           name=name+"."+x
1050 >            name=name+"."+x
1051          # add "_txt"
1052          if len(p)>1:
1053 <          ext = p[len(p)-1]
1054 <          #result = name + '_' + str(txt) + "." + ext
955 <          result = name + '_' + txt + "." + ext
1053 >            ext = p[len(p)-1]
1054 >            result = name + '_' + txt + "." + ext
1055          else:
1056 <          #result = name + '_' + str(txt)
1057 <          result = name + '_' + txt
959 <        
1056 >            result = name + '_' + txt
1057 >
1058          return result
1059  
1060 <    def getRequirements(self):
1060 >    def getRequirements(self, nj=[]):
1061          """
1062 <        return job requirements to add to jdl files
1062 >        return job requirements to add to jdl files
1063          """
1064          req = ''
1065 <        if common.analisys_common_info['sw_version']:
1065 >        if self.version:
1066              req='Member("VO-cms-' + \
1067 <                 common.analisys_common_info['sw_version'] + \
1067 >                 self.version + \
1068 >                 '", other.GlueHostApplicationSoftwareRunTimeEnvironment)'
1069 >        if self.executable_arch:
1070 >            req+=' && Member("VO-cms-' + \
1071 >                 self.executable_arch + \
1072                   '", other.GlueHostApplicationSoftwareRunTimeEnvironment)'
1073 <        if common.analisys_common_info['sites']:
1074 <            if len(common.analisys_common_info['sites'])>0:
1075 <                req = req + ' && ('
1076 <                for i in range(len(common.analisys_common_info['sites'])):
1077 <                    req = req + 'other.GlueCEInfoHostName == "' \
976 <                         + common.analisys_common_info['sites'][i] + '"'
977 <                    if ( i < (int(len(common.analisys_common_info['sites']) - 1)) ):
978 <                        req = req + ' || '
979 <            req = req + ')'
980 <        #print "req = ", req
1073 >
1074 >        req = req + ' && (other.GlueHostNetworkAdapterOutboundIP)'
1075 >        if common.scheduler.name() == "glitecoll":
1076 >            req += ' && other.GlueCEStateStatus == "Production" '
1077 >
1078          return req
1079  
1080      def configFilename(self):
1081          """ return the config filename """
1082 <        return self.name()+'.cfg'
1082 >        # FUTURE: Can remove cfg mode for CMSSW >= 2_1_x
1083 >        if (self.CMSSW_major >= 2 and self.CMSSW_minor >= 1) or (self.CMSSW_major >= 3):
1084 >          return self.name()+'.py'
1085 >        else:
1086 >          return self.name()+'.cfg'
1087  
987    ### OLI_DANIELE
1088      def wsSetupCMSOSGEnvironment_(self):
1089          """
1090          Returns part of a job script which is prepares
1091          the execution environment and which is common for all CMS jobs.
1092          """
1093 <        txt = '\n'
1094 <        txt += '   echo "### SETUP CMS OSG  ENVIRONMENT ###"\n'
1095 <        txt += '   if [ -f $GRID3_APP_DIR/cmssoft/cmsset_default.sh ] ;then\n'
1096 <        txt += '      # Use $GRID3_APP_DIR/cmssoft/cmsset_default.sh to setup cms software\n'
1097 <        txt += '       source $GRID3_APP_DIR/cmssoft/cmsset_default.sh '+self.version+'\n'
1098 <        txt += '   elif [ -f $OSG_APP/cmssoft/cmsset_default.sh ] ;then\n'
1099 <        txt += '      # Use $OSG_APP/cmssoft/cmsset_default.sh to setup cms software\n'
1100 <        txt += '       source $OSG_APP/cmssoft/cmsset_default.sh '+self.version+'\n'
1101 <        txt += '   else\n'
1102 <        txt += '       echo "SET_CMS_ENV 10020 ==> ERROR $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cmsset_default.sh file not found"\n'
1103 <        txt += '       echo "JOB_EXIT_STATUS = 10020"\n'
1104 <        txt += '       echo "JobExitCode=10020" | tee -a $RUNTIME_AREA/$repo\n'
1105 <        txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
1006 <        txt += '       rm -f $RUNTIME_AREA/$repo \n'
1007 <        txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1008 <        txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1009 <        txt += '       exit 1\n'
1010 <        txt += '\n'
1011 <        txt += '       echo "Remove working directory: $WORKING_DIR"\n'
1012 <        txt += '       cd $RUNTIME_AREA\n'
1013 <        txt += '       /bin/rm -rf $WORKING_DIR\n'
1014 <        txt += '       if [ -d $WORKING_DIR ] ;then\n'
1015 <        txt += '            echo "SET_CMS_ENV 10017 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cmsset_default.sh file not found"\n'
1016 <        txt += '            echo "JOB_EXIT_STATUS = 10017"\n'
1017 <        txt += '            echo "JobExitCode=10017" | tee -a $RUNTIME_AREA/$repo\n'
1018 <        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
1019 <        txt += '            rm -f $RUNTIME_AREA/$repo \n'
1020 <        txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1021 <        txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1022 <        txt += '       fi\n'
1023 <        txt += '\n'
1024 <        txt += '       exit 1\n'
1025 <        txt += '   fi\n'
1093 >        txt = '\n#Written by cms_cmssw::wsSetupCMSOSGEnvironment_\n'
1094 >        txt += '    echo ">>> setup CMS OSG environment:"\n'
1095 >        txt += '    echo "set SCRAM ARCH to ' + self.executable_arch + '"\n'
1096 >        txt += '    export SCRAM_ARCH='+self.executable_arch+'\n'
1097 >        txt += '    echo "SCRAM_ARCH = $SCRAM_ARCH"\n'
1098 >        txt += '    if [ -f $OSG_APP/cmssoft/cms/cmsset_default.sh ] ;then\n'
1099 >        txt += '      # Use $OSG_APP/cmssoft/cms/cmsset_default.sh to setup cms software\n'
1100 >        txt += '        source $OSG_APP/cmssoft/cms/cmsset_default.sh '+self.version+'\n'
1101 >        txt += '    else\n'
1102 >        txt += '        echo "ERROR ==> $OSG_APP/cmssoft/cms/cmsset_default.sh file not found"\n'
1103 >        txt += '        job_exit_code=10020\n'
1104 >        txt += '        func_exit\n'
1105 >        txt += '    fi\n'
1106          txt += '\n'
1107 <        txt += '   echo "SET_CMS_ENV 0 ==> setup cms environment ok"\n'
1108 <        txt += '   echo " END SETUP CMS OSG  ENVIRONMENT "\n'
1107 >        txt += '    echo "==> setup cms environment ok"\n'
1108 >        txt += '    echo "SCRAM_ARCH = $SCRAM_ARCH"\n'
1109  
1110          return txt
1111 <
1032 <    ### OLI_DANIELE
1111 >
1112      def wsSetupCMSLCGEnvironment_(self):
1113          """
1114          Returns part of a job script which is prepares
1115          the execution environment and which is common for all CMS jobs.
1116          """
1117 <        txt  = '   \n'
1118 <        txt += '   echo " ### SETUP CMS LCG  ENVIRONMENT ### "\n'
1119 <        txt += '   if [ ! $VO_CMS_SW_DIR ] ;then\n'
1120 <        txt += '       echo "SET_CMS_ENV 10031 ==> ERROR CMS software dir not found on WN `hostname`"\n'
1121 <        txt += '       echo "JOB_EXIT_STATUS = 10031" \n'
1122 <        txt += '       echo "JobExitCode=10031" | tee -a $RUNTIME_AREA/$repo\n'
1123 <        txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
1124 <        txt += '       rm -f $RUNTIME_AREA/$repo \n'
1125 <        txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1126 <        txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1127 <        txt += '       exit 1\n'
1128 <        txt += '   else\n'
1129 <        txt += '       echo "Sourcing environment... "\n'
1130 <        txt += '       if [ ! -s $VO_CMS_SW_DIR/cmsset_default.sh ] ;then\n'
1131 <        txt += '           echo "SET_CMS_ENV 10020 ==> ERROR cmsset_default.sh file not found into dir $VO_CMS_SW_DIR"\n'
1132 <        txt += '           echo "JOB_EXIT_STATUS = 10020"\n'
1133 <        txt += '           echo "JobExitCode=10020" | tee -a $RUNTIME_AREA/$repo\n'
1134 <        txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
1135 <        txt += '           rm -f $RUNTIME_AREA/$repo \n'
1136 <        txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1137 <        txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1138 <        txt += '           exit 1\n'
1139 <        txt += '       fi\n'
1140 <        txt += '       echo "sourcing $VO_CMS_SW_DIR/cmsset_default.sh"\n'
1141 <        txt += '       source $VO_CMS_SW_DIR/cmsset_default.sh\n'
1142 <        txt += '       result=$?\n'
1143 <        txt += '       if [ $result -ne 0 ]; then\n'
1144 <        txt += '           echo "SET_CMS_ENV 10032 ==> ERROR problem sourcing $VO_CMS_SW_DIR/cmsset_default.sh"\n'
1145 <        txt += '           echo "JOB_EXIT_STATUS = 10032"\n'
1146 <        txt += '           echo "JobExitCode=10032" | tee -a $RUNTIME_AREA/$repo\n'
1147 <        txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
1148 <        txt += '           rm -f $RUNTIME_AREA/$repo \n'
1149 <        txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1150 <        txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1151 <        txt += '           exit 1\n'
1152 <        txt += '       fi\n'
1153 <        txt += '   fi\n'
1154 <        txt += '   \n'
1155 <        txt += '   string=`cat /etc/redhat-release`\n'
1156 <        txt += '   echo $string\n'
1157 <        txt += '   if [[ $string = *alhalla* ]]; then\n'
1158 <        txt += '       echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
1159 <        txt += '   elif [[ $string = *Enterprise* ]] || [[ $string = *cientific* ]]; then\n'
1160 <        txt += '       export SCRAM_ARCH=slc3_ia32_gcc323\n'
1161 <        txt += '       echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
1162 <        txt += '   else\n'
1163 <        txt += '       echo "SET_CMS_ENV 10033 ==> ERROR OS unknown, LCG environment not initialized"\n'
1164 <        txt += '       echo "JOB_EXIT_STATUS = 10033"\n'
1165 <        txt += '       echo "JobExitCode=10033" | tee -a $RUNTIME_AREA/$repo\n'
1166 <        txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
1167 <        txt += '       rm -f $RUNTIME_AREA/$repo \n'
1168 <        txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1169 <        txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1170 <        txt += '       exit 1\n'
1171 <        txt += '   fi\n'
1172 <        txt += '   echo "SET_CMS_ENV 0 ==> setup cms environment ok"\n'
1173 <        txt += '   echo "### END SETUP CMS LCG ENVIRONMENT ###"\n'
1117 >        txt = '\n#Written by cms_cmssw::wsSetupCMSLCGEnvironment_\n'
1118 >        txt += '    echo ">>> setup CMS LCG environment:"\n'
1119 >        txt += '    echo "set SCRAM ARCH and BUILD_ARCH to ' + self.executable_arch + ' ###"\n'
1120 >        txt += '    export SCRAM_ARCH='+self.executable_arch+'\n'
1121 >        txt += '    export BUILD_ARCH='+self.executable_arch+'\n'
1122 >        txt += '    if [ ! $VO_CMS_SW_DIR ] ;then\n'
1123 >        txt += '        echo "ERROR ==> CMS software dir not found on WN `hostname`"\n'
1124 >        txt += '        job_exit_code=10031\n'
1125 >        txt += '        func_exit\n'
1126 >        txt += '    else\n'
1127 >        txt += '        echo "Sourcing environment... "\n'
1128 >        txt += '        if [ ! -s $VO_CMS_SW_DIR/cmsset_default.sh ] ;then\n'
1129 >        txt += '            echo "ERROR ==> cmsset_default.sh file not found into dir $VO_CMS_SW_DIR"\n'
1130 >        txt += '            job_exit_code=10020\n'
1131 >        txt += '            func_exit\n'
1132 >        txt += '        fi\n'
1133 >        txt += '        echo "sourcing $VO_CMS_SW_DIR/cmsset_default.sh"\n'
1134 >        txt += '        source $VO_CMS_SW_DIR/cmsset_default.sh\n'
1135 >        txt += '        result=$?\n'
1136 >        txt += '        if [ $result -ne 0 ]; then\n'
1137 >        txt += '            echo "ERROR ==> problem sourcing $VO_CMS_SW_DIR/cmsset_default.sh"\n'
1138 >        txt += '            job_exit_code=10032\n'
1139 >        txt += '            func_exit\n'
1140 >        txt += '        fi\n'
1141 >        txt += '    fi\n'
1142 >        txt += '    \n'
1143 >        txt += '    echo "==> setup cms environment ok"\n'
1144 >        return txt
1145 >
1146 >    def modifyReport(self, nj):
1147 >        """
1148 >        insert the part of the script that modifies the FrameworkJob Report
1149 >        """
1150 >        txt = '\n#Written by cms_cmssw::modifyReport\n'
1151 >        publish_data = int(self.cfg_params.get('USER.publish_data',0))
1152 >        if (publish_data == 1):
1153 >            processedDataset = self.cfg_params['USER.publish_data_name']
1154 >            LFNBaseName = LFNBase(processedDataset)
1155 >
1156 >            txt += 'if [ $copy_exit_status -eq 0 ]; then\n'
1157 >            txt += '    FOR_LFN=%s_${PSETHASH}/\n'%(LFNBaseName)
1158 >            txt += 'else\n'
1159 >            txt += '    FOR_LFN=/copy_problems/ \n'
1160 >            txt += '    SE=""\n'
1161 >            txt += '    SE_PATH=""\n'
1162 >            txt += 'fi\n'
1163 >
1164 >            txt += 'echo ">>> Modify Job Report:" \n'
1165 >            txt += 'chmod a+x $SOFTWARE_DIR/ProdCommon/ProdCommon/FwkJobRep/ModifyJobReport.py\n'
1166 >            txt += 'ProcessedDataset='+processedDataset+'\n'
1167 >            txt += 'echo "ProcessedDataset = $ProcessedDataset"\n'
1168 >            txt += 'echo "SE = $SE"\n'
1169 >            txt += 'echo "SE_PATH = $SE_PATH"\n'
1170 >            txt += 'echo "FOR_LFN = $FOR_LFN" \n'
1171 >            txt += 'echo "CMSSW_VERSION = $CMSSW_VERSION"\n\n'
1172 >            txt += 'echo "$SOFTWARE_DIR/ProdCommon/ProdCommon/FwkJobRep/ModifyJobReport.py $RUNTIME_AREA/crab_fjr_$NJob.xml $NJob $FOR_LFN $PrimaryDataset $DataTier $ProcessedDataset $ApplicationFamily $executable $CMSSW_VERSION $PSETHASH $SE $SE_PATH"\n'
1173 >            txt += '$SOFTWARE_DIR/ProdCommon/ProdCommon/FwkJobRep/ModifyJobReport.py $RUNTIME_AREA/crab_fjr_$NJob.xml $NJob $FOR_LFN $PrimaryDataset $DataTier $ProcessedDataset $ApplicationFamily $executable $CMSSW_VERSION $PSETHASH $SE $SE_PATH\n'
1174 >            txt += 'modifyReport_result=$?\n'
1175 >            txt += 'if [ $modifyReport_result -ne 0 ]; then\n'
1176 >            txt += '    modifyReport_result=70500\n'
1177 >            txt += '    job_exit_code=$modifyReport_result\n'
1178 >            txt += '    echo "ModifyReportResult=$modifyReport_result" | tee -a $RUNTIME_AREA/$repo\n'
1179 >            txt += '    echo "WARNING: Problem with ModifyJobReport"\n'
1180 >            txt += 'else\n'
1181 >            txt += '    mv NewFrameworkJobReport.xml $RUNTIME_AREA/crab_fjr_$NJob.xml\n'
1182 >            txt += 'fi\n'
1183 >        return txt
1184 >
1185 >    def wsParseFJR(self):  
1186 >        """
1187 >        Parse the FrameworkJobReport to obtain useful infos
1188 >        """
1189 >        txt = '\n#Written by cms_cmssw::wsParseFJR\n'
1190 >        txt += 'echo ">>> Parse FrameworkJobReport crab_fjr.xml"\n'
1191 >        txt += 'if [ -s $RUNTIME_AREA/crab_fjr_$NJob.xml ]; then\n'
1192 >        txt += '    if [ -s $RUNTIME_AREA/parseCrabFjr.py ]; then\n'
1193 >        txt += '        cmd_out=`python $RUNTIME_AREA/parseCrabFjr.py --input $RUNTIME_AREA/crab_fjr_$NJob.xml --MonitorID $MonitorID --MonitorJobID $MonitorJobID`\n'
1194 >        txt += '        echo "Result of parsing the FrameworkJobReport crab_fjr.xml: $cmd_out"\n'
1195 >        txt += '        executable_exit_status=`echo $cmd_out | awk -F\; \'{print $1}\' | awk -F \' \' \'{print $NF}\'`\n'
1196 >        txt += '        if [ $executable_exit_status -eq 50115 ];then\n'
1197 >        txt += '            echo ">>> crab_fjr.xml contents: "\n'
1198 >        txt += '            cat $RUNTIME_AREA/crab_fjr_$NJob.xml\n'
1199 >        txt += '            echo "Wrong FrameworkJobReport --> does not contain useful info. ExitStatus: $executable_exit_status"\n'
1200 >        txt += '        else\n'
1201 >        txt += '            echo "Extracted ExitStatus from FrameworkJobReport parsing output: $executable_exit_status"\n'
1202 >        txt += '        fi\n'
1203 >        txt += '    else\n'
1204 >        txt += '        echo "CRAB python script to parse CRAB FrameworkJobReport crab_fjr.xml is not available, using exit code of executable from command line."\n'
1205 >        txt += '    fi\n'
1206 >          #### Patch to check input data reading for CMSSW16x. Hopefully we'll remove it asap
1207 >
1208 >        if self.datasetPath:
1209 >          # VERIFY PROCESSED DATA
1210 >            txt += '    if [ $executable_exit_status -eq 0 ];then\n'
1211 >            txt += '      echo ">>> Verify list of processed files:"\n'
1212 >            txt += '      echo $InputFiles |tr -d \'\\\\\' |tr \',\' \'\\n\' |tr -d \'"\' > input-files.txt\n'
1213 >            txt += '      grep LFN $RUNTIME_AREA/crab_fjr_$NJob.xml |cut -d">" -f2|cut -d"<" -f1|grep "/" > processed-files.txt\n'
1214 >            txt += '      cat input-files.txt  | sort | uniq > tmp.txt\n'
1215 >            txt += '      mv tmp.txt input-files.txt\n'
1216 >            txt += '      echo "cat input-files.txt"\n'
1217 >            txt += '      echo "----------------------"\n'
1218 >            txt += '      cat input-files.txt\n'
1219 >            txt += '      cat processed-files.txt | sort | uniq > tmp.txt\n'
1220 >            txt += '      mv tmp.txt processed-files.txt\n'
1221 >            txt += '      echo "----------------------"\n'
1222 >            txt += '      echo "cat processed-files.txt"\n'
1223 >            txt += '      echo "----------------------"\n'
1224 >            txt += '      cat processed-files.txt\n'
1225 >            txt += '      echo "----------------------"\n'
1226 >            txt += '      diff -q input-files.txt processed-files.txt\n'
1227 >            txt += '      fileverify_status=$?\n'
1228 >            txt += '      if [ $fileverify_status -ne 0 ]; then\n'
1229 >            txt += '         executable_exit_status=30001\n'
1230 >            txt += '         echo "ERROR ==> not all input files processed"\n'
1231 >            txt += '         echo "      ==> list of processed files from crab_fjr.xml differs from list in pset.cfg"\n'
1232 >            txt += '         echo "      ==> diff input-files.txt processed-files.txt"\n'
1233 >            txt += '      fi\n'
1234 >            txt += '    fi\n'
1235 >            txt += '\n'
1236 >        txt += 'else\n'
1237 >        txt += '    echo "CRAB FrameworkJobReport crab_fjr.xml is not available, using exit code of executable from command line."\n'
1238 >        txt += 'fi\n'
1239 >        txt += '\n'
1240 >        txt += 'echo "ExeExitCode=$executable_exit_status" | tee -a $RUNTIME_AREA/$repo\n'
1241 >        txt += 'echo "EXECUTABLE_EXIT_STATUS = $executable_exit_status"\n'
1242 >        txt += 'job_exit_code=$executable_exit_status\n'
1243 >
1244          return txt
1245  
1246      def setParam_(self, param, value):
# Line 1100 | Line 1249 | class Cmssw(JobType):
1249      def getParams(self):
1250          return self._params
1251  
1252 <    def setTaskid_(self):
1253 <        self._taskId = self.cfg_params['taskId']
1254 <        
1255 <    def getTaskid(self):
1256 <        return self._taskId
1252 >    def uniquelist(self, old):
1253 >        """
1254 >        remove duplicates from a list
1255 >        """
1256 >        nd={}
1257 >        for e in old:
1258 >            nd[e]=0
1259 >        return nd.keys()
1260 >
1261 >    def outList(self):
1262 >        """
1263 >        check the dimension of the output files
1264 >        """
1265 >        txt = ''
1266 >        txt += 'echo ">>> list of expected files on output sandbox"\n'
1267 >        listOutFiles = []
1268 >        stdout = 'CMSSW_$NJob.stdout'
1269 >        stderr = 'CMSSW_$NJob.stderr'
1270 >        if (self.return_data == 1):
1271 >            for file in (self.output_file+self.output_file_sandbox):
1272 >                listOutFiles.append(self.numberFile_(file, '$NJob'))
1273 >            listOutFiles.append(stdout)
1274 >            listOutFiles.append(stderr)
1275 >        else:
1276 >            for file in (self.output_file_sandbox):
1277 >                listOutFiles.append(self.numberFile_(file, '$NJob'))
1278 >            listOutFiles.append(stdout)
1279 >            listOutFiles.append(stderr)
1280 >        txt += 'echo "output files: '+string.join(listOutFiles,' ')+'"\n'
1281 >        txt += 'filesToCheck="'+string.join(listOutFiles,' ')+'"\n'
1282 >        txt += 'export filesToCheck\n'
1283 >        return txt

Diff Legend

- Removed lines
+ Added lines
< Changed lines
> Changed lines