
Comparing COMP/CRAB/python/cms_cmssw.py (file contents):
Revision 1.13 by gutsche, Tue Jun 27 02:31:31 2006 UTC vs.
Revision 1.178 by spiga, Sun Apr 20 09:34:40 2008 UTC

# Line 2 | Line 2 | from JobType import JobType
2   from crab_logger import Logger
3   from crab_exceptions import *
4   from crab_util import *
5 + from BlackWhiteListParser import BlackWhiteListParser
6   import common
6 import PsetManipulator  
7
8 import DBSInfo_EDM
9 import DataDiscovery_EDM
10 import DataLocation_EDM
7   import Scram
8 + from LFNBaseName import *
9  
10 < import os, string, re
10 > import os, string, glob
11  
12   class Cmssw(JobType):
13 <    def __init__(self, cfg_params):
13 >    def __init__(self, cfg_params, ncjobs):
14          JobType.__init__(self, 'CMSSW')
15          common.logger.debug(3,'CMSSW::__init__')
16  
17 <        self.analisys_common_info = {}
18 <        # Marco.
17 >        self.argsList = []
18 >
19          self._params = {}
20          self.cfg_params = cfg_params
21 +        # init BlackWhiteListParser
22 +        self.blackWhiteListParser = BlackWhiteListParser(cfg_params)
23 +
24 +        self.MaxTarBallSize = float(self.cfg_params.get('EDG.maxtarballsize',9.5))
25 +
26 +        # number of jobs requested to be created; used to limit job splitting
27 +        self.ncjobs = ncjobs
28 +
29          log = common.logger
30 <        
30 >
31          self.scram = Scram.Scram(cfg_params)
27        scramArea = ''
32          self.additional_inbox_files = []
33          self.scriptExe = ''
34          self.executable = ''
35 +        self.executable_arch = self.scram.getArch()
36          self.tgz_name = 'default.tgz'
37 +        self.additional_tgz_name = 'additional.tgz'
38 +        self.scriptName = 'CMSSW.sh'
39 +        self.pset = ''      #script use case Da
40 +        self.datasetPath = '' #script use case Da
41  
42 +        # set FJR file name
43 +        self.fjrFileName = 'crab_fjr.xml'
44  
45          self.version = self.scram.getSWVersion()
46 <        self.setParam_('application', self.version)
47 <        common.analisys_common_info['sw_version'] = self.version
48 <        ### FEDE
49 <        common.analisys_common_info['copy_input_data'] = 0
50 <        common.analisys_common_info['events_management'] = 1
46 >
47 >        #
48 >        # Try to block creation in case of arch/version mismatch
49 >        #
50 >
51 > #        a = string.split(self.version, "_")
52 > #
53 > #        if int(a[1]) == 1 and (int(a[2]) < 5 and self.executable_arch.find('slc4') == 0):
54 > #            msg = "Warning: You are using %s version of CMSSW  with %s architecture. \n--> Did you compile your libraries with SLC3? Otherwise you can find some problems running on SLC4 Grid nodes.\n"%(self.version, self.executable_arch)
55 > #            common.logger.message(msg)
56 > #        if int(a[1]) == 1 and (int(a[2]) >= 5 and self.executable_arch.find('slc3') == 0):
57 > #            msg = "Error: CMS does not support %s with %s architecture"%(self.version, self.executable_arch)
58 > #            raise CrabException(msg)
59 > #
60 >
61  
62          ### collect Data cards
42        try:
43            tmp =  cfg_params['CMSSW.datasetpath']
44            log.debug(6, "CMSSW::CMSSW(): datasetPath = "+tmp)
45            if string.lower(tmp)=='none':
46                self.datasetPath = None
47            else:
48                self.datasetPath = tmp
49        except KeyError:
50            msg = "Error: datasetpath not defined "  
51            raise CrabException(msg)
63  
64 <        # ML monitoring
65 <        # split dataset path style: /PreProdR3Minbias/SIM/GEN-SIM
66 <        if not self.datasetPath:
67 <            self.setParam_('dataset', 'None')
68 <            self.setParam_('owner', 'None')
64 >        if not cfg_params.has_key('CMSSW.datasetpath'):
65 >            msg = "Error: datasetpath not defined "
66 >            raise CrabException(msg)
67 >        tmp =  cfg_params['CMSSW.datasetpath']
68 >        log.debug(6, "CMSSW::CMSSW(): datasetPath = "+tmp)
69 >        if string.lower(tmp)=='none':
70 >            self.datasetPath = None
71 >            self.selectNoInput = 1
72          else:
73 <            datasetpath_split = self.datasetPath.split("/")
74 <            self.setParam_('dataset', datasetpath_split[1])
61 <            self.setParam_('owner', datasetpath_split[-1])
62 <
63 <        self.setTaskid_()
64 <        self.setParam_('taskId', self.cfg_params['taskId'])
73 >            self.datasetPath = tmp
74 >            self.selectNoInput = 0
75  
76          self.dataTiers = []
77  
78          ## now the application
79 <        try:
80 <            self.executable = cfg_params['CMSSW.executable']
71 <            self.setParam_('exe', self.executable)
72 <            log.debug(6, "CMSSW::CMSSW(): executable = "+self.executable)
73 <            msg = "Default executable cmsRun overridden. Switch to " + self.executable
74 <            log.debug(3,msg)
75 <        except KeyError:
76 <            self.executable = 'cmsRun'
77 <            self.setParam_('exe', self.executable)
78 <            msg = "User executable not defined. Use cmsRun"
79 <            log.debug(3,msg)
80 <            pass
79 >        self.executable = cfg_params.get('CMSSW.executable','cmsRun')
80 >        log.debug(6, "CMSSW::CMSSW(): executable = "+self.executable)
81  
82 <        try:
83 <            self.pset = cfg_params['CMSSW.pset']
84 <            log.debug(6, "Cmssw::Cmssw(): PSet file = "+self.pset)
82 >        if not cfg_params.has_key('CMSSW.pset'):
83 >            raise CrabException("PSet file missing. Cannot run cmsRun ")
84 >        self.pset = cfg_params['CMSSW.pset']
85 >        log.debug(6, "Cmssw::Cmssw(): PSet file = "+self.pset)
86 >        if self.pset.lower() != 'none' :
87              if (not os.path.exists(self.pset)):
88                  raise CrabException("User defined PSet file "+self.pset+" does not exist")
89 <        except KeyError:
90 <            raise CrabException("PSet file missing. Cannot run cmsRun ")
89 >        else:
90 >            self.pset = None
91  
92          # output files
93 <        try:
94 <            self.output_file = []
93 >        ## stuff which must be returned always via sandbox
94 >        self.output_file_sandbox = []
95  
96 <            tmp = cfg_params['CMSSW.output_file']
97 <            if tmp != '':
98 <                tmpOutFiles = string.split(cfg_params['CMSSW.output_file'],',')
99 <                log.debug(7, 'cmssw::cmssw(): output files '+str(tmpOutFiles))
100 <                for tmp in tmpOutFiles:
101 <                    tmp=string.strip(tmp)
102 <                    self.output_file.append(tmp)
103 <                    pass
104 <            else:
105 <                log.message("No output file defined: only stdout/err will be available")
96 >        # add fjr report by default via sandbox
97 >        self.output_file_sandbox.append(self.fjrFileName)
98 >
99 >        # other output files to be returned via sandbox or copied to SE
100 >        self.output_file = []
101 >        tmp = cfg_params.get('CMSSW.output_file',None)
102 >        if tmp :
103 >            tmpOutFiles = string.split(tmp,',')
104 >            log.debug(7, 'cmssw::cmssw(): output files '+str(tmpOutFiles))
105 >            for tmp in tmpOutFiles:
106 >                tmp=string.strip(tmp)
107 >                self.output_file.append(tmp)
108                  pass
109 <            pass
110 <        except KeyError:
111 <            log.message("No output file defined: only stdout/err will be available")
108 <            pass
109 >        else:
110 >            log.message("No output file defined: only stdout/err and the CRAB Framework Job Report will be available\n")
111 >        pass
112  
113          # script_exe file as additional file in inputSandbox
114 <        try:
115 <            self.scriptExe = cfg_params['USER.script_exe']
116 <            self.additional_inbox_files.append(self.scriptExe)
117 <            if self.scriptExe != '':
118 <               if not os.path.isfile(self.scriptExe):
119 <                  msg ="WARNING. file "+self.scriptExe+" not found"
120 <                  raise CrabException(msg)
121 <        except KeyError:
122 <           pass
123 <                  
114 >        self.scriptExe = cfg_params.get('USER.script_exe',None)
115 >        if self.scriptExe :
116 >            if not os.path.isfile(self.scriptExe):
117 >                msg ="ERROR. file "+self.scriptExe+" not found"
118 >                raise CrabException(msg)
119 >            self.additional_inbox_files.append(string.strip(self.scriptExe))
120 >
121 >        #CarlosDaniele
122 +        if self.datasetPath == None and self.pset == None and not self.scriptExe :
123 +            msg = "Error: script_exe not defined"
124 >            raise CrabException(msg)
125 >
126          ## additional input files
127 <        try:
128 <            tmpAddFiles = string.split(cfg_params['CMSSW.additional_input_files'],',')
127 >        if cfg_params.has_key('USER.additional_input_files'):
128 >            tmpAddFiles = string.split(cfg_params['USER.additional_input_files'],',')
129              for tmp in tmpAddFiles:
130 <                if not os.path.exists(tmp):
131 <                    raise CrabException("Additional input file not found: "+tmp)
132 <                tmp=string.strip(tmp)
133 <                self.additional_inbox_files.append(tmp)
130 >                tmp = string.strip(tmp)
131 >                dirname = ''
132 >                if not tmp[0]=="/": dirname = "."
133 >                files = []
134 >                if string.find(tmp,"*")>-1:
135 >                    files = glob.glob(os.path.join(dirname, tmp))
136 >                    if len(files)==0:
137 >                        raise CrabException("No additional input file found with this pattern: "+tmp)
138 >                else:
139 >                    files.append(tmp)
140 >                for file in files:
141 >                    if not os.path.exists(file):
142 >                        raise CrabException("Additional input file not found: "+file)
143 >                    pass
144 >                    # fname = string.split(file, '/')[-1]
145 >                    # storedFile = common.work_space.pathForTgz()+'share/'+fname
146 >                    # shutil.copyfile(file, storedFile)
147 >                    self.additional_inbox_files.append(string.strip(file))
148                  pass
149              pass
150 <        except KeyError:
151 <            pass
133 <
134 <        # files per job
135 <        try:
136 <            self.filesPerJob = int(cfg_params['CMSSW.files_per_jobs']) #Daniele
137 <            self.selectFilesPerJob = 1
138 <        except KeyError:
139 <            self.filesPerJob = 0
140 <            self.selectFilesPerJob = 0
150 >            common.logger.debug(5,"Additional input files: "+str(self.additional_inbox_files))
151 >        pass
152  
153          ## Events per job
154 <        try:
154 >        if cfg_params.has_key('CMSSW.events_per_job'):
155              self.eventsPerJob =int( cfg_params['CMSSW.events_per_job'])
156              self.selectEventsPerJob = 1
157 <        except KeyError:
157 >        else:
158              self.eventsPerJob = -1
159              self.selectEventsPerJob = 0
149    
150        # To be implemented
151        # ## number of jobs
152        # try:
153        #     self.numberOfJobs =int( cfg_params['CMSSW.number_of_job'])
154        #     self.selectNumberOfJobs = 1
155        # except KeyError:
156        #     self.selectNumberOfJobs = 0
160  
161 <        if (self.selectFilesPerJob == self.selectEventsPerJob):
162 <            msg = 'Must define either files_per_jobs or events_per_job'
163 <            raise CrabException(msg)
161 >        ## number of jobs
162 >        if cfg_params.has_key('CMSSW.number_of_jobs'):
163 >            self.theNumberOfJobs =int( cfg_params['CMSSW.number_of_jobs'])
164 >            self.selectNumberOfJobs = 1
165 >        else:
166 >            self.theNumberOfJobs = 0
167 >            self.selectNumberOfJobs = 0
168  
169 <        if (self.selectEventsPerJob  and not self.datasetPath == None):
163 <            msg = 'Splitting according to events_per_job available only with None as datasetpath'
164 <            raise CrabException(msg)
165 <    
166 <        try:
169 >        if cfg_params.has_key('CMSSW.total_number_of_events'):
170              self.total_number_of_events = int(cfg_params['CMSSW.total_number_of_events'])
171 <        except KeyError:
172 <            msg = 'Must define total_number_of_events'
173 <            raise CrabException(msg)
174 <        
172 <        CEBlackList = []
173 <        try:
174 <            tmpBad = string.split(cfg_params['EDG.ce_black_list'],',')
175 <            for tmp in tmpBad:
176 <                tmp=string.strip(tmp)
177 <                CEBlackList.append(tmp)
178 <        except KeyError:
179 <            pass
180 <
181 <        self.reCEBlackList=[]
182 <        for bad in CEBlackList:
183 <            self.reCEBlackList.append(re.compile( bad ))
184 <
185 <        common.logger.debug(5,'CEBlackList: '+str(CEBlackList))
186 <
187 <        CEWhiteList = []
188 <        try:
189 <            tmpGood = string.split(cfg_params['EDG.ce_white_list'],',')
190 <            for tmp in tmpGood:
191 <                tmp=string.strip(tmp)
192 <                CEWhiteList.append(tmp)
193 <        except KeyError:
194 <            pass
171 >            self.selectTotalNumberEvents = 1
172 >        else:
173 >            self.total_number_of_events = 0
174 >            self.selectTotalNumberEvents = 0
175  
176 <        #print 'CEWhiteList: ',CEWhiteList
177 <        self.reCEWhiteList=[]
178 <        for Good in CEWhiteList:
179 <            self.reCEWhiteList.append(re.compile( Good ))
176 >        if self.pset != None: #CarlosDaniele
177 >             if ( (self.selectTotalNumberEvents + self.selectEventsPerJob + self.selectNumberOfJobs) != 2 ):
178 >                 msg = 'Must define exactly two of total_number_of_events, events_per_job, or number_of_jobs.'
179 >                 raise CrabException(msg)
180 >        else:
181 >             if (self.selectNumberOfJobs == 0):
182 >                 msg = 'Must specify number_of_jobs.'
183 >                 raise CrabException(msg)
184 >
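
The check just above enforces CRAB's splitting rule: when a pset is given, exactly two of total_number_of_events, events_per_job and number_of_jobs must be set, and the third is derived later by the splitting methods. A minimal standalone sketch of that rule (the helper derive_splitting is illustrative, not part of CRAB; it mirrors the floor-division behaviour used below):

def derive_splitting(total_events=None, events_per_job=None, number_of_jobs=None):
    # Exactly two of the three [CMSSW] splitting parameters must be given.
    given = [p is not None for p in (total_events, events_per_job, number_of_jobs)]
    if sum(given) != 2:
        raise ValueError('Must define exactly two of total_number_of_events, '
                         'events_per_job, or number_of_jobs.')
    if total_events is None:
        total_events = events_per_job * number_of_jobs
    elif events_per_job is None:
        events_per_job = int(total_events / number_of_jobs)
    else:
        # floor division; CRAB warns about any remainder that cannot fill a job
        number_of_jobs = int(total_events / events_per_job)
    return total_events, events_per_job, number_of_jobs

# e.g. derive_splitting(total_events=10000, number_of_jobs=50) -> (10000, 200, 50)
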
185 >        ## New method of dealing with seeds
186 >        self.incrementSeeds = []
187 >        self.preserveSeeds = []
188 >        if cfg_params.has_key('CMSSW.preserve_seeds'):
189 >            tmpList = cfg_params['CMSSW.preserve_seeds'].split(',')
190 >            for tmp in tmpList:
191 >                tmp = tmp.strip()
192 >                self.preserveSeeds.append(tmp)
193 >        if cfg_params.has_key('CMSSW.increment_seeds'):
194 >            tmpList = cfg_params['CMSSW.increment_seeds'].split(',')
195 >            for tmp in tmpList:
196 >                tmp = tmp.strip()
197 >                self.incrementSeeds.append(tmp)
198 >
199 >        ## Old method of dealing with seeds
200 >        ## FUTURE: This is for old CMSSW and old CRAB. Can throw exceptions after a couple of CRAB releases and then
201 >        ## remove
202 >        self.sourceSeed = cfg_params.get('CMSSW.pythia_seed',None)
203 >        if self.sourceSeed:
204 >            print "pythia_seed is a deprecated parameter. Use preserve_seeds or increment_seeds in the future.\n","Added to increment_seeds."
205 >            self.incrementSeeds.append('sourceSeed')
206 >
207 >        self.sourceSeedVtx = cfg_params.get('CMSSW.vtx_seed',None)
208 >        if self.sourceSeedVtx:
209 >            print "vtx_seed is a deprecated parameter. Use preserve_seeds or increment_seeds in the future.\n","Added to increment_seeds."
210 >            self.incrementSeeds.append('VtxSmeared')
211 >
212 >        self.sourceSeedG4 = cfg_params.get('CMSSW.g4_seed',None)
213 >        if self.sourceSeedG4:
214 >            print "g4_seed is a deprecated parameter. Use preserve_seeds or increment_seeds in the future.\n","Added to increment_seeds."
215 >            self.incrementSeeds.append('g4SimHits')
216 >
217 >        self.sourceSeedMix = cfg_params.get('CMSSW.mix_seed',None)
218 >        if self.sourceSeedMix:
219 >            print "mix_seed is a deprecated parameter. Use preserve_seeds or increment_seeds in the future.\n","Added to increment_seeds."
220 >            self.incrementSeeds.append('mix')
221 >
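
The four deprecated seed parameters above all follow the same pattern: warn, then map the old [CMSSW] option onto the module label appended to increment_seeds. The same mapping expressed as a table-driven sketch (the dictionary and helper are illustrative, not CRAB code; the parameter names and labels are taken from the block above):

DEPRECATED_SEEDS = {
    'CMSSW.pythia_seed': 'sourceSeed',
    'CMSSW.vtx_seed':    'VtxSmeared',
    'CMSSW.g4_seed':     'g4SimHits',
    'CMSSW.mix_seed':    'mix',
}

def collect_deprecated_seeds(cfg_params, increment_seeds):
    # Append the module label of every deprecated seed parameter that is still set.
    for param, label in DEPRECATED_SEEDS.items():
        if cfg_params.get(param, None):
            # CRAB prints a deprecation message here before falling back to increment_seeds
            increment_seeds.append(label)
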
222 >        self.firstRun = cfg_params.get('CMSSW.first_run',None)
223 >
224 >        if self.pset != None: #CarlosDaniele
225 >            import PsetManipulator as pp
226 >            PsetEdit = pp.PsetManipulator(self.pset) #Daniele Pset
227  
228 <        common.logger.debug(5,'CEWhiteList: '+str(CEWhiteList))
228 >        # Copy/return
229  
230 <        self.PsetEdit = PsetManipulator.PsetManipulator(self.pset) #Daniele Pset
230 >        self.copy_data = int(cfg_params.get('USER.copy_data',0))
231 >        self.return_data = int(cfg_params.get('USER.return_data',0))
232  
233          #DBSDLS-start
234 <        ## Initialize the variables that are extracted from DBS/DLS and needed in other places of the code
234 >        ## Initialize the variables that are extracted from DBS/DLS and needed in other places of the code
235          self.maxEvents=0  # max events available   ( --> check the requested nb. of evts in Creator.py)
236          self.DBSPaths={}  # all dbs paths requested ( --> input to the site local discovery script)
237 +        self.jobDestination=[]  # Site destination(s) for each job (list of lists)
238          ## Perform the data location and discovery (based on DBS/DLS)
239          ## SL: Don't if NONE is specified as input (pythia use case)
240 <        common.analisys_common_info['sites']=None
240 >        blockSites = {}
241          if self.datasetPath:
242 <            self.DataDiscoveryAndLocation(cfg_params)
243 <        #DBSDLS-end          
242 >            blockSites = self.DataDiscoveryAndLocation(cfg_params)
243 >        #DBSDLS-end
244  
245          self.tgzNameWithPath = self.getTarBall(self.executable)
246  
218        # modify Pset
219        if (self.datasetPath): # standard job
220            self.PsetEdit.maxEvent(self.eventsPerJob) #Daniele  
221            self.PsetEdit.inputModule("INPUT") #Daniele
222
223        else:  # pythia like job
224            self.PsetEdit.maxEvent(self.eventsPerJob) #Daniele  
225            self.PsetEdit.pythiaSeed("INPUT") #Daniele
226            try:
227                self.sourceSeed = int(cfg_params['CMSSW.pythia_seed'])
228            except KeyError:
229                self.sourceSeed = 123456
230                common.logger.message("No seed given, will use "+str(self.sourceSeed))
231        
232        self.PsetEdit.psetWriter(self.configFilename())
233    
247          ## Select Splitting
248 <        if self.selectFilesPerJob: self.jobSplittingPerFiles()
249 <        elif self.selectEventsPerJob: self.jobSplittingPerEvents()
248 >        if self.selectNoInput:
249 >            if self.pset == None: #CarlosDaniele
250 >                self.jobSplittingForScript()
251 >            else:
252 >                self.jobSplittingNoInput()
253          else:
254 <            msg = 'Don\'t know how to split...'
239 <            raise CrabException(msg)
254 >            self.jobSplittingByBlocks(blockSites)
255  
256 +        # modify Pset
257 +        if self.pset != None: #CarlosDaniele
258 +            try:
259 +                # Add FrameworkJobReport to parameter-set, set max events.
260 +                # Reset later for data jobs by writeCFG which does all modifications
261 +                PsetEdit.addCrabFJR(self.fjrFileName)
262 +                PsetEdit.maxEvent(self.eventsPerJob)
263 +                PsetEdit.psetWriter(self.configFilename())
264 +            except:
265 +                msg='Error while manipulating ParameterSet: exiting...'
266 +                raise CrabException(msg)
267  
268      def DataDiscoveryAndLocation(self, cfg_params):
269  
270 +        import DataDiscovery
271 +        import DataLocation
272          common.logger.debug(10,"CMSSW::DataDiscoveryAndLocation()")
273  
274          datasetPath=self.datasetPath
275  
248        ## TODO
249        dataTiersList = ""
250        dataTiers = dataTiersList.split(',')
251
276          ## Contact the DBS
277 +        common.logger.message("Contacting Data Discovery Services ...")
278          try:
279 <            self.pubdata=DataDiscovery_EDM.DataDiscovery_EDM(datasetPath, dataTiers, cfg_params)
279 >            self.pubdata=DataDiscovery.DataDiscovery(datasetPath, cfg_params)
280              self.pubdata.fetchDBSInfo()
281  
282 <        except DataDiscovery_EDM.NotExistingDatasetError, ex :
282 >        except DataDiscovery.NotExistingDatasetError, ex :
283              msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
284              raise CrabException(msg)
285 <
261 <        except DataDiscovery_EDM.NoDataTierinProvenanceError, ex :
285 >        except DataDiscovery.NoDataTierinProvenanceError, ex :
286              msg = 'ERROR ***: failed Data Discovery in DBS : %s'%ex.getErrorMessage()
287              raise CrabException(msg)
288 <        except DataDiscovery_EDM.DataDiscoveryError, ex:
289 <            msg = 'ERROR ***: failed Data Discovery in DBS  %s'%ex.getErrorMessage()
288 >        except DataDiscovery.DataDiscoveryError, ex:
289 >            msg = 'ERROR ***: failed Data Discovery in DBS :  %s'%ex.getErrorMessage()
290              raise CrabException(msg)
291  
292 <        ## get list of all required data in the form of dbs paths  (dbs path = /dataset/datatier/owner)
293 <        ## self.DBSPaths=self.pubdata.getDBSPaths()
294 <        common.logger.message("Required data are :"+self.datasetPath)
271 <
272 <        filesbyblock=self.pubdata.getFiles()
273 <        self.AllInputFiles=filesbyblock.values()
274 <        self.files = self.AllInputFiles        
275 <
276 <        ## TEMP
277 <    #    self.filesTmp = filesbyblock.values()
278 <    #    self.files = []
279 <    #    locPath='rfio:cmsbose2.bo.infn.it:/flatfiles/SE00/cms/fanfani/ProdTest/'
280 <    #    locPath=''
281 <    #    tmp = []
282 <    #    for file in self.filesTmp[0]:
283 <    #        tmp.append(locPath+file)
284 <    #    self.files.append(tmp)
285 <        ## END TEMP
292 >        self.filesbyblock=self.pubdata.getFiles()
293 >        self.eventsbyblock=self.pubdata.getEventsPerBlock()
294 >        self.eventsbyfile=self.pubdata.getEventsPerFile()
295  
296          ## get max number of events
297 <        #common.logger.debug(10,"number of events for primary fileblocks %i"%self.pubdata.getMaxEvents())
289 <        self.maxEvents=self.pubdata.getMaxEvents() ##  self.maxEvents used in Creator.py
290 <        common.logger.message("\nThe number of available events is %s"%self.maxEvents)
297 >        self.maxEvents=self.pubdata.getMaxEvents() ##  self.maxEvents used in Creator.py
298  
299          ## Contact the DLS and build a list of sites hosting the fileblocks
300          try:
301 <            dataloc=DataLocation_EDM.DataLocation_EDM(filesbyblock.keys(),cfg_params)
301 >            dataloc=DataLocation.DataLocation(self.filesbyblock.keys(),cfg_params)
302              dataloc.fetchDLSInfo()
303 <        except DataLocation_EDM.DataLocationError , ex:
303 >        except DataLocation.DataLocationError , ex:
304              msg = 'ERROR ***: failed Data Location in DLS \n %s '%ex.getErrorMessage()
305              raise CrabException(msg)
299        
300        allsites=dataloc.getSites()
301        common.logger.debug(5,"sites are %s"%allsites)
302        sites=self.checkBlackList(allsites)
303        common.logger.debug(5,"sites are (after black list) %s"%sites)
304        sites=self.checkWhiteList(sites)
305        common.logger.debug(5,"sites are (after white list) %s"%sites)
306  
307        if len(sites)==0:
308            msg = 'No sites hosting all the needed data! Exiting... '
309            raise CrabException(msg)
307  
308 <        common.logger.message("List of Sites hosting the data : "+str(sites))
309 <        common.logger.debug(6, "List of Sites: "+str(sites))
310 <        common.analisys_common_info['sites']=sites    ## used in SchedulerEdg.py in createSchScript
311 <        self.setParam_('TargetCE', ','.join(sites))
312 <        return
313 <    
314 <    def jobSplittingPerFiles(self):
315 <        """
316 <        Perform job splitting based on number of files to be accessed per job
317 <        """
318 <        common.logger.debug(5,'Splitting per input files')
319 <        common.logger.message('Required '+str(self.filesPerJob)+' files per job ')
320 <        common.logger.message('Required '+str(self.total_number_of_events)+' events in total ')
321 <
322 <        ## TODO: SL need to have (from DBS) a detailed list of how many events per each file
323 <        n_tot_files = (len(self.files[0]))
324 <        #print "n_tot_files = ", n_tot_files
325 <        ## SL: this is wrong if the files have different number of events
326 <        #print "self.maxEvents = ", self.maxEvents
327 <        evPerFile = int(self.maxEvents)/n_tot_files
328 <        #print "evPerFile = int(self.maxEvents)/n_tot_files =  ", evPerFile
329 <
330 <        common.logger.debug(5,'Events per File '+str(evPerFile))
331 <
332 <        ## if asked to process all events, do it
333 <        if self.total_number_of_events == -1:
334 <            self.total_number_of_events=self.maxEvents
335 <            self.total_number_of_jobs = int(n_tot_files)*1/int(self.filesPerJob)
336 <            common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for all available events '+str(self.total_number_of_events)+' events')
337 <        
308 >        sites = dataloc.getSites()
309 >        allSites = []
310 >        listSites = sites.values()
311 >        for listSite in listSites:
312 >            for oneSite in listSite:
313 >                allSites.append(oneSite)
314 >        allSites = self.uniquelist(allSites)
315 >
316 >        # screen output
317 >        common.logger.message("Requested dataset: " + datasetPath + " has " + str(self.maxEvents) + " events in " + str(len(self.filesbyblock.keys())) + " blocks.\n")
318 >
319 >        return sites
320 >
321 >  # to Be Removed  DS -- BL
322 >  #  def setArgsList(self, argsList):
323 >  #      self.argsList = argsList
324 >
325 >    def jobSplittingByBlocks(self, blockSites):
326 >        """
327 >        Perform job splitting. Jobs run over an integer number of files
328 >        and no more than one block.
329 >        ARGUMENT: blockSites: dictionary with blocks as keys and list of host sites as values
330 >        REQUIRES: self.selectTotalNumberEvents, self.selectEventsPerJob, self.selectNumberOfJobs,
331 >                  self.total_number_of_events, self.eventsPerJob, self.theNumberOfJobs,
332 >                  self.maxEvents, self.filesbyblock
333 >        SETS: self.jobDestination - Site destination(s) for each job (a list of lists)
334 >              self.total_number_of_jobs - Total # of jobs
335 >              self.list_of_args - File(s) job will run on (a list of lists)
336 >        """
337 >
338 >        # ---- Handle the possible job splitting configurations ---- #
339 >        if (self.selectTotalNumberEvents):
340 >            totalEventsRequested = self.total_number_of_events
341 >        if (self.selectEventsPerJob):
342 >            eventsPerJobRequested = self.eventsPerJob
343 >            if (self.selectNumberOfJobs):
344 >                totalEventsRequested = self.theNumberOfJobs * self.eventsPerJob
345 >
346 >        # If user requested all the events in the dataset
347 >        if (totalEventsRequested == -1):
348 >            eventsRemaining=self.maxEvents
349 >        # If user requested more events than are in the dataset
350 >        elif (totalEventsRequested > self.maxEvents):
351 >            eventsRemaining = self.maxEvents
352 >            common.logger.message("Requested "+str(self.total_number_of_events)+ " events, but only "+str(self.maxEvents)+" events are available.")
353 >        # If user requested less events than are in the dataset
354          else:
355 <            #print "self.total_number_of_events = ", self.total_number_of_events
343 <            #print "evPerFile = ", evPerFile
344 <            self.total_number_of_files = int(self.total_number_of_events/evPerFile)
345 <            #print "self.total_number_of_files = int(self.total_number_of_events/evPerFile) = " , self.total_number_of_files
346 <            ## SL: if ask for less event than what is computed to be available on a
347 <            ##     file, process the first file anyhow.
348 <            if self.total_number_of_files == 0:
349 <                self.total_number_of_files = self.total_number_of_files + 1
350 <                
355 >            eventsRemaining = totalEventsRequested
356  
357 <            common.logger.debug(5,'N files  '+str(self.total_number_of_files))
357 >        # If user requested more events per job than are in the dataset
358 >        if (self.selectEventsPerJob and eventsPerJobRequested > self.maxEvents):
359 >            eventsPerJobRequested = self.maxEvents
360  
361 <            check = 0
362 <            
356 <            ## Compute the number of jobs
357 <            #self.total_number_of_jobs = int(n_tot_files)*1/int(self.filesPerJob)
358 <            #print "self.total_number_of_files = ", self.total_number_of_files
359 <            #print "self.filesPerJob = ", self.filesPerJob
360 <            self.total_number_of_jobs = int(self.total_number_of_files/self.filesPerJob)
361 <            #print "self.total_number_of_jobs = ", self.total_number_of_jobs
362 <            common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
363 <
364 <            ## is there any remainder?
365 <            check = int(self.total_number_of_files) - (int(self.total_number_of_jobs)*self.filesPerJob)
366 <
367 <            common.logger.debug(5,'Check  '+str(check))
368 <
369 <            if check > 0:
370 <                self.total_number_of_jobs =  self.total_number_of_jobs + 1
371 <                common.logger.message('Warning: last job will be created with '+str(check)+' files')
361 >        # For user info at end
362 >        totalEventCount = 0
363  
364 <            #common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for a total of '+str((self.total_number_of_jobs-1)*self.filesPerJob*evPerFile + check*evPerFile)+' events')
365 <            common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for a total of '+str((self.total_number_of_jobs)*self.filesPerJob*evPerFile + check*evPerFile)+' events')
366 <            pass
364 >        if (self.selectTotalNumberEvents and self.selectNumberOfJobs):
365 >            eventsPerJobRequested = int(eventsRemaining/self.theNumberOfJobs)
366 >
367 >        if (self.selectNumberOfJobs):
368 >            common.logger.message("May not create the exact number_of_jobs requested.")
369 >
370 >        if ( self.ncjobs == 'all' ) :
371 >            totalNumberOfJobs = 999999999
372 >        else :
373 >            totalNumberOfJobs = self.ncjobs
374  
375 +        blocks = blockSites.keys()
376 +        blockCount = 0
377 +        # Backup variable in case self.maxEvents counted events in a non-included block
378 +        numBlocksInDataset = len(blocks)
379 +
380 +        jobCount = 0
381          list_of_lists = []
382 <        for i in xrange(0, int(n_tot_files), self.filesPerJob):
383 <            parString = "\\{"
384 <            
385 <            params = self.files[0][i: i+self.filesPerJob]
386 <            for i in range(len(params) - 1):
387 <                parString += '\\\"' + params[i] + '\\\"\,'
388 <            
389 <            parString += '\\\"' + params[len(params) - 1] + '\\\"\\}'
390 <            list_of_lists.append(parString)
391 <            pass
382 >
383 >        # list tracking which jobs belong to which block
384 >        jobsOfBlock = {}
385 >
386 >        # ---- Iterate over the blocks in the dataset until ---- #
387 >        # ---- we've met the requested total # of events    ---- #
388 >        while ( (eventsRemaining > 0) and (blockCount < numBlocksInDataset) and (jobCount < totalNumberOfJobs)):
389 >            block = blocks[blockCount]
390 >            blockCount += 1
391 >            if block not in jobsOfBlock.keys() :
392 >                jobsOfBlock[block] = []
393 >
394 >            if self.eventsbyblock.has_key(block) :
395 >                numEventsInBlock = self.eventsbyblock[block]
396 >                common.logger.debug(5,'Events in Block File '+str(numEventsInBlock))
397 >
398 >                files = self.filesbyblock[block]
399 >                numFilesInBlock = len(files)
400 >                if (numFilesInBlock <= 0):
401 >                    continue
402 >                fileCount = 0
403 >
404 >                # ---- New block => New job ---- #
405 >                parString = ""
406 >                # counter for number of events in files currently worked on
407 >                filesEventCount = 0
408 >                # flag if next while loop should touch new file
409 >                newFile = 1
410 >                # job event counter
411 >                jobSkipEventCount = 0
412 >
413 >                # ---- Iterate over the files in the block until we've met the requested ---- #
414 >                # ---- total # of events or we've gone over all the files in this block  ---- #
415 >                while ( (eventsRemaining > 0) and (fileCount < numFilesInBlock) and (jobCount < totalNumberOfJobs) ):
416 >                    file = files[fileCount]
417 >                    if newFile :
418 >                        try:
419 >                            numEventsInFile = self.eventsbyfile[file]
420 >                            common.logger.debug(6, "File "+str(file)+" has "+str(numEventsInFile)+" events")
421 >                            # increase filesEventCount
422 >                            filesEventCount += numEventsInFile
423 >                            # Add file to current job
424 >                            parString += '\\\"' + file + '\\\"\,'
425 >                            newFile = 0
426 >                        except KeyError:
427 >                            common.logger.message("File "+str(file)+" has unknown number of events: skipping")
428 >
429 >                    eventsPerJobRequested = min(eventsPerJobRequested, eventsRemaining)
430 >                    # if fewer events remain in the file than eventsPerJobRequested
431 >                    if ( filesEventCount - jobSkipEventCount < eventsPerJobRequested):
432 >                        # if last file in block
433 >                        if ( fileCount == numFilesInBlock-1 ) :
434 >                            # end job using last file, use remaining events in block
435 >                            # close job and touch new file
436 >                            fullString = parString[:-2]
437 >                            list_of_lists.append([fullString,str(-1),str(jobSkipEventCount)])
438 >                            common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(filesEventCount - jobSkipEventCount)+" events (last file in block).")
439 >                            self.jobDestination.append(blockSites[block])
440 >                            common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
441 >                            # fill jobs of block dictionary
442 >                            jobsOfBlock[block].append(jobCount+1)
443 >                            # reset counter
444 >                            jobCount = jobCount + 1
445 >                            totalEventCount = totalEventCount + filesEventCount - jobSkipEventCount
446 >                            eventsRemaining = eventsRemaining - filesEventCount + jobSkipEventCount
447 >                            jobSkipEventCount = 0
448 >                            # reset file
449 >                            parString = ""
450 >                            filesEventCount = 0
451 >                            newFile = 1
452 >                            fileCount += 1
453 >                        else :
454 >                            # go to next file
455 >                            newFile = 1
456 >                            fileCount += 1
457 >                    # if the remaining events in the file equal eventsPerJobRequested
458 >                    elif ( filesEventCount - jobSkipEventCount == eventsPerJobRequested ) :
459 >                        # close job and touch new file
460 >                        fullString = parString[:-2]
461 >                        list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
462 >                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
463 >                        self.jobDestination.append(blockSites[block])
464 >                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
465 >                        jobsOfBlock[block].append(jobCount+1)
466 >                        # reset counter
467 >                        jobCount = jobCount + 1
468 >                        totalEventCount = totalEventCount + eventsPerJobRequested
469 >                        eventsRemaining = eventsRemaining - eventsPerJobRequested
470 >                        jobSkipEventCount = 0
471 >                        # reset file
472 >                        parString = ""
473 >                        filesEventCount = 0
474 >                        newFile = 1
475 >                        fileCount += 1
476 >
477 >                    # if more events remain in the file than eventsPerJobRequested
478 >                    else :
479 >                        # close job but don't touch new file
480 >                        fullString = parString[:-2]
481 >                        list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
482 >                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
483 >                        self.jobDestination.append(blockSites[block])
484 >                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
485 >                        jobsOfBlock[block].append(jobCount+1)
486 >                        # increase counter
487 >                        jobCount = jobCount + 1
488 >                        totalEventCount = totalEventCount + eventsPerJobRequested
489 >                        eventsRemaining = eventsRemaining - eventsPerJobRequested
490 >                        # calculate skip events for last file
491 >                        # use filesEventCount (contains several files), jobSkipEventCount and eventsPerJobRequested
492 >                        jobSkipEventCount = eventsPerJobRequested - (filesEventCount - jobSkipEventCount - self.eventsbyfile[file])
493 >                        # remove all but the last file
494 >                        filesEventCount = self.eventsbyfile[file]
495 >                        parString = '\\\"' + file + '\\\"\,'
496 >                    pass # END if
497 >                pass # END while (iterate over files in the block)
498 >        pass # END while (iterate over blocks in the dataset)
499 >        self.ncjobs = self.total_number_of_jobs = jobCount
500 >        if (eventsRemaining > 0 and jobCount < totalNumberOfJobs ):
501 >            common.logger.message("Could not run on all requested events because some blocks not hosted at allowed sites.")
502 >        common.logger.message(str(jobCount)+" job(s) can run on "+str(totalEventCount)+" events.\n")
503 >
504 >        # screen output
505 >        screenOutput = "List of jobs and available destination sites:\n\n"
506 >
507 >        # keep track of blocks with no sites to print a warning at the end
508 >        noSiteBlock = []
509 >        bloskNoSite = []
510 >
511 >        blockCounter = 0
512 >        for block in blocks:
513 >            if block in jobsOfBlock.keys() :
514 >                blockCounter += 1
515 >                screenOutput += "Block %5i: jobs %20s: sites: %s\n" % (blockCounter,spanRanges(jobsOfBlock[block]),
516 >                    ','.join(self.blackWhiteListParser.checkWhiteList(self.blackWhiteListParser.checkBlackList(blockSites[block],block),block)))
517 >                if len(self.blackWhiteListParser.checkWhiteList(self.blackWhiteListParser.checkBlackList(blockSites[block],block),block)) == 0:
518 >                    noSiteBlock.append( spanRanges(jobsOfBlock[block]) )
519 >                    bloskNoSite.append( blockCounter )
520 >
521 >        common.logger.message(screenOutput)
522 >        if len(noSiteBlock) > 0 and len(bloskNoSite) > 0:
523 >            msg = 'WARNING: No sites are hosting any part of data for block:\n                '
524 >            virgola = ""
525 >            if len(bloskNoSite) > 1:
526 >                virgola = ","
527 >            for block in bloskNoSite:
528 >                msg += ' ' + str(block) + virgola
529 >            msg += '\n               Related jobs:\n                 '
530 >            virgola = ""
531 >            if len(noSiteBlock) > 1:
532 >                virgola = ","
533 >            for range_jobs in noSiteBlock:
534 >                msg += str(range_jobs) + virgola
535 >            msg += '\n               will not be submitted and this block of data cannot be analyzed!\n'
536 >            if self.cfg_params.has_key('EDG.se_white_list'):
537 >                msg += 'WARNING: SE White List: '+self.cfg_params['EDG.se_white_list']+'\n'
538 >                msg += '(Hint: By whitelisting you force the job to run at this particular site(s).\n'
539 >                msg += 'Please check if the dataset is available at this site!)\n'
540 >            if self.cfg_params.has_key('EDG.ce_white_list'):
541 >                msg += 'WARNING: CE White List: '+self.cfg_params['EDG.ce_white_list']+'\n'
542 >                msg += '(Hint: By whitelisting you force the job to run at this particular site(s).\n'
543 >                msg += 'Please check if the dataset is available at this site!)\n'
544 >
545 >            common.logger.message(msg)
546  
547          self.list_of_args = list_of_lists
390        #print self.list_of_args
548          return
549  
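
Each entry appended to list_of_lists above is a triple [escaped file list, events to process, events to skip]; the LFNs are wrapped in \" and joined with \, presumably so the list survives shell quoting in the job wrapper. A small hypothetical example of one such triple (file names and event counts are made up):

# Hypothetical illustration of one jobSplittingByBlocks argument triple.
files = ['/store/mc/fake/file1.root', '/store/mc/fake/file2.root']
parString = ''
for f in files:
    parString += '\\\"' + f + '\\\"\,'          # same escaping as in the loop above
job_args = [parString[:-2], str(500), str(0)]   # [file list, eventsPerJobRequested, jobSkipEventCount]
# job_args[0] is the string  \"/store/mc/fake/file1.root\"\,\"/store/mc/fake/file2.root\"
# a second element of str(-1) means "use all remaining events" (last file of a block)
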
550 <    def jobSplittingPerEvents(self):
550 >    def jobSplittingNoInput(self):
551          """
552          Perform job splitting based on number of events per job
553          """
554          common.logger.debug(5,'Splitting per events')
555 <        common.logger.message('Required '+str(self.eventsPerJob)+' events per job ')
556 <        common.logger.message('Required '+str(self.total_number_of_events)+' events in total ')
555 >
556 >        if (self.selectEventsPerJob):
557 >            common.logger.message('Required '+str(self.eventsPerJob)+' events per job ')
558 >        if (self.selectNumberOfJobs):
559 >            common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
560 >        if (self.selectTotalNumberEvents):
561 >            common.logger.message('Required '+str(self.total_number_of_events)+' events in total ')
562  
563          if (self.total_number_of_events < 0):
564              msg='Cannot split jobs per Events with "-1" as total number of events'
565              raise CrabException(msg)
566  
567 <        self.total_number_of_jobs = int(self.total_number_of_events/self.eventsPerJob)
567 >        if (self.selectEventsPerJob):
568 >            if (self.selectTotalNumberEvents):
569 >                self.total_number_of_jobs = int(self.total_number_of_events/self.eventsPerJob)
570 >            elif(self.selectNumberOfJobs) :
571 >                self.total_number_of_jobs =self.theNumberOfJobs
572 >                self.total_number_of_events =int(self.theNumberOfJobs*self.eventsPerJob)
573 >
574 >        elif (self.selectNumberOfJobs) :
575 >            self.total_number_of_jobs = self.theNumberOfJobs
576 >            self.eventsPerJob = int(self.total_number_of_events/self.total_number_of_jobs)
577  
407        print "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"
408        print "self.total_number_of_events = ", self.total_number_of_events
409        print "self.eventsPerJob = ", self.eventsPerJob
410        print "self.total_number_of_jobs = ", self.total_number_of_jobs
411        print "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"
412        
578          common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
579  
580          # is there any remainder?
# Line 417 | Line 582 | class Cmssw(JobType):
582  
583          common.logger.debug(5,'Check  '+str(check))
584  
585 +        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created, each for '+str(self.eventsPerJob)+' events, for a total of '+str(self.total_number_of_jobs*self.eventsPerJob)+' events')
586          if check > 0:
587 <            common.logger.message('Warning: asked '+self.total_number_of_events+' but will do only '+(int(self.total_number_of_jobs)*self.eventsPerJob))
422 <
423 <        common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for a total of '+str(self.total_number_of_jobs*self.eventsPerJob)+' events')
587 >            common.logger.message('Warning: asked '+str(self.total_number_of_events)+' events but can do only '+str(int(self.total_number_of_jobs)*self.eventsPerJob))
588  
589          # argument is seed number.$i
590          self.list_of_args = []
591          for i in range(self.total_number_of_jobs):
592 <            self.list_of_args.append(int(str(self.sourceSeed)+str(i)))
593 <        print self.list_of_args
592 >            ## Since there is no input, any site is good
593 >            self.jobDestination.append([""]) #must be empty to write correctly the xml
594 >            args=[]
595 >            if (self.firstRun):
596 >                ## pythia first run
597 >                args.append(str(self.firstRun)+str(i))
598 >            self.list_of_args.append(args)
599 >
600 >        return
601 >
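
The no-input splitting above is plain integer arithmetic: any part of total_number_of_events that does not fill a whole job is dropped and reported by the 'Warning: asked ... but can do only ...' message. A quick worked example with made-up numbers:

total_number_of_events = 1000
eventsPerJob = 300
total_number_of_jobs = int(total_number_of_events / eventsPerJob)        # 3 jobs
check = total_number_of_events - total_number_of_jobs * eventsPerJob     # 100 events left over
# -> 3 jobs covering 900 events; the 100-event remainder is not processed and triggers the warning
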
602 >
603 >    def jobSplittingForScript(self):#CarlosDaniele
604 >        """
605 >        Perform job splitting based on number of job
606 >        """
607 >        common.logger.debug(5,'Splitting per job')
608 >        common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
609 >
610 >        self.total_number_of_jobs = self.theNumberOfJobs
611  
612 +        common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
613 +
614 +        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created')
615 +
616 +        # argument is just the job index $i
617 +        self.list_of_args = []
618 +        for i in range(self.total_number_of_jobs):
619 +            ## Since there is no input, any site is good
620 +           # self.jobDestination.append(["Any"])
621 +            self.jobDestination.append([""])
622 +            ## no random seed
623 +            self.list_of_args.append([str(i)])
624          return
625  
626      def split(self, jobParams):
627 <
435 <        common.jobDB.load()
627 >
628          #### Fabio
629          njobs = self.total_number_of_jobs
630          arglist = self.list_of_args
631          # create the empty structure
632          for i in range(njobs):
633              jobParams.append("")
634 <        
634 >
635 >        listID=[]
636 >        listField=[]
637          for job in range(njobs):
638 <            jobParams[job] = str(arglist[job])
639 <            common.jobDB.setArguments(job, jobParams[job])
638 >            jobParams[job] = arglist[job]
639 >            listID.append(job+1)
640 >            job_ToSave ={}
641 >            concString = ' '
642 >            argu=''
643 >            if len(jobParams[job]):
644 >                argu +=   concString.join(jobParams[job] )
645 >            job_ToSave['arguments']= str(job+1)+' '+argu## new BL--DS
646 >            job_ToSave['dlsDestination']= self.jobDestination[job]## new BL--DS
647 >            #common._db.updateJob_(job,job_ToSave)## new BL--DS
648 >            listField.append(job_ToSave)
649 >            msg="Job "+str(job)+" Arguments:   "+str(job+1)+" "+argu+"\n"  \
650 >            +"                     Destination: "+str(self.jobDestination[job])
651 >            common.logger.debug(5,msg)
652 >            #common.logger.debug(5,"Job "+str(job)+" Destination: "+str(self.jobDestination[job]))
653 >        common._db.updateJob_(listID,listField)## new BL--DS
654 >        ## Pay Attention Here....DS--BL
655 >        self.argsList = (len(jobParams[1])+1)
656  
447        common.jobDB.save()
657          return
658 <    
450 <    def getJobTypeArguments(self, nj, sched):
451 <        return common.jobDB.arguments(nj)
452 <  
658 >
659      def numberOfJobs(self):
660          # Fabio
661          return self.total_number_of_jobs
662  
457    def checkBlackList(self, allSites):
458        if len(self.reCEBlackList)==0: return allSites
459        sites = []
460        for site in allSites:
461            common.logger.debug(10,'Site '+site)
462            good=1
463            for re in self.reCEBlackList:
464                if re.search(site):
465                    common.logger.message('CE in black list, skipping site '+site)
466                    good=0
467                pass
468            if good: sites.append(site)
469        if len(sites) == 0:
470            common.logger.debug(3,"No sites found after BlackList")
471        return sites
472
473    def checkWhiteList(self, allSites):
474
475        if len(self.reCEWhiteList)==0: return allSites
476        sites = []
477        for site in allSites:
478            good=0
479            for re in self.reCEWhiteList:
480                if re.search(site):
481                    common.logger.debug(5,'CE in white list, adding site '+site)
482                    good=1
483                if not good: continue
484                sites.append(site)
485        if len(sites) == 0:
486            common.logger.message("No sites found after WhiteList\n")
487        else:
488            common.logger.debug(5,"Selected sites via WhiteList are "+str(sites)+"\n")
489        return sites
490
663      def getTarBall(self, exe):
664          """
665          Return the TarBall with lib and exe
666          """
667 <        
667 >
668          # if it exist, just return it
669 <        self.tgzNameWithPath = common.work_space.shareDir()+self.tgz_name
669 >        #
670 >        # Marco. Let's start to use relative path for Boss XML files
671 >        #
672 >        self.tgzNameWithPath = common.work_space.pathForTgz()+'share/'+self.tgz_name
673          if os.path.exists(self.tgzNameWithPath):
674              return self.tgzNameWithPath
675  
# Line 508 | Line 683 | class Cmssw(JobType):
683          # First of all declare the user Scram area
684          swArea = self.scram.getSWArea_()
685          #print "swArea = ", swArea
686 <        swVersion = self.scram.getSWVersion()
687 <        #print "swVersion = ", swVersion
686 >        # swVersion = self.scram.getSWVersion()
687 >        # print "swVersion = ", swVersion
688          swReleaseTop = self.scram.getReleaseTop_()
689          #print "swReleaseTop = ", swReleaseTop
690 <        
690 >
691          ## check if working area is release top
692          if swReleaseTop == '' or swArea == swReleaseTop:
693 +            common.logger.debug(3,"swArea = "+swArea+" swReleaseTop ="+swReleaseTop)
694              return
695  
696 <        filesToBeTarred = []
697 <        ## First find the executable
698 <        if (self.executable != ''):
699 <            exeWithPath = self.scram.findFile_(executable)
700 < #           print exeWithPath
701 <            if ( not exeWithPath ):
702 <                raise CrabException('User executable '+executable+' not found')
703 <
704 <            ## then check if it's private or not
705 <            if exeWithPath.find(swReleaseTop) == -1:
706 <                # the exe is private, so we must ship
707 <                common.logger.debug(5,"Exe "+exeWithPath+" to be tarred")
708 <                path = swArea+'/'
709 <                exe = string.replace(exeWithPath, path,'')
710 <                filesToBeTarred.append(exe)
711 <                pass
712 <            else:
713 <                # the exe is from release, we'll find it on WN
714 <                pass
715 <
716 <        ## Now get the libraries: only those in local working area
717 <        libDir = 'lib'
718 <        lib = swArea+'/' +libDir
719 <        common.logger.debug(5,"lib "+lib+" to be tarred")
720 <        if os.path.exists(lib):
721 <            filesToBeTarred.append(libDir)
722 <
723 <        ## Now check if module dir is present
724 <        moduleDir = 'module'
725 <        if os.path.isdir(swArea+'/'+moduleDir):
726 <            filesToBeTarred.append(moduleDir)
727 <
728 <        ## Now check if the Data dir is present
729 <        dataDir = 'src/Data/'
730 <        if os.path.isdir(swArea+'/'+dataDir):
731 <            filesToBeTarred.append(dataDir)
732 <
733 <        ## Create the tar-ball
734 <        if len(filesToBeTarred)>0:
735 <            cwd = os.getcwd()
736 <            os.chdir(swArea)
737 <            tarcmd = 'tar zcvf ' + self.tgzNameWithPath + ' '
738 <            for line in filesToBeTarred:
739 <                tarcmd = tarcmd + line + ' '
740 <            cout = runCommand(tarcmd)
741 <            if not cout:
742 <                raise CrabException('Could not create tar-ball')
743 <            os.chdir(cwd)
744 <        else:
745 <            common.logger.debug(5,"No files to be to be tarred")
746 <        
696 >        import tarfile
697 >        try: # create tar ball
698 >            tar = tarfile.open(self.tgzNameWithPath, "w:gz")
699 >            ## First find the executable
700 >            if (self.executable != ''):
701 >                exeWithPath = self.scram.findFile_(executable)
702 >                if ( not exeWithPath ):
703 >                    raise CrabException('User executable '+executable+' not found')
704 >
705 >                ## then check if it's private or not
706 >                if exeWithPath.find(swReleaseTop) == -1:
707 >                    # the exe is private, so we must ship
708 >                    common.logger.debug(5,"Exe "+exeWithPath+" to be tarred")
709 >                    path = swArea+'/'
710 >                    # distinguish case when script is in user project area or given by full path somewhere else
711 >                    if exeWithPath.find(path) >= 0 :
712 >                        exe = string.replace(exeWithPath, path,'')
713 >                        tar.add(path+exe,exe)
714 >                    else :
715 >                        tar.add(exeWithPath,os.path.basename(executable))
716 >                    pass
717 >                else:
718 >                    # the exe is from release, we'll find it on WN
719 >                    pass
720 >
721 >            ## Now get the libraries: only those in local working area
722 >            libDir = 'lib'
723 >            lib = swArea+'/' +libDir
724 >            common.logger.debug(5,"lib "+lib+" to be tarred")
725 >            if os.path.exists(lib):
726 >                tar.add(lib,libDir)
727 >
728 >            ## Now check if module dir is present
729 >            moduleDir = 'module'
730 >            module = swArea + '/' + moduleDir
731 >            if os.path.isdir(module):
732 >                tar.add(module,moduleDir)
733 >
734 >            ## Now check if any data dir(s) is present
735 >            swAreaLen=len(swArea)
736 >            for root, dirs, files in os.walk(swArea):
737 >                if "data" in dirs:
738 >                    common.logger.debug(5,"data "+root+"/data"+" to be tarred")
739 >                    tar.add(root+"/data",root[swAreaLen:]+"/data")
740 >
741 >
742 >            ## Add ProdCommon dir to tar
743 >            prodcommonDir = 'ProdCommon'
744 >            prodcommonPath = os.environ['CRABDIR'] + '/' + 'ProdCommon'
745 >            if os.path.isdir(prodcommonPath):
746 >                tar.add(prodcommonPath,prodcommonDir)
747 >
748 >            common.logger.debug(5,"Files added to "+self.tgzNameWithPath+" : "+str(tar.getnames()))
749 >            tar.close()
750 >        except :
751 >            raise CrabException('Could not create tar-ball')
752 >
753 >        ## check for tarball size
754 >        tarballinfo = os.stat(self.tgzNameWithPath)
755 >        if ( tarballinfo.st_size > self.MaxTarBallSize*1024*1024 ) :
756 >            raise CrabException('Input sandbox size of ' + str(float(tarballinfo.st_size)/1024.0/1024.0) + ' MB is larger than the allowed ' + str(self.MaxTarBallSize) + ' MB input sandbox limit and not supported by the used GRID submission system. Please make sure that no unnecessary files are in all data directories in your local CMSSW project area as they are automatically packed into the input sandbox.')
757 >
758 >        ## create tar-ball with ML stuff
759 >        self.MLtgzfile =  common.work_space.pathForTgz()+'share/MLfiles.tgz'
760 >        try:
761 >            tar = tarfile.open(self.MLtgzfile, "w:gz")
762 >            path=os.environ['CRABDIR'] + '/python/'
763 >            for file in ['report.py', 'DashboardAPI.py', 'Logger.py', 'ProcInfo.py', 'apmon.py', 'parseCrabFjr.py','writeCfg.py', 'JobReportErrorCode.py']:
764 >                tar.add(path+file,file)
765 >            common.logger.debug(5,"Files added to "+self.MLtgzfile+" : "+str(tar.getnames()))
766 >            tar.close()
767 >        except :
768 >            raise CrabException('Could not create ML files tar-ball')
769 >
770          return
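For reference, the added code above relies on two standard-library techniques: building a gzipped archive with the tarfile module and enforcing a size limit via os.stat. A minimal standalone sketch of the same idea follows; the function name, paths and the 9.5 MB default are illustrative only, not part of the class.

    import os
    import tarfile

    def make_sandbox_tarball(tgz_path, sw_area, max_mb=9.5):
        # pack lib/, module/ and every data/ directory found under sw_area,
        # mirroring the tar.add() calls in the code above
        tar = tarfile.open(tgz_path, "w:gz")
        for subdir in ('lib', 'module'):
            full = os.path.join(sw_area, subdir)
            if os.path.isdir(full):
                tar.add(full, subdir)              # store under a relative name
        for root, dirs, files in os.walk(sw_area):
            if 'data' in dirs:
                tar.add(os.path.join(root, 'data'),
                        os.path.join(root[len(sw_area):], 'data'))
        names = tar.getnames()
        tar.close()
        # reject tarballs above the input-sandbox limit, as the size check does
        size_mb = os.stat(tgz_path).st_size / 1024.0 / 1024.0
        if size_mb > max_mb:
            raise RuntimeError('tarball %.1f MB exceeds %.1f MB limit' % (size_mb, max_mb))
        return names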
771 <        
772 <    def wsSetupEnvironment(self, nj):
771 >
772 >    def additionalInputFileTgz(self):
773 >        """
774 >        Put all additional files into a tar ball and return its name
775 >        """
776 >        import tarfile
777 >        tarName=  common.work_space.pathForTgz()+'share/'+self.additional_tgz_name
778 >        tar = tarfile.open(tarName, "w:gz")
779 >        for file in self.additional_inbox_files:
780 >            tar.add(file,string.split(file,'/')[-1])
781 >        common.logger.debug(5,"Files added to "+self.additional_tgz_name+" : "+str(tar.getnames()))
782 >        tar.close()
783 >        return tarName
784 >
785 >    def wsSetupEnvironment(self, nj=0):
786          """
787          Returns part of a job script which prepares
788          the execution environment for the job 'nj'.
789          """
790          # Prepare JobType-independent part
791 <        txt = ''
792 <  
793 <        ## OLI_Daniele at this level  middleware already known
582 <
583 <        txt += 'if [ $middleware == LCG ]; then \n'
791 >        txt = '\n#Written by cms_cmssw::wsSetupEnvironment\n'
792 >        txt += 'echo ">>> setup environment"\n'
793 >        txt += 'if [ $middleware == LCG ]; then \n'
794          txt += self.wsSetupCMSLCGEnvironment_()
795          txt += 'elif [ $middleware == OSG ]; then\n'
796 <        txt += '    time=`date -u +"%s"`\n'
797 <        txt += '    WORKING_DIR=$OSG_WN_TMP/cms_$time\n'
798 <        txt += '    echo "Creating working directory: $WORKING_DIR"\n'
799 <        txt += '    /bin/mkdir -p $WORKING_DIR\n'
800 <        txt += '    if [ ! -d $WORKING_DIR ] ;then\n'
591 <        txt += '        echo "SET_CMS_ENV 10016 ==> OSG $WORKING_DIR could not be created on WN `hostname`"\n'
592 <        txt += '        echo "JOB_EXIT_STATUS = 10016"\n'
593 <        txt += '        echo "JobExitCode=10016" | tee -a $RUNTIME_AREA/$repo\n'
594 <        txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
595 <        txt += '        rm -f $RUNTIME_AREA/$repo \n'
596 <        txt += '        echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
597 <        txt += '        echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
598 <        txt += '        echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
599 <        txt += '        exit 1\n'
796 >        txt += '    WORKING_DIR=`/bin/mktemp  -d $OSG_WN_TMP/cms_XXXXXXXXXXXX`\n'
797 >        txt += '    if [ ! $? == 0 ] ;then\n'
798 >        txt += '        echo "ERROR ==> OSG $WORKING_DIR could not be created on WN `hostname`"\n'
799 >        txt += '        job_exit_code=10016\n'
800 >        txt += '        func_exit\n'
801          txt += '    fi\n'
802 +        txt += '    echo ">>> Created working directory: $WORKING_DIR"\n'
803          txt += '\n'
804          txt += '    echo "Change to working directory: $WORKING_DIR"\n'
805          txt += '    cd $WORKING_DIR\n'
806 <        txt += self.wsSetupCMSOSGEnvironment_()
806 >        txt += '    echo ">>> current directory (WORKING_DIR): $WORKING_DIR"\n'
807 >        txt += self.wsSetupCMSOSGEnvironment_()
808 >        #txt += '    echo "### Set SCRAM ARCH to ' + self.executable_arch + ' ###"\n'
809 >        #txt += '    export SCRAM_ARCH='+self.executable_arch+'\n'
810          txt += 'fi\n'
811  
812          # Prepare JobType-specific part
813          scram = self.scram.commandName()
814          txt += '\n\n'
815 <        txt += 'echo "### SPECIFIC JOB SETUP ENVIRONMENT ###"\n'
815 >        txt += 'echo ">>> specific cmssw setup environment:"\n'
816 >        txt += 'echo "CMSSW_VERSION =  '+self.version+'"\n'
817          txt += scram+' project CMSSW '+self.version+'\n'
818          txt += 'status=$?\n'
819          txt += 'if [ $status != 0 ] ; then\n'
820 <        txt += '   echo "SET_EXE_ENV 10034 ==>ERROR CMSSW '+self.version+' not found on `hostname`" \n'
821 <        txt += '   echo "JOB_EXIT_STATUS = 10034"\n'
822 <        txt += '   echo "JobExitCode=10034" | tee -a $RUNTIME_AREA/$repo\n'
617 <        txt += '   dumpStatus $RUNTIME_AREA/$repo\n'
618 <        txt += '   rm -f $RUNTIME_AREA/$repo \n'
619 <        txt += '   echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
620 <        txt += '   echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
621 <        txt += '   echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
622 <        ## OLI_Daniele
623 <        txt += '    if [ $middleware == OSG ]; then \n'
624 <        txt += '        echo "Remove working directory: $WORKING_DIR"\n'
625 <        txt += '        cd $RUNTIME_AREA\n'
626 <        txt += '        /bin/rm -rf $WORKING_DIR\n'
627 <        txt += '        if [ -d $WORKING_DIR ] ;then\n'
628 <        txt += '            echo "SET_CMS_ENV 10018 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after CMSSW CMSSW_0_6_1 not found on `hostname`"\n'
629 <        txt += '            echo "JOB_EXIT_STATUS = 10018"\n'
630 <        txt += '            echo "JobExitCode=10018" | tee -a $RUNTIME_AREA/$repo\n'
631 <        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
632 <        txt += '            rm -f $RUNTIME_AREA/$repo \n'
633 <        txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
634 <        txt += '            echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
635 <        txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
636 <        txt += '        fi\n'
637 <        txt += '    fi \n'
638 <        txt += '   exit 1 \n'
820 >        txt += '    echo "ERROR ==> CMSSW '+self.version+' not found on `hostname`" \n'
821 >        txt += '    job_exit_code=10034\n'
822 >        txt += '    func_exit\n'
823          txt += 'fi \n'
640        txt += 'echo "CMSSW_VERSION =  '+self.version+'"\n'
824          txt += 'cd '+self.version+'\n'
825 +        ########## FEDE FOR DBS2 ######################
826 +        txt += 'SOFTWARE_DIR=`pwd`\n'
827 +        txt += 'echo ">>> current directory (SOFTWARE_DIR): $SOFTWARE_DIR" \n'
828 +        ###############################################
829          ### needed grep for bug in scramv1 ###
830          txt += 'eval `'+scram+' runtime -sh | grep -v SCRAMRT_LSB_JOBNAME`\n'
644
831          # Handle the arguments:
832          txt += "\n"
833          txt += "## number of arguments (first argument always jobnumber)\n"
834          txt += "\n"
835 <        txt += "narg=$#\n"
650 <        txt += "if [ $narg -lt 2 ]\n"
835 >        txt += "if [ $nargs -lt "+str(self.argsList)+" ]\n"
836          txt += "then\n"
837 <        txt += "    echo 'SET_EXE_ENV 1 ==> ERROR Too few arguments' +$narg+ \n"
838 <        txt += '    echo "JOB_EXIT_STATUS = 50113"\n'
839 <        txt += '    echo "JobExitCode=50113" | tee -a $RUNTIME_AREA/$repo\n'
655 <        txt += '    dumpStatus $RUNTIME_AREA/$repo\n'
656 <        txt += '    rm -f $RUNTIME_AREA/$repo \n'
657 <        txt += '    echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
658 <        txt += '    echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
659 <        txt += '    echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
660 <        ## OLI_Daniele
661 <        txt += '    if [ $middleware == OSG ]; then \n'
662 <        txt += '        echo "Remove working directory: $WORKING_DIR"\n'
663 <        txt += '        cd $RUNTIME_AREA\n'
664 <        txt += '        /bin/rm -rf $WORKING_DIR\n'
665 <        txt += '        if [ -d $WORKING_DIR ] ;then\n'
666 <        txt += '            echo "SET_EXE_ENV 50114 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after Too few arguments for CRAB job wrapper"\n'
667 <        txt += '            echo "JOB_EXIT_STATUS = 50114"\n'
668 <        txt += '            echo "JobExitCode=50114" | tee -a $RUNTIME_AREA/$repo\n'
669 <        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
670 <        txt += '            rm -f $RUNTIME_AREA/$repo \n'
671 <        txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
672 <        txt += '            echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
673 <        txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
674 <        txt += '        fi\n'
675 <        txt += '    fi \n'
676 <        txt += "    exit 1\n"
837 >        txt += "    echo 'ERROR ==> Too few arguments' +$nargs+ \n"
838 >        txt += '    job_exit_code=50113\n'
839 >        txt += "    func_exit\n"
840          txt += "fi\n"
841          txt += "\n"
842  
843          # Prepare job-specific part
844          job = common.job_list[nj]
845 <        pset = os.path.basename(job.configFilename())
846 <        txt += '\n'
847 <        if (self.datasetPath): # standard job
848 <            txt += 'InputFiles=$2\n'
686 <            txt += 'echo "Inputfiles:<$InputFiles>"\n'
687 <            txt += 'sed "s#{\'INPUT\'}#$InputFiles#" $RUNTIME_AREA/'+pset+' > pset.cfg\n'
688 <        else:  # pythia like job
689 <            txt += 'Seed=$2\n'
690 <            txt += 'echo "Seed: <$Seed>"\n'
691 <            txt += 'sed "s#INPUT#$Seed#" $RUNTIME_AREA/'+pset+' > pset.cfg\n'
845 >        ### FEDE FOR DBS OUTPUT PUBLICATION
846 >        if (self.datasetPath):
847 >            txt += '\n'
848 >            txt += 'DatasetPath='+self.datasetPath+'\n'
849  
850 <        if len(self.additional_inbox_files) > 0:
694 <            for file in self.additional_inbox_files:
695 <                txt += 'if [ -e $RUNTIME_AREA/'+file+' ] ; then\n'
696 <                txt += '   cp $RUNTIME_AREA/'+file+' .\n'
697 <                txt += '   chmod +x '+file+'\n'
698 <                txt += 'fi\n'
699 <            pass
850 >            datasetpath_split = self.datasetPath.split("/")
851  
852 <        txt += 'echo "### END JOB SETUP ENVIRONMENT ###"\n\n'
852 >            txt += 'PrimaryDataset='+datasetpath_split[1]+'\n'
853 >            txt += 'DataTier='+datasetpath_split[2]+'\n'
854 >            txt += 'ApplicationFamily=cmsRun\n'
855  
856 <        txt += '\n'
857 <        txt += 'echo "***** cat pset.cfg *********"\n'
858 <        txt += 'cat pset.cfg\n'
859 <        txt += 'echo "****** end pset.cfg ********"\n'
860 <        txt += '\n'
861 <        # txt += 'echo "***** cat pset1.cfg *********"\n'
862 <        # txt += 'cat pset1.cfg\n'
863 <        # txt += 'echo "****** end pset1.cfg ********"\n'
856 >        else:
857 >            txt += 'DatasetPath=MCDataTier\n'
858 >            txt += 'PrimaryDataset=null\n'
859 >            txt += 'DataTier=null\n'
860 >            txt += 'ApplicationFamily=MCDataTier\n'
861 >        if self.pset != None:
862 >            pset = os.path.basename(job.configFilename())
863 >            txt += '\n'
864 >            txt += 'cp  $RUNTIME_AREA/'+pset+' .\n'
865 >            if (self.datasetPath): # standard job
866 >                txt += 'InputFiles=${args[1]}; export InputFiles\n'
867 >                txt += 'MaxEvents=${args[2]}; export MaxEvents\n'
868 >                txt += 'SkipEvents=${args[3]}; export SkipEvents\n'
869 >                txt += 'echo "Inputfiles:<$InputFiles>"\n'
870 >                txt += 'echo "MaxEvents:<$MaxEvents>"\n'
871 >                txt += 'echo "SkipEvents:<$SkipEvents>"\n'
872 >            else:  # pythia like job
873 >                txt += 'PreserveSeeds='  + ','.join(self.preserveSeeds)  + '; export PreserveSeeds\n'
874 >                txt += 'IncrementSeeds=' + ','.join(self.incrementSeeds) + '; export IncrementSeeds\n'
875 >                txt += 'echo "PreserveSeeds: <$PreserveSeeds>"\n'
876 >                txt += 'echo "IncrementSeeds:<$IncrementSeeds>"\n'
877 >                if (self.firstRun):
878 >                    txt += 'FirstRun=${args[1]}; export FirstRun\n'
879 >                    txt += 'echo "FirstRun: <$FirstRun>"\n'
880 >
881 >            txt += 'mv -f '+pset+' pset.cfg\n'
882 >
883 >        if len(self.additional_inbox_files) > 0:
884 >            txt += 'if [ -e $RUNTIME_AREA/'+self.additional_tgz_name+' ] ; then\n'
885 >            txt += '  tar xzvf $RUNTIME_AREA/'+self.additional_tgz_name+'\n'
886 >            txt += 'fi\n'
887 >            pass
888 >
889 >        if self.pset != None:
890 >            txt += '\n'
891 >            txt += 'echo "***** cat pset.cfg *********"\n'
892 >            txt += 'cat pset.cfg\n'
893 >            txt += 'echo "****** end pset.cfg ********"\n'
894 >            txt += '\n'
895 >            txt += 'PSETHASH=`EdmConfigHash < pset.cfg` \n'
896 >            txt += 'echo "PSETHASH = $PSETHASH" \n'
897 >            txt += '\n'
898          return txt
899  
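All the ws* methods follow the same pattern: they return a bash fragment built by string concatenation, and the fragments are later glued into the job wrapper that runs on the worker node. A minimal sketch of that pattern, with an illustrative function name and assumed CMSSW version and scram command:

    def setup_fragment(version, scram='scramv1', middleware='OSG'):
        # assemble a wrapper-script fragment the same way wsSetupEnvironment() does
        txt = '\n# environment setup fragment\n'
        txt += 'echo ">>> setup environment"\n'
        if middleware == 'OSG':
            txt += 'WORKING_DIR=`/bin/mktemp -d $OSG_WN_TMP/cms_XXXXXXXXXXXX`\n'
            txt += 'cd $WORKING_DIR\n'
        txt += scram + ' project CMSSW ' + version + '\n'
        txt += 'cd ' + version + '\n'
        return txt

    # printing the fragment shows the bash that ends up in the wrapper
    print(setup_fragment('CMSSW_1_6_12'))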
900 <    def wsBuildExe(self, nj):
900 >    def wsUntarSoftware(self, nj=0):
901          """
902          Put in the script the commands to untar the software tarball
903          shipped in the input sandbox.
904          """
905  
906 <        txt = ""
906 >        txt = '\n#Written by cms_cmssw::wsUntarSoftware\n'
907  
908          if os.path.isfile(self.tgzNameWithPath):
909 <            txt += 'echo "tar xzvf $RUNTIME_AREA/'+os.path.basename(self.tgzNameWithPath)+'"\n'
909 >            txt += 'echo ">>> tar xzvf $RUNTIME_AREA/'+os.path.basename(self.tgzNameWithPath)+' :" \n'
910              txt += 'tar xzvf $RUNTIME_AREA/'+os.path.basename(self.tgzNameWithPath)+'\n'
911              txt += 'untar_status=$? \n'
912              txt += 'if [ $untar_status -ne 0 ]; then \n'
913 <            txt += '   echo "SET_EXE 1 ==> ERROR Untarring .tgz file failed"\n'
914 <            txt += '   echo "JOB_EXIT_STATUS = $untar_status" \n'
915 <            txt += '   echo "JobExitCode=$untar_status" | tee -a $RUNTIME_AREA/$repo\n'
729 <            txt += '   if [ $middleware == OSG ]; then \n'
730 <            txt += '       echo "Remove working directory: $WORKING_DIR"\n'
731 <            txt += '       cd $RUNTIME_AREA\n'
732 <            txt += '       /bin/rm -rf $WORKING_DIR\n'
733 <            txt += '       if [ -d $WORKING_DIR ] ;then\n'
734 <            txt += '           echo "SET_EXE 50999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after Untarring .tgz file failed"\n'
735 <            txt += '           echo "JOB_EXIT_STATUS = 50999"\n'
736 <            txt += '           echo "JobExitCode=50999" | tee -a $RUNTIME_AREA/$repo\n'
737 <            txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
738 <            txt += '           rm -f $RUNTIME_AREA/$repo \n'
739 <            txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
740 <            txt += '           echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
741 <            txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
742 <            txt += '       fi\n'
743 <            txt += '   fi \n'
744 <            txt += '   \n'
745 <            txt += '   exit 1 \n'
913 >            txt += '   echo "ERROR ==> Untarring .tgz file failed"\n'
914 >            txt += '   job_exit_code=$untar_status\n'
915 >            txt += '   func_exit\n'
916              txt += 'else \n'
917              txt += '   echo "Successful untar" \n'
918              txt += 'fi \n'
919 +            txt += '\n'
920 +            txt += 'echo ">>> Include ProdCommon in PYTHONPATH:"\n'
921 +            txt += 'if [ -z "$PYTHONPATH" ]; then\n'
922 +            txt += '   export PYTHONPATH=$RUNTIME_AREA/ProdCommon\n'
923 +            txt += 'else\n'
924 +            txt += '   export PYTHONPATH=$RUNTIME_AREA/ProdCommon:${PYTHONPATH}\n'
925 +            txt += 'echo "PYTHONPATH=$PYTHONPATH"\n'
926 +            txt += 'fi\n'
927 +            txt += '\n'
928 +
929              pass
930 <        
930 >
931 >        return txt
932 >
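The generated PYTHONPATH handling above uses the usual idiom: set the variable if it is empty, otherwise prepend with a ':' separator. The same logic expressed directly in Python (helper name and path are illustrative):

    import os

    def prepend_path(var, entry):
        # set the variable if unset/empty, otherwise prepend with ':'
        current = os.environ.get(var, '')
        os.environ[var] = entry if not current else entry + ':' + current
        return os.environ[var]

    prepend_path('PYTHONPATH', '/tmp/ProdCommon')   # e.g. $RUNTIME_AREA/ProdCommon on the WN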
933 >    def wsBuildExe(self, nj=0):
934 >        """
935 >        Put in the script the commands to move the shipped software
936 >        directories (lib/, module/, ProdCommon/) into the project area.
937 >        """
938 >
939 >        txt = '\n#Written by cms_cmssw::wsBuildExe\n'
940 >        txt += 'echo ">>> moving CMSSW software directories in `pwd`" \n'
941 >
942 >        txt += 'rm -r lib/ module/ \n'
943 >        txt += 'mv $RUNTIME_AREA/lib/ . \n'
944 >        txt += 'mv $RUNTIME_AREA/module/ . \n'
945 >        txt += 'mv $RUNTIME_AREA/ProdCommon/ . \n'
946 >
947 >        txt += 'if [ -z "$PYTHONPATH" ]; then\n'
948 >        txt += '   export PYTHONPATH=$SOFTWARE_DIR/ProdCommon\n'
949 >        txt += 'else\n'
950 >        txt += '   export PYTHONPATH=$SOFTWARE_DIR/ProdCommon:${PYTHONPATH}\n'
951 >        txt += 'echo "PYTHONPATH=$PYTHONPATH"\n'
952 >        txt += 'fi\n'
953 >        txt += '\n'
954 >
955          return txt
956  
957      def modifySteeringCards(self, nj):
958          """
959 <        modify the card provided by the user,
959 >        modify the card provided by the user,
960          writing a new card into share dir
961          """
962 <        
962 >
963      def executableName(self):
964 <        return self.executable
964 >        if self.scriptExe: #CarlosDaniele
965 >            return "sh "
966 >        else:
967 >            return self.executable
968  
969      def executableArgs(self):
970 <        return " -p pset.cfg"
970 >        # FUTURE: This function tests the CMSSW version. Can be simplified as we drop support for old versions
971 >        if self.scriptExe:#CarlosDaniele
972 >            return   self.scriptExe + " $NJob"
973 >        else:
974 >            version_array = self.scram.getSWVersion().split('_')
975 >            major = 0
976 >            minor = 0
977 >            try:
978 >                major = int(version_array[1])
979 >                minor = int(version_array[2])
980 >            except:
981 >                msg = "Cannot parse CMSSW version string: " + "_".join(version_array) + " for major and minor release number!"
982 >                raise CrabException(msg)
983 >
984 >            ex_args = ""
985 >            # FUTURE: This tests the CMSSW version. Can remove code as versions deprecated
986 >            # Framework job report
987 >            if major >= 1 and minor >= 5 :
988 >                ex_args += " -j $RUNTIME_AREA/crab_fjr_$NJob.xml"
989 >            # Type of cfg file
990 >            if major >= 2 :
991 >                ex_args += " -p pset.py"
992 >            else:
993 >                ex_args += " -p pset.cfg"
994 >            return ex_args
995  
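executableArgs() keys its cmsRun options on the CMSSW version string. A standalone sketch that reproduces the same version test (the function name and inputs are illustrative; it keeps the "minor >= 5" condition exactly as written above):

    def cmsrun_args(sw_version, fjr='crab_fjr_1.xml'):
        # e.g. 'CMSSW_1_6_12' -> major 1, minor 6
        parts = sw_version.split('_')
        major, minor = int(parts[1]), int(parts[2])
        args = ''
        if major >= 1 and minor >= 5:          # framework job report flag
            args += ' -j ' + fjr
        args += ' -p pset.py' if major >= 2 else ' -p pset.cfg'
        return args

    print(cmsrun_args('CMSSW_1_6_12'))   # " -j crab_fjr_1.xml -p pset.cfg"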
996      def inputSandbox(self, nj):
997          """
998          Returns a list of filenames to be put in JDL input sandbox.
999          """
1000          inp_box = []
1001 <        # dict added to delete duplicate from input sandbox file list
1002 <        seen = {}
1001 >        # # dict added to delete duplicate from input sandbox file list
1002 >        # seen = {}
1003          ## code
1004          if os.path.isfile(self.tgzNameWithPath):
1005              inp_box.append(self.tgzNameWithPath)
1006 +        if os.path.isfile(self.MLtgzfile):
1007 +            inp_box.append(self.MLtgzfile)
1008          ## config
1009 <        inp_box.append(common.job_list[nj].configFilename())
1009 >        if not self.pset is None:
1010 >            inp_box.append(common.work_space.pathForTgz() + 'job/' + self.configFilename())
1011          ## additional input files
1012 <        #for file in self.additional_inbox_files:
1013 <        #    inp_box.append(common.work_space.cwdDir()+file)
1012 >        tgz = self.additionalInputFileTgz()
1013 >        inp_box.append(tgz)
1014 >        ## executable
1015 >        wrapper = os.path.basename(str(common._db.queryTask('scriptName')))
1016 >        inp_box.append(common.work_space.pathForTgz() +'job/'+ wrapper)
1017          return inp_box
1018  
1019      def outputSandbox(self, nj):
# Line 785 | Line 1022 | class Cmssw(JobType):
1022          """
1023          out_box = []
1024  
788        stdout=common.job_list[nj].stdout()
789        stderr=common.job_list[nj].stderr()
790
1025          ## User Declared output files
1026 <        for out in self.output_file:
1027 <            n_out = nj + 1
1026 >        for out in (self.output_file+self.output_file_sandbox):
1027 >            n_out = nj + 1
1028              out_box.append(self.numberFile_(out,str(n_out)))
1029          return out_box
796        return []
1030  
1031      def prepareSteeringCards(self):
1032          """
# Line 806 | Line 1039 | class Cmssw(JobType):
1039          Returns part of a job script which renames the produced files.
1040          """
1041  
1042 <        txt = '\n'
1043 <        txt += '# directory content\n'
1042 >        txt = '\n#Written by cms_cmssw::wsRenameOutput\n'
1043 >        txt += 'echo ">>> current directory (SOFTWARE_DIR): $SOFTWARE_DIR" \n'
1044 >        txt += 'echo ">>> current directory content:"\n'
1045          txt += 'ls \n'
1046 <        file_list = ''
1047 <        for fileWithSuffix in self.output_file:
1046 >        txt += '\n'
1047 >
1048 >        for fileWithSuffix in (self.output_file):
1049              output_file_num = self.numberFile_(fileWithSuffix, '$NJob')
815            file_list=file_list+output_file_num+' '
1050              txt += '\n'
1051              txt += '# check output file\n'
1052 <            txt += 'ls '+fileWithSuffix+'\n'
1053 <            txt += 'exe_result=$?\n'
1054 <            txt += 'if [ $exe_result -ne 0 ] ; then\n'
1055 <            txt += '   echo "ERROR: No output file to manage"\n'
1056 <            txt += '   echo "JOB_EXIT_STATUS = $exe_result"\n'
1057 <            txt += '   echo "JobExitCode=60302" | tee -a $RUNTIME_AREA/$repo\n'
1058 <            txt += '   dumpStatus $RUNTIME_AREA/$repo\n'
1059 <            txt += '   rm -f $RUNTIME_AREA/$repo \n'
1060 <            txt += '   echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1061 <            txt += '   echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1062 <            txt += '   echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
829 <            ### OLI_DANIELE
830 <            if common.scheduler.boss_scheduler_name == 'condor_g':
1052 >            txt += 'if [ -e ./'+fileWithSuffix+' ] ; then\n'
1053 >            if (self.copy_data == 1):  # For OSG nodes, file is in $WORKING_DIR, should not be moved to $RUNTIME_AREA
1054 >                txt += '    mv '+fileWithSuffix+' '+output_file_num+'\n'
1055 >                txt += '    ln -s `pwd`/'+output_file_num+' $RUNTIME_AREA/'+fileWithSuffix+'\n'
1056 >            else:
1057 >                txt += '    mv '+fileWithSuffix+' $RUNTIME_AREA/'+output_file_num+'\n'
1058 >                txt += '    ln -s $RUNTIME_AREA/'+output_file_num+' $RUNTIME_AREA/'+fileWithSuffix+'\n'
1059 >            txt += 'else\n'
1060 >            txt += '    job_exit_code=60302\n'
1061 >            txt += '    echo "WARNING: Output file '+fileWithSuffix+' not found"\n'
1062 >            if common.scheduler.name().upper() == 'CONDOR_G':
1063                  txt += '    if [ $middleware == OSG ]; then \n'
1064                  txt += '        echo "prepare dummy output file"\n'
1065                  txt += '        echo "Processing of job output failed" > $RUNTIME_AREA/'+output_file_num+'\n'
1066                  txt += '    fi \n'
835            txt += 'else\n'
836            txt += '   cp '+fileWithSuffix+' $RUNTIME_AREA/'+output_file_num+'\n'
1067              txt += 'fi\n'
1068 <      
1069 <        txt += 'cd $RUNTIME_AREA\n'
1070 <        file_list=file_list[:-1]
1071 <        txt += 'file_list="'+file_list+'"\n'
1072 <        ### OLI_DANIELE
1073 <        txt += 'if [ $middleware == OSG ]; then\n'  
1074 <        txt += '    cd $RUNTIME_AREA\n'
1075 <        txt += '    echo "Remove working directory: $WORKING_DIR"\n'
1076 <        txt += '    /bin/rm -rf $WORKING_DIR\n'
847 <        txt += '    if [ -d $WORKING_DIR ] ;then\n'
848 <        txt += '        echo "SET_EXE 60999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after cleanup of WN"\n'
849 <        txt += '        echo "JOB_EXIT_STATUS = 60999"\n'
850 <        txt += '        echo "JobExitCode=60999" | tee -a $RUNTIME_AREA/$repo\n'
851 <        txt += '        dumpStatus $RUNTIME_AREA/$repo\n'
852 <        txt += '        rm -f $RUNTIME_AREA/$repo \n'
853 <        txt += '        echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
854 <        txt += '        echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
855 <        txt += '        echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
856 <        txt += '    fi\n'
857 <        txt += 'fi\n'
1068 >        file_list = []
1069 >        for fileWithSuffix in (self.output_file):
1070 >             file_list.append(self.numberFile_(fileWithSuffix, '$NJob'))
1071 >
1072 >        txt += 'file_list="'+string.join(file_list,' ')+'"\n'
1073 >        txt += '\n'
1074 >        txt += 'echo ">>> current directory (SOFTWARE_DIR): $SOFTWARE_DIR" \n'
1075 >        txt += 'echo ">>> current directory content:"\n'
1076 >        txt += 'ls \n'
1077          txt += '\n'
1078 +        txt += 'cd $RUNTIME_AREA\n'
1079 +        txt += 'echo ">>> current directory (RUNTIME_AREA):  $RUNTIME_AREA"\n'
1080          return txt
1081  
1082      def numberFile_(self, file, txt):
# Line 866 | Line 1087 | class Cmssw(JobType):
1087          # take away last extension
1088          name = p[0]
1089          for x in p[1:-1]:
1090 <           name=name+"."+x
1090 >            name=name+"."+x
1091          # add "_txt"
1092          if len(p)>1:
1093 <          ext = p[len(p)-1]
1094 <          #result = name + '_' + str(txt) + "." + ext
874 <          result = name + '_' + txt + "." + ext
1093 >            ext = p[len(p)-1]
1094 >            result = name + '_' + txt + "." + ext
1095          else:
1096 <          #result = name + '_' + str(txt)
1097 <          result = name + '_' + txt
878 <        
1096 >            result = name + '_' + txt
1097 >
1098          return result
1099  
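numberFile_() tags a file name with the job number just before its last extension. The same transformation as a standalone function (the name is illustrative):

    def number_file(filename, tag):
        # 'histo.root' + '$NJob' -> 'histo_$NJob.root'; names without an
        # extension simply get '_<tag>' appended
        parts = filename.split('.')
        if len(parts) > 1:
            return '.'.join(parts[:-1]) + '_' + tag + '.' + parts[-1]
        return filename + '_' + tag

    print(number_file('histo.root', '$NJob'))   # histo_$NJob.root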
1100 <    def getRequirements(self):
1100 >    def getRequirements(self, nj=[]):
1101          """
1102 <        return job requirements to add to jdl files
1102 >        return job requirements to add to jdl files
1103          """
1104          req = ''
1105 <        if common.analisys_common_info['sw_version']:
1105 >        if self.version:
1106              req='Member("VO-cms-' + \
1107 <                 common.analisys_common_info['sw_version'] + \
1107 >                 self.version + \
1108                   '", other.GlueHostApplicationSoftwareRunTimeEnvironment)'
1109 <        if common.analisys_common_info['sites']:
1110 <            if len(common.analisys_common_info['sites'])>0:
1111 <                req = req + ' && ('
1112 <                for i in range(len(common.analisys_common_info['sites'])):
1113 <                    req = req + 'other.GlueCEInfoHostName == "' \
1114 <                         + common.analisys_common_info['sites'][i] + '"'
1115 <                    if ( i < (int(len(common.analisys_common_info['sites']) - 1)) ):
1116 <                        req = req + ' || '
1117 <            req = req + ')'
1118 <        #print "req = ", req
1109 >        ## SL add requirement for OS version only if SL4
1110 >        #reSL4 = re.compile( r'slc4' )
1111 >        if self.executable_arch: # and reSL4.search(self.executable_arch):
1112 >            req+=' && Member("VO-cms-' + \
1113 >                 self.executable_arch + \
1114 >                 '", other.GlueHostApplicationSoftwareRunTimeEnvironment)'
1115 >
1116 >        req = req + ' && (other.GlueHostNetworkAdapterOutboundIP)'
1117 >        if common.scheduler.name() == "glitecoll":
1118 >            req += ' && other.GlueCEStateStatus == "Production" '
1119 >
1120          return req
1121  
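getRequirements() returns a ClassAd expression that ends up in the JDL Requirements attribute. A sketch of the expression it builds, using assumed version and architecture tags for illustration:

    def jdl_requirements(version, arch, glitecoll=False):
        # same shape as the expression assembled above
        env = 'other.GlueHostApplicationSoftwareRunTimeEnvironment'
        req = 'Member("VO-cms-%s", %s)' % (version, env)
        req += ' && Member("VO-cms-%s", %s)' % (arch, env)
        req += ' && (other.GlueHostNetworkAdapterOutboundIP)'
        if glitecoll:
            req += ' && other.GlueCEStateStatus == "Production" '
        return req

    print(jdl_requirements('CMSSW_1_6_12', 'slc4_ia32_gcc345'))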
1122      def configFilename(self):
1123          """ return the config filename """
1124          return self.name()+'.cfg'
1125  
906    ### OLI_DANIELE
1126      def wsSetupCMSOSGEnvironment_(self):
1127          """
1128          Returns part of a job script which is prepares
1129          the execution environment and which is common for all CMS jobs.
1130          """
1131 <        txt = '\n'
1132 <        txt += '   echo "### SETUP CMS OSG  ENVIRONMENT ###"\n'
1133 <        txt += '   if [ -f $GRID3_APP_DIR/cmssoft/cmsset_default.sh ] ;then\n'
1134 <        txt += '      # Use $GRID3_APP_DIR/cmssoft/cmsset_default.sh to setup cms software\n'
1135 <        txt += '       source $GRID3_APP_DIR/cmssoft/cmsset_default.sh '+self.version+'\n'
1136 <        txt += '   elif [ -f $OSG_APP/cmssoft/cmsset_default.sh ] ;then\n'
1137 <        txt += '      # Use $OSG_APP/cmssoft/cmsset_default.sh to setup cms software\n'
1138 <        txt += '       source $OSG_APP/cmssoft/cmsset_default.sh '+self.version+'\n'
1139 <        txt += '   else\n'
1140 <        txt += '       echo "SET_CMS_ENV 10020 ==> ERROR $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cmsset_default.sh file not found"\n'
1141 <        txt += '       echo "JOB_EXIT_STATUS = 10020"\n'
1142 <        txt += '       echo "JobExitCode=10020" | tee -a $RUNTIME_AREA/$repo\n'
1143 <        txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
925 <        txt += '       rm -f $RUNTIME_AREA/$repo \n'
926 <        txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
927 <        txt += '       echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
928 <        txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
929 <        txt += '       exit 1\n'
930 <        txt += '\n'
931 <        txt += '       echo "Remove working directory: $WORKING_DIR"\n'
932 <        txt += '       cd $RUNTIME_AREA\n'
933 <        txt += '       /bin/rm -rf $WORKING_DIR\n'
934 <        txt += '       if [ -d $WORKING_DIR ] ;then\n'
935 <        txt += '            echo "SET_CMS_ENV 10017 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cmsset_default.sh file not found"\n'
936 <        txt += '            echo "JOB_EXIT_STATUS = 10017"\n'
937 <        txt += '            echo "JobExitCode=10017" | tee -a $RUNTIME_AREA/$repo\n'
938 <        txt += '            dumpStatus $RUNTIME_AREA/$repo\n'
939 <        txt += '            rm -f $RUNTIME_AREA/$repo \n'
940 <        txt += '            echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
941 <        txt += '            echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
942 <        txt += '            echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
943 <        txt += '       fi\n'
944 <        txt += '\n'
945 <        txt += '       exit 1\n'
946 <        txt += '   fi\n'
1131 >        txt = '\n#Written by cms_cmssw::wsSetupCMSOSGEnvironment_\n'
1132 >        txt += '    echo ">>> setup CMS OSG environment:"\n'
1133 >        txt += '    echo "set SCRAM ARCH to ' + self.executable_arch + '"\n'
1134 >        txt += '    export SCRAM_ARCH='+self.executable_arch+'\n'
1135 >        txt += '    echo "SCRAM_ARCH = $SCRAM_ARCH"\n'
1136 >        txt += '    if [ -f $OSG_APP/cmssoft/cms/cmsset_default.sh ] ;then\n'
1137 >        txt += '      # Use $OSG_APP/cmssoft/cms/cmsset_default.sh to setup cms software\n'
1138 >        txt += '        source $OSG_APP/cmssoft/cms/cmsset_default.sh '+self.version+'\n'
1139 >        txt += '    else\n'
1140 >        txt += '        echo "ERROR ==> $OSG_APP/cmssoft/cms/cmsset_default.sh file not found"\n'
1141 >        txt += '        job_exit_code=10020\n'
1142 >        txt += '        func_exit\n'
1143 >        txt += '    fi\n'
1144          txt += '\n'
1145 <        txt += '   echo "SET_CMS_ENV 0 ==> setup cms environment ok"\n'
1146 <        txt += '   echo " END SETUP CMS OSG  ENVIRONMENT "\n'
1145 >        txt += '    echo "==> setup cms environment ok"\n'
1146 >        txt += '    echo "SCRAM_ARCH = $SCRAM_ARCH"\n'
1147  
1148          return txt
1149 <
1149 >
1150      ### OLI_DANIELE
1151      def wsSetupCMSLCGEnvironment_(self):
1152          """
1153          Returns part of a job script which is prepares
1154          the execution environment and which is common for all CMS jobs.
1155          """
1156 <        txt  = '   \n'
1157 <        txt += '   echo " ### SETUP CMS LCG  ENVIRONMENT ### "\n'
1158 <        txt += '   if [ ! $VO_CMS_SW_DIR ] ;then\n'
1159 <        txt += '       echo "SET_CMS_ENV 10031 ==> ERROR CMS software dir not found on WN `hostname`"\n'
1160 <        txt += '       echo "JOB_EXIT_STATUS = 10031" \n'
1161 <        txt += '       echo "JobExitCode=10031" | tee -a $RUNTIME_AREA/$repo\n'
1162 <        txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
1163 <        txt += '       rm -f $RUNTIME_AREA/$repo \n'
1164 <        txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1165 <        txt += '       echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1166 <        txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1167 <        txt += '       exit 1\n'
1168 <        txt += '   else\n'
1169 <        txt += '       echo "Sourcing environment... "\n'
1170 <        txt += '       if [ ! -s $VO_CMS_SW_DIR/cmsset_default.sh ] ;then\n'
1171 <        txt += '           echo "SET_CMS_ENV 10020 ==> ERROR cmsset_default.sh file not found into dir $VO_CMS_SW_DIR"\n'
1172 <        txt += '           echo "JOB_EXIT_STATUS = 10020"\n'
1173 <        txt += '           echo "JobExitCode=10020" | tee -a $RUNTIME_AREA/$repo\n'
1174 <        txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
1175 <        txt += '           rm -f $RUNTIME_AREA/$repo \n'
1176 <        txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1177 <        txt += '           echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1178 <        txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1179 <        txt += '           exit 1\n'
1180 <        txt += '       fi\n'
1181 <        txt += '       echo "sourcing $VO_CMS_SW_DIR/cmsset_default.sh"\n'
1182 <        txt += '       source $VO_CMS_SW_DIR/cmsset_default.sh\n'
1183 <        txt += '       result=$?\n'
1184 <        txt += '       if [ $result -ne 0 ]; then\n'
1185 <        txt += '           echo "SET_CMS_ENV 10032 ==> ERROR problem sourcing $VO_CMS_SW_DIR/cmsset_default.sh"\n'
1186 <        txt += '           echo "JOB_EXIT_STATUS = 10032"\n'
1187 <        txt += '           echo "JobExitCode=10032" | tee -a $RUNTIME_AREA/$repo\n'
1188 <        txt += '           dumpStatus $RUNTIME_AREA/$repo\n'
1189 <        txt += '           rm -f $RUNTIME_AREA/$repo \n'
1190 <        txt += '           echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1191 <        txt += '           echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1192 <        txt += '           echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1193 <        txt += '           exit 1\n'
1194 <        txt += '       fi\n'
1195 <        txt += '   fi\n'
1196 <        txt += '   \n'
1197 <        txt += '   string=`cat /etc/redhat-release`\n'
1198 <        txt += '   echo $string\n'
1199 <        txt += '   if [[ $string = *alhalla* ]]; then\n'
1200 <        txt += '       echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
1201 <        txt += '   elif [[ $string = *Enterprise* ]] || [[ $string = *cientific* ]]; then\n'
1202 <        txt += '       export SCRAM_ARCH=slc3_ia32_gcc323\n'
1203 <        txt += '       echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
1204 <        txt += '   else\n'
1205 <        txt += '       echo "SET_CMS_ENV 10033 ==> ERROR OS unknown, LCG environment not initialized"\n'
1206 <        txt += '       echo "JOB_EXIT_STATUS = 10033"\n'
1207 <        txt += '       echo "JobExitCode=10033" | tee -a $RUNTIME_AREA/$repo\n'
1208 <        txt += '       dumpStatus $RUNTIME_AREA/$repo\n'
1209 <        txt += '       rm -f $RUNTIME_AREA/$repo \n'
1210 <        txt += '       echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
1211 <        txt += '       echo "SyncGridJobId=`echo $SyncGridJobId`" | tee -a $RUNTIME_AREA/$repo \n'
1212 <        txt += '       echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
1213 <        txt += '       exit 1\n'
1214 <        txt += '   fi\n'
1215 <        txt += '   echo "SET_CMS_ENV 0 ==> setup cms environment ok"\n'
1216 <        txt += '   echo "### END SETUP CMS LCG ENVIRONMENT ###"\n'
1156 >        txt = '\n#Written by cms_cmssw::wsSetupCMSLCGEnvironment_\n'
1157 >        txt += '    echo ">>> setup CMS LCG environment:"\n'
1158 >        txt += '    echo "set SCRAM ARCH and BUILD_ARCH to ' + self.executable_arch + ' ###"\n'
1159 >        txt += '    export SCRAM_ARCH='+self.executable_arch+'\n'
1160 >        txt += '    export BUILD_ARCH='+self.executable_arch+'\n'
1161 >        txt += '    if [ ! $VO_CMS_SW_DIR ] ;then\n'
1162 >        txt += '        echo "ERROR ==> CMS software dir not found on WN `hostname`"\n'
1163 >        txt += '        job_exit_code=10031\n'
1164 >        txt += '        func_exit\n'
1165 >        txt += '    else\n'
1166 >        txt += '        echo "Sourcing environment... "\n'
1167 >        txt += '        if [ ! -s $VO_CMS_SW_DIR/cmsset_default.sh ] ;then\n'
1168 >        txt += '            echo "ERROR ==> cmsset_default.sh file not found into dir $VO_CMS_SW_DIR"\n'
1169 >        txt += '            job_exit_code=10020\n'
1170 >        txt += '            func_exit\n'
1171 >        txt += '        fi\n'
1172 >        txt += '        echo "sourcing $VO_CMS_SW_DIR/cmsset_default.sh"\n'
1173 >        txt += '        source $VO_CMS_SW_DIR/cmsset_default.sh\n'
1174 >        txt += '        result=$?\n'
1175 >        txt += '        if [ $result -ne 0 ]; then\n'
1176 >        txt += '            echo "ERROR ==> problem sourcing $VO_CMS_SW_DIR/cmsset_default.sh"\n'
1177 >        txt += '            job_exit_code=10032\n'
1178 >        txt += '            func_exit\n'
1179 >        txt += '        fi\n'
1180 >        txt += '    fi\n'
1181 >        txt += '    \n'
1182 >        txt += '    echo "==> setup cms environment ok"\n'
1183 >        return txt
1184 >
1185 >    ### FEDE FOR DBS OUTPUT PUBLICATION
1186 >    def modifyReport(self, nj):
1187 >        """
1188 >        insert the part of the script that modifies the FrameworkJob Report
1189 >        """
1190 >
1191 >        txt = '\n#Written by cms_cmssw::modifyReport\n'
1192 >        publish_data = int(self.cfg_params.get('USER.publish_data',0))
1193 >        if (publish_data == 1):
1194 >            processedDataset = self.cfg_params['USER.publish_data_name']
1195 >            LFNBaseName = LFNBase(processedDataset)
1196 >
1197 >            txt += 'if [ $copy_exit_status -eq 0 ]; then\n'
1198 >            txt += '    FOR_LFN=%s_${PSETHASH}/\n'%(LFNBaseName)
1199 >            txt += 'else\n'
1200 >            txt += '    FOR_LFN=/copy_problems/ \n'
1201 >            txt += '    SE=""\n'
1202 >            txt += '    SE_PATH=""\n'
1203 >            txt += 'fi\n'
1204 >            
1205 >            txt += 'echo ">>> Modify Job Report:" \n'
1206 >            txt += 'chmod a+x $SOFTWARE_DIR/ProdCommon/ProdCommon/FwkJobRep/ModifyJobReport.py\n'
1207 >            txt += 'ProcessedDataset='+processedDataset+'\n'
1208 >            txt += 'echo "ProcessedDataset = $ProcessedDataset"\n'
1209 >            txt += 'echo "SE = $SE"\n'
1210 >            txt += 'echo "SE_PATH = $SE_PATH"\n'
1211 >            txt += 'echo "FOR_LFN = $FOR_LFN" \n'
1212 >            txt += 'echo "CMSSW_VERSION = $CMSSW_VERSION"\n\n'
1213 >            txt += 'echo "$SOFTWARE_DIR/ProdCommon/ProdCommon/FwkJobRep/ModifyJobReport.py $RUNTIME_AREA/crab_fjr_$NJob.xml $NJob $FOR_LFN $PrimaryDataset $DataTier $ProcessedDataset $ApplicationFamily $executable $CMSSW_VERSION $PSETHASH $SE $SE_PATH"\n'
1214 >            txt += '$SOFTWARE_DIR/ProdCommon/ProdCommon/FwkJobRep/ModifyJobReport.py $RUNTIME_AREA/crab_fjr_$NJob.xml $NJob $FOR_LFN $PrimaryDataset $DataTier $ProcessedDataset $ApplicationFamily $executable $CMSSW_VERSION $PSETHASH $SE $SE_PATH\n'
1215 >            txt += 'modifyReport_result=$?\n'
1216 >            txt += 'if [ $modifyReport_result -ne 0 ]; then\n'
1217 >            txt += '    modifyReport_result=70500\n'
1218 >            txt += '    job_exit_code=$modifyReport_result\n'
1219 >            txt += '    echo "ModifyReportResult=$modifyReport_result" | tee -a $RUNTIME_AREA/$repo\n'
1220 >            txt += '    echo "WARNING: Problem with ModifyJobReport"\n'
1221 >            txt += 'else\n'
1222 >            txt += '    mv NewFrameworkJobReport.xml $RUNTIME_AREA/crab_fjr_$NJob.xml\n'
1223 >            txt += 'fi\n'
1224          return txt
1225  
1226      def setParam_(self, param, value):
# Line 1025 | Line 1229 | class Cmssw(JobType):
1229      def getParams(self):
1230          return self._params
1231  
1232 <    def setTaskid_(self):
1233 <        self._taskId = self.cfg_params['taskId']
1234 <        
1235 <    def getTaskid(self):
1236 <        return self._taskId
1232 >    def uniquelist(self, old):
1233 >        """
1234 >        remove duplicates from a list
1235 >        """
1236 >        nd={}
1237 >        for e in old:
1238 >            nd[e]=0
1239 >        return nd.keys()
1240 >
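uniquelist() deduplicates by using the entries as dictionary keys, so the result order is unspecified. A quick usage sketch with illustrative values (dict.fromkeys(old).keys() would be an equivalent one-liner):

    old = ['out.root', 'log.txt', 'out.root']
    nd = {}
    for e in old:
        nd[e] = 0
    print(sorted(nd.keys()))    # ['log.txt', 'out.root']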
1241 >    def outList(self):
1242 >        """
1243 >        build the list of output files expected in the output sandbox
1244 >        """
1245 >        txt = ''
1246 >        txt += 'echo ">>> list of expected files on output sandbox"\n'
1247 >        listOutFiles = []
1248 >        stdout = 'CMSSW_$NJob.stdout'
1249 >        stderr = 'CMSSW_$NJob.stderr'
1250 >        if (self.return_data == 1):
1251 >            for file in (self.output_file+self.output_file_sandbox):
1252 >                listOutFiles.append(self.numberFile_(file, '$NJob'))
1253 >            listOutFiles.append(stdout)
1254 >            listOutFiles.append(stderr)
1255 >        else:
1256 >            for file in (self.output_file_sandbox):
1257 >                listOutFiles.append(self.numberFile_(file, '$NJob'))
1258 >            listOutFiles.append(stdout)
1259 >            listOutFiles.append(stderr)
1260 >        txt += 'echo "output files: '+string.join(listOutFiles,' ')+'"\n'
1261 >        txt += 'filesToCheck="'+string.join(listOutFiles,' ')+'"\n'
1262 >        txt += 'export filesToCheck\n'
1263 >        return txt

Diff Legend

Removed lines
+ Added lines
< Changed lines
> Changed lines