root/cvsroot/COMP/CRAB/python/Splitter.py

Comparing COMP/CRAB/python/Splitter.py (file contents):
Revision 1.7 by spiga, Wed Feb 11 22:13:03 2009 UTC vs.
Revision 1.20 by slacapra, Wed Jun 10 13:11:05 2009 UTC

# Line 1 | Line 1
1   import common
2 from crab_logger import Logger
2   from crab_exceptions import *
3   from crab_util import *
4   from WMCore.SiteScreening.BlackWhiteListParser import SEBlackWhiteListParser
# Line 10 | Line 9 | class JobSplitter:
9          self.args=args
10          #self.maxEvents
11          # init BlackWhiteListParser
12 <        seWhiteList = cfg_params.get('EDG.se_white_list',[])
13 <        seBlackList = cfg_params.get('EDG.se_black_list',[])
14 <        self.blackWhiteListParser = SEBlackWhiteListParser(seWhiteList, seBlackList, common.logger)
12 >        seWhiteList = cfg_params.get('GRID.se_white_list',[])
13 >        seBlackList = cfg_params.get('GRID.se_black_list',[])
14 >        self.blackWhiteListParser = SEBlackWhiteListParser(seWhiteList, seBlackList, common.logger())
15  
16  
17      def checkUserSettings(self):
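
The hunk above renames the storage-element list options from the EDG.* configuration
section to GRID.*, and hands the parser the object returned by calling common.logger()
rather than the common.logger attribute itself. A minimal sketch of the lookup pattern,
with a hypothetical dict standing in for the real cfg_params (the site name is purely
illustrative):

    # Hypothetical stand-in for the CRAB configuration; keys as in the new revision.
    cfg_params = {'GRID.se_white_list': ['T2_IT_Bari'], 'GRID.se_black_list': []}

    # Missing keys fall back to an empty list, exactly as in the .get() calls above.
    seWhiteList = cfg_params.get('GRID.se_white_list', [])
    seBlackList = cfg_params.get('GRID.se_black_list', [])
    print(seWhiteList)
    print(seBlackList)
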
# Line 92 | Line 91 | class JobSplitter:
91          # If user requested more events than are in the dataset
92          elif (totalEventsRequested > self.maxEvents):
93              eventsRemaining = self.maxEvents
94 <            common.logger.message("Requested "+str(self.total_number_of_events)+ " events, but only "+str(self.maxEvents)+" events are available.")
94 >            common.logger.info("Requested "+str(self.total_number_of_events)+ " events, but only "+str(self.maxEvents)+" events are available.")
95          # If user requested less events than are in the dataset
96          else:
97              eventsRemaining = totalEventsRequested
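
The most pervasive change in this revision is the logging interface:
common.logger.message(...) becomes common.logger.info(...), the leveled
common.logger.debug(n, ...) calls drop the numeric argument, and the most verbose
messages move to common.logger.log(10-1, ...). A sketch of the same level semantics
using the standard-library logging module, as an analogy only (the CRAB logger object
itself is not reproduced here):

    import logging

    # Low threshold so that the log(10-1, ...) analogue is actually emitted.
    logging.basicConfig(level=logging.DEBUG - 1)
    logger = logging.getLogger("Splitter")

    logger.info("Requested 1000 events")            # replaces common.logger.message(...)
    logger.debug("Events in Block File 500")        # replaces common.logger.debug(5, ...)
    logger.log(logging.DEBUG - 1, "parent detail")  # replaces common.logger.debug(6, ...)
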
# Line 108 | Line 107 | class JobSplitter:
107              eventsPerJobRequested = int(eventsRemaining/self.theNumberOfJobs)
108  
109          if (self.selectNumberOfJobs):
110 <            common.logger.message("May not create the exact number_of_jobs requested.")
110 >            common.logger.info("May not create the exact number_of_jobs requested.")
111  
112          # old... to remove Daniele
113          totalNumberOfJobs = 999999999
# Line 125 | Line 124 | class JobSplitter:
124          jobsOfBlock = {}
125  
126          parString = ""
127 +        pString = ""
128          filesEventCount = 0
129  
130          # ---- Iterate over the blocks in the dataset until ---- #
# Line 137 | Line 137 | class JobSplitter:
137  
138              if self.eventsbyblock.has_key(block) :
139                  numEventsInBlock = self.eventsbyblock[block]
140 <                common.logger.debug(5,'Events in Block File '+str(numEventsInBlock))
140 >                common.logger.debug('Events in Block File '+str(numEventsInBlock))
141  
142                  files = filesbyblock[block]
143                  numFilesInBlock = len(files)
# Line 147 | Line 147 | class JobSplitter:
147                  if noBboundary == 0: # DD
148                      # ---- New block => New job ---- #
149                      parString = ""
150 +                    pString=""
151                      # counter for number of events in files currently worked on
152                      filesEventCount = 0
153                  # flag if next while loop should touch new file
# Line 156 | Line 157 | class JobSplitter:
157  
158                  # ---- Iterate over the files in the block until we've met the requested ---- #
159                  # ---- total # of events or we've gone over all the files in this block  ---- #
160 <                pString=''
160 >                msg='\n'
161                  while ( (eventsRemaining > 0) and (fileCount < numFilesInBlock) and (jobCount < totalNumberOfJobs) ):
162                      file = files[fileCount]
163                      if self.useParent==1:
164                          parent = self.parentFiles[file]
165 <                        for f in parent :
165 <                            pString += '\\\"' + f + '\\\"\,'
166 <                        common.logger.debug(6, "File "+str(file)+" has the following parents: "+str(parent))
167 <                        common.logger.write("File "+str(file)+" has the following parents: "+str(parent))
165 >                        common.logger.log(10-1, "File "+str(file)+" has the following parents: "+str(parent))
166                      if newFile :
167                          try:
168                              numEventsInFile = self.eventsbyfile[file]
169 <                            common.logger.debug(6, "File "+str(file)+" has "+str(numEventsInFile)+" events")
169 >                            common.logger.log(10-1, "File "+str(file)+" has "+str(numEventsInFile)+" events")
170                              # increase filesEventCount
171                              filesEventCount += numEventsInFile
172                              # Add file to current job
173 <                            parString += '\\\"' + file + '\\\"\,'
173 >                            parString +=  file + ','
174 >                            if self.useParent==1:
175 >                                for f in parent :
176 >                                    pString += f  + ','
177                              newFile = 0
178                          except KeyError:
179 <                            common.logger.message("File "+str(file)+" has unknown number of events: skipping")
179 >                            common.logger.info("File "+str(file)+" has unknown number of events: skipping")
180  
181                      eventsPerJobRequested = min(eventsPerJobRequested, eventsRemaining)
182                      # if less events in file remain than eventsPerJobRequested
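
In this hunk the per-job input-file string switches from the escaped '\\\"file\\\"\,'
form of revision 1.7 to a plain comma-separated list, which is why the later
fullString = parString[:-1] trims a single trailing character where the old code
trimmed two. A self-contained sketch of the new pattern, with invented file names:

    # Invented LFNs; in Splitter.py they come from the block's file listing.
    files = ["/store/data/fileA.root", "/store/data/fileB.root"]

    parString = ""
    for f in files:
        parString += f + ","        # new style: bare name plus a single comma
    fullString = parString[:-1]     # drop only the trailing comma (was [:-2] in rev 1.7)

    print(fullString)               # /store/data/fileA.root,/store/data/fileB.root

The same result could be written as ','.join(files); the incremental form matches how
the loop accumulates files while counting their events.
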
# Line 188 | Line 189 | class JobSplitter:
189                              if ( fileCount == numFilesInBlock-1 ) :
190                                  # end job using last file, use remaining events in block
191                                  # close job and touch new file
192 <                                fullString = parString[:-2]
192 >                                fullString = parString[:-1]
193                                  if self.useParent==1:
194 <                                    fullParentString = pString[:-2]
194 >                                    fullParentString = pString[:-1]
195                                      list_of_lists.append([fullString,fullParentString,str(-1),str(jobSkipEventCount)])
196                                  else:
197                                      list_of_lists.append([fullString,str(-1),str(jobSkipEventCount)])
198 <                                common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(filesEventCount - jobSkipEventCount)+" events (last file in block).")
198 >                                msg += "Job %s can run over %s  events (last file in block).\n"%(str(jobCount+1), str(filesEventCount - jobSkipEventCount))
199                                  jobDestination.append(blockSites[block])
200 <                                common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(jobDestination[jobCount]))
200 >                                msg += "Job %s Destination: %s\n"%(str(jobCount+1),str(SE2CMS(jobDestination[jobCount])))
201                                  # fill jobs of block dictionary
202                                  jobsOfBlock[block].append(jobCount+1)
203                                  # reset counter
# Line 217 | Line 218 | class JobSplitter:
218                      # if events in file equal to eventsPerJobRequested
219                      elif ( filesEventCount - jobSkipEventCount == eventsPerJobRequested ) :
220                          # close job and touch new file
221 <                        fullString = parString[:-2]
221 >                        fullString = parString[:-1]
222                          if self.useParent==1:
223 <                            fullParentString = pString[:-2]
223 >                            fullParentString = pString[:-1]
224                              list_of_lists.append([fullString,fullParentString,str(eventsPerJobRequested),str(jobSkipEventCount)])
225                          else:
226                              list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
227 <                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
227 >                        msg += "Job %s can run over %s events.\n"%(str(jobCount+1),str(eventsPerJobRequested))
228                          jobDestination.append(blockSites[block])
229 <                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(jobDestination[jobCount]))
229 >                        msg+= "Job %s Destination: %s\n"%(str(jobCount+1),str(SE2CMS(jobDestination[jobCount])))
230                          jobsOfBlock[block].append(jobCount+1)
231                          # reset counter
232                          jobCount = jobCount + 1
# Line 242 | Line 243 | class JobSplitter:
243                      # if more events in file remain than eventsPerJobRequested
244                      else :
245                          # close job but don't touch new file
246 <                        fullString = parString[:-2]
246 >                        fullString = parString[:-1]
247                          if self.useParent==1:
248 <                            fullParentString = pString[:-2]
248 >                            fullParentString = pString[:-1]
249                              list_of_lists.append([fullString,fullParentString,str(eventsPerJobRequested),str(jobSkipEventCount)])
250                          else:
251                              list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
252 <                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
252 >                        msg += "Job %s can run over %s events.\n"%(str(jobCount+1),str(eventsPerJobRequested))
253                          jobDestination.append(blockSites[block])
254 <                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(jobDestination[jobCount]))
254 >                        msg+= "Job %s Destination: %s\n"%(str(jobCount+1),str(SE2CMS(jobDestination[jobCount])))
255                          jobsOfBlock[block].append(jobCount+1)
256                          # increase counter
257                          jobCount = jobCount + 1
# Line 261 | Line 262 | class JobSplitter:
262                          jobSkipEventCount = eventsPerJobRequested - (filesEventCount - jobSkipEventCount - self.eventsbyfile[file])
263                          # remove all but the last file
264                          filesEventCount = self.eventsbyfile[file]
265 +                        pString_tmp=''
266                          if self.useParent==1:
267 <                            for f in parent : pString += '\\\"' + f + '\\\"\,'
268 <                        parString = '\\\"' + file + '\\\"\,'
267 >                            for f in parent : pString_tmp +=  f + ','
268 >                        pString =  pString_tmp
269 >                        parString =  file + ','
270                      pass # END if
271                  pass # END while (iterate over files in the block)
272          pass # END while (iterate over blocks in the dataset)
273 +        common.logger.debug(msg)
274          self.ncjobs = self.total_number_of_jobs = jobCount
275          if (eventsRemaining > 0 and jobCount < totalNumberOfJobs ):
276 <            common.logger.message("Could not run on all requested events because some blocks not hosted at allowed sites.")
277 <        common.logger.message(str(jobCount)+" job(s) can run on "+str(totalEventCount)+" events.\n")
276 >            common.logger.info("Could not run on all requested events because some blocks not hosted at allowed sites.")
277 >        common.logger.info(str(jobCount)+" job(s) can run on "+str(totalEventCount)+" events.\n")
278  
279          # skip check on  block with no sites  DD
280          if noBboundary == 0 : self.checkBlockNoSite(blocks,jobsOfBlock,blockSites)
281  
282         # prepare dict output
283          dictOut = {}
284 +        dictOut['params']= ['InputFiles','MaxEvents','SkipEvents']
285 +        if self.useParent: dictOut['params']= ['InputFiles','ParentFiles','MaxEvents','SkipEvents']
286          dictOut['args'] = list_of_lists
287          dictOut['jobDestination'] = jobDestination
288          dictOut['njobs']=self.total_number_of_jobs
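
The new 'params' key names the positional fields of each entry in 'args'
(InputFiles, optionally ParentFiles, then MaxEvents and SkipEvents), presumably so
that the consumer of the splitter output can pair each value with a name. For a
two-job split with parent files enabled, the returned dictionary would have roughly
the following shape (every value below is invented for illustration):

    # Illustrative only; the real entries are built in the loop shown above.
    dictOut = {
        'params': ['InputFiles', 'ParentFiles', 'MaxEvents', 'SkipEvents'],
        'args': [
            ['/store/data/fileA.root', '/store/parent/fileA_raw.root', '500', '0'],
            ['/store/data/fileB.root', '/store/parent/fileB_raw.root', '500', '250'],
        ],
        'jobDestination': [['se01.example.org'], ['se02.example.org']],
        'njobs': 2,
    }
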
# Line 290 | Line 296 | class JobSplitter:
296          screenOutput = "List of jobs and available destination sites:\n\n"
297          noSiteBlock = []
298          bloskNoSite = []
299 +        allBlock = []
300  
301          blockCounter = 0
302          for block in blocks:
303              if block in jobsOfBlock.keys() :
304                  blockCounter += 1
305 +                allBlock.append( blockCounter )
306 +                sites=self.blackWhiteListParser.checkWhiteList(self.blackWhiteListParser.checkBlackList(blockSites[block],[block]),[block])
307                  screenOutput += "Block %5i: jobs %20s: sites: %s\n" % (blockCounter,spanRanges(jobsOfBlock[block]),
308 <                    ','.join(self.blackWhiteListParser.checkWhiteList(self.blackWhiteListParser.checkBlackList(blockSites[block],[block]),[block])))
309 <                if len(self.blackWhiteListParser.checkWhiteList(self.blackWhiteListParser.checkBlackList(blockSites[block],[block]),[block])) == 0:
308 >                    ', '.join(SE2CMS(sites)))
309 >                if len(sites) == 0:
310                      noSiteBlock.append( spanRanges(jobsOfBlock[block]) )
311                      bloskNoSite.append( blockCounter )
312  
313 <        common.logger.message(screenOutput)
313 >        common.logger.info(screenOutput)
314          if len(noSiteBlock) > 0 and len(bloskNoSite) > 0:
315              msg = 'WARNING: No sites are hosting any part of data for block:\n                '
316              virgola = ""
# Line 316 | Line 325 | class JobSplitter:
325              for range_jobs in noSiteBlock:
326                  msg += str(range_jobs) + virgola
327              msg += '\n               will not be submitted and this block of data can not be analyzed!\n'
328 <            if self.cfg_params.has_key('EDG.se_white_list'):
329 <                msg += 'WARNING: SE White List: '+self.cfg_params['EDG.se_white_list']+'\n'
328 >            if self.cfg_params.has_key('GRID.se_white_list'):
329 >                msg += 'WARNING: SE White List: '+self.cfg_params['GRID.se_white_list']+'\n'
330                  msg += '(Hint: By whitelisting you force the job to run at this particular site(s).\n'
331                  msg += 'Please check if the dataset is available at this site!)\n'
332 <            if self.cfg_params.has_key('EDG.ce_white_list'):
333 <                msg += 'WARNING: CE White List: '+self.cfg_params['EDG.ce_white_list']+'\n'
332 >            if self.cfg_params.has_key('GRID.ce_white_list'):
333 >                msg += 'WARNING: CE White List: '+self.cfg_params['GRID.ce_white_list']+'\n'
334                  msg += '(Hint: By whitelisting you force the job to run at this particular site(s).\n'
335                  msg += 'Please check if the dataset is available at this site!)\n'
336  
337 <            common.logger.message(msg)
337 >            common.logger.info(msg)
338 >
339 >        if bloskNoSite == allBlock:
340 >            raise CrabException('No jobs created')
341  
342          return
343  
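
The allBlock bookkeeping added in this revision lets the splitter fail loudly instead
of quietly creating zero jobs: both allBlock and bloskNoSite are filled by appending
blockCounter, so they compare equal exactly when every block ended up without an
allowed site. A minimal standalone illustration of the check (RuntimeError stands in
for CrabException from crab_exceptions):

    allBlock = [1, 2, 3]        # counter of every block found for the dataset
    bloskNoSite = [1, 2, 3]     # counters of the blocks with no allowed site

    if bloskNoSite == allBlock:
        # In Splitter.py this raises CrabException('No jobs created').
        raise RuntimeError('No jobs created')
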
# Line 354 | Line 366 | class JobSplitter:
366          thefiles = Fileset(name='FilesToSplit')
367          fileList = pubdata.getListFiles()
368          for f in fileList:
357           # print f
369              block = f['Block']['Name']
359          #  if not blocks.has_key(block):
360          #      blocks[block] = reader.listFileBlockLocation(block)
370              try:
371                  f['Block']['StorageElementList'].extend(blockSites[block])
372              except:
# Line 386 | Line 395 | class JobSplitter:
395          set = Set(runList)
396          list_of_lists = []
397          jobDestination = []
389
398          count = 0
399 <        for i in list(set):
399 >        for jobGroup in  jobfactory():
400              if count <  self.theNumberOfJobs:
401 <                res = self.getJobInfo(jobfactory())
401 >                res = self.getJobInfo(jobGroup)
402                  parString = ''
403                  for file in res['lfns']:
404 <                    parString += '\\\"' + file + '\\\"\,'
405 <                fullString = parString[:-2]
404 >                    parString += file + ','
405 >                fullString = parString[:-1]
406                  list_of_lists.append([fullString,str(-1),str(0)])    
407                  #need to check single file location
408                  jobDestination.append(res['locations'])  
409                  count +=1
402        #print jobDestination
410         # prepare dict output
411          dictOut = {}
412 +        dictOut['params']= ['InputFiles','MaxEvents','SkipEvents']
413          dictOut['args'] = list_of_lists
414          dictOut['jobDestination'] = jobDestination
415          dictOut['njobs']=count
# Line 419 | Line 427 | class JobSplitter:
427                  for loc in file['locations']:
428                      if tmp_check < 1 :
429                          locations.append(loc)
430 <                    tmp_check = tmp_check + 1
430 >                tmp_check = tmp_check + 1
431                  ### the check on the locations needs to go here
432          res['lfns'] = lfns
433          res['locations'] = locations
# Line 430 | Line 438 | class JobSplitter:
438          """
439          Perform job splitting based on number of event per job
440          """
441 <        common.logger.debug(5,'Splitting per events')
441 >        common.logger.debug('Splitting per events')
442          self.checkUserSettings()
443          jobDestination=[]
444          if ( (self.selectTotalNumberEvents + self.selectEventsPerJob + self.selectNumberOfJobs) != 2 ):
# Line 442 | Line 450 | class JobSplitter:
450          firstRun = self.cfg_params.get('CMSSW.first_run',None)
451  
452          if (self.selectEventsPerJob):
453 <            common.logger.message('Required '+str(self.eventsPerJob)+' events per job ')
453 >            common.logger.info('Required '+str(self.eventsPerJob)+' events per job ')
454          if (self.selectNumberOfJobs):
455 <            common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
455 >            common.logger.info('Required '+str(self.theNumberOfJobs)+' jobs in total ')
456          if (self.selectTotalNumberEvents):
457 <            common.logger.message('Required '+str(self.total_number_of_events)+' events in total ')
457 >            common.logger.info('Required '+str(self.total_number_of_events)+' events in total ')
458  
459          if (self.total_number_of_events < 0):
460              msg='Cannot split jobs per Events with "-1" as total number of events'
# Line 463 | Line 471 | class JobSplitter:
471              self.total_number_of_jobs = self.theNumberOfJobs
472              self.eventsPerJob = int(self.total_number_of_events/self.total_number_of_jobs)
473  
474 <        common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
474 >        common.logger.debug('N jobs  '+str(self.total_number_of_jobs))
475  
476          # is there any remainder?
477          check = int(self.total_number_of_events) - (int(self.total_number_of_jobs)*self.eventsPerJob)
478  
479 <        common.logger.debug(5,'Check  '+str(check))
479 >        common.logger.debug('Check  '+str(check))
480  
481 <        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created, each for '+str(self.eventsPerJob)+' for a total of '+str(self.total_number_of_jobs*self.eventsPerJob)+' events')
481 >        common.logger.info(str(self.total_number_of_jobs)+' jobs can be created, each for '+str(self.eventsPerJob)+' for a total of '+str(self.total_number_of_jobs*self.eventsPerJob)+' events')
482          if check > 0:
483 <            common.logger.message('Warning: asked '+str(self.total_number_of_events)+' but can do only '+str(int(self.total_number_of_jobs)*self.eventsPerJob))
483 >            common.logger.info('Warning: asked '+str(self.total_number_of_events)+' but can do only '+str(int(self.total_number_of_jobs)*self.eventsPerJob))
484  
485          # argument is seed number.$i
486          self.list_of_args = []
# Line 492 | Line 500 | class JobSplitter:
500              args.append(str(self.eventsPerJob))
501              self.list_of_args.append(args)
502         # prepare dict output
503 +
504          dictOut = {}
505 +        dictOut['params'] = ['MaxEvents']
506 +        if (firstRun):
507 +            dictOut['params'] = ['FirstRun','MaxEvents']
508 +            if ( generator in managedGenerators ) : dictOut['params'] = ['FirstRun', 'FirstEvent', 'MaxEvents']
509 +        else:  
510 +            if (generator in managedGenerators) : dictOut['params'] = ['FirstEvent', 'MaxEvents']
511          dictOut['args'] = self.list_of_args
512          dictOut['jobDestination'] = jobDestination
513          dictOut['njobs']=self.total_number_of_jobs
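
The 'params' list added above depends on two conditions: whether CMSSW.first_run was
given and whether the generator is one of the internally managed ones. The same
selection written out flat, with illustrative values for firstRun, generator and
managedGenerators (the real values come from the surrounding class):

    # Illustrative inputs only; in jobSplittingNoInput they come from the CRAB config.
    firstRun = None
    generator = 'pythia'
    managedGenerators = ['comphep', 'lhe', 'madgraph']

    params = ['MaxEvents']
    if firstRun:
        params = ['FirstRun', 'MaxEvents']
        if generator in managedGenerators:
            params = ['FirstRun', 'FirstEvent', 'MaxEvents']
    elif generator in managedGenerators:
        params = ['FirstEvent', 'MaxEvents']

    print(params)    # ['MaxEvents'] for these example values
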
# Line 509 | Line 524 | class JobSplitter:
524              msg = 'must specify  number_of_jobs.'
525              raise CrabException(msg)
526          jobDestination = []
527 <        common.logger.debug(5,'Splitting per job')
528 <        common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
527 >        common.logger.debug('Splitting per job')
528 >        common.logger.info('Required '+str(self.theNumberOfJobs)+' jobs in total ')
529  
530          self.total_number_of_jobs = self.theNumberOfJobs
531  
532 <        common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
532 >        common.logger.debug('N jobs  '+str(self.total_number_of_jobs))
533  
534 <        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created')
534 >        common.logger.info(str(self.total_number_of_jobs)+' jobs can be created')
535  
536          # argument is seed number.$i
537 <        self.list_of_args = []
537 >        #self.list_of_args = []
538          for i in range(self.total_number_of_jobs):
539              jobDestination.append([""])
540 <            self.list_of_args.append([str(i)])
540 >        #   self.list_of_args.append([str(i)])
541  
542         # prepare dict output
543          dictOut = {}
544 <        dictOut['args'] = self.list_of_args
544 >        dictOut['args'] = [] # self.list_of_args
545          dictOut['jobDestination'] = jobDestination
546          dictOut['njobs']=self.total_number_of_jobs
547          return dictOut

Diff Legend

  Removed lines (shown with the old line number and no marker)
+ Added lines
< Changed lines (contents in revision 1.7)
> Changed lines (contents in revision 1.20)