
Comparing COMP/CRAB/python/Splitter.py (file contents):
Revision 1.2 by spiga, Wed Feb 4 15:09:03 2009 UTC vs.
Revision 1.16 by spiga, Fri Jun 5 13:06:01 2009 UTC

# Line 1 | Line 1
1   import common
2 from crab_logger import Logger
2   from crab_exceptions import *
3   from crab_util import *
4   from WMCore.SiteScreening.BlackWhiteListParser import SEBlackWhiteListParser
# Line 7 | Line 6 | from WMCore.SiteScreening.BlackWhiteList
6   class JobSplitter:
7      def __init__( self, cfg_params,  args ):
8          self.cfg_params = cfg_params
9 <        self.blockSites = args['blockSites']
11 <        self.pubdata = args['pubdata']
9 >        self.args=args
10          #self.maxEvents
13        self.jobDestination=[]  # Site destination(s) for each job (list of lists)
11          # init BlackWhiteListParser
12 <        seWhiteList = cfg_params.get('EDG.se_white_list',[])
13 <        seBlackList = cfg_params.get('EDG.se_black_list',[])
12 >        seWhiteList = cfg_params.get('GRID.se_white_list',[])
13 >        seBlackList = cfg_params.get('GRID.se_black_list',[])
14          self.blackWhiteListParser = SEBlackWhiteListParser(seWhiteList, seBlackList, common.logger)
15  
16  
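
The hunk above renames the white/black-list configuration keys from EDG.* to GRID.* and feeds them to the SE parser. A minimal sketch of that flow (not part of the diff), with hypothetical configuration values, SE names and block label; the constructor and the checkBlackList/checkWhiteList calls are the ones used in this revision and assume a working CRAB/WMCore installation:

    import common
    from WMCore.SiteScreening.BlackWhiteListParser import SEBlackWhiteListParser

    # Hypothetical configuration; the real cfg_params object exposes the same .get() interface.
    cfg_params = {'GRID.se_white_list': ['srm.cern.ch'],
                  'GRID.se_black_list': []}
    parser = SEBlackWhiteListParser(cfg_params.get('GRID.se_white_list', []),
                                    cfg_params.get('GRID.se_black_list', []),
                                    common.logger)

    # Hypothetical storage elements hosting one block; the block name is only a label here.
    blockSEs = ['srm.cern.ch', 'se01.example.org']
    block = '/SomeDataset/Block#0001'
    allowed = parser.checkWhiteList(parser.checkBlackList(blockSEs, [block]), [block])
    # 'allowed' keeps only the SEs that survive the black list and then the white list.
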
# Line 55 | Line 52 | class JobSplitter:
52          REQUIRES: self.selectTotalNumberEvents, self.selectEventsPerJob, self.selectNumberOfJobs,
53                    self.total_number_of_events, self.eventsPerJob, self.theNumberOfJobs,
54                    self.maxEvents, self.filesbyblock
55 <        SETS: self.jobDestination - Site destination(s) for each job (a list of lists)
55 >        SETS: jobDestination - Site destination(s) for each job (a list of lists)
56                self.total_number_of_jobs - Total # of jobs
57                self.list_of_args - File(s) job will run on (a list of lists)
58          """
59  
60 +        jobDestination=[]  
61          self.checkUserSettings()
62          if ( (self.selectTotalNumberEvents + self.selectEventsPerJob + self.selectNumberOfJobs) != 2 ):
63              msg = 'Must define exactly two of total_number_of_events, events_per_job, or number_of_jobs.'
64              raise CrabException(msg)
65  
66 <        self.filesbyblock=self.pubdata.getFiles()
67 <
68 <        self.eventsbyblock=self.pubdata.getEventsPerBlock()
69 <        self.eventsbyfile=self.pubdata.getEventsPerFile()
70 <        self.parentFiles=self.pubdata.getParent()
66 >        blockSites = self.args['blockSites']
67 >        pubdata = self.args['pubdata']
68 >        filesbyblock=pubdata.getFiles()
69 >
70 >        self.eventsbyblock=pubdata.getEventsPerBlock()
71 >        self.eventsbyfile=pubdata.getEventsPerFile()
72 >        self.parentFiles=pubdata.getParent()
73  
74          ## get max number of events
75 <        self.maxEvents=self.pubdata.getMaxEvents()
75 >        self.maxEvents=pubdata.getMaxEvents()
76  
77          self.useParent = int(self.cfg_params.get('CMSSW.use_parent',0))
78          noBboundary = int(self.cfg_params.get('CMSSW.no_block_boundary',0))
# Line 91 | Line 91 | class JobSplitter:
91          # If user requested more events than are in the dataset
92          elif (totalEventsRequested > self.maxEvents):
93              eventsRemaining = self.maxEvents
94 <            common.logger.message("Requested "+str(self.total_number_of_events)+ " events, but only "+str(self.maxEvents)+" events are available.")
94 >            common.logger.info("Requested "+str(self.total_number_of_events)+ " events, but only "+str(self.maxEvents)+" events are available.")
95          # If user requested less events than are in the dataset
96          else:
97              eventsRemaining = totalEventsRequested
# Line 107 | Line 107 | class JobSplitter:
107              eventsPerJobRequested = int(eventsRemaining/self.theNumberOfJobs)
108  
109          if (self.selectNumberOfJobs):
110 <            common.logger.message("May not create the exact number_of_jobs requested.")
110 >            common.logger.info("May not create the exact number_of_jobs requested.")
111  
112          # old... to remove Daniele
113          totalNumberOfJobs = 999999999
114  
115 <        blocks = self.blockSites.keys()
115 >        blocks = blockSites.keys()
116          blockCount = 0
117          # Backup variable in case self.maxEvents counted events in a non-included block
118          numBlocksInDataset = len(blocks)
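
For the arithmetic above, a short worked example (hypothetical numbers, plain Python) of the "exactly two of three" rule and the derived events-per-job value:

    # Exactly two of the three splitting knobs are set; flags mirror selectTotalNumberEvents etc.
    selectTotalNumberEvents = 1
    selectEventsPerJob      = 0
    selectNumberOfJobs      = 1
    if (selectTotalNumberEvents + selectEventsPerJob + selectNumberOfJobs) != 2:
        raise Exception('Must define exactly two of total_number_of_events, events_per_job, or number_of_jobs.')

    total_number_of_events = 10000        # hypothetical user request
    theNumberOfJobs        = 40           # hypothetical user request
    eventsRemaining = total_number_of_events
    eventsPerJobRequested = int(eventsRemaining / theNumberOfJobs)   # -> 250 events per job
    # Block and file boundaries can still change the final job count, hence the
    # "May not create the exact number_of_jobs requested" message.
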
# Line 124 | Line 124 | class JobSplitter:
124          jobsOfBlock = {}
125  
126          parString = ""
127 +        pString = ""
128          filesEventCount = 0
129  
130          # ---- Iterate over the blocks in the dataset until ---- #
# Line 136 | Line 137 | class JobSplitter:
137  
138              if self.eventsbyblock.has_key(block) :
139                  numEventsInBlock = self.eventsbyblock[block]
140 <                common.logger.debug(5,'Events in Block File '+str(numEventsInBlock))
140 >                common.logger.debug('Events in Block File '+str(numEventsInBlock))
141  
142 <                files = self.filesbyblock[block]
142 >                files = filesbyblock[block]
143                  numFilesInBlock = len(files)
144                  if (numFilesInBlock <= 0):
145                      continue
# Line 146 | Line 147 | class JobSplitter:
147                  if noBboundary == 0: # DD
148                      # ---- New block => New job ---- #
149                      parString = ""
150 +                    pString=""
151                      # counter for number of events in files currently worked on
152                      filesEventCount = 0
153                  # flag if next while loop should touch new file
# Line 155 | Line 157 | class JobSplitter:
157  
158                  # ---- Iterate over the files in the block until we've met the requested ---- #
159                  # ---- total # of events or we've gone over all the files in this block  ---- #
160 <                pString=''
160 >                msg='\n'
161                  while ( (eventsRemaining > 0) and (fileCount < numFilesInBlock) and (jobCount < totalNumberOfJobs) ):
162                      file = files[fileCount]
163                      if self.useParent==1:
164                          parent = self.parentFiles[file]
165 <                        for f in parent :
166 <                            pString += '\\\"' + f + '\\\"\,'
167 <                        common.logger.debug(6, "File "+str(file)+" has the following parents: "+str(parent))
168 <                        common.logger.write("File "+str(file)+" has the following parents: "+str(parent))
165 >                        common.logger.log(10-1, "File "+str(file)+" has the following parents: "+str(parent))
166                      if newFile :
167                          try:
168                              numEventsInFile = self.eventsbyfile[file]
169 <                            common.logger.debug(6, "File "+str(file)+" has "+str(numEventsInFile)+" events")
169 >                            common.logger.log(10-1, "File "+str(file)+" has "+str(numEventsInFile)+" events")
170                              # increase filesEventCount
171                              filesEventCount += numEventsInFile
172                              # Add file to current job
173 <                            parString += '\\\"' + file + '\\\"\,'
173 >                            parString +=  file + ','
174 >                            if self.useParent==1:
175 >                                for f in parent :
176 >                                    pString += f  + ','
177                              newFile = 0
178                          except KeyError:
179 <                            common.logger.message("File "+str(file)+" has unknown number of events: skipping")
179 >                            common.logger.info("File "+str(file)+" has unknown number of events: skipping")
180  
181                      eventsPerJobRequested = min(eventsPerJobRequested, eventsRemaining)
182                      # if less events in file remain than eventsPerJobRequested
# Line 187 | Line 189 | class JobSplitter:
189                              if ( fileCount == numFilesInBlock-1 ) :
190                                  # end job using last file, use remaining events in block
191                                  # close job and touch new file
192 <                                fullString = parString[:-2]
192 >                                fullString = parString[:-1]
193                                  if self.useParent==1:
194 <                                    fullParentString = pString[:-2]
194 >                                    fullParentString = pString[:-1]
195                                      list_of_lists.append([fullString,fullParentString,str(-1),str(jobSkipEventCount)])
196                                  else:
197                                      list_of_lists.append([fullString,str(-1),str(jobSkipEventCount)])
198 <                                common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(filesEventCount - jobSkipEventCount)+" events (last file in block).")
199 <                                self.jobDestination.append(self.blockSites[block])
200 <                                common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
198 >                                msg += "Job %s can run over %s  events (last file in block).\n"%(str(jobCount+1), str(filesEventCount - jobSkipEventCount))
199 >                                jobDestination.append(blockSites[block])
200 >                                msg += "Job %s Destination: %s\n"%(str(jobCount+1),str(jobDestination[jobCount]))
201                                  # fill jobs of block dictionary
202                                  jobsOfBlock[block].append(jobCount+1)
203                                  # reset counter
# Line 216 | Line 218 | class JobSplitter:
218                      # if events in file equal to eventsPerJobRequested
219                      elif ( filesEventCount - jobSkipEventCount == eventsPerJobRequested ) :
220                          # close job and touch new file
221 <                        fullString = parString[:-2]
221 >                        fullString = parString[:-1]
222                          if self.useParent==1:
223 <                            fullParentString = pString[:-2]
223 >                            fullParentString = pString[:-1]
224                              list_of_lists.append([fullString,fullParentString,str(eventsPerJobRequested),str(jobSkipEventCount)])
225                          else:
226                              list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
227 <                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
228 <                        self.jobDestination.append(self.blockSites[block])
229 <                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
227 >                        msg += "Job %s can run over %s events.\n"%(str(jobCount+1),str(eventsPerJobRequested))
228 >                        jobDestination.append(blockSites[block])
229 >                        msg+= "Job %s Destination: %s\n"%(str(jobCount+1),str(jobDestination[jobCount]))
230                          jobsOfBlock[block].append(jobCount+1)
231                          # reset counter
232                          jobCount = jobCount + 1
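
The change in this hunk drops the escaped-quote formatting of the input file list: files are now joined with plain commas and the trailing comma is removed with [:-1]. A sketch of how one job's argument entry looks after the change, with hypothetical LFNs and counts:

    files_in_job = ['/store/data/Run2009A/file1.root',
                    '/store/data/Run2009A/file2.root']   # hypothetical LFNs

    parString = ''
    for f in files_in_job:
        parString += f + ','              # plain comma separation, no escaped quotes
    fullString = parString[:-1]           # strip the trailing comma

    eventsPerJobRequested = 250           # hypothetical
    jobSkipEventCount = 0                 # hypothetical
    job_args = [fullString, str(eventsPerJobRequested), str(jobSkipEventCount)]
    # -> ['/store/data/Run2009A/file1.root,/store/data/Run2009A/file2.root', '250', '0']
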
# Line 241 | Line 243 | class JobSplitter:
243                      # if more events in file remain than eventsPerJobRequested
244                      else :
245                          # close job but don't touch new file
246 <                        fullString = parString[:-2]
246 >                        fullString = parString[:-1]
247                          if self.useParent==1:
248 <                            fullParentString = pString[:-2]
248 >                            fullParentString = pString[:-1]
249                              list_of_lists.append([fullString,fullParentString,str(eventsPerJobRequested),str(jobSkipEventCount)])
250                          else:
251                              list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
252 <                        common.logger.debug(3,"Job "+str(jobCount+1)+" can run over "+str(eventsPerJobRequested)+" events.")
253 <                        self.jobDestination.append(self.blockSites[block])
254 <                        common.logger.debug(5,"Job "+str(jobCount+1)+" Destination: "+str(self.jobDestination[jobCount]))
252 >                        msg += "Job %s can run over %s events.\n"%(str(jobCount+1),str(eventsPerJobRequested))
253 >                        jobDestination.append(blockSites[block])
254 >                        msg+= "Job %s Destination: %s\n"%(str(jobCount+1),str(jobDestination[jobCount]))
255                          jobsOfBlock[block].append(jobCount+1)
256                          # increase counter
257                          jobCount = jobCount + 1
# Line 260 | Line 262 | class JobSplitter:
262                          jobSkipEventCount = eventsPerJobRequested - (filesEventCount - jobSkipEventCount - self.eventsbyfile[file])
263                          # remove all but the last file
264                          filesEventCount = self.eventsbyfile[file]
265 +                        pString_tmp=''
266                          if self.useParent==1:
267 <                            for f in parent : pString += '\\\"' + f + '\\\"\,'
268 <                        parString = '\\\"' + file + '\\\"\,'
267 >                            for f in parent : pString_tmp +=  f + ','
268 >                        pString =  pString_tmp
269 >                        parString =  file + ','
270                      pass # END if
271                  pass # END while (iterate over files in the block)
272          pass # END while (iterate over blocks in the dataset)
273 +        common.logger.debug(msg)
274          self.ncjobs = self.total_number_of_jobs = jobCount
275          if (eventsRemaining > 0 and jobCount < totalNumberOfJobs ):
276 <            common.logger.message("Could not run on all requested events because some blocks not hosted at allowed sites.")
277 <        common.logger.message(str(jobCount)+" job(s) can run on "+str(totalEventCount)+" events.\n")
276 >            common.logger.info("Could not run on all requested events because some blocks not hosted at allowed sites.")
277 >        common.logger.info(str(jobCount)+" job(s) can run on "+str(totalEventCount)+" events.\n")
278  
279          # skip check on  block with no sites  DD
280 <        if noBboundary == 0 : self.checkBlockNoSite(blocks,jobsOfBlock)
280 >        if noBboundary == 0 : self.checkBlockNoSite(blocks,jobsOfBlock,blockSites)
281  
282         # prepare dict output
283          dictOut = {}
284 +        dictOut['params']= ['InputFiles','MaxEvents','SkipEvents']
285 +        if self.useParent: dictOut['params']= ['InputFiles','ParentFiles','MaxEvents','SkipEvents']
286          dictOut['args'] = list_of_lists
287 <        dictOut['jobDestination'] = self.jobDestination
287 >        dictOut['jobDestination'] = jobDestination
288          dictOut['njobs']=self.total_number_of_jobs
289  
290          return dictOut
291  
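
With the 'params' key added above, the dictionary returned by jobSplittingByBlocks is self-describing: each entry of 'args' follows the order given in 'params'. A sketch of the returned structure for a single job, values hypothetical:

    dictOut = {
        'params': ['InputFiles', 'MaxEvents', 'SkipEvents'],   # plus 'ParentFiles' when use_parent is set
        'args': [['/store/data/Run2009A/file1.root,/store/data/Run2009A/file2.root', '250', '0']],
        'jobDestination': [['srm.cern.ch', 'se01.example.org']],   # SE list per job, from blockSites[block]
        'njobs': 1,
    }
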
292          # keep trace of block with no sites to print a warning at the end
293  
294 <    def checkBlockNoSite(self,blocks,jobsOfBlock):  
294 >    def checkBlockNoSite(self,blocks,jobsOfBlock,blockSites):  
295          # screen output
296          screenOutput = "List of jobs and available destination sites:\n\n"
297          noSiteBlock = []
298          bloskNoSite = []
299 +        allBlock = []
300  
301          blockCounter = 0
302          for block in blocks:
303              if block in jobsOfBlock.keys() :
304                  blockCounter += 1
305 +                allBlock.append( blockCounter )
306                  screenOutput += "Block %5i: jobs %20s: sites: %s\n" % (blockCounter,spanRanges(jobsOfBlock[block]),
307 <                    ','.join(self.blackWhiteListParser.checkWhiteList(self.blackWhiteListParser.checkBlackList(self.blockSites[block],block),block)))
308 <                if len(self.blackWhiteListParser.checkWhiteList(self.blackWhiteListParser.checkBlackList(self.blockSites[block],block),block)) == 0:
307 >                    ','.join(self.blackWhiteListParser.checkWhiteList(self.blackWhiteListParser.checkBlackList(blockSites[block],[block]),[block])))
308 >                if len(self.blackWhiteListParser.checkWhiteList(self.blackWhiteListParser.checkBlackList(blockSites[block],[block]),[block])) == 0:
309                      noSiteBlock.append( spanRanges(jobsOfBlock[block]) )
310                      bloskNoSite.append( blockCounter )
311  
312 <        common.logger.message(screenOutput)
312 >        common.logger.info(screenOutput)
313          if len(noSiteBlock) > 0 and len(bloskNoSite) > 0:
314              msg = 'WARNING: No sites are hosting any part of data for block:\n                '
315              virgola = ""
# Line 315 | Line 324 | class JobSplitter:
324              for range_jobs in noSiteBlock:
325                  msg += str(range_jobs) + virgola
326              msg += '\n               will not be submitted and this block of data can not be analyzed!\n'
327 <            if self.cfg_params.has_key('EDG.se_white_list'):
328 <                msg += 'WARNING: SE White List: '+self.cfg_params['EDG.se_white_list']+'\n'
327 >            if self.cfg_params.has_key('GRID.se_white_list'):
328 >                msg += 'WARNING: SE White List: '+self.cfg_params['GRID.se_white_list']+'\n'
329                  msg += '(Hint: By whitelisting you force the job to run at this particular site(s).\n'
330                  msg += 'Please check if the dataset is available at this site!)\n'
331 <            if self.cfg_params.has_key('EDG.ce_white_list'):
332 <                msg += 'WARNING: CE White List: '+self.cfg_params['EDG.ce_white_list']+'\n'
331 >            if self.cfg_params.has_key('GRID.ce_white_list'):
332 >                msg += 'WARNING: CE White List: '+self.cfg_params['GRID.ce_white_list']+'\n'
333                  msg += '(Hint: By whitelisting you force the job to run at this particular site(s).\n'
334                  msg += 'Please check if the dataset is available at this site!)\n'
335  
336 <            common.logger.message(msg)
336 >            common.logger.info(msg)
337 >
338 >        if bloskNoSite == allBlock:
339 >            raise CrabException('No jobs created')
340  
341          return
342  
# Line 343 | Line 355 | class JobSplitter:
355          from WMCore.DataStructs.Run import Run
356  
357          self.checkUserSettings()
358 +        blockSites = self.args['blockSites']
359 +        pubdata = self.args['pubdata']
360  
361          if self.selectNumberOfJobs == 0 :
362              self.theNumberOfJobs = 9999999
363          blocks = {}
364          runList = []
365          thefiles = Fileset(name='FilesToSplit')
366 <        fileList = self.pubdata.getListFiles()
366 >        fileList = pubdata.getListFiles()
367          for f in fileList:
354           # print f
368              block = f['Block']['Name']
356          #  if not blocks.has_key(block):
357          #      blocks[block] = reader.listFileBlockLocation(block)
369              try:
370 <                f['Block']['StorageElementList'].extend(self.blockSites[block])
370 >                f['Block']['StorageElementList'].extend(blockSites[block])
371              except:
372                  continue
373              wmbsFile = File(f['LogicalFileName'])
374 <            [ wmbsFile['locations'].add(x) for x in self.blockSites[block] ]
374 >            [ wmbsFile['locations'].add(x) for x in blockSites[block] ]
375              wmbsFile['block'] = block
376              runNum = f['RunsList'][0]['RunNumber']
377              runList.append(runNum)
# Line 390 | Line 401 | class JobSplitter:
401                  res = self.getJobInfo(jobfactory())
402                  parString = ''
403                  for file in res['lfns']:
404 <                    parString += '\\\"' + file + '\\\"\,'
405 <                fullString = parString[:-2]
404 >                    parString += file + ','
405 >                fullString = parString[:-1]
406                  list_of_lists.append([fullString,str(-1),str(0)])    
407                  #need to check single file location
408                  jobDestination.append(res['locations'])  
409                  count +=1
399        #print jobDestination
410         # prepare dict output
411          dictOut = {}
412 +        dictOut['params']= ['InputFiles','MaxEvents','SkipEvents']
413          dictOut['args'] = list_of_lists
414          dictOut['jobDestination'] = jobDestination
415          dictOut['njobs']=count
# Line 416 | Line 427 | class JobSplitter:
427                  for loc in file['locations']:
428                      if tmp_check < 1 :
429                          locations.append(loc)
430 <                    tmp_check = tmp_check + 1
430 >                tmp_check = tmp_check + 1
431                  ### the check on the locations should go here
432          res['lfns'] = lfns
433          res['locations'] = locations
# Line 427 | Line 438 | class JobSplitter:
438          """
439          Perform job splitting based on number of event per job
440          """
441 <        common.logger.debug(5,'Splitting per events')
441 >        common.logger.debug('Splitting per events')
442 >        self.checkUserSettings()
443 >        jobDestination=[]
444 >        if ( (self.selectTotalNumberEvents + self.selectEventsPerJob + self.selectNumberOfJobs) != 2 ):
445 >            msg = 'Must define exactly two of total_number_of_events, events_per_job, or number_of_jobs.'
446 >            raise CrabException(msg)
447 >
448 >        managedGenerators =self.args['managedGenerators']
449 >        generator = self.args['generator']
450 >        firstRun = self.cfg_params.get('CMSSW.first_run',None)
451  
452          if (self.selectEventsPerJob):
453 <            common.logger.message('Required '+str(self.eventsPerJob)+' events per job ')
453 >            common.logger.info('Required '+str(self.eventsPerJob)+' events per job ')
454          if (self.selectNumberOfJobs):
455 <            common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
455 >            common.logger.info('Required '+str(self.theNumberOfJobs)+' jobs in total ')
456          if (self.selectTotalNumberEvents):
457 <            common.logger.message('Required '+str(self.total_number_of_events)+' events in total ')
457 >            common.logger.info('Required '+str(self.total_number_of_events)+' events in total ')
458  
459          if (self.total_number_of_events < 0):
460              msg='Cannot split jobs per Events with "-1" as total number of events'
# Line 451 | Line 471 | class JobSplitter:
471              self.total_number_of_jobs = self.theNumberOfJobs
472              self.eventsPerJob = int(self.total_number_of_events/self.total_number_of_jobs)
473  
474 <        common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
474 >        common.logger.debug('N jobs  '+str(self.total_number_of_jobs))
475  
476          # is there any remainder?
477          check = int(self.total_number_of_events) - (int(self.total_number_of_jobs)*self.eventsPerJob)
478  
479 <        common.logger.debug(5,'Check  '+str(check))
479 >        common.logger.debug('Check  '+str(check))
480  
481 <        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created, each for '+str(self.eventsPerJob)+' for a total of '+str(self.total_number_of_jobs*self.eventsPerJob)+' events')
481 >        common.logger.info(str(self.total_number_of_jobs)+' jobs can be created, each for '+str(self.eventsPerJob)+' for a total of '+str(self.total_number_of_jobs*self.eventsPerJob)+' events')
482          if check > 0:
483 <            common.logger.message('Warning: asked '+str(self.total_number_of_events)+' but can do only '+str(int(self.total_number_of_jobs)*self.eventsPerJob))
483 >            common.logger.info('Warning: asked '+str(self.total_number_of_events)+' but can do only '+str(int(self.total_number_of_jobs)*self.eventsPerJob))
484  
485          # argument is seed number.$i
486          self.list_of_args = []
487          for i in range(self.total_number_of_jobs):
488              ## Since there is no input, any site is good
489 <            self.jobDestination.append([""]) #must be empty to write correctly the xml
489 >            jobDestination.append([""]) #must be empty to write correctly the xml
490              args=[]
491 <            if (self.firstRun):
491 >            if (firstRun):
492                  ## pythia first run
493 <                args.append(str(self.firstRun)+str(i))
494 <            if (self.generator in self.managedGenerators):
495 <                if (self.generator == 'comphep' and i == 0):
493 >                args.append(str(firstRun)+str(i))
494 >            if (generator in managedGenerators):
495 >                if (generator == 'comphep' and i == 0):
496                      # COMPHEP is brain-dead and wants event #'s like 1,100,200,300
497                      args.append('1')
498                  else:
499                      args.append(str(i*self.eventsPerJob))
500 +            args.append(str(self.eventsPerJob))
501              self.list_of_args.append(args)
502         # prepare dict output
503 +
504          dictOut = {}
505 +        dictOut['params'] = ['MaxEvents']
506 +        if (firstRun):
507 +            dictOut['params'] = ['FirstRun','MaxEvents']
508 +            if ( generator in managedGenerators ) : dictOut['params'] = ['FirstRun', 'FirstEvent', 'MaxEvents']
509 +        else:  
510 +            if (generator in managedGenerators) : dictOut['params'] = ['FirstEvent', 'MaxEvents']
511          dictOut['args'] = self.list_of_args
512 <        dictOut['jobDestination'] = self.jobDestination
512 >        dictOut['jobDestination'] = jobDestination
513          dictOut['njobs']=self.total_number_of_jobs
514  
515          return dictOut
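
For the no-input splitting above, a sketch (hypothetical values) of the per-job arguments when CMSSW.first_run is set and the generator is one of the managed generators, i.e. params = ['FirstRun', 'FirstEvent', 'MaxEvents']:

    firstRun = 1000          # hypothetical CMSSW.first_run
    eventsPerJob = 500       # hypothetical
    theNumberOfJobs = 3

    list_of_args = []
    for i in range(theNumberOfJobs):
        args = []
        args.append(str(firstRun) + str(i))   # FirstRun: first_run with the job index appended
        args.append(str(i * eventsPerJob))    # FirstEvent (comphep uses '1' for job 0)
        args.append(str(eventsPerJob))        # MaxEvents
        list_of_args.append(args)
    # -> [['10000', '0', '500'], ['10001', '500', '500'], ['10002', '1000', '500']]
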
# Line 492 | Line 520 | class JobSplitter:
520          Perform job splitting based on number of job
521          """
522          self.checkUserSettings()
523 <        if (self.selectnumberofjobs == 0):
523 >        if (self.selectNumberOfJobs == 0):
524              msg = 'must specify  number_of_jobs.'
525            raise CrabException(msg)
526 <
527 <        common.logger.debug(5,'Splitting per job')
528 <        common.logger.message('Required '+str(self.theNumberOfJobs)+' jobs in total ')
526 >        jobDestination = []
527 >        common.logger.debug('Splitting per job')
528 >        common.logger.info('Required '+str(self.theNumberOfJobs)+' jobs in total ')
529  
530          self.total_number_of_jobs = self.theNumberOfJobs
531  
532 <        common.logger.debug(5,'N jobs  '+str(self.total_number_of_jobs))
532 >        common.logger.debug('N jobs  '+str(self.total_number_of_jobs))
533  
534 <        common.logger.message(str(self.total_number_of_jobs)+' jobs can be created')
534 >        common.logger.info(str(self.total_number_of_jobs)+' jobs can be created')
535  
536          # argument is seed number.$i
537 <        self.list_of_args = []
537 >        #self.list_of_args = []
538          for i in range(self.total_number_of_jobs):
539 <            self.jobDestination.append([""])
540 <            self.list_of_args.append([str(i)])
539 >            jobDestination.append([""])
540 >        #   self.list_of_args.append([str(i)])
541  
542         # prepare dict output
543          dictOut = {}
544 <        dictOut['args'] = self.list_of_args
545 <        dictOut['jobDestination'] = []
544 >        dictOut['args'] = [] # self.list_of_args
545 >        dictOut['jobDestination'] = jobDestination
546          dictOut['njobs']=self.total_number_of_jobs
547          return dictOut
548  
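
jobSplittingForScript now returns an empty 'args' list and a blank destination per job. A minimal sketch of its output, job count hypothetical:

    theNumberOfJobs = 4
    jobDestination = [[""] for _ in range(theNumberOfJobs)]   # no input data, so any site will do
    dictOut = {'args': [], 'jobDestination': jobDestination, 'njobs': theNumberOfJobs}
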

Diff Legend

(no marker)  Removed lines
+            Added lines
<            Changed lines (revision 1.2 side)
>            Changed lines (revision 1.16 side)