
Comparing COMP/CRAB/python/Splitter.py (file contents):
Revision 1.23 by spiga, Fri Jun 19 09:54:22 2009 UTC vs.
Revision 1.55 by belforte, Tue Mar 13 21:49:55 2012 UTC

# Line 1 | Line 1
1 +
2 + __revision__ = "$Id$"
3 + __version__ = "$Revision$"
4 +
5   import common
6   from crab_exceptions import *
7   from crab_util import *
8 +
9 + from WMCore.DataStructs.File import File
10 + from WMCore.DataStructs.Fileset import Fileset
11 + from WMCore.DataStructs.Run import Run
12 + from WMCore.DataStructs.Subscription import Subscription
13 + from WMCore.DataStructs.Workflow import Workflow
14 + from WMCore.JobSplitting.SplitterFactory import SplitterFactory
15   from WMCore.SiteScreening.BlackWhiteListParser import SEBlackWhiteListParser
16 + try: # Can remove when CMSSW 3.7 and earlier are dropped
17 +    from FWCore.PythonUtilities.LumiList import LumiList
18 + except ImportError:
19 +    from LumiList import LumiList
20  
21   class JobSplitter:
22      def __init__( self, cfg_params,  args ):
23          self.cfg_params = cfg_params
24          self.args=args
25 +
26 +        self.lumisPerJob = -1
27 +        self.totalNLumis = 0
28 +        self.theNumberOfJobs = 0
29 +        self.limitNJobs = False
30 +        self.limitTotalLumis = False
31 +        self.limitJobLumis = False
32 +
33          #self.maxEvents
34          # init BlackWhiteListParser
35 <        seWhiteList = cfg_params.get('GRID.se_white_list',[])
35 >        self.seWhiteList = cfg_params.get('GRID.se_white_list',[])
36          seBlackList = cfg_params.get('GRID.se_black_list',[])
37 <        self.blackWhiteListParser = SEBlackWhiteListParser(seWhiteList, seBlackList, common.logger())
37 >        self.blackWhiteListParser = SEBlackWhiteListParser(self.seWhiteList, seBlackList, common.logger())
38 >
39 >        ## check whether a non-default file to store/read analyzed fileBlocks has been requested
40 >        defaultName = common.work_space.shareDir()+'AnalyzedBlocks.txt'
41 >        self.fileBlocks_FileName = os.path.abspath(self.cfg_params.get('CMSSW.fileblocks_file',defaultName))
42  
43  
44      def checkUserSettings(self):
# Line 42 | Line 69 | class JobSplitter:
69              self.total_number_of_events = 0
70              self.selectTotalNumberEvents = 0
71  
72 +        return
73 +
74 +    def checkLumiSettings(self):
75 +        """
76 +        Check that the user has specified enough information to
77 +        perform job splitting by lumi sections
78 +        """
79 +        settings = 0
80 +        if self.cfg_params.has_key('CMSSW.lumis_per_job'):
81 +            self.lumisPerJob =int( self.cfg_params['CMSSW.lumis_per_job'])
82 +            self.limitJobLumis = True
83 +            settings += 1
84 +
85 +        if self.cfg_params.has_key('CMSSW.number_of_jobs'):
86 +            self.theNumberOfJobs =int( self.cfg_params['CMSSW.number_of_jobs'])
87 +            self.limitNJobs = True
88 +            settings += 1
89 +
90 +        if self.cfg_params.has_key('CMSSW.total_number_of_lumis'):
91 +            self.totalNLumis = int(self.cfg_params['CMSSW.total_number_of_lumis'])
92 +            self.limitTotalLumis = (self.totalNLumis != -1)
93 +            settings += 1
94 +
95 +        if settings != 2:
96 +            msg = 'When splitting by lumi section you must specify two and only two of:\n'
97 +            msg += '  number_of_jobs, lumis_per_job, total_number_of_lumis'
98 +            raise CrabException(msg)
99 +        if self.limitNJobs and self.limitJobLumis:
100 +            self.limitTotalLumis = True
101 +            self.totalNLumis = self.lumisPerJob * self.theNumberOfJobs
102 +
103 +        # Has the user specified runselection?
104 +        if (self.cfg_params.has_key('CMSSW.runselection')):
105 +            common.logger.info('You have specified runselection together with splitting by lumi.')
106 +            common.logger.info('The good lumi list will be the intersection of runselection and the lumimask or ADS (if any).')
107 +        return
108 +
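
A minimal sketch (not part of this revision; configuration values are invented) of the two-of-three rule checkLumiSettings enforces: exactly two of lumis_per_job, number_of_jobs and total_number_of_lumis must be set, and the third is derived.

    # Hypothetical crab.cfg values: lumis_per_job and number_of_jobs given.
    cfg_params = {'CMSSW.lumis_per_job': '50', 'CMSSW.number_of_jobs': '10'}
    keys = ('CMSSW.lumis_per_job', 'CMSSW.number_of_jobs',
            'CMSSW.total_number_of_lumis')
    settings = sum(1 for k in keys if k in cfg_params)
    assert settings == 2, 'set two and only two of: %s' % ', '.join(keys)
    # With the first two given, the total lumi budget follows:
    totalNLumis = int(cfg_params['CMSSW.lumis_per_job']) \
                * int(cfg_params['CMSSW.number_of_jobs'])   # 500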
109 +    def ComputeSubBlockSites( self, blockSites ):
110 +        """Restrict blockSites to the blocks with at least one location
111 +        in the SE white list."""
112 +        sub_blockSites = {}
113 +        for k,v in blockSites.iteritems():
114 +            sites=self.blackWhiteListParser.checkWhiteList(v)
115 +            if sites : sub_blockSites[k]=v
116 +        if len(sub_blockSites) < 1:
117 +            msg = 'WARNING: the sites %s are not hosting any part of the data.'%self.seWhiteList
118 +            raise CrabException(msg)
119 +        return sub_blockSites
120  
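
For illustration only (the blockSites mapping and site names are invented, and a plain list-membership test stands in for the parser's checkWhiteList), this is the filtering ComputeSubBlockSites performs:

    blockSites = {'/ds#block1': ['srm.cern.ch', 'srm.fnal.gov'],
                  '/ds#block2': ['srm.fnal.gov']}
    seWhiteList = ['srm.cern.ch']
    sub_blockSites = {}
    for block, sites in blockSites.iteritems():
        if [s for s in sites if s in seWhiteList]:   # stand-in for checkWhiteList
            sub_blockSites[block] = sites
    # sub_blockSites == {'/ds#block1': ['srm.cern.ch', 'srm.fnal.gov']};
    # an empty result raises the CrabException above.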
121   ########################################################################
122      def jobSplittingByEvent( self ):
# Line 77 | Line 152 | class JobSplitter:
152          self.useParent = int(self.cfg_params.get('CMSSW.use_parent',0))
153          noBboundary = int(self.cfg_params.get('CMSSW.no_block_boundary',0))
154  
155 +        if noBboundary == 1:
156 +            if self.total_number_of_events== -1:
157 +                msg = 'You are selecting no_block_boundary=1, which does not allow total_number_of_events=-1\n'
158 +                msg += '\tYou should get the number of events from the DBS web interface and use it in your configuration.'
159 +                raise CrabException(msg)
160 +            if len(self.seWhiteList) == 0 or len(self.seWhiteList.split(',')) != 1:
161 +                msg = 'You are selecting no_block_boundary=1, which requires choosing one and only one site.\n'
162 +                msg += "\tPlease set se_white_list with the site's storage element name."
163 +                raise  CrabException(msg)
164 +            blockSites = self.ComputeSubBlockSites(blockSites)
165 +
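
A short sketch (values invented) of the two constraints just checked for no_block_boundary=1: a finite event count and exactly one storage element in the white list.

    total_number_of_events = 10000               # must not be -1
    se_white_list = 'srm-cms.gridpp.rl.ac.uk'    # exactly one SE
    assert total_number_of_events != -1
    assert se_white_list and len(se_white_list.split(',')) == 1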
166          # ---- Handle the possible job splitting configurations ---- #
167          if (self.selectTotalNumberEvents):
168              totalEventsRequested = self.total_number_of_events
# Line 109 | Line 195 | class JobSplitter:
195          if (self.selectNumberOfJobs):
196              common.logger.info("May not create the exact number_of_jobs requested.")
197  
198 +        if (self.theNumberOfJobs < 0):
199 +            common.logger.info("ERROR: Negative number_of_jobs requested. Will result in no jobs.")
200 +            
201          # old... to remove Daniele
202          totalNumberOfJobs = 999999999
203  
# Line 193 | Line 282 | class JobSplitter:
282                                  fullString = parString[:-1]
283                                  if self.useParent==1:
284                                      fullParentString = pString[:-1]
285 <                                    list_of_lists.append([fullString,fullParentString,str(-1),str(jobSkipEventCount)])
285 >                                    list_of_lists.append([fullString,fullParentString,str(-1),str(jobSkipEventCount),block])
286                                  else:
287 <                                    list_of_lists.append([fullString,str(-1),str(jobSkipEventCount)])
287 >                                    list_of_lists.append([fullString,str(-1),str(jobSkipEventCount),block])
288                                  msg += "Job %s can run over %s  events (last file in block).\n"%(str(jobCount+1), str(filesEventCount - jobSkipEventCount))
289                                  jobDestination.append(blockSites[block])
290                                  msg += "Job %s Destination: %s\n"%(str(jobCount+1),str(SE2CMS(jobDestination[jobCount])))
# Line 222 | Line 311 | class JobSplitter:
311                          fullString = parString[:-1]
312                          if self.useParent==1:
313                              fullParentString = pString[:-1]
314 <                            list_of_lists.append([fullString,fullParentString,str(eventsPerJobRequested),str(jobSkipEventCount)])
314 >                            list_of_lists.append([fullString,fullParentString,str(eventsPerJobRequested),str(jobSkipEventCount),block])
315                          else:
316 <                            list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
316 >                            list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount),block])
317                          msg += "Job %s can run over %s events.\n"%(str(jobCount+1),str(eventsPerJobRequested))
318                          jobDestination.append(blockSites[block])
319                          msg+= "Job %s Destination: %s\n"%(str(jobCount+1),str(SE2CMS(jobDestination[jobCount])))
# Line 247 | Line 336 | class JobSplitter:
336                          fullString = parString[:-1]
337                          if self.useParent==1:
338                              fullParentString = pString[:-1]
339 <                            list_of_lists.append([fullString,fullParentString,str(eventsPerJobRequested),str(jobSkipEventCount)])
339 >                            list_of_lists.append([fullString,fullParentString,str(eventsPerJobRequested),str(jobSkipEventCount),block])
340                          else:
341 <                            list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount)])
341 >                            list_of_lists.append([fullString,str(eventsPerJobRequested),str(jobSkipEventCount),block])
342                          msg += "Job %s can run over %s events.\n"%(str(jobCount+1),str(eventsPerJobRequested))
343                          jobDestination.append(blockSites[block])
344                          msg+= "Job %s Destination: %s\n"%(str(jobCount+1),str(SE2CMS(jobDestination[jobCount])))
# Line 282 | Line 371 | class JobSplitter:
371  
372         # prepare dict output
373          dictOut = {}
374 <        dictOut['params']= ['InputFiles','MaxEvents','SkipEvents']
375 <        if self.useParent: dictOut['params']= ['InputFiles','ParentFiles','MaxEvents','SkipEvents']
374 >        dictOut['params']= ['InputFiles','MaxEvents','SkipEvents','InputBlocks']
375 >        if self.useParent: dictOut['params']= ['InputFiles','ParentFiles','MaxEvents','SkipEvents','InputBlocks']
376          dictOut['args'] = list_of_lists
377          dictOut['jobDestination'] = jobDestination
378          dictOut['njobs']=self.total_number_of_jobs
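
For reference, the dict returned here now has the shape sketched below (all values invented); each entry of 'args' lines up positionally with 'params', including the new InputBlocks field.

    dictOut = {
        'params': ['InputFiles', 'MaxEvents', 'SkipEvents', 'InputBlocks'],
        'args': [['/store/a.root,/store/b.root', '500', '0',   '/ds#block1'],
                 ['/store/c.root',               '500', '250', '/ds#block2']],
        'jobDestination': [['srm.cern.ch'], ['srm.fnal.gov']],
        'njobs': 2,
    }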
# Line 300 | Line 389 | class JobSplitter:
389          allBlock = []
390  
391          blockCounter = 0
392 +        saveFblocks = ''
393          for block in blocks:
394              if block in jobsOfBlock.keys() :
395                  blockCounter += 1
# Line 310 | Line 400 | class JobSplitter:
400                  if len(sites) == 0:
401                      noSiteBlock.append( spanRanges(jobsOfBlock[block]) )
402                      bloskNoSite.append( blockCounter )
403 +                else:
404 +                    saveFblocks += str(block)+'\n'
405 +        writeTXTfile(self, self.fileBlocks_FileName , saveFblocks)
406  
407          common.logger.info(screenOutput)
408          if len(noSiteBlock) > 0 and len(bloskNoSite) > 0:
# Line 319 | Line 412 | class JobSplitter:
412                  virgola = ","
413              for block in bloskNoSite:
414                  msg += ' ' + str(block) + virgola
415 <            msg += '\n               Related jobs:\n                 '
415 >            msg += '\n\t\tRelated jobs:\n                 '
416              virgola = ""
417              if len(noSiteBlock) > 1:
418                  virgola = ","
419              for range_jobs in noSiteBlock:
420                  msg += str(range_jobs) + virgola
421 <            msg += '\n               will not be submitted and this block of data can not be analyzed!\n'
421 >            msg += '\n\t\twill not be submitted and this block of data cannot be analyzed!\n'
422              if self.cfg_params.has_key('GRID.se_white_list'):
423 <                msg += 'WARNING: SE White List: '+self.cfg_params['GRID.se_white_list']+'\n'
424 <                msg += '(Hint: By whitelisting you force the job to run at this particular site(s).\n'
425 <                msg += 'Please check if the dataset is available at this site!)\n'
423 >                msg += '\tWARNING: SE White List: '+self.cfg_params['GRID.se_white_list']+'\n'
424 >                msg += '\t(Hint: By whitelisting you force the job to run at this particular site(s).\n'
425 >                msg += '\tPlease check if the dataset is available at this site!)'
426              if self.cfg_params.has_key('GRID.ce_white_list'):
427 <                msg += 'WARNING: CE White List: '+self.cfg_params['GRID.ce_white_list']+'\n'
428 <                msg += '(Hint: By whitelisting you force the job to run at this particular site(s).\n'
429 <                msg += 'Please check if the dataset is available at this site!)\n'
427 >                msg += '\tWARNING: CE White List: '+self.cfg_params['GRID.ce_white_list']+'\n'
428 >                msg += '\t(Hint: By whitelisting you force the job to run at this particular site(s).\n'
429 >                msg += '\tPlease check if the dataset is available at this site!)\n'
430  
431              common.logger.info(msg)
432  
433          if bloskNoSite == allBlock:
434 <            raise CrabException('No jobs created')
434 >            msg = 'Requested jobs cannot be created!\n'
435 >            if self.cfg_params.has_key('GRID.se_white_list'):
436 >                msg += '\tWARNING: SE White List: '+self.cfg_params['GRID.se_white_list']+'\n'
437 >                msg += '\t(Hint: By whitelisting you force the job to run at this particular site(s).\n'
438 >                msg += '\tPlease check if the dataset is available at this site!)'
439 >            if self.cfg_params.has_key('GRID.ce_white_list'):
440 >                msg += '\tWARNING: CE White List: '+self.cfg_params['GRID.ce_white_list']+'\n'
441 >                msg += '\t(Hint: By whitelisting you force the job to run at this particular site(s).\n'
442 >                msg += '\tPlease check if the dataset is available at this site!)\n'
443 >            raise CrabException(msg)
444  
445          return
446  
# Line 347 | Line 449 | class JobSplitter:
449      def jobSplittingByRun(self):
450          """
451          """
350        from sets import Set
351        from WMCore.JobSplitting.RunBased import RunBased
352        from WMCore.DataStructs.Workflow import Workflow
353        from WMCore.DataStructs.File import File
354        from WMCore.DataStructs.Fileset import Fileset
355        from WMCore.DataStructs.Subscription import Subscription
356        from WMCore.JobSplitting.SplitterFactory import SplitterFactory
357        from WMCore.DataStructs.Run import Run
452  
453          self.checkUserSettings()
454          blockSites = self.args['blockSites']
# Line 373 | Line 467 | class JobSplitter:
467              except:
468                  continue
469              wmbsFile = File(f['LogicalFileName'])
470 +            if not blockSites[block]:
471 +                msg = 'WARNING: No sites are hosting any part of the data for block: %s\n' %block
472 +                msg += 'Related jobs will not be submitted and this block of data cannot be analyzed'
473 +                common.logger.debug(msg)
474              [ wmbsFile['locations'].add(x) for x in blockSites[block] ]
475              wmbsFile['block'] = block
476              runNum = f['RunsList'][0]['RunNumber']
# Line 393 | Line 491 | class JobSplitter:
491          jobfactory = splitter(subs)
492  
493          #loop over all runs
396        set = Set(runList)
494          list_of_lists = []
495          jobDestination = []
496 +        list_of_blocks = []
497          count = 0
498          for jobGroup in  jobfactory():
499              if count <  self.theNumberOfJobs:
# Line 403 | Line 501 | class JobSplitter:
501                  parString = ''
502                  for file in res['lfns']:
503                      parString += file + ','
504 +                list_of_blocks.append(res['block'])
505                  fullString = parString[:-1]
506 <                list_of_lists.append([fullString,str(-1),str(0)])
506 >                blockString=','.join(list_of_blocks)
507 >                list_of_lists.append([fullString,str(-1),str(0),blockString])
508                  #need to check single file location
509                  jobDestination.append(res['locations'])
510                  count +=1
511 <       # prepare dict output
511 >        # prepare dict output
512          dictOut = {}
513 <        dictOut['params']= ['InputFiles','MaxEvents','SkipEvents']
513 >        dictOut['params']= ['InputFiles','MaxEvents','SkipEvents','InputBlocks']
514          dictOut['args'] = list_of_lists
515          dictOut['jobDestination'] = jobDestination
516          dictOut['njobs']=count
517 +        self.cacheBlocks(list_of_blocks,jobDestination)
518  
519          return dictOut
520  
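
A self-contained sketch of the WMBS machinery jobSplittingByRun drives, using toy files and runs (LFNs and run numbers invented; the exact grouping can vary with the WMCore version):

    from WMCore.DataStructs.File import File
    from WMCore.DataStructs.Fileset import Fileset
    from WMCore.DataStructs.Run import Run
    from WMCore.DataStructs.Subscription import Subscription
    from WMCore.DataStructs.Workflow import Workflow
    from WMCore.JobSplitting.SplitterFactory import SplitterFactory

    f1 = File('/store/toy/a.root')    # invented LFNs
    f1.addRun(Run(160431, 1))
    f2 = File('/store/toy/b.root')
    f2.addRun(Run(160432, 1))
    fileset = Fileset(name='FilesToSplit', files=set([f1, f2]))
    subs = Subscription(fileset=fileset, workflow=Workflow(),
                        split_algo='RunBased', type='Processing')
    jobfactory = SplitterFactory()(subs)
    for jobGroup in jobfactory():     # RunBased: jobs grouped by run
        for job in jobGroup.jobs:
            print [f['lfn'] for f in job.getFiles()]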
# Line 428 | Line 529 | class JobSplitter:
529                  for loc in file['locations']:
530                      if tmp_check < 1 :
531                          locations.append(loc)
532 +                        res['block']= file['block']
533                  tmp_check = tmp_check + 1
432                ### the check on the locations should go here
534          res['lfns'] = lfns
535          res['locations'] = locations
536          return res
# Line 474 | Line 575 | class JobSplitter:
575  
576          managedGenerators =self.args['managedGenerators']
577          generator = self.args['generator']
578 <        firstRun = self.cfg_params.get('CMSSW.first_run',None)
578 >        firstLumi = self.cfg_params.get('CMSSW.first_lumi', 1)
579  
580          self.prepareSplittingNoInput()
581  
# Line 493 | Line 594 | class JobSplitter:
594          self.list_of_args = []
595          for i in range(self.total_number_of_jobs):
596              ## Since there is no input, any site is good
597 <            jobDestination.append([""]) #must be empty to write correctly the xml
597 >            jobDestination.append([""]) # must be empty to correctly write the XML
598              args=[]
599 <            if (firstRun):
600 <                ## pythia first run
500 <                args.append(str(firstRun)+str(i))
599 >            if (firstLumi): # Pythia first lumi
600 >                args.append(str(int(firstLumi)+i))
601              if (generator in managedGenerators):
602                 args.append(generator)
603                 if (generator == 'comphep' and i == 0):
# Line 511 | Line 611 | class JobSplitter:
611  
612          dictOut = {}
613          dictOut['params'] = ['MaxEvents']
614 <        if (firstRun):
615 <            dictOut['params'] = ['FirstRun','MaxEvents']
616 <            if ( generator in managedGenerators ) :
617 <                dictOut['params'] = ['FirstRun', 'Generator', 'FirstEvent', 'MaxEvents']
614 >        if (firstLumi):
615 >            dictOut['params'] = ['FirstLumi','MaxEvents']
616 >            if (generator in managedGenerators):
617 >                dictOut['params'] = ['FirstLumi', 'Generator', 'FirstEvent', 'MaxEvents']
618          else:
619              if (generator in managedGenerators) :
620                  dictOut['params'] = ['Generator', 'FirstEvent', 'MaxEvents']
# Line 565 | Line 665 | class JobSplitter:
665  
666      def jobSplittingByLumi(self):
667          """
668 +        Split the task into jobs by lumi section, paying attention to which
669 +        lumis should be run (according to the analysis dataset).
670 +        This uses WMBS job splitting, which does not split files across jobs,
671 +        so each job will have AT LEAST as many lumis as requested, perhaps
672 +        more.
673          """
674 <        return
674 >        self.useParent = int(self.cfg_params.get('CMSSW.use_parent',0))
675 >        common.logger.debug('Splitting by Lumi')
676 >        self.checkLumiSettings()
677 >
678 >        blockSites = self.args['blockSites']
679 >        pubdata = self.args['pubdata']
680 >
681 >        lumisPerFile  = pubdata.getLumis()
682 >        self.parentFiles=pubdata.getParent()
683 >        # Make the list of WMBS files for job splitter
684 >        fileList = pubdata.getListFiles()
685 >        wmFileList = []
686 >        for jobFile in fileList:
687 >            block = jobFile['Block']['Name']
688 >            try:
689 >                jobFile['Block']['StorageElementList'].extend(blockSites[block])
690 >            except:
691 >                continue
692 >            wmbsFile = File(jobFile['LogicalFileName'])
693 >            if not blockSites[block]:
694 >                msg = 'WARNING: No sites are hosting any part of the data for block: %s\n' %block
695 >                msg += 'Related jobs will not be submitted and this block of data cannot be analyzed'
696 >                common.logger.debug(msg)
697 >               # wmbsFile['locations'].add('Nowhere')
698 >            [ wmbsFile['locations'].add(x) for x in blockSites[block] ]
699 >            wmbsFile['block'] = block
700 >            for lumi in lumisPerFile[jobFile['LogicalFileName']]:
701 >                wmbsFile.addRun(Run(lumi[0], lumi[1]))
702 >            wmFileList.append(wmbsFile)
703 >
704 >        fileSet = set(wmFileList)
705 >        thefiles = Fileset(name='FilesToSplit', files = fileSet)
706 >
707 >        # Create the factory and workflow
708 >        work = Workflow()
709 >        subs = Subscription(fileset    = thefiles,    workflow = work,
710 >                            split_algo = 'LumiBased', type     = "Processing")
711 >        splitter = SplitterFactory()
712 >        jobFactory = splitter(subs)
713 >
714 >        list_of_lists = []
715 >        jobDestination = []
716 >        jobCount = 0
717 >        lumisCreated = 0
718 >        list_of_blocks = []
719 >        if not self.limitJobLumis:
720 >            if self.totalNLumis > 0:
721 >                self.lumisPerJob = max(self.totalNLumis // self.theNumberOfJobs,1)
722 >            else:
723 >                self.lumisPerJob = pubdata.getMaxLumis() // self.theNumberOfJobs + 1
724 >            common.logger.info('Each job will process about %s lumis.' %
725 >                                self.lumisPerJob)
726 >
727 >        for jobGroup in jobFactory(lumis_per_job = self.lumisPerJob):
728 >            for job in jobGroup.jobs:
729 >                if (self.limitNJobs and jobCount >= self.theNumberOfJobs):
730 >                    common.logger.info('Requested number of jobs reached.')
731 >                    break
732 >                if (self.limitTotalLumis and lumisCreated >= self.totalNLumis):
733 >                    common.logger.info('Requested number of lumis reached.')
734 >                    break
735 >                lumis = []
736 >                lfns  = []
737 >                if self.useParent==1:
738 >                 parentlfns  = []
739 >                 pString =""
740 >
741 >                locations = []
742 >                blocks = []
743 >                firstFile = True
744 >                # Collect information from all the files
745 >                for jobFile in job.getFiles():
746 >                    doFile = False
747 >                    if firstFile:  # Get locations from first file in the job
748 >                        for loc in jobFile['locations']:
749 >                            locations.append(loc)
750 >                        blocks.append(jobFile['block'])
751 >                        firstFile = False
752 >                    # Accumulate Lumis from all files
753 >                    for lumiList in jobFile['runs']:
754 >                        theRun = lumiList.run
755 >                        for theLumi in list(lumiList):
756 >                            if (not self.limitTotalLumis) or \
757 >                               (lumisCreated < self.totalNLumis):
758 >                                doFile = True
759 >                                lumisCreated += 1
760 >                                lumis.append( (theRun, theLumi) )
761 >                    if doFile:
762 >                        lfns.append(jobFile['lfn'])
763 >                        if self.useParent==1:
764 >                           parent = self.parentFiles[jobFile['lfn']]
765 >                           for p in parent :
766 >                               pString += p  + ','
767 >                fileString = ','.join(lfns)
768 >                lumiLister = LumiList(lumis = lumis)
769 >                lumiString = lumiLister.getCMSSWString()
770 >                blockString=','.join(blocks)
771 >                if self.useParent==1:
772 >                  common.logger.debug("Files: "+fileString+" with the following parents: "+pString[:-1])
773 >                  pfileString = pString[:-1]
774 >                  list_of_lists.append([fileString, pfileString, str(-1), str(0), lumiString,blockString])
775 >                else:
776 >                 list_of_lists.append([fileString, str(-1), str(0), lumiString, blockString])
777 >                list_of_blocks.append(blocks)
778 >                jobDestination.append(locations)
779 >                jobCount += 1
780 >                common.logger.debug('Job %s will run on %s files and %s lumis '
781 >                    % (jobCount, len(lfns), len(lumis) ))
782 >
783 >        common.logger.info('%s jobs created to run on %s lumis' %
784 >                              (jobCount, lumisCreated))
785 >
786 >        # Prepare dict output matching back to non-WMBS job creation
787 >        dictOut = {}
788 >        dictOut['params'] = ['InputFiles', 'MaxEvents', 'SkipEvents', 'Lumis','InputBlocks']
789 >        if self.useParent==1:
790 >         dictOut['params']= ['InputFiles','ParentFiles','MaxEvents','SkipEvents','Lumis','InputBlocks']
791 >        dictOut['args'] = list_of_lists
792 >        dictOut['jobDestination'] = jobDestination
793 >        dictOut['njobs'] = jobCount
794 >        self.cacheBlocks(list_of_blocks,jobDestination)
795 >
796 >        return dictOut
797 >
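
The LumiList step above compacts the accumulated (run, lumi) pairs into the CMSSW lumisToProcess syntax. A minimal sketch (run and lumi numbers invented, output indicative only):

    from FWCore.PythonUtilities.LumiList import LumiList
    lumis = [(166408, 1), (166408, 2), (166408, 3), (166429, 10)]
    print LumiList(lumis=lumis).getCMSSWString()
    # something like '166408:1-166408:3,166429:10'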
798 >    def cacheBlocks(self, blocks, destinations):
799 >
800 >        saveFblocks=''
801 >        for i in range(len(blocks)):
802 >            sites=self.blackWhiteListParser.checkWhiteList(self.blackWhiteListParser.checkBlackList(destinations[i]))
803 >            if len(sites) != 0:
804 >                for block in blocks[i]:
805 >                    saveFblocks += str(block)+'\n'
806 >        writeTXTfile(self, self.fileBlocks_FileName , saveFblocks)
807 >
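
Schematically, cacheBlocks persists one block name per line for every job whose destination list survives the black/white lists (data invented; a plain file write stands in for the writeTXTfile helper used above):

    blocks = [['/ds#block1'], ['/ds#block2']]    # per-job block lists
    destinations = [['srm.cern.ch'], []]         # per-job surviving sites
    saveFblocks = ''
    for i in range(len(blocks)):
        if destinations[i]:                      # job still has a site
            for block in blocks[i]:
                saveFblocks += block + '\n'
    open('AnalyzedBlocks.txt', 'w').write(saveFblocks)   # only block1 kept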
808      def Algos(self):
809          """
810          Define key splittingType matrix

Diff Legend

Removed lines (no marker)
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)