import time, glob, string   # string is used below for string.strip / string.find
import sys

from Actor import *
from crab_util import *
from crab_exceptions import *
from ProdCommon.FwkJobRep.ReportParser import readJobReport
from ProdCommon.FwkJobRep.ReportState import checkSuccess
from ProdCommon.MCPayloads.WorkflowSpec import WorkflowSpec
from ProdCommon.DataMgmt.DBS.DBSWriter import DBSWriter, DBSWriterObjects
from ProdCommon.DataMgmt.DBS.DBSErrors import DBSWriterError, formatEx, DBSReaderError
from ProdCommon.DataMgmt.DBS.DBSReader import DBSReader
from DBSAPI.dbsApiException import DbsException
from DBSAPI.dbsApi import DbsApi

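# The Publisher takes the FrameworkJobReports (fjr) produced by the CRAB jobs
# of a task and publishes the corresponding datasets and output files in a
# user-specified local DBS instance.
#
# Typical use from the CRAB client (sketch; cfg_params is the parsed crab.cfg):
#   publisher = Publisher(cfg_params)
#   publisher.run()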
class Publisher(Actor):
    def __init__(self, cfg_params):
        """
        Publisher class:

        - parses CRAB FrameworkJobReport on UI
        - returns <file> section of xml in dictionary format for each xml file in crab_0_xxxx/res directory
        - publishes output data on DBS and DLS
        """
        self.cfg_params = cfg_params

        if not cfg_params.has_key('USER.publish_data_name'):
            raise CrabException('Cannot publish output data, because you did not specify the USER.publish_data_name parameter in the crab.cfg file')
        self.userprocessedData = cfg_params['USER.publish_data_name']
        self.processedData = None

        if (not cfg_params.has_key('USER.copy_data') or int(cfg_params['USER.copy_data']) != 1) or \
           (not cfg_params.has_key('USER.publish_data') or int(cfg_params['USER.publish_data']) != 1):
            msg = 'You can not publish data because you did not select \n'
            msg += '\t*** copy_data = 1 and publish_data = 1 *** in the crab.cfg file'
            raise CrabException(msg)

        if not cfg_params.has_key('CMSSW.pset'):
            raise CrabException('Cannot publish output data, because you did not specify the pset name in [CMSSW] of your crab.cfg file')
        self.pset = cfg_params['CMSSW.pset']

        self.globalDBS = cfg_params.get('CMSSW.dbs_url', "http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet")

        if not cfg_params.has_key('USER.dbs_url_for_publication'):
            msg = "Warning. The [USER] section does not have the 'dbs_url_for_publication'"
            msg = msg + " entry, necessary to publish the data.\n"
            msg = msg + "Use the command **crab -publish -USER.dbs_url_for_publication=dbs_url_for_publication*** \nwhere dbs_url_for_publication is your local dbs instance."
            raise CrabException(msg)

        self.DBSURL = cfg_params['USER.dbs_url_for_publication']
        common.logger.info('<dbs_url_for_publication> = ' + self.DBSURL)
        if (self.DBSURL == "http://cmsdbsprod.cern.ch/cms_dbs_prod_global/servlet/DBSServlet") or (self.DBSURL == "https://cmsdbsprod.cern.ch:8443/cms_dbs_prod_global_writer/servlet/DBSServlet"):
            msg = "You can not publish your data in the globalDBS = " + self.DBSURL + "\n"
            msg = msg + "Please write your local one in the [USER] section 'dbs_url_for_publication'"
            raise CrabException(msg)

        self.content = file(self.pset).read()
        self.resDir = common.work_space.resDir()

        self.dataset_to_import = []

        self.datasetpath = cfg_params['CMSSW.datasetpath']
        if (self.datasetpath.upper() != 'NONE'):
            self.dataset_to_import.append(self.datasetpath)

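        ### The input dataset and any pileup datasets must already exist in the
        ### local DBS before the output can be published, so they are queued
        ### here for import.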
        ### Added PU dataset
        tmp = cfg_params.get('CMSSW.dataset_pu', None)
        if tmp:
            datasets = tmp.split(',')
            for dataset in datasets:
                dataset = string.strip(dataset)
                self.dataset_to_import.append(dataset)
        ###

        self.import_all_parents = cfg_params.get('USER.publish_with_import_all_parents', 1)

        ### the import of the parent datasets is compulsory ###
        if (int(self.import_all_parents) == 0):
            common.logger.info("WARNING: The option USER.publish_with_import_all_parents=0 has been deprecated. The import of parents is compulsory and done by default")
        ############

        self.skipOcheck = cfg_params.get('CMSSW.publish_zero_event', 0)

        self.SEName = ''
        self.CMSSW_VERSION = ''
        self.exit_status = ''
        self.time = time.strftime('%y%m%d_%H%M%S', time.localtime(time.time()))
        self.problemFiles = []
        self.noEventsFiles = []
        self.noLFN = []

    def importParentDataset(self, globalDBS, datasetpath):
        """
        Import datasetpath, with all its parents, from the source DBS
        (typically the global one) into the local publication DBS.
        Returns 0 on success and 1 on failure.
        """
        dbsWriter = DBSWriter(self.DBSURL, level='ERROR')

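        # migrateDatasetContents copies the dataset description (blocks, files
        # and parentage information) from the source DBS into the local one.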
        try:
            #if (self.import_all_parents==1):
            common.logger.info("--->>> Importing all parents level")
            start = time.time()
            common.logger.debug("start import time: " + str(start))
            ### to skip the ProdCommon api exception in the case of block without location
            ### skipNoSiteError=True
            #dbsWriter.importDataset(globalDBS, datasetpath, self.DBSURL, skipNoSiteError=True)
            ### calling the dbs api directly
            dbsWriter.dbs.migrateDatasetContents(globalDBS, self.DBSURL, datasetpath)
            stop = time.time()
            common.logger.debug("stop import time: " + str(stop))
            common.logger.info("--->>> duration of all parents import (sec): " + str(stop - start))
            ## still not removing the code, but TODO for the final release...
            """
            else:
                common.logger.info("--->>> Importing only the datasetpath " + datasetpath)
                start = time.time()
                #dbsWriter.importDatasetWithoutParentage(globalDBS, datasetpath, self.DBSURL, skipNoSiteError=True)
                ### calling the dbs api directly
                common.logger.debug("start import time: " + str(start))
                dbsWriter.dbs.migrateDatasetContents(globalDBS, self.DBSURL, datasetpath, noParentsReadOnly = True )
                stop = time.time()
                common.logger.debug("stop import time: " + str(stop))
                common.logger.info("--->>> duration of first level parent import (sec): " + str(stop - start))
            """
        except DBSWriterError, ex:
            msg = "Error importing dataset to be processed into local DBS\n"
            msg += "Source Dataset: %s\n" % datasetpath
            msg += "Source DBS: %s\n" % globalDBS
            msg += "Destination DBS: %s\n" % self.DBSURL
            common.logger.info(msg)
            common.logger.info(str(ex))
            return 1
        return 0

    def publishDataset(self, file):
        """
        Publish in the local DBS the dataset(s) described in one fjr:
        import the parent datasets, then create the primary, algorithm
        and processed dataset entries.
        """
        try:
            ### fjr content as argument
            jobReport = readJobReport(file)[0]
            self.exit_status = '0'
        except IndexError:
            self.exit_status = '1'
            msg = "Error: Problem with " + file + " file"
            common.logger.info(msg)
            return self.exit_status

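        # Make sure every parent dataset is known to the local DBS before the
        # output files that descend from it are inserted.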
        if (len(self.dataset_to_import) != 0):
            for dataset in self.dataset_to_import:
                common.logger.info("--->>> Importing parent dataset in the dbs: " + dataset)
                status_import = self.importParentDataset(self.globalDBS, dataset)
                if (status_import == 1):
                    common.logger.info('Problem with parent ' + dataset + ' import from the global DBS ' + self.globalDBS + ' to the local one ' + self.DBSURL)
                    self.exit_status = '1'
                    return self.exit_status
                else:
                    common.logger.info('Import ok of dataset ' + dataset)

        if (len(jobReport.files) <= 0):
            self.exit_status = '1'
            msg = "Error: No EDM file to publish in the xml file " + file
            common.logger.info(msg)
            return self.exit_status
        else:
            msg = "fjr contains some files to publish"
            common.logger.debug(msg)

        #### datasets creation in dbs
        #// DBS to contact: write and read on the same dbs
        dbsReader = DBSReader(self.DBSURL, level='ERROR')
        dbswriter = DBSWriter(self.DBSURL)
        #####

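        # One fjr can describe several output files, and each file can belong
        # to more than one dataset; every dataset found is registered in DBS
        # and remembered for the final publication check.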
        self.published_datasets = []
        for fileinfo in jobReport.files:
            datasets_info = fileinfo.dataset
            if len(datasets_info) <= 0:
                self.exit_status = '1'
                msg = "Error: No info about dataset in the xml file " + file
                common.logger.info(msg)
                return self.exit_status
            else:
                for dataset in datasets_info:
                    #### for production data
                    self.processedData = dataset['ProcessedDataset']
                    if (dataset['PrimaryDataset'] == 'null'):
                        dataset['PrimaryDataset'] = self.userprocessedData
                    elif self.datasetpath.upper() != 'NONE':
                        dataset['ParentDataset'] = self.datasetpath

                    dataset['PSetContent'] = self.content
                    cfgMeta = {'name': self.pset, 'Type': 'user', 'annotation': 'user cfg', 'version': 'private version'}  # add real name of user cfg
                    common.logger.info("PrimaryDataset = %s" % dataset['PrimaryDataset'])
                    common.logger.info("ProcessedDataset = %s" % dataset['ProcessedDataset'])
                    common.logger.info("<User Dataset Name> = /" + dataset['PrimaryDataset'] + "/" + dataset['ProcessedDataset'] + "/USER")

                    self.dataset_to_check = "/" + dataset['PrimaryDataset'] + "/" + dataset['ProcessedDataset'] + "/USER"

                    self.published_datasets.append(self.dataset_to_check)

                    common.logger.log(10-1, "--->>> Inserting primary: %s processed : %s" % (dataset['PrimaryDataset'], dataset['ProcessedDataset']))

                    #### check if dataset already exists in the DBS
                    result = dbsReader.matchProcessedDatasets(dataset['PrimaryDataset'], 'USER', dataset['ProcessedDataset'])
                    if (len(result) != 0):
                        result = dbsReader.listDatasetFiles(self.dataset_to_check)

                    primary = DBSWriterObjects.createPrimaryDataset(dataset, dbswriter.dbs)
                    common.logger.log(10-1, "Primary: %s " % primary)

                    algo = DBSWriterObjects.createAlgorithm(dataset, cfgMeta, dbswriter.dbs)
                    common.logger.log(10-1, "Algo: %s " % algo)

                    processed = DBSWriterObjects.createProcessedDataset(primary, algo, dataset, dbswriter.dbs)
                    common.logger.log(10-1, "Processed: %s " % processed)

                    common.logger.log(10-1, "Inserted primary %s processed %s" % (primary, processed))
                    #######################################################################################

        common.logger.log(10-1, "exit_status = %s " % self.exit_status)
        return self.exit_status

    def publishAJobReport(self, file, procdataset):
        """
        input: xml file, processedDataset
        """
        common.logger.debug("FJR = %s" % file)
        try:
            jobReport = readJobReport(file)[0]
            self.exit_status = '0'
        except IndexError:
            self.exit_status = '1'
            msg = "Error: Problem with " + file + " file"
            common.logger.info(msg)
            return self.exit_status

        ### skip files with copy problems, an empty LFN or (optionally) 0 events
        filestopublish = []
        for file in jobReport.files:
            if (string.find(file['LFN'], 'copy_problems') != -1):
                self.problemFiles.append(file['LFN'])
            elif (file['LFN'] == ''):
                self.noLFN.append(file['PFN'])
            else:
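                # CMSSW.publish_zero_event = 0 (default): files with 0 events
                # are skipped; with 1 they are published anyway.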
                if self.skipOcheck == 0:
                    if int(file['TotalEvents']) != 0:
                        ### Fede to insert also run and lumi info in DBS
                        #file.runs = {}
                        for ds in file.dataset:
                            ### Fede for production
                            if (ds['PrimaryDataset'] == 'null'):
                                ds['PrimaryDataset'] = self.userprocessedData
                        filestopublish.append(file)
                    else:
                        self.noEventsFiles.append(file['LFN'])
                else:
                    ### Fede to insert also run and lumi info in DBS
                    #file.runs = {}
                    for ds in file.dataset:
                        ### For production
                        if (ds['PrimaryDataset'] == 'null'):
                            ds['PrimaryDataset'] = self.userprocessedData
                    filestopublish.append(file)

        ### only good files will be published
        jobReport.files = filestopublish
        for file in filestopublish:
            common.logger.debug("--->>> LFN of file to publish = " + str(file['LFN']))

        ### if all files of the FJR have number of events = 0
        if (len(filestopublish) == 0):
            return None

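        # insertFiles registers the surviving files in the local DBS and
        # returns the fileblocks they were assigned to; run() closes them.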
        #// DBS to contact
        dbswriter = DBSWriter(self.DBSURL)
        # insert files
        Blocks = None
        try:
            ### insertDetectorData = True propagates run and lumi info into DBS
            Blocks = dbswriter.insertFiles(jobReport, insertDetectorData=True)
            common.logger.debug("--->>> Inserting file in blocks = %s" % Blocks)
        except DBSWriterError, ex:
            common.logger.debug("--->>> Insert file error: %s" % ex)
        return Blocks

    def run(self):
        """
        Parse all the fjr xml files in the res dir and publish their
        datasets and files in the local DBS.
        """

        file_list = glob.glob(self.resDir + "crab_fjr*.xml")

        ## Select only those fjr that are successful
        if (len(file_list) == 0):
            common.logger.info("--->>> " + self.resDir + " empty: no fjr files in the res dir to publish on DBS")
            self.exit_status = '1'
            return self.exit_status

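        # A fjr that parses to an empty report list, or whose overall status
        # is not 'Success', is excluded from the publication.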
        good_list = []

        for fjr in file_list:
            reports = readJobReport(fjr)
            if len(reports) > 0:
                ### with backup-copy the wrapper_exit_code is 60308 --> failed
                if reports[0].status == "Success":
                    good_list.append(fjr)

        file_list = good_list
        ##
        common.logger.log(10-1, "fjr with FrameworkJobReport Status='Success', file_list = " + str(file_list))
        common.logger.log(10-1, "len(file_list) = " + str(len(file_list)))

        if (len(file_list) > 0):
            BlocksList = []
            common.logger.info("--->>> Start dataset publication")
            ### the datasets are published from the first fjr found
            self.exit_status = self.publishDataset(file_list[0])
            if (self.exit_status == '1'):
                return self.exit_status
            common.logger.info("--->>> End dataset publication")

            common.logger.info("--->>> Start files publication")

            ### file_list is composed only of fjr with status 'success':
            for file in file_list:
                Blocks = self.publishAJobReport(file, self.processedData)
                if Blocks:
                    for x in Blocks:  # do not allow multiple entries of the same block
                        if x not in BlocksList:
                            BlocksList.append(x)

            # close the blocks
            common.logger.log(10-1, "BlocksList = %s" % BlocksList)
            dbswriter = DBSWriter(self.DBSURL)

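            # with maxFiles=1, manageFileBlock closes every block that
            # already contains at least one file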
            for BlockName in BlocksList:
                try:
                    closeBlock = dbswriter.manageFileBlock(BlockName, maxFiles=1)
                    common.logger.log(10-1, "closeBlock %s" % closeBlock)
                except DBSWriterError, ex:
                    common.logger.info("Close block error %s" % ex)

            if (len(self.noEventsFiles) > 0):
                common.logger.info("--->>> WARNING: " + str(len(self.noEventsFiles)) + " files not published because they contain 0 events:")
                for lfn in self.noEventsFiles:
                    common.logger.info("------ LFN: %s" % lfn)
            if (len(self.noLFN) > 0):
                common.logger.info("--->>> WARNING: there are " + str(len(self.noLFN)) + " files not published because they have an empty LFN")
                for pfn in self.noLFN:
                    common.logger.info("------ PFN: %s" % pfn)
            if (len(self.problemFiles) > 0):
                common.logger.info("--->>> WARNING: " + str(len(self.problemFiles)) + " files not published because they had problems with the copy to the SE")
                for lfn in self.problemFiles:
                    common.logger.info("------ LFN: %s" % lfn)
            common.logger.info("--->>> End files publication")

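            # cross-check each published dataset directly against DBS,
            # reusing the standalone InspectDBS check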
            #### for MULTI PUBLICATION ####
            from InspectDBS import InspectDBS
            for dataset_to_check in self.published_datasets:
                self.cfg_params['USER.dataset_to_check'] = dataset_to_check
                check = InspectDBS(self.cfg_params)
                check.checkPublication()

            return self.exit_status

        else:
            common.logger.info("--->>> No valid files to publish on DBS: none of your jobs reported exit code = 0")
            self.exit_status = '1'
            return self.exit_status