root/cvsroot/COMP/CRAB/python/cms_orca_pubdb.py
Revision: 1.5
Committed: Tue Jun 27 15:39:58 2006 UTC by gutsche
Content type: text/x-python
Branch: MAIN
CVS Tags: CRAB_1_2_0_pre8
Changes since 1.4: +0 -16 lines
Log Message:
removed GridSyncJobId from the fields which are reported every time from the WN script to the DashBoard

File Contents

from JobType import JobType
from crab_logger import Logger
from crab_exceptions import *
from crab_util import *
import common
import PubDB
import orcarcBuilder
import orcarcBuilderOld
import Scram
import TarBall

import os, string, re, sys   # sys is needed for the sys.exit() call in __init__
import math

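# ORCA job type that takes the dataset/owner description from PubDB: it
# builds the .orcarc steering cards, the input/output sandboxes and the
# wrapper-script fragments used by the CRAB submission machinery.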
class Orca_pubdb(JobType):
    def __init__(self, cfg_params):
        JobType.__init__(self, 'ORCA_PUBDB')
        common.logger.debug(3,'ORCA_pubdb::__init__')

        self.analisys_common_info = {}
        # Marco.
        self._params = {}
        self.cfg_params = cfg_params

        log = common.logger

        self.scram = Scram.Scram(cfg_params)
        scramArea = ''
        self.additional_inbox_files = []
        self.scriptExe = ''

        self.version = self.scram.getSWVersion()
        self.setParam_('application', self.version)
        common.analisys_common_info['sw_version'] = self.version
        common.analisys_common_info['copy_input_data'] = 0

        self.total_number_of_jobs = 0
        self.job_number_of_events = 0

        ### collect Data cards
        try:
            self.owner = cfg_params['ORCA.owner']
            self.setParam_('owner', self.owner)
            log.debug(6, "Orca::Orca(): owner = "+self.owner)
            self.dataset = cfg_params['ORCA.dataset']
            self.setParam_('dataset', self.dataset)
            log.debug(6, "Orca::Orca(): dataset = "+self.dataset)
        except KeyError:
            msg = "Error: owner and/or dataset not defined "
            raise CrabException(msg)

        self.dataTiers = []
        try:
            tmpDataTiers = string.split(cfg_params['ORCA.data_tier'],',')
            for tmp in tmpDataTiers:
                tmp=string.strip(tmp)
                self.dataTiers.append(tmp)
                pass
            pass
        except KeyError:
            pass
        log.debug(6, "Orca_pubdb::Orca_pubdb(): dataTiers = "+str(self.dataTiers))

        ## now the application
        try:
            self.executable = cfg_params['ORCA.executable']
            log.debug(6, "Orca_pubdb::Orca_pubdb(): executable = "+self.executable)
            self.setParam_('exe', self.executable)
        except KeyError:
            msg = "Error: executable not defined "
            raise CrabException(msg)

        try:
            self.orcarc_file = cfg_params['ORCA.orcarc_file']
            log.debug(6, "Orca_pubdb::Orca_pubdb(): orcarc file = "+self.orcarc_file)
            if (not os.path.exists(self.orcarc_file)):
                raise CrabException("User defined .orcarc file "+self.orcarc_file+" does not exist")
        except KeyError:
            log.message("Using empty orcarc file")
            self.orcarc_file = ''

        # output files
        try:
            self.output_file = []

            tmp = cfg_params['ORCA.output_file']
            if tmp != '':
                tmpOutFiles = string.split(cfg_params['ORCA.output_file'],',')
                log.debug(7, 'Orca_pubdb::Orca_pubdb(): output files '+str(tmpOutFiles))
                for tmp in tmpOutFiles:
                    tmp=string.strip(tmp)
                    self.output_file.append(tmp)
                    pass
            else:
                log.message("No output file defined: only stdout/err will be available")
                pass
            pass
        except KeyError:
            log.message("No output file defined: only stdout/err will be available")
            pass

        # script_exe file as additional file in inputSandbox
        try:
            self.scriptExe = cfg_params['ORCA.script_exe']
            self.additional_inbox_files.append(self.scriptExe)
        except KeyError:
            pass
        if self.scriptExe != '':
            if os.path.isfile(self.scriptExe):
                pass
            else:
                log.message("WARNING. file "+self.scriptExe+" not found")
                sys.exit()

        ## additional input files
        try:
            tmpAddFiles = string.split(cfg_params['USER.additional_input_files'],',')
            for tmp in tmpAddFiles:
                tmp=string.strip(tmp)
                self.additional_inbox_files.append(tmp)
                pass
            pass
        except KeyError:
            pass

        try:
            self.total_number_of_events = int(cfg_params['ORCA.total_number_of_events'])
        except KeyError:
            msg = 'Must define total_number_of_events and job_number_of_events'
            raise CrabException(msg)

        try:
            self.first_event = int(cfg_params['ORCA.first_event'])
        except KeyError:
            self.first_event = 0
            pass
        log.debug(6, "Orca_pubdb::Orca_pubdb(): total number of events = "+`self.total_number_of_events`)
        #log.debug(6, "Orca_pubdb::Orca_pubdb(): events per job = "+`self.job_number_of_events`)
        log.debug(6, "Orca_pubdb::Orca_pubdb(): first event = "+`self.first_event`)

        self.maxEvents=0 # max events available in any PubDB
        self.connectPubDB(cfg_params)

        # [-- self.checkNevJobs() --]

        self.TarBaller = TarBall.TarBall(self.executable, self.scram)
        self.tgzNameWithPath = self.TarBaller.prepareTarBall()

        try:
            self.ML = int(cfg_params['USER.activate_monalisa'])
        except KeyError:
            self.ML = 0
            pass

        self.setTaskid_()
        self.setParam_('taskId', self.cfg_params['taskId'])

        return

    def split(self, jobParams):
        """
        This method returns the list of specific job type items
        needed to run the jobs
        """
        common.jobDB.load()
        njobs = self.total_number_of_jobs
        # create the empty structure
        for i in range(njobs):
            jobParams.append("")

        # fill both the list and the DB (part of the code taken from jobsToDB)
        firstEvent = self.first_event
        lastJobsNumberOfEvents = self.job_number_of_events
        # the last job is different...
        for job in range(njobs-1):
            jobParams[job] = [firstEvent, lastJobsNumberOfEvents]
            common.jobDB.setArguments(job, jobParams[job])
            firstEvent += self.job_number_of_events

        # this is the last job
        lastJobsNumberOfEvents = (self.total_number_of_events + self.first_event) - firstEvent
        status = common.jobDB.status(njobs - 1)
        jobParams[njobs - 1] = [firstEvent, lastJobsNumberOfEvents]
        common.jobDB.setArguments(njobs - 1, jobParams[njobs - 1])

        if (lastJobsNumberOfEvents!=self.job_number_of_events):
            common.logger.message(str(self.total_number_of_jobs-1)+' jobs will be created for '+str(self.job_number_of_events)+' events each plus 1 for '+str(lastJobsNumberOfEvents)+' events for a total of '+str(self.job_number_of_events*(self.total_number_of_jobs-1)+lastJobsNumberOfEvents)+' events')
        else:
            common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for '+str(self.job_number_of_events)+' events each for a total of '+str(self.job_number_of_events*(self.total_number_of_jobs-1)+lastJobsNumberOfEvents)+' events')

        common.jobDB.save()
        return

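    # Build the per-job argument string (first event and number of events;
    # for CONDOR the first target site is appended) according to the scheduler.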
    def getJobTypeArguments(self, nj, sched):
        params = common.jobDB.arguments(nj)

        if sched=="EDG" or sched=="GRID":
            parString = "" + str(params[0])+' '+str(params[1])
        elif sched=="BOSS":
            parString = "" + str(params[0])+' '+str(params[1])
        elif sched=="CONDOR":
            parString = "" + str(params[0])+' '+str(params[1])+' '+common.analisys_common_info['sites'][0]
        else:
            return ""
        return parString

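    # Work out the job splitting: compare the requested events with those
    # available in PubDB and derive the number of jobs from either
    # ORCA.job_number_of_events or ORCA.total_number_of_jobs.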
    def numberOfJobs(self):
        first_event = self.first_event
        maxAvailableEvents = int(self.maxEvents)
        common.logger.debug(1,"Available events: "+str(maxAvailableEvents))

        if first_event>=maxAvailableEvents:
            raise CrabException('First event is bigger than maximum number of available events!')

        try:
            n = self.total_number_of_events
            if n == 'all': n = '-1'
            if n == '-1':
                tot_num_events = (maxAvailableEvents - first_event)
                common.logger.debug(1,"Analysing all available events "+str(tot_num_events))
            else:
                if maxAvailableEvents<(int(n)+ first_event): # + self.first_event):
                    raise CrabException('(First event + total events)='+str(int(n)+first_event)+' is bigger than maximum number of available events '+str(maxAvailableEvents)+' !! Use "total_number_of_events=-1" to analyze the whole dataset')
                tot_num_events = int(n)
        except KeyError:
            common.logger.message("total_number_of_events not defined, set it to maximum available")
            tot_num_events = (maxAvailableEvents - first_event)
            pass
        common.logger.message("Total number of events to be analyzed: "+str(self.total_number_of_events))


        # read user directives
        eventPerJob=0
        try:
            eventPerJob = self.cfg_params['ORCA.job_number_of_events']
        except KeyError:
            pass

        jobsPerTask=0
        try:
            jobsPerTask = int(self.cfg_params['ORCA.total_number_of_jobs'])
        except KeyError:
            pass

        # If both of the above are set, complain and use events per job
        if eventPerJob>0 and jobsPerTask>0:
            msg = 'Warning. '
            msg += 'job_number_of_events and total_number_of_jobs are both defined. '
            msg += 'Using job_number_of_events.'
            common.logger.message(msg)
            jobsPerTask = 0
        if eventPerJob==0 and jobsPerTask==0:
            msg = 'Warning. '
            msg += 'job_number_of_events and total_number_of_jobs are not defined. '
            msg += 'Creating just one job for all events.'
            common.logger.message(msg)
            jobsPerTask = 1

        # first case: events per job defined
        if eventPerJob>0:
            n=eventPerJob
            #if n == 'all' or n == '-1' or (int(n)>self.total_number_of_events and self.total_number_of_events>0):
            if n == 'all' or n == '-1' or (int(n)>tot_num_events and tot_num_events>0):
                common.logger.message("Asking for more events than available: set it to maximum available")
                job_num_events = tot_num_events
                tot_num_jobs = 1
            else:
                job_num_events = int(n)
                tot_num_jobs = int((tot_num_events-1)/job_num_events)+1

        elif jobsPerTask>0:
            common.logger.debug(2,"total number of events: "+str(tot_num_events)+" JobPerTask "+str(jobsPerTask))
            job_num_events = int(math.floor((tot_num_events)/jobsPerTask))
            tot_num_jobs = jobsPerTask

        # should not happen...
        else:
            raise CrabException('Something wrong with splitting')

        common.logger.debug(2,"total number of events: "+str(tot_num_events)+" events per job: "+str(job_num_events))

        #used by jobsToDB for logs
        self.job_number_of_events = job_num_events
        self.total_number_of_jobs = tot_num_jobs
        return tot_num_jobs


    def jobsToDB(self, nJobs):
        """
        Fill the DB with proper entries for ORCA-DBS-DLS
        """

        firstEvent = self.first_event
        lastJobsNumberOfEvents = self.job_number_of_events

        # the last job is different...
        for job in range(nJobs-1):
            common.jobDB.setFirstEvent(job, firstEvent)
            common.jobDB.setMaxEvents(job, self.job_number_of_events)
            firstEvent=firstEvent+self.job_number_of_events

        # this is the last job
        common.jobDB.setFirstEvent(nJobs-1, firstEvent)
        lastJobsNumberOfEvents= (self.total_number_of_events+self.first_event)-firstEvent
        common.jobDB.setMaxEvents(nJobs-1, lastJobsNumberOfEvents)

        if (lastJobsNumberOfEvents!=self.job_number_of_events):
            common.logger.message(str(self.total_number_of_jobs-1)+' jobs will be created for '+str(self.job_number_of_events)+' events each plus 1 for '+str(lastJobsNumberOfEvents)+' events for a total of '+str(self.job_number_of_events*(self.total_number_of_jobs-1)+lastJobsNumberOfEvents)+' events')
        else:
            common.logger.message(str(self.total_number_of_jobs)+' jobs will be created for '+str(self.job_number_of_events)+' events each for a total of '+str(self.job_number_of_events*(self.total_number_of_jobs-1)+lastJobsNumberOfEvents)+' events')

        return


    def wsSetupEnvironment(self, nj):
        """
        Returns part of a job script which prepares
        the execution environment for the job 'nj'.
        """

        # Prepare JobType-independent part
        txt = ''

        ## OLI_Daniele at this level middleware already known

        txt += 'if [ $middleware == LCG ]; then \n'
        txt += self.wsSetupCMSLCGEnvironment_()
        txt += 'elif [ $middleware == OSG ]; then\n'
        txt += ' time=`date -u +"%s"`\n'
        txt += ' WORKING_DIR=$OSG_WN_TMP/cms_$time\n'
        txt += ' echo "Creating working directory: $WORKING_DIR"\n'
        txt += ' /bin/mkdir -p $WORKING_DIR\n'
        txt += ' if [ ! -d $WORKING_DIR ] ;then\n'
        txt += ' echo "SET_CMS_ENV 10016 ==> OSG $WORKING_DIR could not be created on WN `hostname`"\n'
        txt += ' echo "JOB_EXIT_STATUS = 10016"\n'
        txt += ' echo "JobExitCode=10016" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' dumpStatus $RUNTIME_AREA/$repo\n'
        txt += ' rm -f $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' exit 1\n'
        txt += ' fi\n'
        txt += '\n'
        txt += ' echo "Change to working directory: $WORKING_DIR"\n'
        txt += ' cd $WORKING_DIR\n'
        txt += self.wsSetupCMSOSGEnvironment_()
        txt += 'fi\n'

        # Prepare JobType-specific part
        scram = self.scram.commandName()
        txt += '\n\n'
        txt += 'echo "### SPECIFIC JOB SETUP ENVIRONMENT ###"\n'
        txt += scram+' project ORCA '+self.version+'\n'
        txt += 'status=$?\n'
        txt += 'if [ $status != 0 ] ; then\n'
        txt += ' echo "SET_EXE_ENV 10034 ==>ERROR ORCA '+self.version+' not found on `hostname`" \n'
        txt += ' echo "JOB_EXIT_STATUS = 10034"\n'
        txt += ' echo "JobExitCode=10034" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' dumpStatus $RUNTIME_AREA/$repo\n'
        txt += ' rm -f $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'

        ## OLI_Daniele
        txt += ' if [ $middleware == OSG ]; then \n'
        txt += ' \n'
        txt += ' echo "Remove working directory: $WORKING_DIR"\n'
        txt += ' cd $RUNTIME_AREA\n'
        txt += ' /bin/rm -rf $WORKING_DIR\n'
        txt += ' if [ -d $WORKING_DIR ] ;then\n'
        txt += ' echo "SET_CMS_ENV 10018 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after ORCA not found on `hostname`"\n'
        txt += ' echo "JOB_EXIT_STATUS = 10018"\n'
        txt += ' echo "JobExitCode=10018" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' dumpStatus $RUNTIME_AREA/$repo\n'
        txt += ' rm -f $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' fi\n'
        txt += ' fi \n'
        txt += ' \n'
        txt += ' exit 1\n'
        txt += 'fi \n'
        txt += 'echo "ORCA_VERSION = '+self.version+'"\n'
        txt += 'cd '+self.version+'\n'
        ### needed grep for bug in scramv1 ###

        #txt += 'eval `'+scram+' runtime -sh | grep -v SCRAMRT_LSB_JOBNAME`\n'

        # Handle the arguments:
        txt += "\n"
        txt += "## ARGUMENTS: $1 Job Number\n"
        txt += "## ARGUMENTS: $2 First Event for this job\n"
        txt += "## ARGUMENTS: $3 Max Event for this job\n"
        txt += "\n"
        txt += "narg=$#\n"
        txt += "NJob=$1\n"
        txt += "FirstEvent=$2\n"
        txt += "MaxEvents=$3\n"
        txt += "if [ $narg -lt 3 ]\n"
        txt += "then\n"
        txt += " echo 'SET_EXE_ENV 50113 ==> ERROR Too few arguments' +$narg+ \n"
        txt += ' echo "JOB_EXIT_STATUS = 50113"\n'
        txt += ' echo "JobExitCode=50113" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' dumpStatus $RUNTIME_AREA/$repo\n'
        txt += ' rm -f $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'

        ## OLI_Daniele
        txt += ' if [ $middleware == OSG ]; then \n'
        txt += ' echo "Remove working directory: $WORKING_DIR"\n'
        txt += ' cd $RUNTIME_AREA\n'
        txt += ' /bin/rm -rf $WORKING_DIR\n'
        txt += ' if [ -d $WORKING_DIR ] ;then\n'
        txt += ' echo "SET_EXE_ENV 50114 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after Too few arguments for CRAB job wrapper"\n'
        txt += ' echo "JOB_EXIT_STATUS = 50114"\n'
        txt += ' echo "JobExitCode=50114" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' dumpStatus $RUNTIME_AREA/$repo\n'
        txt += ' rm -f $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' fi\n'
        txt += ' fi \n'
        txt += ' \n'
        txt += " exit 1\n"
        txt += "fi\n"
        txt += "\n"

        # Prepare job-specific part
        job = common.job_list[nj]
        orcarc = os.path.basename(job.configFilename())
        txt += '\n'
        txt += 'cp $RUNTIME_AREA/'+orcarc+' .orcarc\n'
        txt += 'if [ -e $RUNTIME_AREA/orcarc_$CE ] ; then\n'
        txt += ' cat $RUNTIME_AREA/orcarc_$CE .orcarc >> .orcarc_tmp\n'
        txt += ' mv .orcarc_tmp .orcarc\n'
        txt += 'fi\n'
        txt += 'if [ -e $RUNTIME_AREA/init_$CE.sh ] ; then\n'
        txt += ' cp $RUNTIME_AREA/init_$CE.sh init.sh\n'
        txt += 'fi\n'

        if len(self.additional_inbox_files) > 0:
            for file in self.additional_inbox_files:
                file = os.path.basename(file)
                txt += 'if [ -e $RUNTIME_AREA/'+file+' ] ; then\n'
                txt += ' cp $RUNTIME_AREA/'+file+' .\n'
                txt += ' chmod +x '+file+'\n'
                txt += 'fi\n'
            pass

        txt += '\n'
        txt += 'chmod +x ./init.sh\n'
        txt += './init.sh\n'
        txt += 'exitStatus=$?\n'
        txt += 'if [ $exitStatus != 0 ] ; then\n'
        txt += ' echo "SET_EXE_ENV 20001 ==> ERROR StageIn init script failed"\n'
        txt += ' echo "JOB_EXIT_STATUS = $exitStatus" \n'
        txt += ' echo "JobExitCode=20001" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' dumpStatus $RUNTIME_AREA/$repo\n'
        txt += ' rm -f $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'

        ### OLI_DANIELE
        txt += ' if [ $middleware == OSG ]; then \n'
        txt += ' echo "Remove working directory: $WORKING_DIR"\n'
        txt += ' cd $RUNTIME_AREA\n'
        txt += ' /bin/rm -rf $WORKING_DIR\n'
        txt += ' if [ -d $WORKING_DIR ] ;then\n'
        txt += ' echo "SET_EXE 10012 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after StageIn init script failed"\n'
        txt += ' echo "JOB_EXIT_STATUS = 10012"\n'
        txt += ' echo "JobExitCode=10012" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' dumpStatus $RUNTIME_AREA/$repo\n'
        txt += ' rm -f $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' fi\n'
        txt += ' fi \n'
        txt += ' exit 1\n'
        txt += 'fi\n'
        txt += "echo 'SET_EXE_ENV 0 ==> job setup ok'\n"
        txt += 'echo "### END JOB SETUP ENVIRONMENT ###"\n\n'

        txt += 'echo "FirstEvent=$FirstEvent" >> .orcarc\n'
        txt += 'echo "MaxEvents=$MaxEvents" >> .orcarc\n'
        if self.ML:
            txt += 'echo "MonalisaJobId=$NJob" >> .orcarc\n'

        txt += '\n'
        txt += 'echo "***** cat .orcarc *********"\n'
        txt += 'cat .orcarc\n'
        txt += 'echo "****** end .orcarc ********"\n'
        return txt

    def wsBuildExe(self, nj):
        """
        Put in the script the commands to build an executable
        or a library.
        """

        txt = ""

        if os.path.isfile(self.tgzNameWithPath):
            txt += 'echo "tar xzvf $RUNTIME_AREA/'+os.path.basename(self.tgzNameWithPath)+'"\n'
            txt += 'tar xzvf $RUNTIME_AREA/'+os.path.basename(self.tgzNameWithPath)+'\n'
            txt += 'untar_status=$? \n'
            txt += 'if [ $untar_status -ne 0 ]; then \n'
            txt += ' echo "SET_EXE 1 ==> ERROR Untarring .tgz file failed"\n'
            txt += ' echo "JOB_EXIT_STATUS = $untar_status" \n'
            txt += ' echo "JobExitCode=$untar_status" | tee -a $repo\n'

            ### OLI_DANIELE
            txt += ' if [ $middleware == OSG ]; then \n'
            txt += ' echo "Remove working directory: $WORKING_DIR"\n'
            txt += ' cd $RUNTIME_AREA\n'
            txt += ' /bin/rm -rf $WORKING_DIR\n'
            txt += ' if [ -d $WORKING_DIR ] ;then\n'
            txt += ' echo "SET_EXE 50999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after Untarring .tgz file failed"\n'
            txt += ' echo "JOB_EXIT_STATUS = 50999"\n'
            txt += ' echo "JobExitCode=50999" | tee -a $RUNTIME_AREA/$repo\n'
            txt += ' dumpStatus $RUNTIME_AREA/$repo\n'
            txt += ' rm -f $RUNTIME_AREA/$repo \n'
            txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
            txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
            txt += ' fi\n'
            txt += ' fi \n'
            txt += ' \n'
            txt += ' exit 1 \n'
            txt += 'else \n'
            txt += ' echo "Successful untar" \n'
            txt += 'fi \n'
            # TODO: what does this code do here ?
            # SL check that lib/Linux__... is present
            txt += 'mkdir -p lib/${SCRAM_ARCH} \n'
            pass
        txt += 'eval `'+self.scram.commandName()+' runtime -sh |grep -v SCRAMRT_LSB_JOBNAME`'+'\n'

        return txt

    def wsRenameOutput(self, nj):
        """
        Returns part of a job script which renames the produced files.
        """

        txt = '\n'
        txt += '# directory content\n'
        txt += 'ls \n'
        file_list = ''
        for fileWithSuffix in self.output_file:
            output_file_num = self.numberFile_(fileWithSuffix, '$NJob')
            file_list=file_list+output_file_num+' '
            txt += '\n'
            txt += 'ls '+fileWithSuffix+'\n'
            txt += 'exe_result=$?\n'
            txt += 'if [ $exe_result -ne 0 ] ; then\n'
            txt += ' echo "ERROR: No output file to manage"\n'
            txt += ' echo "JOB_EXIT_STATUS = $exe_result"\n'
            txt += ' echo "JobExitCode=60302" | tee -a $RUNTIME_AREA/$repo\n'
            txt += ' dumpStatus $RUNTIME_AREA/$repo\n'
            txt += ' rm -f $RUNTIME_AREA/$repo \n'
            txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
            txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'

            ### OLI_DANIELE
            if common.scheduler.boss_scheduler_name == 'condor_g':
                txt += ' if [ $middleware == OSG ]; then \n'
                txt += ' echo "prepare dummy output file"\n'
                txt += ' echo "Processing of job output failed" > $RUNTIME_AREA/'+output_file_num+'\n'
                txt += ' fi \n'
            txt += 'else\n'
            txt += ' cp '+fileWithSuffix+' $RUNTIME_AREA/'+output_file_num+'\n'
            txt += 'fi\n'

            pass


        txt += 'cd $RUNTIME_AREA\n'
        file_list=file_list[:-1]
        txt += 'file_list="'+file_list+'"\n'
        ### OLI_DANIELE
        txt += 'if [ $middleware == OSG ]; then\n'
        txt += ' cd $RUNTIME_AREA\n'
        txt += ' echo "Remove working directory: $WORKING_DIR"\n'
        txt += ' /bin/rm -rf $WORKING_DIR\n'
        txt += ' if [ -d $WORKING_DIR ] ;then\n'
        txt += ' echo "SET_EXE 60999 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after cleanup of WN"\n'
        txt += ' echo "JOB_EXIT_STATUS = 60999"\n'
        txt += ' echo "JobExitCode=60999" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' dumpStatus $RUNTIME_AREA/$repo\n'
        txt += ' rm -f $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' fi\n'
        txt += 'fi\n'
        txt += '\n'

        return txt

    def executableName(self):
        if self.scriptExe != '':
            return "./" + os.path.basename(self.scriptExe)
        else:
            return self.executable

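    # Query PubDB (or reuse the cached PubDBSummaryFile) to build the orcarc
    # fragments, determine the maximum number of available events and collect
    # the list of CEs hosting the data.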
    def connectPubDB(self, cfg_params):

        fun = "Orca_pubdb::connectPubDB()"

        self.allOrcarcs = []
        # first check if the info from PubDB has already been processed
        if os.path.exists(common.work_space.shareDir()+'PubDBSummaryFile') :
            common.logger.debug(6, fun+": info from PubDB has been already processed -- use it")
            f = open( common.work_space.shareDir()+'PubDBSummaryFile', 'r' )
            for i in f.readlines():
                a=string.split(i,' ')
                self.allOrcarcs.append(orcarcBuilderOld.constructFromFile(a[0:-1]))
                pass
            for o in self.allOrcarcs:
                # o.dump()
                if o.Nevents >= self.maxEvents:
                    self.maxEvents= o.Nevents
                    pass
                pass
            pass

        else: # PubDB never queried
            common.logger.debug(6, fun+": PubDB was never queried -- do it")
            # New PubDB class by SL
            try:
                self.pubdb = PubDB.PubDB(self.owner,
                                         self.dataset,
                                         self.dataTiers,
                                         cfg_params)
            except PubDB.RefDBmapError:
                msg = 'ERROR ***: accessing PubDB'
                raise CrabException(msg)

            ## extract info from pubDB (grouped by PubDB version :
            ## pubDBData contains a list of info for the new-style PubDBs,
            ## and a list of info for the old-style PubDBs )
            self.pubDBData = self.pubdb.getAllPubDBData()

            ## check and exit if no data are published in any PubDB
            nodata=1
            for PubDBversion in self.pubDBData.keys():
                if len(self.pubDBData[PubDBversion])>0:
                    nodata=0
            if (nodata):
                msg = 'Owner '+self.owner+' Dataset '+ self.dataset+ ' not published in any PubDB with asked dataTiers '+string.join(self.dataTiers,'-')+' ! '
                raise CrabException(msg)

            ## logging PubDB content for debugging
            for PubDBversion in self.pubDBData.keys():
                common.logger.debug(6, fun+": PubDB "+PubDBversion+" info ("+`len(self.pubDBData[PubDBversion])`+"):\/")
                for aa in self.pubDBData[PubDBversion]:
                    common.logger.debug(6, "---------- start of a PubDB")
                    for bb in aa:
                        if common.logger.debugLevel() >= 6 :
                            common.logger.debug(6, str(bb.dump()))
                            pass
                        pass
                    common.logger.debug(6, "----------- end of a PubDB")
                common.logger.debug(6, fun+": End of PubDB "+PubDBversion+" info\n")


            ## building orcarc : switch between info from old and new-style PubDB
            currDir = os.getcwd()
            os.chdir(common.work_space.jobDir())

            tmpOrcarcList=[]
            for PubDBversion in self.pubDBData.keys():
                if len(self.pubDBData[PubDBversion])>0 :
                    #print (" PubDB-style : %s"%(PubDBversion))
                    if PubDBversion=='newPubDB' :
                        self.builder = orcarcBuilder.orcarcBuilder(cfg_params)
                    else :
                        self.builder = orcarcBuilderOld.orcarcBuilderOld()
                    tmpAllOrcarcs = self.builder.createOrcarcAndInit(self.pubDBData[PubDBversion])
                    tmpOrcarcList.append(tmpAllOrcarcs)
                    #print 'version ',PubDBversion,' tmpAllOrcarcs ', tmpAllOrcarcs

            #print tmpOrcarcList
            os.chdir(currDir)

            self.maxEvents=0
            for tmpAllOrcarcs in tmpOrcarcList:
                for o in tmpAllOrcarcs:
                    numEvReq=self.total_number_of_events
                    if ((numEvReq == '-1') | (numEvReq <= o.Nevents)):
                        self.allOrcarcs.append(o)
                        if (int(o.Nevents) >= self.maxEvents):
                            self.maxEvents= int(o.Nevents)
                            pass
                        pass
                    pass

            # set maximum number of events available

            # save self.allOrcarcs to a file

            PubDBSummaryFile = open(common.work_space.shareDir()+'PubDBSummaryFile','w')
            for o in self.allOrcarcs:
                for d in o.content():
                    PubDBSummaryFile.write(d)
                    PubDBSummaryFile.write(' ')
                    pass
                PubDBSummaryFile.write('\n')
                pass
            PubDBSummaryFile.close()
            ### fede
            #for o in self.allOrcarcs:
            #    o.dump()
            pass

        # build a list of sites
        ces= []
        for o in self.allOrcarcs:
            ces.append(o.CE)
            pass

        if len(ces)==0:
            msg = 'No PubDBs publish correct catalogs or enough events! '
            msg += `self.total_number_of_events`
            raise CrabException(msg)

        common.logger.debug(6, "List of CEs: "+str(ces))
        common.analisys_common_info['sites'] = ces
        self.setParam_('TargetCE', ','.join(ces))

        return

    def nJobs(self):
        # TODO: should not be here !
        # JobType should have no internal knowledge about submitted jobs
        # One possibility is to use len(common.job_list).
        """ return the number of jobs to be created """
        return len(common.job_list)

    def prepareSteeringCards(self):
        """
        modify the orcarc card provided by the user,
        writing a new card into share dir
        """
        infile = ''
        try:
            infile = open(self.orcarc_file,'r')
        except:
            self.orcarc_file = 'empty.orcarc'
            cmd='touch '+self.orcarc_file
            runCommand(cmd)
            infile = open(self.orcarc_file,'r')

        outfile = open(common.work_space.jobDir()+self.name()+'.orcarc', 'w')

        inline=infile.readlines()
        ### remove these lines from the user card ###
        wordRemove=['InputFileCatalogURL', 'InputCollections', 'FirstEvent', 'MaxEvents', 'TFileAdaptor', 'MonRecAlisaBuilder']
        for line in inline:
            word = string.strip(string.split(line,'=')[0])

            if word not in wordRemove:
                outfile.write(line)
            else:
                continue
            pass

        outfile.write('\n\n##### The following cards have been created by CRAB: DO NOT TOUCH #####\n')
        outfile.write('TFileAdaptor = true\n')

        outfile.write('MonRecAlisaBuilder=false\n')

        outfile.write('InputCollections=/System/'+self.owner+'/'+self.dataset+'/'+self.dataset+'\n')

        infile.close()
        outfile.close()
        return

    def modifySteeringCards(self, nj):
        """
        Creates the steering cards file by modifying a template file
        """
        return

    def cardsBaseName(self):
        """
        Returns the name of the user orcarc card-file
        """
        return os.path.split (self.orcarc_file)[1]

    ### content of input_sandbox ...
    def inputSandbox(self, nj):
        """
        Returns a list of filenames to be put in JDL input sandbox.
        """
        inp_box = []
        # dict used to remove duplicates from the input sandbox file list
        seen = {}
        ## code
        if os.path.isfile(self.tgzNameWithPath):
            inp_box.append(self.tgzNameWithPath)
        ## orcarc
        for o in self.allOrcarcs:
            for f in o.fileList():
                if (f not in seen.keys()):
                    inp_box.append(common.work_space.jobDir()+f)
                    seen[f] = 1

        ## config
        inp_box.append(common.job_list[nj].configFilename())
        ## additional input files
        #inp_box = inp_box + self.additional_inbox_files
        return inp_box

    ### and of output_sandbox
    def outputSandbox(self, nj):
        """
        Returns a list of filenames to be put in JDL output sandbox.
        """
        out_box = []

        stdout=common.job_list[nj].stdout()
        stderr=common.job_list[nj].stderr()
        #out_box.append(stdout)
        #out_box.append(stderr)

        ## User Declared output files
        for out in self.output_file:
            n_out = nj + 1
            #FEDE
            #out_box.append(self.version+'/'+self.numberFile_(out,str(n_out)))
            out_box.append(self.numberFile_(out,str(n_out)))
        return out_box

    def getRequirements(self):
        """
        return job requirements to add to jdl files
        """
        req = ''
        if common.analisys_common_info['sites']:
            if common.analisys_common_info['sw_version']:
                req='Member("VO-cms-' + \
                    common.analisys_common_info['sw_version'] + \
                    '", other.GlueHostApplicationSoftwareRunTimeEnvironment)'
            if len(common.analisys_common_info['sites'])>0:
                req = req + ' && ('
                for i in range(len(common.analisys_common_info['sites'])):
                    req = req + 'other.GlueCEInfoHostName == "' \
                        + common.analisys_common_info['sites'][i] + '"'
                    if ( i < (int(len(common.analisys_common_info['sites']) - 1)) ):
                        req = req + ' || '
                req = req + ')'
        #print "req = ", req
        return req

    def numberFile_(self, file, txt):
        """
        append _'txt' before last extension of a file
        """
        p = string.split(file,".")
        # take away last extension
        name = p[0]
        for x in p[1:-1]:
            name=name+"."+x
        # add "_txt"
        if len(p)>1:
            ext = p[len(p)-1]
            #result = name + '_' + str(txt) + "." + ext
            result = name + '_' + txt + "." + ext
        else:
            #result = name + '_' + str(txt)
            result = name + '_' + txt

        return result


    def stdOut(self):
        return self.stdOut_

    def stdErr(self):
        return self.stdErr_

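    # Task-level parameters (application, owner, dataset, exe, taskId, TargetCE)
    # are collected via setParam_ and exposed through getParams().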
    # marco
    def setParam_(self, param, value):
        self._params[param] = value

    def getParams(self):
        return self._params

    def setTaskid_(self):
        self._taskId = self.cfg_params['taskId']

    def getTaskid(self):
        return self._taskId
    # marco

    def configFilename(self):
        """ return the config filename """
        return self.name()+'.orcarc'

    ### OLI_DANIELE
    def wsSetupCMSOSGEnvironment_(self):
        """
        Returns part of a job script which prepares
        the execution environment and which is common for all CMS jobs.
        """
        txt = '\n'
        txt += ' echo "### SETUP CMS OSG ENVIRONMENT ###"\n'
        txt += ' if [ -f $GRID3_APP_DIR/cmssoft/cmsset_default.sh ] ;then\n'
        txt += ' # Use $GRID3_APP_DIR/cmssoft/cmsset_default.sh to setup cms software\n'
        txt += ' source $GRID3_APP_DIR/cmssoft/cmsset_default.sh '+self.version+'\n'
        txt += ' elif [ -f $OSG_APP/cmssoft/cmsset_default.sh ] ;then\n'
        txt += ' # Use $OSG_APP/cmssoft/cmsset_default.sh to setup cms software\n'
        txt += ' source $OSG_APP/cmssoft/cmsset_default.sh '+self.version+'\n'
        txt += ' else\n'
        txt += ' echo "SET_CMS_ENV 10020 ==> ERROR $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cmsset_default.sh file not found"\n'
        txt += ' echo "JOB_EXIT_STATUS = 10020"\n'
        txt += ' echo "JobExitCode=10020" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' dumpStatus $RUNTIME_AREA/$repo\n'
        txt += ' rm -f $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' exit 1\n'
        txt += '\n'
        txt += ' echo "Remove working directory: $WORKING_DIR"\n'
        txt += ' cd $RUNTIME_AREA\n'
        txt += ' /bin/rm -rf $WORKING_DIR\n'
        txt += ' if [ -d $WORKING_DIR ] ;then\n'
        txt += ' echo "SET_CMS_ENV 10017 ==> OSG $WORKING_DIR could not be deleted on WN `hostname` after $GRID3_APP_DIR/cmssoft/cmsset_default.sh and $OSG_APP/cmssoft/cmsset_default.sh file not found"\n'
        txt += ' echo "JOB_EXIT_STATUS = 10017"\n'
        txt += ' echo "JobExitCode=10017" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' dumpStatus $RUNTIME_AREA/$repo\n'
        txt += ' rm -f $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' fi\n'
        txt += '\n'
        txt += ' exit 1\n'
        txt += ' fi\n'
        txt += '\n'
        txt += ' echo "SET_CMS_ENV 0 ==> setup cms environment ok"\n'
        txt += ' echo " END SETUP CMS OSG ENVIRONMENT "\n'

        return txt

    ### OLI_DANIELE
    def wsSetupCMSLCGEnvironment_(self):
        """
        Returns part of a job script which prepares
        the execution environment and which is common for all CMS jobs.
        """
        txt = ' \n'
        txt += ' echo " ### SETUP CMS LCG ENVIRONMENT ### "\n'
        txt += ' echo "JOB_EXIT_STATUS = 0"\n'
        txt += ' if [ ! $VO_CMS_SW_DIR ] ;then\n'
        txt += ' echo "SET_CMS_ENV 10031 ==> ERROR CMS software dir not found on WN `hostname`"\n'
        txt += ' echo "JOB_EXIT_STATUS = 10031" \n'
        txt += ' echo "JobExitCode=10031" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' dumpStatus $RUNTIME_AREA/$repo\n'
        txt += ' rm -f $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' exit 1\n'
        txt += ' else\n'
        txt += ' echo "Sourcing environment... "\n'
        txt += ' if [ ! -s $VO_CMS_SW_DIR/cmsset_default.sh ] ;then\n'
        txt += ' echo "SET_CMS_ENV 10020 ==> ERROR cmsset_default.sh file not found into dir $VO_CMS_SW_DIR"\n'
        txt += ' echo "JOB_EXIT_STATUS = 10020"\n'
        txt += ' echo "JobExitCode=10020" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' dumpStatus $RUNTIME_AREA/$repo\n'
        txt += ' rm -f $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' exit 1\n'
        txt += ' fi\n'
        txt += ' echo "sourcing $VO_CMS_SW_DIR/cmsset_default.sh"\n'
        txt += ' source $VO_CMS_SW_DIR/cmsset_default.sh\n'
        txt += ' result=$?\n'
        txt += ' if [ $result -ne 0 ]; then\n'
        txt += ' echo "SET_CMS_ENV 10032 ==> ERROR problem sourcing $VO_CMS_SW_DIR/cmsset_default.sh"\n'
        txt += ' echo "JOB_EXIT_STATUS = 10032"\n'
        txt += ' echo "JobExitCode=10032" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' dumpStatus $RUNTIME_AREA/$repo\n'
        txt += ' rm -f $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' exit 1\n'
        txt += ' fi\n'
        txt += ' fi\n'
        txt += ' \n'
        txt += ' string=`cat /etc/redhat-release`\n'
        txt += ' echo $string\n'
        txt += ' if [[ $string = *alhalla* ]]; then\n'
        txt += ' echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
        txt += ' elif [[ $string = *Enterprise* ]] || [[ $string = *cientific* ]]; then\n'
        txt += ' export SCRAM_ARCH=slc3_ia32_gcc323\n'
        txt += ' echo "SCRAM_ARCH= $SCRAM_ARCH"\n'
        txt += ' else\n'
        txt += ' echo "SET_CMS_ENV 1 ==> ERROR OS unknown, LCG environment not initialized"\n'
        txt += ' echo "JOB_EXIT_STATUS = 10033"\n'
        txt += ' echo "JobExitCode=10033" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' dumpStatus $RUNTIME_AREA/$repo\n'
        txt += ' rm -f $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorJobID=`echo $MonitorJobID`" | tee -a $RUNTIME_AREA/$repo \n'
        txt += ' echo "MonitorID=`echo $MonitorID`" | tee -a $RUNTIME_AREA/$repo\n'
        txt += ' exit 1\n'
        txt += ' fi\n'
        txt += ' echo "SET_CMS_ENV 0 ==> setup cms environment ok"\n'
        txt += ' echo "### END SETUP CMS LCG ENVIRONMENT ###"\n'
        return txt