5 |
|
from SchedulerGrid import SchedulerGrid |
6 |
|
from crab_exceptions import CrabException |
7 |
|
from crab_util import runCommand |
8 |
– |
#from WMCore.SiteScreening.BlackWhiteListParser import CEBlackWhiteListParser |
8 |
|
from WMCore.SiteScreening.BlackWhiteListParser import SEBlackWhiteListParser |
9 |
< |
|
9 |
> |
import Scram |
10 |
|
|
11 |
|
|
12 |
|
import common |
13 |
|
import os |
14 |
|
import socket |
15 |
+ |
import re |
16 |
|
|
17 |
|
# FUTURE: for python 2.4 & 2.6 |
18 |
|
try: |
30 |
|
|
31 |
|
def __init__(self):
    """Initialize the RCONDOR scheduler.

    Requires the RCONDOR_HOST environment variable: it names the remote
    condor host, later used (e.g.) to build the Glidein_MonitorID and
    passed to the Boss scheduler via the params dictionary.

    Raises:
        CrabException: if RCONDOR_HOST is not defined in the environment.
    """
    SchedulerGrid.__init__(self, "RCONDOR")

    # Fail fast if the mandatory remote-host setting is missing.
    self.rcondorHost = os.getenv('RCONDOR_HOST')
    # fix: compare to the None singleton with 'is', not '=='
    if self.rcondorHost is None:
        raise CrabException('FATAL ERROR: env.var RCONDOR_HOST not defined')

    self.datasetPath = None
    self.selectNoInput = None
    self.OSBsize = 50*1000*1000  # output sandbox size cap: 50 MB

    # Set later (by the framework) to tag jobs; None until then.
    self.environment_unique_identifier = None
    return
43 |
|
|
44 |
|
|
52 |
|
self.proxyValid=0 |
53 |
|
self.dontCheckProxy=int(cfg_params.get("GRID.dont_check_proxy",0)) |
54 |
|
self.space_token = cfg_params.get("USER.space_token",None) |
55 |
< |
try: |
52 |
< |
self.proxyServer = Downloader("http://cmsdoc.cern.ch/cms/LCG/crab/config/").config("myproxy_server.conf") |
53 |
< |
self.proxyServer = self.proxyServer.strip() |
54 |
< |
if self.proxyServer is None: |
55 |
< |
raise CrabException("myproxy_server.conf retrieved but empty") |
56 |
< |
except Exception, e: |
57 |
< |
common.logger.info("Problem setting myproxy server endpoint: using myproxy.cern.ch") |
58 |
< |
common.logger.debug(e) |
59 |
< |
self.proxyServer= 'myproxy.cern.ch' |
55 |
> |
self.proxyServer= 'myproxy.cern.ch' |
56 |
|
self.group = cfg_params.get("GRID.group", None) |
57 |
|
self.role = cfg_params.get("GRID.role", None) |
58 |
|
self.VO = cfg_params.get('GRID.virtual_organization','cms') |
59 |
< |
|
59 |
> |
|
60 |
|
try: |
61 |
|
tmp = cfg_params['CMSSW.datasetpath'] |
62 |
|
if tmp.lower() == 'none': |
69 |
|
msg = "Error: datasetpath not defined " |
70 |
|
raise CrabException(msg) |
71 |
|
|
72 |
+ |
if cfg_params.get('GRID.ce_black_list', None) or \ |
73 |
+ |
cfg_params.get('GRID.ce_white_list', None) : |
74 |
+ |
msg="BEWARE: scheduler RGLIDEIN ignores CE black/white lists." |
75 |
+ |
msg+="\n Remove them from crab configuration to proceed." |
76 |
+ |
msg+="\n Use GRID.se_white_list and/or GRID.se_black_list instead" |
77 |
+ |
raise CrabException(msg) |
78 |
+ |
|
79 |
|
self.checkProxy() |
80 |
|
|
81 |
|
return |
101 |
|
|
102 |
|
seDest = task.jobs[i-1]['dlsDestination'] |
103 |
|
|
104 |
+ |
if seDest == [''] : |
105 |
+ |
seDest = self.blackWhiteListParser.expandList("T") # all of SiteDB |
106 |
+ |
|
107 |
|
seString=self.blackWhiteListParser.cleanForBlackWhiteList(seDest) |
102 |
– |
print "SB: SE destinations AfterBlak and White: ", seString |
108 |
|
|
109 |
|
jobParams += '+DESIRED_SEs = "'+seString+'"; ' |
110 |
|
|
111 |
+ |
scram = Scram.Scram(None) |
112 |
+ |
cmsVersion = scram.getSWVersion() |
113 |
+ |
scramArch = scram.getArch() |
114 |
+ |
|
115 |
+ |
cmsver=re.split('_', cmsVersion) |
116 |
+ |
numericCmsVersion = "%s%.2d%.2d" %(cmsver[1], int(cmsver[2]), int(cmsver[3])) |
117 |
+ |
|
118 |
+ |
jobParams += '+DESIRED_CMSVersion ="' +cmsVersion+'";' |
119 |
+ |
jobParams += '+DESIRED_CMSVersionNr ="' +numericCmsVersion+'";' |
120 |
+ |
jobParams += '+DESIRED_CMSScramArch ="' +scramArch+'";' |
121 |
+ |
|
122 |
+ |
myschedName = self.rcondorHost |
123 |
+ |
jobParams += '+Glidein_MonitorID = "https://'+ myschedName + '//$(Cluster).$(Process)"; ' |
124 |
+ |
|
125 |
|
if (self.EDG_clock_time): |
126 |
|
jobParams += '+MaxWallTimeMins = '+self.EDG_clock_time+'; ' |
127 |
|
else: |
138 |
|
Return dictionary with specific parameters, to use with real scheduler |
139 |
|
is called when scheduler is initialized in Boss, i.e. at each crab command |
140 |
|
""" |
141 |
+ |
#SB this method is used to pass directory names to Boss Scheduler |
142 |
+ |
# via params dictionary |
143 |
|
|
123 |
– |
tmpDir = os.path.join(common.work_space.shareDir(),'.condor_temp') |
124 |
– |
tmpDir = os.path.join(common.work_space.shareDir(),'.condor_temp') |
144 |
|
jobDir = common.work_space.jobDir() |
126 |
– |
|
145 |
|
taskDir=common.work_space.topDir().split('/')[-2] |
146 |
< |
rcondorDir = "/afs/cern.ch/user/b/belforte/w0/crabtest/rc/igor/" |
147 |
< |
rcondorDir ='%s/.rcondor/mount/' % os.getenv('HOME') |
130 |
< |
tmpDir = os.path.join(rcondorDir,taskDir) |
131 |
< |
tmpDir = os.path.join(tmpDir,'condor_temp') |
146 |
> |
shareDir = common.work_space.shareDir() |
147 |
> |
#SBtmpDir = common.work_space.tmpDir() |
148 |
|
|
149 |
< |
params = {'tmpDir':tmpDir, |
150 |
< |
'jobDir':jobDir} |
149 |
> |
params = {'rcondorHost':self.rcondorHost, |
150 |
> |
'shareDir':shareDir, |
151 |
> |
#SB'tmpDir':tmpDir, |
152 |
> |
'jobDir':jobDir, |
153 |
> |
'taskDir':taskDir} |
154 |
|
|
155 |
|
return params |
156 |
|
|
196 |
|
txt += 'func_exit() { \n' |
197 |
|
txt += self.wsExitFunc_common() |
198 |
|
|
180 |
– |
#txt += ' cp ${out_files}.tgz $_CONDOR_SCRATCH_DIR/\n' |
181 |
– |
#txt += ' cp CMSSW_$NJob.stdout $_CONDOR_SCRATCH_DIR/\n' |
182 |
– |
#txt += ' cp CMSSW_$NJob.stderr $_CONDOR_SCRATCH_DIR/\n' |
183 |
– |
#txt += ' cp Watchdog_$NJob.log.gz $_CONDOR_SCRATCH_DIR/\n' |
184 |
– |
#txt += ' cp crab_fjr_$NJob.xml $_CONDOR_SCRATCH_DIR/\n' |
185 |
– |
|
186 |
– |
|
187 |
– |
### specific Glite check for OSB |
199 |
|
txt += ' tar zcvf ${out_files}.tgz ${final_list}\n' |
200 |
|
txt += ' tmp_size=`ls -gGrta ${out_files}.tgz | awk \'{ print $3 }\'`\n' |
201 |
|
txt += ' rm ${out_files}.tgz\n' |
239 |
|
common._db.updateTask_(taskReq) |
240 |
|
|
241 |
|
|
231 |
– |
# presa di brutto da SchedulerGrid.py |
232 |
– |
""" |
233 |
– |
|
234 |
– |
def wsSetupEnvironment(self): |
235 |
– |
|
236 |
– |
#Returns part of a job script which does scheduler-specific work. |
237 |
– |
|
238 |
– |
taskId =common._db.queryTask('name') |
239 |
– |
index = int(common._db.nJobs()) |
240 |
– |
job = common.job_list[index-1] |
241 |
– |
jbt = job.type() |
242 |
– |
if not self.environment_unique_identifier: |
243 |
– |
try : |
244 |
– |
self.environment_unique_identifier = self.envUniqueID() |
245 |
– |
except : |
246 |
– |
raise CrabException('environment_unique_identifier not set') |
247 |
– |
|
248 |
– |
# start with wrapper timing |
249 |
– |
txt = 'export TIME_WRAP_INI=`date +%s` \n' |
250 |
– |
txt += 'export TIME_STAGEOUT=-2 \n\n' |
251 |
– |
txt += '# '+self.name()+' specific stuff\n' |
252 |
– |
txt += '# strip arguments\n' |
253 |
– |
txt += 'echo "strip arguments"\n' |
254 |
– |
txt += 'args=("$@")\n' |
255 |
– |
txt += 'nargs=$#\n' |
256 |
– |
txt += 'shift $nargs\n' |
257 |
– |
txt += "# job number (first parameter for job wrapper)\n" |
258 |
– |
txt += "NJob=${args[0]}; export NJob\n" |
259 |
– |
txt += "NResub=${args[1]}; export NResub\n" |
260 |
– |
txt += "NRand=`getRandSeed`; export NRand\n" |
261 |
– |
# append random code |
262 |
– |
txt += 'OutUniqueID=_$NRand\n' |
263 |
– |
txt += 'OutUniqueID=_$NResub$OutUniqueID\n' |
264 |
– |
txt += 'OutUniqueID=$NJob$OutUniqueID; export OutUniqueID\n' |
265 |
– |
txt += 'CRAB_UNIQUE_JOB_ID=%s_${OutUniqueID}; export CRAB_UNIQUE_JOB_ID\n' % taskId |
266 |
– |
txt += 'echo env var CRAB_UNIQUE_JOB_ID set to: ${CRAB_UNIQUE_JOB_ID}\n' |
267 |
– |
# if we want to prepend |
268 |
– |
#txt += 'OutUniqueID=_$NResub\n' |
269 |
– |
#txt += 'OutUniqueID=_$NJob$OutUniqueID\n' |
270 |
– |
#txt += 'OutUniqueID=$NRand$OutUniqueID; export OutUniqueID\n' |
271 |
– |
|
272 |
– |
txt += "out_files=out_files_${NJob}; export out_files\n" |
273 |
– |
txt += "echo $out_files\n" |
274 |
– |
txt += jbt.outList() |
275 |
– |
# txt += 'if [ $JobRunCount ] && [ `expr $JobRunCount - 1` -gt 0 ] && [ $Glidein_MonitorID ]; then \n' |
276 |
– |
txt += 'if [ $Glidein_MonitorID ]; then \n' |
277 |
– |
# txt += ' attempt=`expr $JobRunCount - 1` \n' |
278 |
– |
# txt += ' MonitorJobID=${NJob}_${Glidein_MonitorID}__${attempt}\n' |
279 |
– |
# txt += ' SyncGridJobId=${Glidein_MonitorID}__${attempt}\n' |
280 |
– |
txt += ' MonitorJobID=${NJob}_${Glidein_MonitorID}\n' |
281 |
– |
txt += ' SyncGridJobId=${Glidein_MonitorID}\n' |
282 |
– |
txt += 'else \n' |
283 |
– |
txt += ' MonitorJobID=${NJob}_'+self.environment_unique_identifier+'\n' |
284 |
– |
txt += ' SyncGridJobId='+self.environment_unique_identifier+'\n' |
285 |
– |
txt += 'fi\n' |
286 |
– |
txt += 'MonitorID='+taskId+'\n' |
287 |
– |
txt += 'echo "MonitorJobID=$MonitorJobID" > $RUNTIME_AREA/$repo \n' |
288 |
– |
txt += 'echo "SyncGridJobId=$SyncGridJobId" >> $RUNTIME_AREA/$repo \n' |
289 |
– |
txt += 'echo "MonitorID=$MonitorID" >> $RUNTIME_AREA/$repo\n' |
290 |
– |
|
291 |
– |
txt += 'echo ">>> GridFlavour discovery: " \n' |
292 |
– |
txt += 'if [ $OSG_GRID ]; then \n' |
293 |
– |
txt += ' middleware=OSG \n' |
294 |
– |
txt += ' if [ $OSG_JOB_CONTACT ]; then \n' |
295 |
– |
txt += ' SyncCE="$OSG_JOB_CONTACT"; \n' |
296 |
– |
txt += ' echo "SyncCE=$SyncCE" >> $RUNTIME_AREA/$repo ;\n' |
297 |
– |
txt += ' else\n' |
298 |
– |
txt += ' echo "not reporting SyncCE";\n' |
299 |
– |
txt += ' fi\n'; |
300 |
– |
txt += ' echo "GridFlavour=$middleware" | tee -a $RUNTIME_AREA/$repo \n' |
301 |
– |
txt += ' echo "source OSG GRID setup script" \n' |
302 |
– |
txt += ' source $OSG_GRID/setup.sh \n' |
303 |
– |
txt += 'elif [ $NORDUGRID_CE ]; then \n' # We look for $NORDUGRID_CE before $VO_CMS_SW_DIR, |
304 |
– |
txt += ' middleware=ARC \n' # because the latter is defined for ARC too |
305 |
– |
txt += ' echo "SyncCE=${NORDUGRID_CE}:2811/nordugrid-GE-${QUEUE:-queue}" >> $RUNTIME_AREA/$repo \n' |
306 |
– |
txt += ' echo "GridFlavour=$middleware" | tee -a $RUNTIME_AREA/$repo \n' |
307 |
– |
txt += 'elif [ $VO_CMS_SW_DIR ]; then \n' |
308 |
– |
txt += ' middleware=LCG \n' |
309 |
– |
txt += ' if [ $GLIDEIN_Gatekeeper ]; then \n' |
310 |
– |
txt += ' echo "SyncCE=`echo $GLIDEIN_Gatekeeper | sed -e s/:2119//`" >> $RUNTIME_AREA/$repo \n' |
311 |
– |
txt += ' else \n' |
312 |
– |
txt += ' echo "SyncCE=`glite-brokerinfo getCE`" >> $RUNTIME_AREA/$repo \n' |
313 |
– |
txt += ' fi \n' |
314 |
– |
txt += ' echo "GridFlavour=$middleware" | tee -a $RUNTIME_AREA/$repo \n' |
315 |
– |
txt += 'else \n' |
316 |
– |
txt += ' echo "ERROR ==> GridFlavour not identified" \n' |
317 |
– |
txt += ' job_exit_code=10030 \n' |
318 |
– |
txt += ' func_exit \n' |
319 |
– |
txt += 'fi \n' |
320 |
– |
|
321 |
– |
txt += 'dumpStatus $RUNTIME_AREA/$repo \n' |
322 |
– |
txt += '\n\n' |
323 |
– |
|
324 |
– |
|
325 |
– |
txt += 'export VO='+self.VO+'\n' |
326 |
– |
txt += 'if [ $middleware == LCG ]; then\n' |
327 |
– |
txt += ' if [ $GLIDEIN_Gatekeeper ]; then\n' |
328 |
– |
txt += ' CloseCEs=$GLIDEIN_Gatekeeper \n' |
329 |
– |
txt += ' else\n' |
330 |
– |
txt += ' CloseCEs=`glite-brokerinfo getCE`\n' |
331 |
– |
txt += ' fi\n' |
332 |
– |
txt += ' echo "CloseCEs = $CloseCEs"\n' |
333 |
– |
txt += ' CE=`echo $CloseCEs | sed -e "s/:.*//"`\n' |
334 |
– |
txt += ' echo "CE = $CE"\n' |
335 |
– |
txt += 'elif [ $middleware == OSG ]; then \n' |
336 |
– |
txt += ' if [ $OSG_JOB_CONTACT ]; then \n' |
337 |
– |
txt += ' CE=`echo $OSG_JOB_CONTACT | /usr/bin/awk -F\/ \'{print $1}\'` \n' |
338 |
– |
txt += ' else \n' |
339 |
– |
txt += ' echo "ERROR ==> OSG mode in setting CE name from OSG_JOB_CONTACT" \n' |
340 |
– |
txt += ' job_exit_code=10099\n' |
341 |
– |
txt += ' func_exit\n' |
342 |
– |
txt += ' fi \n' |
343 |
– |
txt += 'elif [ $middleware == ARC ]; then \n' |
344 |
– |
txt += ' echo "CE = $NORDUGRID_CE"\n' |
345 |
– |
txt += 'fi \n' |
346 |
– |
|
347 |
– |
return txt |
348 |
– |
""" |