 39 |        self.OSBsize = 50*1000*1000 # 50 MB
 40 |
 41 |        self.environment_unique_identifier = None
 42 +        self.submissionDay = time.strftime("%y%m%d",time.localtime())
 43 |
 44 |        return
 45 |
 52 |        SchedulerGrid.configure(self, cfg_params)
 53 |
 54 |        self.proxyValid=0
 55 <        self.dontCheckProxy=int(cfg_params.get("GRID.dont_check_proxy",0))
 55 >        self.dontCheckProxy=int(cfg_params.get("GRID.dont_check_proxy",'0'))
 56 |        self.space_token = cfg_params.get("USER.space_token",None)
 57 |        self.proxyServer= 'myproxy.cern.ch'
 58 |        self.group = cfg_params.get("GRID.group", None)
 59 |        self.role = cfg_params.get("GRID.role", None)
 60 |        self.VO = cfg_params.get('GRID.virtual_organization','cms')
 61 +        self.allowOverflow = cfg_params.get('GRID.allow_overflow', '1')
 62 +        self.max_rss = cfg_params.get('GRID.max_rss','2300')
 63 |
 64 |        self.checkProxy()
 65 |
 83 |            msg+="\n Use GRID.se_white_list and/or GRID.se_black_list instead"
 84 |            raise CrabException(msg)
 85 |
 86 +
 87 +        # make sure proxy FQAN has not changed since last time
 88 +        command = "voms-proxy-info -identity -fqan 2>/dev/null"
 89 +        command += " | head -2"
 90 +        identity = runCommand(command)
 91 +        idfile = common.work_space.shareDir() + "GridIdentity"
 92 +        if os.access(idfile, os.F_OK) :
 93 +            # identity file exists from previous commands
 94 +            f=open(idfile, 'r')
 95 +            idFromFile=f.read()
 96 +            f.close()
 97 +        else :
 98 +            # create it
 99 +            f=open(idfile, 'w')
100 +            f.write(identity)
101 +            f.close()
102 +            idFromFile = identity
103 +
104 +        if identity != idFromFile:
105 +            msg = "Wrong Grid Credentials:\n%s" % identity
106 +            msg += "\nMake sure you have "
107 +            msg += " DN, FQAN =\n%s" % idFromFile
108 +            raise CrabException(msg)
109 +
110 |        return
111 |
112 |    def userName(self):
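
The hunk above caches the proxy identity in the task's share directory (file GridIdentity) on first use and aborts with a CrabException if a later command finds a different DN/FQAN. A minimal standalone sketch of the same check, assuming only that voms-proxy-info is on the PATH; the function name, the cache-file argument and the use of subprocess in place of CRAB's runCommand helper are illustrative, not CRAB code:

    import os
    import subprocess

    def check_grid_identity(cache_file):
        # First two output lines: certificate DN and primary FQAN
        cmd = "voms-proxy-info -identity -fqan 2>/dev/null | head -2"
        identity = subprocess.check_output(cmd, shell=True).decode()

        if os.access(cache_file, os.F_OK):
            # identity cached by a previous command: compare against it
            with open(cache_file) as f:
                cached = f.read()
        else:
            # first command for this task: remember the identity
            with open(cache_file, 'w') as f:
                f.write(identity)
            cached = identity

        if identity != cached:
            raise RuntimeError("Wrong Grid credentials:\n%s\nexpected DN, FQAN =\n%s"
                               % (identity, cached))
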
125 |        by $CRABPYTHON/Scheduler.py
126 |        """
127 |
128 <        #SB paste from crab ScheduerGlidein
128 >        #SB paste from crab SchedulerGlidein
129 |
130 |        jobParams = ""
131 |
152 |        jobParams += '+DESIRED_CMSScramArch ="' +scramArch+'";'
153 |
154 |        myscheddName = self.remoteHost
155 <        jobParams += '+Glidein_MonitorID = "https://'+ myscheddName + '//$(Cluster).$(Process)"; '
155 >        jobParams += '+Glidein_MonitorID = "https://'+ myscheddName + \
156 >                     '//' + self.submissionDay + '//$(Cluster).$(Process)"; '
157 |
158 |        if (self.EDG_clock_time):
159 <            jobParams += '+MaxWallTimeMins = '+self.EDG_clock_time+'; '
159 >            glideinTime = "%d" % (int(self.EDG_clock_time)+5) # 5 min to wrapup
160 >            jobParams += '+MaxWallTimeMins = '+ glideinTime + '; '
161 |        else:
162 <            jobParams += '+MaxWallTimeMins = %d; ' % (60*24)
162 >            jobParams += '+MaxWallTimeMins = %d; ' % (60*22 - 5) # 22h default in glidein, 5min to wrap
163 >
164 >        if self.max_rss :
165 >            jobParams += 'request_memory = '+self.max_rss+';'
166 >
167 >        if self.allowOverflow == "0":
168 >            jobParams += '+CMS_ALLOW_OVERFLOW = False; '
169 >
170 >        if self.EDG_addJdlParam:
171 >            if self.EDG_addJdlParam[-1] == '':
172 >                self.EDG_addJdlParam = self.EDG_addJdlParam[:-1]
173 >            for p in self.EDG_addJdlParam:
174 >                jobParams += p.strip()+';\n'
175 |
176 |        common._db.updateTask_({'jobType':jobParams})
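
Taken together, the additions above extend the classad snippet handed to the remote schedd: the Glidein_MonitorID now embeds the submission day so resubmissions on a later day get a distinct dashboard identifier, the requested wall time gains a five-minute wrap-up margin (or falls just under the 22-hour glidein default), and request_memory, an optional CMS_ALLOW_OVERFLOW switch and any user-supplied JDL lines are appended. A condensed, self-contained sketch of that assembly; the function and argument names are illustrative, not part of CRAB:

    def glidein_job_params(schedd, submission_day, clock_time=None,
                           max_rss='2300', allow_overflow='1', extra_jdl=None):
        # Monitor ID includes the submission day to keep it unique per day
        params = '+Glidein_MonitorID = "https://%s//%s//$(Cluster).$(Process)"; ' \
                 % (schedd, submission_day)

        if clock_time:
            params += '+MaxWallTimeMins = %d; ' % (int(clock_time) + 5)  # 5 min to wrap up
        else:
            params += '+MaxWallTimeMins = %d; ' % (60*22 - 5)            # just under the 22 h default

        if max_rss:
            params += 'request_memory = %s;' % max_rss                   # MB

        if allow_overflow == '0':
            params += '+CMS_ALLOW_OVERFLOW = False; '

        for p in (extra_jdl or []):                                      # extra user JDL lines
            if p.strip():
                params += p.strip() + ';\n'
        return params
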
177 |
193 |
194 |        params = {'shareDir':shareDir,
195 |                  'jobDir':jobDir,
196 <                  'taskDir':taskDir}
196 >                  'taskDir':taskDir,
197 >                  'submissionDay':self.submissionDay}
198 |
199 |        return params
200 |
239 |
240 |        txt += 'func_exit() { \n'
241 |        txt += self.wsExitFunc_common()
242 <
242 >        txt += '#Check for stdout/err in new location as of condor 7.7\n'
243 >        txt += ' if [ -s _condor_stdout ]; then\n'
244 >        txt += ' echo "Found _condor_stdout/err, rename for OSB"\n'
245 >        txt += ' cp -pfv _condor_stdout CMSSW_${NJob}.stdout\n'
246 >        txt += ' cp -pfv _condor_stderr CMSSW_${NJob}.stderr\n'
247 >        txt += ' fi\n'
248 |        txt += ' tar zcvf ${out_files}.tgz ${final_list}\n'
249 |        txt += ' tmp_size=`ls -gGrta ${out_files}.tgz | awk \'{ print $3 }\'`\n'
250 |        txt += ' rm ${out_files}.tgz\n'
311 |        else:
312 |            # pick from Available Servers List
313 |            srvCfg=ServerConfig('default').config()
267 -            print srvCfg
314 |            remoteHost = srvCfg['serverName']
315 |            common.logger.info("remotehost from Avail.List = %s" % remoteHost)
316 |
317 |        if not remoteHost:
318 |            raise CrabException('FATAL ERROR: remoteHost not defined')
319 |
320 <        common.logger.info("try to find out username for remote Host via uberftp ...")
321 <        command="uberftp %s pwd|grep User|awk '{print $3}'" % remoteHost
322 <        (status, output) = commands.getstatusoutput(command)
323 <        if status == 0:
324 <            remoteUser = output
325 <            common.logger.info("remoteUser set to %s" % remoteUser)
326 <            if remoteUser==None:
327 <                raise CrabException('FATAL ERROR: REMOTE USER not defined')
320 >        #common.logger.info("try to find out username for remote Host via uberftp ...")
321 >        #command="uberftp %s pwd|grep User|awk '{print $3}'" % remoteHost
322 >        #(status, output) = commands.getstatusoutput(command)
323 >        #if status == 0:
324 >        #    remoteUser = output
325 >        #    common.logger.info("remoteUser set to %s" % remoteUser)
326 >        #    if remoteUser==None:
327 >        #        raise CrabException('FATAL ERROR: REMOTE USER not defined')
328 |
329 <        remoteUserHost = remoteUser + '@' + remoteHost
329 >        #remoteUserHost = remoteUser + '@' + remoteHost
330 >        remoteUserHost = remoteHost
331 |
332 |        common._db.updateTask_({'serverName':remoteUserHost})
333 |