|
1
|
|
|
import sys |
|
2
|
|
|
import os |
|
3
|
|
|
from subprocess import Popen, PIPE, check_output |
|
|
|
|
|
|
4
|
|
|
import time |
|
5
|
|
|
import uuid |
|
6
|
|
|
from fabric.api import env, run, cd, get, hide, settings, remote_tunnel, show |
|
|
|
|
|
|
7
|
|
|
from fabric.tasks import execute |
|
|
|
|
|
|
8
|
|
|
from fabric.decorators import with_settings |
|
|
|
|
|
|
9
|
|
|
from datetime import timedelta |
|
10
|
|
|
from os.path import join as pj |
|
11
|
|
|
|
|
12
|
|
|
from jinja2 import Environment, FileSystemLoader |
|
|
|
|
|
|
13
|
|
|
|
|
14
|
|
|
# Job scheduler families that MyCluster knows about.
JOB_SCHEDULERS = (
    'SGE',
    'SLURM',
    'LSF',
    'PBS',
    'TORQUE',
    'MAUI',
    'LOADLEVELER',
)

# Module-level state: the detected scheduler interface module (set by
# init()) and the persistent job database handle (set by create_db()).
scheduler = None
job_db = None
|
19
|
|
|
|
|
20
|
|
|
|
|
21
|
|
|
def get_data(filename):
    """Return the full path of *filename* inside the MyCluster share dir.

    The share directory is looked up next to the installed package
    first; if the file is not found there, fall back to the
    ``share/MyCluster`` directory under ``sys.prefix``.
    """
    package_dir = os.path.dirname(__file__)
    candidate = pj(package_dir, '..', 'share', 'MyCluster', filename)
    # Need to check if the file exists, as the share location may also
    # be sys.prefix/share (e.g. for a system-wide install).
    if not os.path.isfile(candidate):
        candidate = pj(sys.prefix, 'share', 'MyCluster', filename)
    return candidate
|
32
|
|
|
|
|
33
|
|
|
|
|
34
|
|
|
def load_template(template_name):
    """Load a Jinja2 template from the package's ``templates`` directory."""
    template_dir = os.path.join(os.path.dirname(__file__), 'templates')
    # Local name chosen to avoid shadowing fabric's module-level ``env``.
    jinja_env = Environment(loader=FileSystemLoader(template_dir))
    return jinja_env.get_template(template_name)
|
37
|
|
|
|
|
38
|
|
|
|
|
39
|
|
|
def detect_scheduling_sys():
    """Detect which job scheduler is available on this machine.

    Probes, in order: a user-specified scheduler module (via the
    ``MYCLUSTER_SCHED`` environment variable), SLURM (env var, then
    ``scontrol ping``), PBS (``pbsnodes``), SGE (env var) and LSF
    (``lsid``).

    Returns:
        The imported scheduler interface module, or ``None`` when no
        supported scheduler is detected.
    """
    # Test for custom scheduler
    if os.getenv('MYCLUSTER_SCHED') is not None:
        return my_import(os.getenv('MYCLUSTER_SCHED'))

    # Test for SLURM
    if os.getenv('SLURMHOME') is not None:
        return my_import('mycluster.slurm')

    try:
        line = check_output(['scontrol', 'ping'])
        if line.split('(')[0] == 'Slurmctld':
            return my_import('mycluster.slurm')
    except Exception:
        # Narrowed from a bare ``except:`` (which also swallowed
        # KeyboardInterrupt/SystemExit).  A missing or failing scontrol
        # simply means this is not a SLURM host.
        pass

    # Test for PBS
    try:
        line = check_output(['pbsnodes', '-a'])
        return my_import('mycluster.pbs')
    except Exception:
        pass

    # Test for SGE
    if os.getenv('SGE_CLUSTER_NAME') is not None:
        return my_import('mycluster.sge')

    # Test for lsf
    try:
        line = check_output('lsid')
        if line.split(' ')[0] == 'Platform' or line.split(' ')[0] == 'IBM':
            return my_import('mycluster.lsf')
    except Exception:
        pass

    return None
|
76
|
|
|
|
|
77
|
|
|
|
|
78
|
|
|
def queues():
    """Return the queue list from the active scheduler, or [] if none."""
    if scheduler is None:
        return []
    return scheduler.queues()
|
83
|
|
|
|
|
84
|
|
|
def accounts():
    """Return the account list from the active scheduler, or [] if none."""
    if scheduler is None:
        return []
    return scheduler.accounts()
|
89
|
|
|
|
|
90
|
|
|
|
|
91
|
|
|
def remote_sites():
    """Return the remote-site table from the job database, or [] if no DB."""
    if job_db is None:
        return []
    return job_db.remote_site_db
|
96
|
|
|
|
|
97
|
|
|
|
|
98
|
|
|
@with_settings(warn_only=True)
def remote_cmd():
    """Run ``mycluster -p`` on the current fabric host and return its output.

    Intended to be invoked through fabric's ``execute`` (see
    remote_job_list), which sets the target host.  The command's stdout
    is redirected to a uniquely named scratch file under ~/.mycluster on
    the remote side, then fetched into an in-memory buffer.
    """
    # Unique scratch file per invocation so concurrent calls don't clash.
    output_file = '~/.mycluster/' + str(uuid.uuid4())
    with hide('output', 'running', 'warnings'), settings(warn_only=True):
        # pty=False keeps the redirected output free of terminal noise.
        run('mycluster -p >' + output_file, pty=False)
        # NOTE(review): StringIO module is Python 2 only; this file
        # appears to target Python 2 (see `from persist import ...`).
        import StringIO
        contents = StringIO.StringIO()
        get(output_file, contents)
        # operate on 'contents' like a file object here, e.g. 'print
        return contents.getvalue()
|
108
|
|
|
|
|
109
|
|
|
|
|
110
|
|
|
def remote_job_list(site):
    """Return the job listing from remote *site* (``user@host`` string).

    Runs remote_cmd on the host via fabric and returns execute()'s
    host -> result mapping.
    """
    # Honour the user's ~/.ssh/config host aliases and options.
    env.use_ssh_config = True
    return execute(remote_cmd, hosts=[site])
|
113
|
|
|
|
|
114
|
|
|
|
|
115
|
|
|
def print_timedelta(td):
    """Format a timedelta as a zero-padded colon-separated string.

    Produces ``DD:HH:MM:SS`` for multi-day deltas and ``00:HH:MM:SS``
    otherwise, with every field rendered as two digits.
    """
    if td.days > 0:
        # str(td) renders e.g. '2 days, 3:04:05' / '1 day, 3:04:05';
        # fold the day count into the colon-separated fields.
        day_marker = " days, " if td.days > 1 else " day, "
        text = str(td).replace(day_marker, ":")
    else:
        text = "0:" + str(td)
    fields = text.split(':')
    # int(float(...)) drops any fractional seconds before padding.
    padded = ["%02d" % (int(float(field))) for field in fields]
    return ":".join(padded)
|
127
|
|
|
|
|
128
|
|
|
|
|
129
|
|
|
def get_timedelta(date_str):
    """Parse a ``[DD-[hh:]]mm:ss`` duration string into a timedelta.

    Missing leading components default to zero.  A malformed minute or
    second field leaves those components at zero (best-effort parsing)
    rather than raising.

    Args:
        date_str: duration string, e.g. '30:00', '2:30:00', '1-02:30:00'.

    Returns:
        datetime.timedelta built from the parsed components.
    """
    days = 0
    hours = 0
    minutes = 0
    seconds = 0

    if date_str.count('-') == 1:
        days = int(date_str.split('-')[0])
        date_str = date_str.partition('-')[2]
    if date_str.count(':') == 2:
        hours = int(date_str.split(':')[0])
        date_str = date_str.partition(':')[2]

    try:
        minutes = int(date_str.split(':')[0])
        seconds = int(date_str.split(':')[1])
    except (IndexError, ValueError):
        # Narrowed from a bare except: keep best-effort behaviour for
        # malformed input without swallowing unrelated errors.
        pass

    return timedelta(days=days,
                     hours=hours,
                     minutes=minutes,
                     seconds=seconds
                     )
|
154
|
|
|
|
|
155
|
|
|
|
|
156
|
|
|
def get_stats_time(stats):
    """Extract formatted times and a cpu/wallclock ratio from *stats*.

    Args:
        stats: dict that may contain 'wallclock' and 'cpu' entries
            (expected to be timedelta-like; TODO confirm with scheduler
            modules).

    Returns:
        (cputime, wallclock, time_ratio) where the times are formatted
        strings ('-' when missing or unformattable) and time_ratio is
        cpu/wallclock as a float, or None when either value is absent.
    """
    wallclock = '-' if 'wallclock' not in stats else stats['wallclock']
    wallclock_delta = None
    cputime_delta = None
    if wallclock != '-':
        try:
            wallclock_delta = wallclock
            wallclock = print_timedelta(wallclock_delta)
        except Exception:
            # Narrowed from a bare except: unformattable values simply
            # keep their raw form.
            pass
    cputime = '-' if 'cpu' not in stats else stats['cpu']
    if cputime != '-':
        try:
            cputime_delta = cputime
            cputime = print_timedelta(cputime_delta)
        except Exception:
            pass

    time_ratio = None
    if cputime_delta and wallclock_delta:
        time_ratio = (float(cputime_delta.total_seconds()) /
                      wallclock_delta.total_seconds())

    return cputime, wallclock, time_ratio
|
181
|
|
|
|
|
182
|
|
|
|
|
183
|
|
|
def printjobs(num_lines):
    """Print a table of every job in the local database, then list the
    jobs found on each registered remote site.

    Args:
        num_lines: unused in this implementation -- presumably intended
            as a row limit; TODO confirm with callers.
    """
    print('User name: {0} {1}'.format(job_db.user_db['user'].first_name,
                                      job_db.user_db['user'].last_name))
    jobs = job_list()
    # Header row.  The backslash-continued single-quoted literal is one
    # logical format string split across source lines.
    print(' | {0:^10} | {1:^10} |\
{2:^10} | {3:^12} | {4:^12} |\
{5:^5} | {6:^20} | {7:50}'.format('Job ID',
                                  'Status',
                                  'NTasks',
                                  'CPU Time',
                                  'Wallclock',
                                  'Util %',
                                  'Job Name',
                                  'Job Dir',)
          )
    for i, j in enumerate(jobs):
        job_id = jobs[j].job_id
        status = jobs[j].status
        # queue = jobs[j].queue
        # site_name = job_db.queue_db[queue].site_name
        # scheduler_type = job_db.site_db[site_name].scheduler_type
        cputime, wallclock, time_ratio = get_stats_time(jobs[j].stats)
        efficiency = '-'
        if time_ratio:
            try:
                # Utilisation: cpu/wallclock ratio normalised by total
                # thread count, expressed as a percentage.
                efficiency = (time_ratio / (int(jobs[j].num_tasks) *
                              int(jobs[j].threads_per_task)) * 100.0)
                efficiency = '{:.1f}'.format(efficiency)
            except:
                pass

        if status == 'completed':
            # Completed jobs: stats were recorded at completion time.
            print('{0:4} | {1:^10} |\
{2:^10} | {3:^10} |\
{4:^12} | {5:^12} |\
{6:^5} | {7:^20} | {8:50}'.format(i + 1,
                                  job_id,
                                  status,
                                  str(jobs[j].num_tasks) +
                                  ' (' +
                                  str(jobs[j].threads_per_task) +
                                  ')',
                                  cputime,
                                  wallclock,
                                  efficiency,
                                  jobs[j].job_name,
                                  jobs[j].job_dir)
                  )
        elif status == 'running':
            # Running jobs: query the scheduler for live statistics and
            # recompute the utilisation from them.
            stats = scheduler.running_stats(job_id)
            cputime, wallclock, time_ratio = get_stats_time(stats)
            efficiency = '-'
            if time_ratio:
                try:
                    efficiency = (time_ratio / (int(jobs[j].num_tasks) *
                                  int(jobs[j].threads_per_task)) * 100.0)
                    efficiency = '{:.1f}'.format(efficiency)
                except:
                    pass
            print('{0:4} | {1:^10} | {2:^10} |\
{3:^10} | {4:^12} | {5:^12} |\
{6:^5} | {7:^20} | {8:50}'.format(i + 1,
                                  job_id,
                                  status,
                                  str(jobs[j].num_tasks) +
                                  ' (' +
                                  str(jobs[j].threads_per_task) +
                                  ')',
                                  cputime,
                                  wallclock,
                                  efficiency,
                                  jobs[j].job_name,
                                  jobs[j].job_dir)
                  )
        else:
            # Pending/unknown status: no timing data available yet.
            print('{0:4} | {1:^10} | {2:^10} |\
{3:^10} | {4:^12} | {5:^12} |\
{6:^5} | {7:^20} | {8:50}'.format(i + 1,
                                  job_id,
                                  status,
                                  str(jobs[j].num_tasks) +
                                  ' (' +
                                  str(jobs[j].threads_per_task) +
                                  ')',
                                  '-',
                                  '-',
                                  efficiency,
                                  jobs[j].job_name,
                                  jobs[j].job_dir)
                  )

    # Append listings from every registered remote site.
    remotes = remote_sites()
    for i, j in enumerate(remotes):
        print('Remote Site: ' + remotes[j].name)
        remote_list = remote_job_list(remotes[j].user + '@' + remotes[j].name)
        for r in remote_list:
            print(remote_list[r])
|
280
|
|
|
|
|
281
|
|
|
|
|
282
|
|
|
def print_queue_info():
    """Print a table of configuration and availability for every queue
    reported by the active scheduler.

    Queues whose configuration cannot be queried are rendered with '-'
    placeholders.  (Previously a failed query left nc/tpn/avail as None
    and then indexed them, raising TypeError and aborting the listing.)
    """
    print('{0:25} | {1:^15} | {2:^15} | {3:^15} |\
{4:^15} | {5:^15}'.format('Queue Name', 'Node Max Task',
                          'Node Max Thread', 'Node Max Memory',
                          'Max Task', 'Available Task'))
    for q in queues():
        try:
            nc = scheduler.node_config(q)
            tpn = scheduler.tasks_per_node(q)
            avail = scheduler.available_tasks(q)
        except Exception:
            nc = None
            tpn = None
            avail = None
        # Substitute placeholders for anything the query failed to
        # provide, so one bad queue cannot crash the whole report.
        max_thread = nc['max thread'] if nc is not None else '-'
        max_memory = nc['max memory'] if nc is not None else '-'
        max_tasks = avail['max tasks'] if avail is not None else '-'
        available = avail['available'] if avail is not None else '-'
        if tpn is None:
            tpn = '-'
        print('{0:25} | {1:^15} | {2:^15} |\
{3:^15} | {4:^15} | {5:^15}'.format(q, tpn, max_thread, max_memory,
                                    max_tasks, available))
|
302
|
|
|
|
|
303
|
|
|
|
|
304
|
|
|
def create_submit(queue_id, script_name=None, **kwargs):
    """Render a submission script for *queue_id* via the scheduler.

    If the job database knows the user's email and the caller did not
    supply one, it is injected into the template kwargs.  When
    *script_name* is given, the rendered script is also written to that
    file (refusing to overwrite an existing file).

    Returns:
        The rendered script text, or None when no scheduler is active.
    """
    if job_db is not None:
        if 'user_email' not in kwargs:
            email = job_db.user_db['user'].email
            # 'unknown' is the sentinel for an unset email address.
            if email != 'unknown':
                kwargs['user_email'] = email

    if scheduler is not None:
        script = scheduler.create_submit(queue_id, **kwargs)

        if script_name is not None:
            import os.path
            if not os.path.isfile(script_name):
                with open(script_name, 'w') as f:
                    f.write(script)
            else:
                # Refuse to clobber an existing script file.
                print('Warning file: {0} already exists.\
Please choose a different name'.format(script_name))
        return script
    else:
        print('Warning job scheduler not detected')
        return None
|
327
|
|
|
|
|
328
|
|
|
|
|
329
|
|
|
def submit(script_name, immediate, depends=None):
    """Submit *script_name* to the local scheduler and record the job.

    Args:
        script_name: path to a previously generated submission script.
        immediate: passed through to the scheduler's submit
            (presumably run-now vs queued; TODO confirm per scheduler).
        depends: optional dependency spec passed to the scheduler.

    Returns:
        The scheduler job id, -1 if the script file does not exist, or
        None if no scheduler is active (and possibly on a failed
        submission, depending on the scheduler module).
    """
    if scheduler is None:
        return None

    job_id = -1
    import os.path
    if os.path.isfile(script_name):
        job_id = scheduler.submit(script_name, immediate, depends)
        if job_id is not None:
            print('Job submitted with ID {0}'.format(job_id))
        if job_db is not None and job_id is not None:
            from persist import Job
            job = Job(job_id, time.time())
            # Recover job metadata from the 'export VAR=value' lines the
            # script template embeds.
            with open(script_name, 'r') as f:
                for line in f:
                    if line.split('=')[0] == 'export NUM_TASKS':
                        job.num_tasks = line.split('=')[1].strip()
                    if line.split('=')[0] == 'export TASKS_PER_NODE':
                        job.tasks_per_node = line.split('=')[1].strip()
                    if line.split('=')[0] == 'export THREADS_PER_TASK':
                        job.threads_per_task = line.split('=')[1].strip()
                    if line.split('=')[0] == 'export NUM_NODES':
                        job.num_nodes = line.split('=')[1].strip()
                    if line.split('=')[0] == 'export MYCLUSTER_QUEUE':
                        job.queue = line.split('=')[1].strip()
                    if line.split('=')[0] == 'export MYCLUSTER_JOB_NAME':
                        job.job_name = line.split('=')[1].strip()

            job.script_name = script_name
            job.job_dir = os.path.dirname(os.path.abspath(script_name))
            job_db.add_job(job)
            job_db.add_queue(job.queue, scheduler.name())
    else:
        print('Error file: {0} does not exist.'.format(script_name))

    return job_id
|
366
|
|
|
|
|
367
|
|
|
|
|
368
|
|
|
def delete(job_id):
    """Delete *job_id* from the scheduler, if it belongs to this site.

    NOTE(review): reads job.queue.site_name, treating the stored queue
    as an object, while submit() stores job.queue as a plain string --
    confirm against the persist module's Job/queue types.
    """
    # Add check
    job = job_db.get(job_id)
    site_name = job.queue.site_name
    scheduler_type = job_db.site_db[site_name].scheduler_type

    # Only delete when the local scheduler matches the job's recorded
    # site and scheduler type.
    if (scheduler.name() == site_name and
            scheduler.scheduler_type() == scheduler_type):
        scheduler.delete(job_id)
    else:
        print('JobID: ' + str(job_id) + ' not found at current site')
|
379
|
|
|
|
|
380
|
|
|
|
|
381
|
|
|
def add_remote(remote_site):
    """Register *remote_site* in the job database (no-op without a DB)."""
    if job_db is None:
        return
    job_db.add_remote(remote_site)
|
384
|
|
|
|
|
385
|
|
|
|
|
386
|
|
|
def export(job_id):
    """Export data for *job_id*.  Placeholder: not yet implemented."""
    pass
|
388
|
|
|
|
|
389
|
|
|
|
|
390
|
|
|
def job_list():
    """Return the job table from the database, or [] when no DB is open."""
    if job_db is None:
        return []
    return job_db.job_db
|
394
|
|
|
|
|
395
|
|
|
|
|
396
|
|
|
def get_job(job_id):
    """Look up the job record for *job_id*; None when no DB is open."""
    if job_db is None:
        return None
    return job_db.get(job_id)
|
400
|
|
|
|
|
401
|
|
|
|
|
402
|
|
|
def my_import(name):
    """Import and return the (possibly dotted) module *name*.

    __import__ hands back the top-level package, so walk the remaining
    dotted components with getattr to reach the leaf module.
    """
    module = __import__(name)
    for part in name.split('.')[1:]:
        module = getattr(module, part)
    return module
|
408
|
|
|
|
|
409
|
|
|
|
|
410
|
|
|
def get_directory():
    """Return the per-user MyCluster state directory (~/.mycluster/)."""
    from os.path import expanduser
    return expanduser("~") + '/.mycluster/'
|
415
|
|
|
|
|
416
|
|
|
|
|
417
|
|
|
def create_directory():
    """Ensure the MyCluster state directory exists.

    Returns:
        True when the directory was created by this call, False when it
        already existed.
    """
    directory = get_directory()
    if os.path.exists(directory):
        return False
    os.makedirs(directory)
    return True
|
424
|
|
|
|
|
425
|
|
|
|
|
426
|
|
|
def create_db():
    """Initialise the global job database.

    Returns:
        The JobDB instance on success; on failure prints the error and
        returns the (unchanged) global.
    """
    global job_db
    try:
        from persist import JobDB
        job_db = JobDB()
    except Exception as err:
        print('Database failed to initialise. Error Message: ' + str(err))
    return job_db
|
435
|
|
|
|
|
436
|
|
|
|
|
437
|
|
|
def update_db():
    """Refresh the status of every non-completed job in the database
    from the scheduler's live status listing.

    Jobs reported as 'r' are marked running; jobs present in the
    listing with any other state are marked completed and their final
    stats captured.  Jobs absent from the listing are left untouched.
    """
    try:
        if scheduler is not None:
            status_dict = scheduler.status()
            jobs = job_list()
            for j in jobs:
                if jobs[j].status != 'completed':
                    job_id = jobs[j].job_id
                    if job_id in status_dict:
                        state = status_dict[job_id]
                        if state == 'r':
                            jobs[j].update_status('running')
                        else:
                            jobs[j].update_status('completed')
                            jobs[j].update_stats(scheduler.job_stats(job_id))
    except Exception as e:
        print('Database failed to update. Error Message: ' + str(e))
|
454
|
|
|
|
|
455
|
|
|
|
|
456
|
|
|
def sysscribe_update(job_id):
    """Attach local system information (via sysscribe) to job *job_id*."""
    if job_db is None:
        return
    from sysscribe import system
    job_db.get(job_id).update_sysscribe(system.system_dict())
|
460
|
|
|
|
|
461
|
|
|
|
|
462
|
|
|
def email_update(email):
    """Store *email* as the local user's address (no-op without a DB)."""
    if job_db is None:
        return
    job_db.user_db['user'].update_email(email)
|
465
|
|
|
|
|
466
|
|
|
|
|
467
|
|
|
def firstname_update(name):
    """Set the local user's first name (no-op without a DB).

    NOTE(review): calls user.firstname(name) while email_update calls
    update_email(...) -- confirm this matches the persist user API.
    """
    if job_db is None:
        return
    job_db.user_db['user'].firstname(name)
|
470
|
|
|
|
|
471
|
|
|
|
|
472
|
|
|
def lastname_update(name):
    """Set the local user's last name (no-op without a DB).

    NOTE(review): calls user.lastname(name) -- confirm this matches the
    persist user API (compare update_email above).
    """
    if job_db is None:
        return
    job_db.user_db['user'].lastname(name)
|
475
|
|
|
|
|
476
|
|
|
|
|
477
|
|
|
def get_user():
    """Return 'First Last' for the local user, or 'unknown' without a DB."""
    if job_db is None:
        return 'unknown'
    user = job_db.user_db['user']
    return user.first_name + ' ' + user.last_name
|
483
|
|
|
|
|
484
|
|
|
|
|
485
|
|
|
def get_email():
    """Return the local user's email address, or 'unknown' without a DB."""
    if job_db is None:
        return 'unknown'
    return job_db.user_db['user'].email
|
490
|
|
|
|
|
491
|
|
|
|
|
492
|
|
|
def get_site():
    """Return the current site name.  Site detection is not implemented,
    so this always reports 'unknown'."""
    return 'unknown'
|
494
|
|
|
|
|
495
|
|
|
|
|
496
|
|
|
def appname_update(job_id, appname):
    """Record *appname* against job *job_id* (no-op without a DB)."""
    if job_db is None:
        return
    job_db.get(job_id).appname(appname)
|
499
|
|
|
|
|
500
|
|
|
|
|
501
|
|
|
def appdata_update(job_id, appdata):
    """Record *appdata* against job *job_id* (no-op without a DB)."""
    if job_db is None:
        return
    job_db.get(job_id).appdata(appdata)
|
504
|
|
|
|
|
505
|
|
|
|
|
506
|
|
|
def init(silent=False):
    """Initialise MyCluster.

    Detects the local job scheduler, ensures the ~/.mycluster state
    directory exists, opens (creating if needed) the job database and
    refreshes job states, then prints a summary unless *silent*.
    """
    global scheduler
    scheduler = detect_scheduling_sys()
    # True when the state directory was created by this call.
    created = create_directory()
    if create_db() is not None:
        update_db()

    if not silent:
        print('MyCluster Initialisation Info')
        print('-----------------------------')
        print('Local database in: ' + get_directory())
        print('User: ' + get_user())
        print('Email: ' + get_email())
        if not scheduler:
            print('Local job scheduler: None')
        else:
            print('Local job scheduler: ' + scheduler.scheduler_type())
        print('Site name: ' + get_site())
        print('')
|
525
|
|
|
|