from smartdispatch import get_available_queues


class Queue(object):
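    """A job submission queue on a given cluster.

    Values left as None (walltime, cores, GPUs, memory) are filled in from the
    cluster's known queue configuration when this queue is listed there, and the
    queue's default modules are prepended to `modules`.
    """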
    def __init__(self, name, cluster_name, walltime=None, nb_cores_per_node=None, nb_gpus_per_node=None, mem_per_node=None, modules=None):
        self.name = name
        self.cluster_name = cluster_name

        self.walltime = walltime
        self.nb_cores_per_node = nb_cores_per_node
        self.nb_gpus_per_node = nb_gpus_per_node
        self.mem_per_node = mem_per_node
        self.modules = modules if modules is not None else []

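        # Look up this cluster's known queues and use them to fill in any value
        # that was not given explicitly.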
        available_queues = get_available_queues(self.cluster_name)
        if self.name in available_queues:
            queue_infos = available_queues[self.name]

            if self.walltime is None:
                self.walltime = queue_infos['max_walltime']
            if self.nb_cores_per_node is None:
                self.nb_cores_per_node = queue_infos['cores']
            if self.nb_gpus_per_node is None:
                self.nb_gpus_per_node = queue_infos.get('gpus', 0)
            if self.mem_per_node is None:
                self.mem_per_node = queue_infos['ram']

            # Add default modules to load for this queue if any.
            self.modules = queue_infos.get('modules', []) + self.modules

        # Make sure the Queue instance is in a valid state.
        if self.walltime is None:
            raise ValueError("Walltime must be provided on this queue!")

        if self.nb_cores_per_node is None or self.nb_cores_per_node <= 0:
            raise ValueError("Queues must have at least one core!")

        if self.nb_gpus_per_node is None:
            self.nb_gpus_per_node = 0  # Means there are no GPUs on this queue's nodes.

        if self.mem_per_node is None or self.mem_per_node <= 0:
            raise ValueError("Queues must have at least some memory!")
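

# Minimal usage sketch (not part of the original module). The cluster and queue
# names below are hypothetical placeholders, and every value is passed explicitly
# so the example does not rely on any particular cluster configuration being
# installed (this assumes get_available_queues returns an empty mapping for
# clusters it does not know about).
if __name__ == "__main__":
    queue = Queue(name="gpu_1", cluster_name="hypothetical_cluster",
                  walltime="12:00:00", nb_cores_per_node=16,
                  nb_gpus_per_node=2, mem_per_node=64)
    print(queue.name, queue.walltime, queue.nb_cores_per_node, queue.modules)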