Completed — Pull Request — master (#153)
created by unknown at 01:05

JobGenerator.specify_account_name_from_file()   A

Complexity:  Conditions 4
Size:        Total Lines 9
Duplication: Lines 0, Ratio 0 %
Importance:  Changes 2, Bugs 0, Features 1

Metric  Value
cc      4
dl      0
loc     9
rs      9.2
c       2
b       0
f       1
from __future__ import absolute_import

import os
import re
from smartdispatch.pbs import PBS
from smartdispatch import utils


def job_generator_factory(queue, commands, prolog=[], epilog=[], command_params={}, cluster_name=None, base_path="./"):
    if cluster_name == "guillimin":
        return GuilliminJobGenerator(queue, commands, prolog, epilog, command_params, base_path)
    elif cluster_name == "mammouth":
        return MammouthJobGenerator(queue, commands, prolog, epilog, command_params, base_path)
    elif cluster_name == "helios":
        return HeliosJobGenerator(queue, commands, prolog, epilog, command_params, base_path)
    elif cluster_name == "hades":
        return HadesJobGenerator(queue, commands, prolog, epilog, command_params, base_path)

    return JobGenerator(queue, commands, prolog, epilog, command_params, base_path)


class JobGenerator(object):

    """ Offers functionality to generate PBS files for a given queue.

    Parameters
    ----------
    queue : `Queue` instance
        queue on which commands will be executed
    commands : list of str
        commands to put in PBS files
    prolog : list of str
        code to execute before the commands
    epilog : list of str
        code to execute after the commands
    command_params : dict
        information about the commands
    """

    def __init__(self, queue, commands, prolog=[], epilog=[], command_params={}, base_path="./"):
        self.prolog = prolog
        self.commands = commands
        self.epilog = epilog
        self.queue = queue
        self.job_log_filename = '"{base_path}/logs/job/"$PBS_JOBID".{{ext}}"'.format(base_path=base_path)

        self.nb_cores_per_command = command_params.get('nb_cores_per_command', 1)
        self.nb_gpus_per_command = command_params.get('nb_gpus_per_command', 1)
        self.mem_per_command = command_params.get('mem_per_command', None)

        self.pbs_list = self._generate_base_pbs()
        self._add_cluster_specific_rules()

    def _add_cluster_specific_rules(self):
        pass

    def add_pbs_flags(self, flags):
        resources = {}
        options = {}

        for flag in flags:
            if flag.startswith('-l'):
                # Resource flags, e.g. "-lwalltime=1:00:00", become resource key/value pairs.
                resource = flag[2:]
                split = resource.find('=')
                resources[resource[:split]] = resource[split+1:]
            elif flag.startswith('-'):
                # Other flags, e.g. "-qtest", map the option letter to its value.
                options[flag[1:2]] = flag[2:]
            else:
                raise ValueError("Invalid PBS flag ({})".format(flag))

        for pbs in self.pbs_list:
            pbs.add_resources(**resources)
            pbs.add_options(**options)

    def _generate_base_pbs(self):
        """ Generates PBS files allowing the execution of every command on the given queue. """
        nb_commands_per_node = self.queue.nb_cores_per_node // self.nb_cores_per_command

        if self.queue.nb_gpus_per_node > 0 and self.nb_gpus_per_command > 0:
            nb_commands_per_node = min(nb_commands_per_node, self.queue.nb_gpus_per_node // self.nb_gpus_per_command)

        # Limit the number of running commands by the amount of available memory on the node.
        if self.mem_per_command is not None:
            nb_commands_per_node = min(nb_commands_per_node, self.queue.mem_per_node // self.mem_per_command)

        pbs_files = []
        # Distribute the jobs equally among the PBS files and generate those files.
        for i, commands in enumerate(utils.chunks(self.commands, n=nb_commands_per_node)):
            pbs = PBS(self.queue.name, self.queue.walltime)

            # TODO: Move the add_options into the JobManager once created.
            pbs.add_options(o=self.job_log_filename.format(ext='out'), e=self.job_log_filename.format(ext='err'))

            # Set resource: nodes
            resource = "1:ppn={ppn}".format(ppn=len(commands) * self.nb_cores_per_command)
            if self.queue.nb_gpus_per_node > 0:
                resource += ":gpus={gpus}".format(gpus=len(commands) * self.nb_gpus_per_command)
            pbs.add_resources(nodes=resource)

            if self.mem_per_command is not None:
                resource = "{mem}Gb".format(mem=len(commands) * self.mem_per_command)
                pbs.add_resources(mem=resource)

            pbs.add_modules_to_load(*self.queue.modules)
            pbs.add_to_prolog(*self.prolog)
            pbs.add_commands(*commands)
            pbs.add_to_epilog(*self.epilog)

            pbs_files.append(pbs)

        return pbs_files

    def write_pbs_files(self, pbs_dir="./"):
        """ Writes PBS files allowing the execution of every command on the given queue.

        Parameters
        ----------
        pbs_dir : str
            folder where to save the PBS files
        """
        pbs_filenames = []
        for i, pbs in enumerate(self.pbs_list):
            pbs_filename = os.path.join(pbs_dir, 'job_commands_' + str(i) + '.sh')
            pbs.save(pbs_filename)
            pbs_filenames.append(pbs_filename)

        return pbs_filenames

    def specify_account_name_from_env(self, environment_variable_name):
        if environment_variable_name not in os.environ:
            raise ValueError("Undefined environment variable: ${}. Please, provide your account name!".format(environment_variable_name))

        account_name = os.path.basename(os.path.realpath(os.getenv(environment_variable_name)))
        for pbs in self.pbs_list:
            pbs.add_options(A=account_name)

    def specify_account_name_from_file(self, rapid_filename):
        if not os.path.isfile(rapid_filename):
            raise ValueError("Account name file {} does not exist. Please, provide your account name!".format(rapid_filename))

        with open(rapid_filename, 'r') as rapid_file:
            account_name = rapid_file.read().strip()

        for pbs in self.pbs_list:
            pbs.add_options(A=account_name)


class MammouthJobGenerator(JobGenerator):

    def _add_cluster_specific_rules(self):
        if self.queue.name.endswith("@mp2"):
            for pbs in self.pbs_list:
                pbs.resources['nodes'] = re.sub("ppn=[0-9]+", "ppn=1", pbs.resources['nodes'])


class HadesJobGenerator(JobGenerator):

    def _add_cluster_specific_rules(self):
        for pbs in self.pbs_list:
            gpus = re.match(".*gpus=([0-9]+)", pbs.resources['nodes']).group(1)
            pbs.resources['nodes'] = re.sub("ppn=[0-9]+", "ppn={}".format(gpus), pbs.resources['nodes'])
            pbs.resources['nodes'] = re.sub(":gpus=[0-9]+", "", pbs.resources['nodes'])


class GuilliminJobGenerator(JobGenerator):

    def _add_cluster_specific_rules(self):
        return self.specify_account_name_from_env('HOME_GROUP')


# https://wiki.calculquebec.ca/w/Ex%C3%A9cuter_une_t%C3%A2che#tab=tab6
class HeliosJobGenerator(JobGenerator):

    def _add_cluster_specific_rules(self):
        self.specify_account_name_from_file(os.path.join(os.environ['HOME'], ".default_rap"))

        for pbs in self.pbs_list:
            # Remove the forbidden ppn option. Default is 2 cores per GPU.
            pbs.resources['nodes'] = re.sub(":ppn=[0-9]+", "", pbs.resources['nodes'])
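
For reviewers unfamiliar with the module, the snippet below is a minimal usage sketch and is not part of the change under review. It assumes the file is importable as smartdispatch.job_generator (the actual module path inside the package may differ) and substitutes a plain namedtuple for the `Queue` instance, exposing only the attributes that JobGenerator actually reads (name, walltime, modules, nb_cores_per_node, nb_gpus_per_node, mem_per_node); the real Queue class may be constructed differently.

# Illustrative usage sketch (hypothetical import path, stand-in queue object).
from collections import namedtuple

from smartdispatch.job_generator import job_generator_factory  # assumed module path

# Stand-in for a `Queue` instance: only the attributes read by JobGenerator.
FakeQueue = namedtuple('FakeQueue', ['name', 'walltime', 'modules',
                                     'nb_cores_per_node', 'nb_gpus_per_node', 'mem_per_node'])
queue = FakeQueue(name='qtest', walltime='01:00:00', modules=[],
                  nb_cores_per_node=8, nb_gpus_per_node=0, mem_per_node=16)

commands = ['python train.py --lr 0.01', 'python train.py --lr 0.1']

# cluster_name=None falls back to the generic JobGenerator.
generator = job_generator_factory(queue, commands,
                                  command_params={'nb_cores_per_command': 4},
                                  cluster_name=None, base_path='.')

# "-l..." flags become resources, other "-X..." flags become options.
generator.add_pbs_flags(['-lwalltime=00:30:00', '-qtest'])

# With 8 cores per node and 4 cores per command, both commands fit in one
# PBS file, so a single path is returned.
print(generator.write_pbs_files(pbs_dir='.'))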