Completed · Pull Request — master (#166) · created by unknown · 25s

JobGenerator.specify_account_name_from_file(): grade A

Complexity:   Conditions 4
Size:         Total Lines 9
Duplication:  Lines 0 · Ratio 0 %
Importance:   Changes 2 · Bugs 0 · Features 1

Metric  Value
cc      4
dl      0
loc     9
rs      9.2
c       2
b       0
f       1
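
The per-function figures above (4 conditions, 9 total lines, no duplication) are typical static-analysis output. To reproduce similar numbers locally, a tool such as radon can compute cyclomatic complexity and raw line counts; the snippet below is only a sketch (radon is an assumption, the report's analyzer is not named and may count decision points differently, and the file path is assumed):

    # Hedged sketch: compute per-function complexity and raw line counts with radon.
    from radon.complexity import cc_visit
    from radon.raw import analyze

    with open("smartdispatch/job_generator.py") as source_file:  # path assumed
        source = source_file.read()

    for block in cc_visit(source):
        # Each block is a function or method with its cyclomatic complexity.
        print(block.name, block.complexity)

    print(analyze(source).loc)  # raw line count for the whole module
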
from __future__ import absolute_import

import os
import re
from smartdispatch.pbs import PBS
from smartdispatch import utils


def job_generator_factory(queue, commands, prolog=[], epilog=[], command_params={}, cluster_name=None, base_path="./"):
    if cluster_name == "guillimin":
        return GuilliminJobGenerator(queue, commands, prolog, epilog, command_params, base_path)
    elif cluster_name == "mammouth":
        return MammouthJobGenerator(queue, commands, prolog, epilog, command_params, base_path)
    elif cluster_name == "helios":
        return HeliosJobGenerator(queue, commands, prolog, epilog, command_params, base_path)
    elif cluster_name == "hades":
        return HadesJobGenerator(queue, commands, prolog, epilog, command_params, base_path)

    return JobGenerator(queue, commands, prolog, epilog, command_params, base_path)
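
# Example usage (illustrative sketch; `queue` is a Queue instance and `commands` a list
# of shell commands supplied by the caller):
#
#     job_generator = job_generator_factory(queue, commands, cluster_name="helios")
#     pbs_filenames = job_generator.write_pbs_files("path/to/pbs")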


class JobGenerator(object):

    """ Offers functionality to generate PBS files for a given queue.

    Parameters
    ----------
    queue : `Queue` instance
        queue on which commands will be executed
    commands : list of str
        commands to put in PBS files
    prolog : list of str
        code to execute before the commands
    epilog : list of str
        code to execute after the commands
    command_params : dict
        information about the commands
    """

    def __init__(self, queue, commands, prolog=[], epilog=[], command_params={}, base_path="./"):
        self.prolog = prolog
        self.commands = commands
        self.epilog = epilog
        self.queue = queue
        self.job_log_filename = '"{base_path}/logs/job/"$PBS_JOBID".{{ext}}"'.format(base_path=base_path)

        self.nb_cores_per_command = command_params.get('nb_cores_per_command', 1)
        self.nb_gpus_per_command = command_params.get('nb_gpus_per_command', 1)
        #self.mem_per_command = command_params.get('mem_per_command', 0.0)

        self.pbs_list = self._generate_base_pbs()
        self._add_cluster_specific_rules()

    def _add_cluster_specific_rules(self):
        pass

    def add_pbs_flags(self, flags):
        resources = {}
        options = {}

        for flag in flags:
            # Flags starting with '-l' are resource requests; any other '-' flag is a regular option.
            if flag.startswith('-l'):
                resource = flag[2:]
                split = resource.find('=')
                resources[resource[:split]] = resource[split+1:]
            elif flag.startswith('-'):
                options[flag[1:2]] = flag[2:]
            else:
                raise ValueError("Invalid PBS flag ({})".format(flag))

        for pbs in self.pbs_list:
            pbs.add_resources(**resources)
            pbs.add_options(**options)

    def add_sbatch_flags(self, flags):
        options = {}

        for flag in flags:
            split = flag.find('=')
            if flag.startswith('--'):
                if split == -1:
                    raise ValueError("Invalid SBATCH flag ({}): no '=' character found".format(flag))
                options[flag[:split].lstrip("-")] = flag[split+1:]
            elif flag.startswith('-') and split == -1:
                options[flag[1:2]] = flag[2:]
            else:
                raise ValueError("Invalid SBATCH flag ({}); is it a PBS flag?".format(flag))

        for pbs in self.pbs_list:
            pbs.add_sbatch_options(**options)
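
    # Illustrative parses (sketch): add_pbs_flags(["-lwalltime=01:00:00", "-qtest"]) collects
    # resources {'walltime': '01:00:00'} and options {'q': 'test'}, while
    # add_sbatch_flags(["--gres=gpu:1", "-phighmem"]) collects {'gres': 'gpu:1', 'p': 'highmem'}.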

    def _generate_base_pbs(self):
        """ Generates PBS files allowing the execution of every command on the given queue. """
        nb_commands_per_node = self.queue.nb_cores_per_node // self.nb_cores_per_command

        if self.queue.nb_gpus_per_node > 0 and self.nb_gpus_per_command > 0:
            nb_commands_per_node = min(nb_commands_per_node, self.queue.nb_gpus_per_node // self.nb_gpus_per_command)
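        # Worked example (hypothetical queue): 16 cores and 8 GPUs per node with 2 cores and
        # 1 GPU per command gives min(16 // 2, 8 // 1) = 8 commands per PBS file.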

        pbs_files = []
        # Distribute the jobs equally among the PBS files and generate those files.
        for i, commands in enumerate(utils.chunks(self.commands, n=nb_commands_per_node)):
            pbs = PBS(self.queue.name, self.queue.walltime)

            # TODO Move the add_options into the JobManager once created.
            pbs.add_options(o=self.job_log_filename.format(ext='out'), e=self.job_log_filename.format(ext='err'))

            # Set resource: nodes
            resource = "1:ppn={ppn}".format(ppn=len(commands) * self.nb_cores_per_command)
            if self.queue.nb_gpus_per_node > 0:
                resource += ":gpus={gpus}".format(gpus=len(commands) * self.nb_gpus_per_command)

            pbs.add_resources(nodes=resource)

            pbs.add_modules_to_load(*self.queue.modules)
            pbs.add_to_prolog(*self.prolog)
            pbs.add_commands(*commands)
            pbs.add_to_epilog(*self.epilog)

            pbs_files.append(pbs)

        return pbs_files
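
    # For instance (sketch), on a GPU queue with base_path="." and a chunk of 2 commands at
    # 2 cores and 1 GPU each, the nodes resource becomes "1:ppn=4:gpus=2" and stdout goes to
    # '"./logs/job/"$PBS_JOBID".out"'.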

    def write_pbs_files(self, pbs_dir="./"):
        """ Writes PBS files allowing the execution of every command on the given queue.

        Parameters
        ----------
        pbs_dir : str
            folder in which to save the PBS files
        """
        pbs_filenames = []
        for i, pbs in enumerate(self.pbs_list):
            pbs_filename = os.path.join(pbs_dir, 'job_commands_' + str(i) + '.sh')
            pbs.save(pbs_filename)
            pbs_filenames.append(pbs_filename)

        return pbs_filenames

    def specify_account_name_from_env(self, environment_variable_name):
        if environment_variable_name not in os.environ:
            raise ValueError("Undefined environment variable: ${}. Please provide your account name!".format(environment_variable_name))

        account_name = os.path.basename(os.path.realpath(os.getenv(environment_variable_name)))
        for pbs in self.pbs_list:
            pbs.add_options(A=account_name)

    def specify_account_name_from_file(self, rapid_filename):
        if not os.path.isfile(rapid_filename):
            raise ValueError("Account name file {} does not exist. Please provide your account name!".format(rapid_filename))

        with open(rapid_filename, 'r') as rapid_file:
            account_name = rapid_file.read().strip()

        for pbs in self.pbs_list:
            pbs.add_options(A=account_name)
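
    # Sketch: if ~/.default_rap contains "abc-123-aa", or $HOME_GROUP resolves to a path
    # ending in /abc-123-aa, the generated PBS files carry the account option A=abc-123-aa.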


class MammouthJobGenerator(JobGenerator):

    def _add_cluster_specific_rules(self):
        if self.queue.name.endswith("@mp2"):
            for pbs in self.pbs_list:
                pbs.resources['nodes'] = re.sub("ppn=[0-9]+", "ppn=1", pbs.resources['nodes'])


class HadesJobGenerator(JobGenerator):

    def _add_cluster_specific_rules(self):
        for pbs in self.pbs_list:
            gpus = re.match(".*gpus=([0-9]+)", pbs.resources['nodes']).group(1)
            pbs.resources['nodes'] = re.sub("ppn=[0-9]+", "ppn={}".format(gpus), pbs.resources['nodes'])
            pbs.resources['nodes'] = re.sub(":gpus=[0-9]+", "", pbs.resources['nodes'])


class GuilliminJobGenerator(JobGenerator):

    def _add_cluster_specific_rules(self):
        return self.specify_account_name_from_env('HOME_GROUP')


# https://wiki.calculquebec.ca/w/Ex%C3%A9cuter_une_t%C3%A2che#tab=tab6
class HeliosJobGenerator(JobGenerator):

    def _add_cluster_specific_rules(self):
        self.specify_account_name_from_file(os.path.join(os.environ['HOME'], ".default_rap"))

        for pbs in self.pbs_list:
            # Remove forbidden ppn option. Default is 2 cores per gpu.
            pbs.resources['nodes'] = re.sub(":ppn=[0-9]+", "", pbs.resources['nodes'])


class SlurmJobGenerator(JobGenerator):

    def _add_cluster_specific_rules(self):
        for pbs in self.pbs_list:
            gpus = re.match(".*gpus=([0-9]+)", pbs.resources['nodes']).group(1)
            ppn = re.match(".*ppn=([0-9]+)", pbs.resources['nodes']).group(1)
            pbs.resources['nodes'] = re.sub("ppn=[0-9]+", "", pbs.resources['nodes'])
            pbs.resources['nodes'] = re.sub(":gpus=[0-9]+", "", pbs.resources['nodes'])
            pbs.add_resources(naccelerators=gpus)
            pbs.add_resources(ncpus=ppn)
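
A minimal end-to-end sketch of how this module might be driven. The stand-in queue object below only mimics the attributes the generators actually read (name, walltime, nb_cores_per_node, nb_gpus_per_node, modules) and is not the real smartdispatch Queue class; the import path for this file is also an assumption:

    from collections import namedtuple

    # Assumes this file is importable as smartdispatch.job_generator.
    from smartdispatch.job_generator import job_generator_factory

    # Stand-in for a Queue instance; only the attributes used by the generators are provided.
    QueueStub = namedtuple("QueueStub", ["name", "walltime", "nb_cores_per_node",
                                         "nb_gpus_per_node", "modules"])
    queue = QueueStub(name="gpu_1", walltime="12:00:00",
                      nb_cores_per_node=16, nb_gpus_per_node=8, modules=[])

    commands = ["python train.py --lr 0.01", "python train.py --lr 0.001"]
    command_params = {"nb_cores_per_command": 2, "nb_gpus_per_command": 1}

    job_generator = job_generator_factory(queue, commands, command_params=command_params,
                                          cluster_name=None, base_path=".")
    print(job_generator.write_pbs_files("."))  # e.g. ['./job_commands_0.sh']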