TestHadesQueue   A

Complexity
    Total Complexity    6

Size / Duplication
    Total Lines         30
    Duplicated Lines    0 %

Importance
    Changes             2
    Bugs                0
    Features            0

Metric                     Value
dl (duplicated lines)      0
loc (lines of code)        30
rs                         10
c (changes)                2
b (bugs)                   0
f (features)               0
wmc (total complexity)     6

6 Methods

Rating  Name                                Duplication  Size  Complexity
A       test_generate_pbs_no_gpus_used()    0            3     1
A       setUp()                             0            11    1
A       test_pbs_split_2_job_nb_commands()  0            3     1
A       test_pbs_split_2_job()              0            2     1
A       test_pbs_split_1_job()              0            2     1
A       test_generate_pbs_ppn()             0            2     1
from nose.tools import assert_true, assert_false, assert_equal, assert_raises

import os
import tempfile
import shutil
from smartdispatch.queue import Queue
from smartdispatch.job_generator import JobGenerator, job_generator_factory
from smartdispatch.job_generator import HeliosJobGenerator, HadesJobGenerator
from smartdispatch.job_generator import GuilliminJobGenerator, MammouthJobGenerator


class TestJobGenerator(object):
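    # Sample PBS flags (three -l resource flags and three other options) exercised by the add_pbs_flags tests below.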
    pbs_flags = ['-lfeature=k80', '-lwalltime=42:42', '-lnodes=6:gpus=66', '-m', '-A123-asd-11', '-t10,20,30']

    def setUp(self):
        self.testing_dir = tempfile.mkdtemp()
        self.cluster_name = "skynet"
        self.name = "9000@hal"
        self.walltime = "10:00"
        self.cores = 42
        self.gpus = 42
        self.mem_per_node = 32
        self.modules = ["cuda", "python"]

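        # Two otherwise identical queues: one without GPUs and one with GPUs.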
        self.queue = Queue(self.name, self.cluster_name, self.walltime, self.cores, 0, self.mem_per_node, self.modules)
        self.queue_gpu = Queue(self.name, self.cluster_name, self.walltime, self.cores, self.gpus, self.mem_per_node, self.modules)

        self.commands = ["echo 1", "echo 2", "echo 3", "echo 4"]
        self.prolog = ["echo prolog"]
        self.epilog = ["echo epilog"]

    def tearDown(self):
        shutil.rmtree(self.testing_dir)

    def test_generate_pbs(self):
        job_generator = JobGenerator(self.queue, self.commands, prolog=self.prolog, epilog=self.epilog)

        # Test nb_cores_per_command argument
        # Should need one PBS file
        assert_equal(len(job_generator.pbs_list), 1)
        assert_equal(job_generator.pbs_list[0].commands, self.commands)
        assert_equal(job_generator.pbs_list[0].prolog, self.prolog)
        assert_equal(job_generator.pbs_list[0].epilog, self.epilog)

    def test_generate_pbs2_cpu(self):
        # Should need two PBS files (each command uses half the cores)
        command_params = {'nb_cores_per_command': self.cores // 2}
        job_generator = JobGenerator(self.queue, self.commands, command_params=command_params)
        assert_equal(len(job_generator.pbs_list), 2)
        assert_equal(job_generator.pbs_list[0].commands, self.commands[:2])
        assert_equal(job_generator.pbs_list[1].commands, self.commands[2:])

    def test_generate_pbs4_cpu(self):
        # Should need four PBS files (each command uses all the cores)
        command_params = {'nb_cores_per_command': self.cores}
        job_generator = JobGenerator(self.queue, self.commands, command_params=command_params)
        assert_equal(len(job_generator.pbs_list), 4)
        assert_equal([pbs.commands[0] for pbs in job_generator.pbs_list], self.commands)

        # Since the queue has no GPUs, they should not be specified in the PBS resource `nodes`
        assert_true('gpus' not in job_generator.pbs_list[0].resources['nodes'])

        # Test modules to load
        # Check that the modules needed for this queue are included in the PBS file
        assert_equal(job_generator.pbs_list[0].modules, self.modules)

    def test_generate_pbs2_gpu(self):
        # Test nb_gpus_per_command argument
        # Should need two PBS files (each command uses half the GPUs)
        command_params = {'nb_gpus_per_command': self.gpus // 2}
        job_generator = JobGenerator(self.queue_gpu, self.commands, command_params=command_params)
        assert_equal(len(job_generator.pbs_list), 2)
        assert_equal(job_generator.pbs_list[0].commands, self.commands[:2])
        assert_equal(job_generator.pbs_list[1].commands, self.commands[2:])

    def test_generate_pbs4_gpu(self):
        # Should need four PBS files (each command uses all the GPUs)
        command_params = {'nb_gpus_per_command': self.gpus}
        job_generator = JobGenerator(self.queue_gpu, self.commands, command_params=command_params)
        assert_equal(len(job_generator.pbs_list), 4)
        assert_equal([pbs.commands[0] for pbs in job_generator.pbs_list], self.commands)

        # Since the queue has GPUs, they should be specified in the PBS resource `nodes`
        assert_true('gpus' in job_generator.pbs_list[0].resources['nodes'])

        # Test modules to load
        # Check that the modules needed for this queue are included in the PBS file
        assert_equal(job_generator.pbs_list[0].modules, self.modules)

    def test_write_pbs_files(self):
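        # Each command needs all the cores, so one PBS file should be written per command.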
        commands = ["echo 1", "echo 2", "echo 3", "echo 4"]
        command_params = {'nb_cores_per_command': self.cores}
        job_generator = JobGenerator(self.queue, commands, command_params=command_params)
        filenames = job_generator.write_pbs_files(self.testing_dir)
        assert_equal(len(filenames), 4)

    def _test_add_pbs_flags(self, flags):
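        # Helper: add the given flags, then check that each one appears exactly once in every generated PBS file.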
        job_generator = JobGenerator(self.queue, self.commands)
        job_generator.add_pbs_flags(flags)

        resources = []
        options = []

        for flag in flags:
            if flag.startswith('-l'):
                resources += [flag[:2] + ' ' + flag[2:]]
            elif flag.startswith('-'):
                options += [(flag[:2] + ' ' + flag[2:]).strip()]

        for pbs in job_generator.pbs_list:
            pbs_str = str(pbs)
            for flag in resources:
                assert_equal(pbs_str.count(flag), 1)
                assert_equal(pbs_str.count(flag[:flag.find('=')]), 1)
            for flag in options:
                assert_equal(pbs_str.count(flag), 1)

    def test_add_pbs_flags(self):
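        # Nose test generator: check every flag on its own, then all of them together.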
        for flag in self.pbs_flags:
            yield self._test_add_pbs_flags, [flag]

        yield self._test_add_pbs_flags, self.pbs_flags

    def test_add_pbs_flags_invalid(self):
        assert_raises(ValueError, self._test_add_pbs_flags, 'weeee')

    def test_add_pbs_flags_invalid_resource(self):
        assert_raises(ValueError, self._test_add_pbs_flags, '-l weeee')


class TestGuilliminQueue(object):

    def setUp(self):
        self.commands = ["echo 1", "echo 2", "echo 3", "echo 4"]
        self.queue = Queue("test", "guillimin", "00:01", 1, 1, 1)

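        # Back up HOME_GROUP and unset it so each test starts without it.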
        self.bak_env_home_group = os.environ.get('HOME_GROUP')
        if self.bak_env_home_group is not None:
            del os.environ['HOME_GROUP']

    def tearDown(self):
        if self.bak_env_home_group is not None:
            os.environ['HOME_GROUP'] = self.bak_env_home_group

    def test_generate_pbs_no_home(self):
        assert_raises(ValueError, GuilliminJobGenerator, self.queue, self.commands)

    def test_generate_pbs(self):
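        # With HOME_GROUP set, the -A (account) option should be filled in with the last path component ('group').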
        os.environ['HOME_GROUP'] = "/path/to/group"
        job_generator = GuilliminJobGenerator(self.queue, self.commands)
        pbs = job_generator.pbs_list[0]
        assert_true("-A" in pbs.options)
        assert_true(pbs.options["-A"] == 'group')


class TestMammouthQueue(object):

    def setUp(self):
        self.commands = ["echo 1", "echo 2", "echo 3", "echo 4"]
        self.queue = Queue("qtest@mp2", "mammouth")

    def test_generate_pbs(self):
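        # Mammouth PBS files should request one processor per node (ppn=1).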
        job_generator = MammouthJobGenerator(self.queue, self.commands)

        assert_true("ppn=1" in str(job_generator.pbs_list[0]))


class TestHeliosQueue(object):

    def setUp(self):
        self.commands = ["echo 1", "echo 2", "echo 3", "echo 4"]
        self.queue = Queue("gpu_8", "helios")

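        # Point HOME to a temporary directory and write a fake .default_rap file containing the RAP id.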
        self._home_backup = os.environ['HOME']
        os.environ['HOME'] = tempfile.mkdtemp()

        self.rap_filename = os.path.join(os.environ['HOME'], ".default_rap")
        if os.path.isfile(self.rap_filename):
            raise Exception("Test setup failed: {} should not exist.".format(self.rap_filename))
        else:
            self.rapid = 'asd-123-ab'
            with open(self.rap_filename, 'w+') as rap_file:
                rap_file.write(self.rapid)

        self.job_generator = HeliosJobGenerator(self.queue, self.commands)

    def tearDown(self):
        shutil.rmtree(os.environ['HOME'])
        os.environ['HOME'] = self._home_backup

    def test_generate_pbs_invalid_group(self):
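        # Without the .default_rap file there is no RAP id, so building the generator should raise ValueError.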
        os.remove(self.rap_filename)

        assert_raises(ValueError, HeliosJobGenerator, self.queue, self.commands)

    def test_generate_pbs_valid_group(self):
        pbs = self.job_generator.pbs_list[0]

        assert_equal(pbs.options['-A'], self.rapid)

    def test_generate_pbs_ppn_is_absent(self):
        assert_false("ppn=" in str(self.job_generator.pbs_list[0]))

    def test_generate_pbs_even_nb_commands(self):
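        # Helios requests one GPU per command: 4 commands should give gpus=4.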
        assert_true("gpus=4" in str(self.job_generator.pbs_list[0]))

    def test_generate_pbs_odd_nb_commands(self):
        commands = ["echo 1", "echo 2", "echo 3", "echo 4", "echo 5"]
        job_generator = HeliosJobGenerator(self.queue, commands)

        assert_true("gpus=5" in str(job_generator.pbs_list[0]))


class TestHadesQueue(object):

    def setUp(self):
        self.queue = Queue("@hades", "hades")

        self.commands4 = ["echo 1", "echo 2", "echo 3", "echo 4"]
        job_generator = HadesJobGenerator(self.queue, self.commands4)
        self.pbs4 = job_generator.pbs_list

        # 8 commands chosen because there are 8 cores, but the job should still be split since there are only 6 GPUs
        self.commands8 = ["echo 1", "echo 2", "echo 3", "echo 4", "echo 5", "echo 6", "echo 7", "echo 8"]
        job_generator = HadesJobGenerator(self.queue, self.commands8)
        self.pbs8 = job_generator.pbs_list

    def test_generate_pbs_ppn(self):
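        # Hades requests one processor per command: ppn should equal the number of commands.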
        assert_true("ppn={}".format(len(self.commands4)) in str(self.pbs4[0]))

    def test_generate_pbs_no_gpus_used(self):
        # Hades uses ppn instead of the gpus flag and breaks if gpus is present
        assert_false("gpus=" in str(self.pbs4[0]))

    def test_pbs_split_1_job(self):
        assert_equal(len(self.pbs4), 1)

    def test_pbs_split_2_job(self):
        assert_equal(len(self.pbs8), 2)

    def test_pbs_split_2_job_nb_commands(self):
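        # With only 6 GPUs available, the 8 commands split into one PBS file with 6 commands and one with 2.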
        assert_true("ppn=6" in str(self.pbs8[0]))
        assert_true("ppn=2" in str(self.pbs8[1]))


class TestJobGeneratorFactory(object):

    def setUp(self):
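        # The factory test builds a HeliosJobGenerator, which needs a .default_rap file under HOME.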
        self._home_backup = os.environ['HOME']
        os.environ['HOME'] = tempfile.mkdtemp()

        self.rap_filename = os.path.join(os.environ['HOME'], ".default_rap")
        if os.path.isfile(self.rap_filename):
            raise Exception("Test setup failed: {} should not exist.".format(self.rap_filename))
        else:
            self.rapid = 'asd-123-ab'
            with open(self.rap_filename, 'w+') as rap_file:
                rap_file.write(self.rapid)

    def tearDown(self):
        shutil.rmtree(os.environ['HOME'])
        os.environ['HOME'] = self._home_backup

    def _test_job_generator_factory(self, cluster_name, job_generator_class):
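        # The factory should return an instance of exactly the generator class registered for the cluster name.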
        q = Queue("test", cluster_name, 1, 1, 1, 1)
        job_generator = job_generator_factory(q, [], cluster_name=cluster_name)
        assert_true(isinstance(job_generator, job_generator_class))
        assert_true(type(job_generator) is job_generator_class)

    def test_job_generator_factory(self):
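        # Nose test generator: check each supported cluster name, plus None for the plain JobGenerator.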
        clusters = [("guillimin", GuilliminJobGenerator),
                    ("mammouth", MammouthJobGenerator),
                    ("helios", HeliosJobGenerator),
                    ("hades", HadesJobGenerator),
                    (None, JobGenerator)]

        for cluster_name, job_generator_class in clusters:
            yield self._test_job_generator_factory, cluster_name, job_generator_class