1
|
|
|
from nose.tools import assert_true, assert_false, assert_equal, assert_raises |
2
|
|
|
|
3
|
|
|
import os |
4
|
|
|
import unittest |
5
|
|
|
import tempfile |
6
|
|
|
import shutil |
7
|
|
|
from smartdispatch.queue import Queue |
8
|
|
|
from smartdispatch.job_generator import JobGenerator, job_generator_factory |
9
|
|
|
from smartdispatch.job_generator import HeliosJobGenerator, HadesJobGenerator |
10
|
|
|
from smartdispatch.job_generator import GuilliminJobGenerator, MammouthJobGenerator |
11
|
|
|
|
12
|
|
|
|
13
|
|
|
class TestJobGenerator(unittest.TestCase):
    """Tests for the base JobGenerator: PBS splitting, resources and modules."""

    def setUp(self):
        self.testing_dir = tempfile.mkdtemp()
        self.cluster_name = "skynet"
        self.name = "9000@hal"
        self.walltime = "10:00"
        self.cores = 42
        self.gpus = 42
        self.mem_per_node = 32
        self.modules = ["cuda", "python"]

        # BUG FIX: the CPU-only queue was previously built as
        # Queue(..., cores, 0, modules), which passes `modules` in the
        # `mem_per_node` positional slot. Use the same argument layout as
        # `queue_gpu` below so both queues are constructed consistently.
        self.queue = Queue(self.name, self.cluster_name, self.walltime, self.cores, 0, self.mem_per_node, self.modules)
        self.queue_gpu = Queue(self.name, self.cluster_name, self.walltime, self.cores, self.gpus, self.mem_per_node, self.modules)

    def tearDown(self):
        shutil.rmtree(self.testing_dir)

    def test_generate_pbs(self):
        commands = ["echo 1", "echo 2", "echo 3", "echo 4"]

        job_generator = JobGenerator(self.queue, commands)

        # Test nb_cores_per_command argument
        # Should need one PBS file
        pbs_list = job_generator.generate_pbs()
        assert_equal(len(pbs_list), 1)
        assert_equal(pbs_list[0].commands, commands)

        # Should need two PBS files
        command_params = {'nb_cores_per_command': self.cores // 2}
        job_generator = JobGenerator(self.queue, commands, command_params)
        pbs_list = job_generator.generate_pbs()
        assert_equal(len(pbs_list), 2)
        assert_equal(pbs_list[0].commands, commands[:2])
        assert_equal(pbs_list[1].commands, commands[2:])

        # Should need four PBS files
        command_params = {'nb_cores_per_command': self.cores}
        job_generator = JobGenerator(self.queue, commands, command_params)
        pbs_list = job_generator.generate_pbs()
        assert_equal(len(pbs_list), 4)
        assert_equal([pbs.commands[0] for pbs in pbs_list], commands)

        # Since queue has no gpus it should not be specified in PBS resource `nodes`
        assert_true('gpus' not in pbs_list[0].resources['nodes'])

        # Test nb_gpus_per_command argument
        # Should need two PBS files
        command_params = {'nb_gpus_per_command': self.gpus // 2}
        job_generator = JobGenerator(self.queue_gpu, commands, command_params)
        pbs_list = job_generator.generate_pbs()
        assert_equal(len(pbs_list), 2)
        assert_equal(pbs_list[0].commands, commands[:2])
        assert_equal(pbs_list[1].commands, commands[2:])

        # Should need four PBS files
        command_params = {'nb_gpus_per_command': self.gpus}
        job_generator = JobGenerator(self.queue_gpu, commands, command_params)
        pbs_list = job_generator.generate_pbs()
        assert_equal(len(pbs_list), 4)
        assert_equal([pbs.commands[0] for pbs in pbs_list], commands)

        # Since queue has gpus it should be specified in PBS resource `nodes`
        assert_true('gpus' in pbs_list[0].resources['nodes'])

        # Test modules to load
        # Check if needed modules for this queue are included in the PBS file
        assert_equal(pbs_list[0].modules, self.modules)

        # Test creating a simple job generator
        queue = {"queue_name": "qtest"}
        job_generator = JobGenerator(queue, commands=[])

    def test_write_pbs_files(self):
        commands = ["echo 1", "echo 2", "echo 3", "echo 4"]
        command_params = {'nb_cores_per_command': self.cores}
        job_generator = JobGenerator(self.queue, commands, command_params)
        # One command per PBS file, so one file per command is expected.
        filenames = job_generator.write_pbs_files(self.testing_dir)
        assert_equal(len(filenames), 4)
93
|
|
|
|
94
|
|
|
|
95
|
|
|
class TestGuilliminQueue(TestJobGenerator):
    """Guillimin-specific PBS generation (requires the HOME_GROUP env variable)."""

    def test_generate_pbs(self):
        commands = ["echo 1", "echo 2", "echo 3", "echo 4"]
        job_generator = GuilliminJobGenerator(self.queue, commands)

        # Save and clear HOME_GROUP so the missing-variable error path runs.
        bak_env_home_group = os.environ.pop('HOME_GROUP', None)
        try:
            # Without HOME_GROUP the generator cannot build the -A option.
            assert_raises(ValueError, job_generator.generate_pbs)

            os.environ['HOME_GROUP'] = "/path/to/group"
            pbs = job_generator.generate_pbs()[0]
            assert_true("-A" in pbs.options)
        finally:
            # BUG FIX: previously the variable was left set to
            # "/path/to/group" when it did not exist before the test, and
            # was not restored at all if an assertion above failed.
            if bak_env_home_group is not None:
                os.environ['HOME_GROUP'] = bak_env_home_group
            else:
                os.environ.pop('HOME_GROUP', None)
113
|
|
|
|
114
|
|
|
|
115
|
|
|
class TestMammouthQueue(unittest.TestCase):
    """Mammouth-specific PBS generation."""

    def setUp(self):
        self.commands = ["echo 1", "echo 2", "echo 3", "echo 4"]
        self.queue = Queue("qtest@mp2", "mammouth")

    def test_generate_pbs(self):
        generator = MammouthJobGenerator(self.queue, self.commands)
        pbs_text = str(generator.generate_pbs()[0])
        # Mammouth jobs request a single processor per node.
        assert_true("ppn=1" in pbs_text)
125
|
|
|
|
126
|
|
|
|
127
|
|
|
class TestHeliosQueue(unittest.TestCase):
    """Helios-specific PBS generation (group read from the RAP env variable)."""

    def setUp(self):
        self.commands = ["echo 1", "echo 2", "echo 3", "echo 4"]
        self.queue = Queue("gpu_8", "helios")

        self.env_val = 'RAP'

        # Save the pre-test value, then force a known one. (The previous
        # version redundantly deleted the variable right before assigning it.)
        self.bak_env_home_group = os.environ.get(self.env_val)
        os.environ[self.env_val] = "/rap/group/"

        self.job_generator = HeliosJobGenerator(self.queue, self.commands)

    def tearDown(self):
        # BUG FIX: when RAP did not exist before the test it was left set
        # to "/rap/group/"; remove it in that case instead of leaking it.
        if self.bak_env_home_group is not None:
            os.environ[self.env_val] = self.bak_env_home_group
        else:
            os.environ.pop(self.env_val, None)

    def test_generate_pbs_invalid_group(self):
        del os.environ[self.env_val]

        assert_raises(ValueError, self.job_generator.generate_pbs)

    def test_generate_pbs_valid_group(self):
        pbs = self.job_generator.generate_pbs()[0]

        # "/rap/group/" should be reduced to the bare group name.
        assert_equal(pbs.options['-A'], "group")

    def test_generate_pbs_ppn_is_absent(self):
        assert_false("ppn=" in str(self.job_generator.generate_pbs()[0]))

    def test_generate_pbs_even_nb_commands(self):
        assert_true("gpus=4" in str(self.job_generator.generate_pbs()[0]))

    def test_generate_pbs_odd_nb_commands(self):
        # An odd command count should be rounded up to an even GPU request.
        commands = ["echo 1", "echo 2", "echo 3", "echo 4", "echo 5"]
        job_generator = HeliosJobGenerator(self.queue, commands)

        assert_true("gpus=6" in str(job_generator.generate_pbs()[0]))
167
|
|
|
|
168
|
|
|
|
169
|
|
|
class TestHadesQueue(unittest.TestCase):
    """Hades-specific PBS generation: ppn-based GPU requests and job splitting."""

    def setUp(self):
        self.queue = Queue("@hades", "hades")

        self.commands4 = ["echo 1", "echo 2", "echo 3", "echo 4"]
        self.pbs4 = HadesJobGenerator(self.queue, self.commands4).generate_pbs()

        # Eight commands match the 8 available cores, yet the 6-GPU limit
        # should still force a split across two PBS files.
        self.commands8 = ["echo 1", "echo 2", "echo 3", "echo 4", "echo 5", "echo 6", "echo 7", "echo 8"]
        self.pbs8 = HadesJobGenerator(self.queue, self.commands8).generate_pbs()

    def test_generate_pbs_ppn(self):
        expected = "ppn={}".format(len(self.commands4))
        assert_true(expected in str(self.pbs4[0]))

    def test_generate_pbs_no_gpus_used(self):
        # Hades uses ppn in place of the gpus flag and breaks if gpus is present.
        assert_false("gpus=" in str(self.pbs4[0]))

    def test_pbs_split_1_job(self):
        assert_equal(len(self.pbs4), 1)

    def test_pbs_split_2_job(self):
        assert_equal(len(self.pbs8), 2)

    def test_pbs_split_2_job_nb_commands(self):
        first_pbs, second_pbs = self.pbs8
        assert_true("ppn=6" in str(first_pbs))
        assert_true("ppn=2" in str(second_pbs))
197
|
|
|
|
198
|
|
|
|
199
|
|
|
def test_job_generator_factory():
    """The factory maps each cluster name to its specialized generator class."""
    queue = {"queue_name": "qtest"}
    commands = []

    cluster_to_class = [("guillimin", GuilliminJobGenerator),
                        ("mammouth", MammouthJobGenerator),
                        ("helios", HeliosJobGenerator),
                        ("hades", HadesJobGenerator)]

    for cluster_name, generator_class in cluster_to_class:
        job_generator = job_generator_factory(queue, commands, cluster_name=cluster_name)
        assert_true(isinstance(job_generator, generator_class))

    # An unknown cluster yields the plain base generator, not any subclass.
    job_generator = job_generator_factory(queue, commands, cluster_name=None)
    assert_true(isinstance(job_generator, JobGenerator))
    for _, generator_class in cluster_to_class:
        assert_false(isinstance(job_generator, generator_class))
220
|
|
|
|