Code Duplication    Length = 9-13 lines in 3 locations

smartdispatch/tests/test_job_generator.py (3 locations)

@@ 76-88 (lines=13) @@
73         assert_equal(job_generator.pbs_list[0].modules, self.modules)
74
75     def test_generate_pbs2_gpu(self):
76         # Test nb_gpus_per_command argument
77         # Should needs two PBS file
78         command_params = {'nb_gpus_per_command': self.gpus // 2}
79         job_generator = JobGenerator(self.queue_gpu, self.commands, command_params=command_params)
80         assert_equal(len(job_generator.pbs_list), 2)
81         assert_equal(job_generator.pbs_list[0].commands, self.commands[:2])
82         assert_equal(job_generator.pbs_list[1].commands, self.commands[2:])
83
84     def test_generate_pbs4_gpu(self):
85         # Should needs four PBS files
86         command_params = {'nb_gpus_per_command': self.gpus}
87         job_generator = JobGenerator(self.queue_gpu, self.commands, command_params=command_params)
88         assert_equal(len(job_generator.pbs_list), 4)
89         assert_equal([pbs.commands[0] for pbs in job_generator.pbs_list], self.commands)
90
91         # Since queue has gpus it should be specified in PBS resource `nodes`
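The two GPU tests above differ only in how many GPUs each command requests; the generator construction and the assertions are otherwise identical. A minimal sketch of a shared helper both tests could call, based only on the JobGenerator and assert_equal usage visible in the excerpt (the helper name _assert_commands_per_pbs is hypothetical, not part of smartdispatch):

    def _assert_commands_per_pbs(self, queue, command_params, expected_nb_pbs):
        # Hypothetical helper: build the generator once, then check both the
        # number of PBS files and how the commands are split across them.
        job_generator = JobGenerator(queue, self.commands, command_params=command_params)
        assert_equal(len(job_generator.pbs_list), expected_nb_pbs)
        chunk = len(self.commands) // expected_nb_pbs
        for i, pbs in enumerate(job_generator.pbs_list):
            assert_equal(pbs.commands, self.commands[i * chunk:(i + 1) * chunk])

    def test_generate_pbs2_gpu(self):
        # Two commands per PBS file when each command uses half the GPUs.
        self._assert_commands_per_pbs(self.queue_gpu, {'nb_gpus_per_command': self.gpus // 2}, 2)

    def test_generate_pbs4_gpu(self):
        # One command per PBS file when each command uses all the GPUs.
        self._assert_commands_per_pbs(self.queue_gpu, {'nb_gpus_per_command': self.gpus}, 4)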
@@ 53-65 (lines=13) @@
50         assert_equal(job_generator.pbs_list[0].prolog, self.prolog)
51         assert_equal(job_generator.pbs_list[0].epilog, self.epilog)
52
53     def test_generate_pbs2_cpu(self):
54         # Should needs two PBS file
55         command_params = {'nb_cores_per_command': self.cores // 2}
56         job_generator = JobGenerator(self.queue, self.commands, command_params=command_params)
57         assert_equal(len(job_generator.pbs_list), 2)
58         assert_equal(job_generator.pbs_list[0].commands, self.commands[:2])
59         assert_equal(job_generator.pbs_list[1].commands, self.commands[2:])
60
61     def test_generate_pbs4_cpu(self):
62         # Should needs four PBS file
63         command_params = {'nb_cores_per_command': self.cores}
64         job_generator = JobGenerator(self.queue, self.commands, command_params=command_params)
65         assert_equal(len(job_generator.pbs_list), 4)
66         assert_equal([pbs.commands[0] for pbs in job_generator.pbs_list], self.commands)
67
68         # Since queue has no gpus it should not be specified in PBS resource `nodes`
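The CPU pair repeats the GPU pair nearly verbatim; only the queue and the command_params key change. With a helper like the one sketched above, these two tests also collapse to one-line calls (again a sketch, not the project's actual code):

    def test_generate_pbs2_cpu(self):
        self._assert_commands_per_pbs(self.queue, {'nb_cores_per_command': self.cores // 2}, 2)

    def test_generate_pbs4_cpu(self):
        self._assert_commands_per_pbs(self.queue, {'nb_cores_per_command': self.cores}, 4)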
@@ 35-43 (lines=9) @@
32
33         self.queue = Queue(self.name, self.cluster_name, self.walltime, self.cores, 0, self.mem_per_node, self.modules)
34         self.queue_gpu = Queue(self.name, self.cluster_name, self.walltime, self.cores, self.gpus, self.mem_per_node, self.modules)
35
36         self.commands = ["echo 1", "echo 2", "echo 3", "echo 4"]
37         self.prolog = ["echo prolog"]
38         self.epilog = ["echo epilog"]
39
40     def tearDown(self):
41         shutil.rmtree(self.testing_dir)
42
43     def test_generate_pbs(self):
44         job_generator = JobGenerator(self.queue, self.commands, prolog=self.prolog, epilog=self.epilog)
45
46         # Test nb_cores_per_command argument
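The third location is mostly fixture code: setUp builds two Queue objects that differ only in their GPU count. If that repetition should also go, a small factory keeps the fixture in one place (a sketch; _make_queue is a hypothetical name and the Queue signature is taken from the excerpt):

    def _make_queue(self, nb_gpus=0):
        # Only the GPU count differs between the two fixture queues.
        return Queue(self.name, self.cluster_name, self.walltime,
                     self.cores, nb_gpus, self.mem_per_node, self.modules)

    # In setUp, the two assignments then become:
    #   self.queue = self._make_queue()
    #   self.queue_gpu = self._make_queue(nb_gpus=self.gpus)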