1
|
|
|
""" |
2
|
|
|
:mod:`tests.validation_test` -- Validation of Implemented Solutions |
3
|
|
|
=================================================================== |
4
|
|
|
|
5
|
|
|
.. module:: tests.validation_test |
6
|
|
|
:synopsis: Validation of all implemented solutions. |
7
|
|
|
|
8
|
|
|
.. moduleauthor:: Bill Maroney <[email protected]> |
9
|
|
|
""" |
10
|
|
|
|
11
|
|
|
import csv
import importlib
import os
import pkgutil
import re
import unittest
from time import perf_counter, time
from typing import Callable, Union
19
|
|
|
|
20
|
|
|
# Root package holding one module per solved problem (problemNNN.py)
SOLUTION_MODULE_ROOT = "solutions"
# Dotted-path template for a solution module; fill in the problem number,
# e.g. SOLUTION_MODULE_PATH.format(42) -> "solutions.problem42"
SOLUTION_MODULE_PATH = "{}.problem{{}}".format(SOLUTION_MODULE_ROOT)
# Directory into which the per-block result CSVs are written
DOC_ROOT = "docs"


results = {}  # global, shared, variable to hold the results for all validation tests
|
|
|
|
26
|
|
|
|
27
|
|
|
|
28
|
|
|
def teardown_module(module):
    """ The teardown (module) function is called when all tests in this module are complete

    This function will collect and report on the results of all validation tests. That is, their correctness and their
    run-times. These reports are collated into ordered CSV files, each capturing up to 100 contiguous tests:

    * tests 1-100
    * tests 101-200
    * etc.

    These CSVs will be picked up by other code to ultimately build the documented project. These CSVs will contribute
    to the tables of solutions and their respective run-times.

    :param module: the module under teardown (i.e. :mod:`tests.validation_test`)
    :return: None
    """

    def build_csv(start_problem: int, end_problem: int) -> None:
        """ Helper function to build a CSV for 100 problems in a 10x10 grid """

        # Check that start_problem and end_problem define an aligned block of 100 problems
        assert isinstance(start_problem, int), "start_problem must be an integer"
        assert isinstance(end_problem, int), "end_problem must be an integer"
        assert start_problem % 100 == 1, "start_problem must be 1 modulo 100"
        assert end_problem % 100 == 0, "end_problem must be 0 modulo 100"
        assert start_problem < end_problem, "start_problem must be less than end_problem"
        assert end_problem - start_problem + 1 == 100, "start_problem and end_problem must be 100 apart"

        # Column headers are the final digit of each problem number (1..9, 0).
        # BUG FIX: the original wrote "***{}**" — three asterisks opening, two
        # closing — which is unbalanced RST bold markup; "**{}**" matches the
        # bold row labels written below.
        header = [""] + ["**{}**".format(i % 10) for i in range(1, 11)]

        path = os.path.join(DOC_ROOT, "{}-{}.csv".format(start_problem, end_problem))
        with open(path, "w", newline="") as fp:
            # Open a CSV file and create a header row
            cw = csv.writer(fp)
            cw.writerow(header)

            row = []  # temporary list to hold each row as it is populated

            for i in range(start_problem, end_problem + 1):
                # Add the row range to the start of a CSV row
                if i % 10 == 1:
                    row.append("**{} - {}**".format(i, i + 9))

                # Add the results for problem number i to the next cell in the CSV
                try:
                    if results[i]["correct"]:
                        row.append("|tick| :doc:`{:.2f} <solutions/{}>`".format(results[i]["time"], i))
                    else:
                        row.append("|warning| :doc:`{:.2f} <solutions/{}>`".format(results[i]["time"], i))
                except KeyError:
                    # KeyError caused by results[i], i.e. there isn't a solution for problem number i
                    row.append("|cross|")

                # Flush the 10-long row to the CSV file
                if i % 10 == 0:
                    cw.writerow(row)
                    row = []  # blank row for the next one

    # Construct CSVs in blocks of 100 problems (1-100, 101-200, ..., 601-700)
    for block_start in range(1, 700, 100):
        build_csv(block_start, block_start + 99)
96
|
|
|
|
97
|
|
|
|
98
|
|
|
class TestSolutions(unittest.TestCase):
    """ Container for dynamically generated tests.

    Starts empty; :func:`tests.validation_test.register_solutions` attaches one
    test method per discovered solution via :func:`setattr`.
    """
101
|
|
|
|
102
|
|
|
|
103
|
|
|
def make_failure(message: str):
    """ Create a unit-test function that fails unconditionally.

    Used to flag errors discovered during dynamic test generation (e.g. a
    missing solution module) through the normal test-reporting machinery.

    :param message: the description of the error
    :return: a unit test function
    """

    def _always_fail(self):
        # Report the generation-time error via the unittest framework
        self.fail(message)

    return _always_fail
114
|
|
|
|
115
|
|
|
|
116
|
|
|
def make_tst_function(description: str, problem_number: int, solver: Callable[[], Union[int, str]],
                      expected_answer: Union[int, str]):
    """ Build a test that computes the answer to the given problem and checks its correctness

    :param description: a label to attach to this test case
    :param problem_number: the Project Euler problem number
    :param solver: the zero-argument function that computes the answer
                   (FIX: annotated ``Callable[[], ...]`` — the original
                   ``Callable[[None], ...]`` declared a one-argument callable,
                   but ``solver`` is invoked with no arguments below)
    :param expected_answer: the expected answer to this problem
    :return: a unit test function
    """

    def test(self):
        # Compute the answer using the given solver, note the time taken.
        # perf_counter() is a monotonic clock, so the measured duration cannot
        # be skewed by system clock adjustments the way time() can.
        t0 = perf_counter()
        computed_answer = solver()
        t1 = perf_counter()

        # Record the results (correctness and run-time) in the global, shared, variable
        global results
        results[problem_number] = {"correct": computed_answer == expected_answer, "time": t1 - t0}

        # Check that the computed answer matches the expected one
        self.assertEqual(computed_answer, expected_answer, description)

    return test
141
|
|
|
|
142
|
|
|
|
143
|
|
|
def register_solutions():
    """ Dynamically load and run each solution present, test the computed answers match expected answers """

    def _register(test_name: str, test_func) -> None:
        # Attach the generated test method to the shared TestCase class
        setattr(TestSolutions, test_name, test_func)

    # Dynamically identify all solutions and for each solved problem, check the answer for correctness
    for importer, modname, is_package in pkgutil.iter_modules([SOLUTION_MODULE_ROOT]):
        # FIX: raw string — "\d" in a plain literal is an invalid escape
        # sequence (a warning in modern Python)
        rem = re.match(r"^problem(?P<problem_number>\d+)", modname)
        if rem is None:
            # FIX: the original assumed every module matched and crashed with
            # AttributeError on rem.group(...); skip modules that don't follow
            # the problemNNN naming scheme (e.g. helper modules)
            continue

        problem_number = int(rem.group("problem_number"))
        test_name = "test_problem_{:03d}".format(problem_number)

        # Load this solution
        try:
            mod = importlib.import_module("{}.{}".format(SOLUTION_MODULE_ROOT, modname))
        except ModuleNotFoundError:
            err_msg = "solution for problem {} doesn't currently exist.".format(problem_number)
            _register(test_name, make_failure(err_msg))
            continue  # cannot run the test

        # Retrieve the expected answer
        try:
            expected_answer = mod.expected_answer
        except AttributeError:
            err_msg = "expected answer for problem {} doesn't currently exist.".format(problem_number)
            _register(test_name, make_failure(err_msg))
            continue  # cannot validate answer without an expected one

        # Check that the expected answer has a value
        if expected_answer is None:
            err_msg = "expected answer for problem {} hasn't been set (i.e. is still None).".format(problem_number)
            _register(test_name, make_failure(err_msg))
            continue  # cannot validate answer without an expected one

        # Register a test function for this solution
        _register(test_name, make_tst_function(test_name, problem_number, mod.solve, expected_answer))
180
|
|
|
|
This check looks for invalid names across a range of different identifier kinds.
If the defaults do not match your requirements, you can configure the regular expressions to which identifiers must conform.
If your project includes a Pylint configuration file, the settings contained in that file take precedence.
To find out more about Pylint, refer to the official Pylint documentation.