|
1
|
|
|
import os |
|
2
|
|
|
import re |
|
3
|
|
|
import sys |
|
4
|
|
|
import unittest |
|
5
|
|
|
from unittest.mock import ANY, Mock |
|
6
|
|
|
|
|
7
|
|
|
from coalib.bearlib.abstractions.Linter import linter |
|
8
|
|
|
from coalib.results.Diff import Diff |
|
9
|
|
|
from coalib.results.Result import Result |
|
10
|
|
|
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY |
|
11
|
|
|
from coalib.results.SourceRange import SourceRange |
|
12
|
|
|
from coalib.settings.Section import Section |
|
13
|
|
|
|
|
14
|
|
|
|
|
15
|
|
|
def get_testfile_name(name):
    """
    Build the absolute path of a testfile that lives in the
    ``linter_test_files`` directory next to this module.

    :param name: The filename of the testfile to get the full path for.
    :return:     The full path to given testfile name.
    """
    # Resolve relative to this module's real location so the path is stable
    # regardless of the current working directory.
    here = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(here, "linter_test_files", name)
|
25
|
|
|
|
|
26
|
|
|
|
|
27
|
|
|
class LinterComponentTest(unittest.TestCase):
    """
    Unit tests for the ``linter`` decorator itself: argument validation,
    generated interface, prerequisite checks, output-stream handling and
    the built-in 'corrected'/'regex' output formats.
    """

    # Using `object` instead of an empty class results in inheritance problems
    # inside the linter decorator.
    class EmptyTestLinter:
        pass

    class ManualProcessingTestLinter:
        # Minimal handler that satisfies the decorator's requirement of a
        # `process_output` member; it deliberately does nothing.

        def process_output(self, *args, **kwargs):
            pass

    def setUp(self):
        # Fresh, empty section for every test.
        self.section = Section("TEST_SECTION")

    def test_decorator_invalid_parameters(self):
        """
        Unknown keyword arguments — and keywords that are only valid for a
        different output-format — raise ValueError naming the offender(s).
        """
        with self.assertRaises(ValueError) as cm:
            linter("some-executable", invalid_arg=88, ABC=2000)
        self.assertEqual(
            str(cm.exception),
            "Invalid keyword arguments provided: 'ABC', 'invalid_arg'")

        # diff_severity is only meaningful with output_format="corrected".
        with self.assertRaises(ValueError) as cm:
            linter("some-executable", diff_severity=RESULT_SEVERITY.MAJOR)
        self.assertEqual(str(cm.exception),
                         "Invalid keyword arguments provided: 'diff_severity'")

        with self.assertRaises(ValueError) as cm:
            linter("some-executable", result_message="Custom message")
        self.assertEqual(str(cm.exception),
                         "Invalid keyword arguments provided: "
                         "'result_message'")

        # output_regex belongs to the 'regex' format, not 'corrected'.
        with self.assertRaises(ValueError) as cm:
            linter("some-executable",
                   output_format="corrected",
                   output_regex=".*")
        self.assertEqual(str(cm.exception),
                         "Invalid keyword arguments provided: 'output_regex'")

        with self.assertRaises(ValueError) as cm:
            linter("some-executable",
                   output_format="corrected",
                   severity_map={})
        self.assertEqual(str(cm.exception),
                         "Invalid keyword arguments provided: 'severity_map'")

        # Fail message without a prerequisite_check_command is rejected.
        with self.assertRaises(ValueError) as cm:
            linter("some-executable",
                   prerequisite_check_fail_message="some_message")
        self.assertEqual(str(cm.exception),
                         "Invalid keyword arguments provided: "
                         "'prerequisite_check_fail_message'")

    def test_decorator_invalid_states(self):
        """
        Inconsistent decorator configurations raise ValueError with a
        descriptive message.
        """
        # At least one of stdout/stderr must be captured.
        with self.assertRaises(ValueError) as cm:
            linter("some-executable", use_stdout=False, use_stderr=False)
        self.assertEqual(str(cm.exception),
                         "No output streams provided at all.")

        with self.assertRaises(ValueError) as cm:
            linter("some-executable", output_format="INVALID")
        self.assertEqual(str(cm.exception),
                         "Invalid `output_format` specified.")

        # 'regex' format requires an output_regex.
        with self.assertRaises(ValueError) as cm:
            linter("some-executable", output_format="regex")
        self.assertEqual(
            str(cm.exception),
            "`output_regex` needed when specified output-format 'regex'.")

        # severity_map only makes sense if the regex captures `severity`.
        with self.assertRaises(ValueError) as cm:
            linter("some-executable",
                   output_format="regex",
                   output_regex="",
                   severity_map={})
        self.assertEqual(
            str(cm.exception),
            "Provided `severity_map` but named group `severity` is not used "
            "in `output_regex`.")

        # Without an output-format the wrapped class must define
        # process_output itself.
        with self.assertRaises(ValueError) as cm:
            linter("some-executable")(object)
        self.assertEqual(
            str(cm.exception),
            "`process_output` not provided by given class 'object'.")

        # Conversely, a user-defined process_output conflicts with a
        # built-in output-format.
        with self.assertRaises(ValueError) as cm:
            (linter("some-executable", output_format="regex", output_regex="")
             (self.ManualProcessingTestLinter))
        self.assertEqual(
            str(cm.exception),
            "Found `process_output` already defined by class "
            "'ManualProcessingTestLinter', but 'regex' output-format is "
            "specified.")

    def test_decorator_generated_default_interface(self):
        """The generated default create_arguments is abstract."""
        uut = linter("some-executable")(self.ManualProcessingTestLinter)
        with self.assertRaises(NotImplementedError):
            uut.create_arguments("filename", "content", None)

    def test_decorator_invalid_parameter_types(self):
        """Wrongly typed decorator parameters raise TypeError."""
        # Provide some invalid severity maps.
        with self.assertRaises(TypeError):
            linter("some-executable",
                   output_format="regex",
                   output_regex="(?P<severity>)",
                   severity_map=list())

        with self.assertRaises(TypeError):
            linter("some-executable",
                   output_format="regex",
                   output_regex="(?P<severity>)",
                   severity_map={3: 0})

        # Map values must be valid RESULT_SEVERITY values.
        with self.assertRaises(TypeError) as cm:
            linter("some-executable",
                   output_format="regex",
                   output_regex="(?P<severity>)",
                   severity_map={"critical": "invalid"})
        self.assertEqual(str(cm.exception),
                         "The value 'invalid' for key 'critical' inside given "
                         "severity-map is no valid severity value.")

        with self.assertRaises(TypeError) as cm:
            linter("some-executable",
                   output_format="regex",
                   output_regex="(?P<severity>)",
                   severity_map={"critical-error": 389274234})
        self.assertEqual(str(cm.exception),
                         "Invalid severity value 389274234 for key "
                         "'critical-error' inside given severity-map.")

        # Other type-error test cases.

        with self.assertRaises(TypeError):
            linter("some-executable",
                   output_format="regex",
                   output_regex="(?P<message>)",
                   result_message=None)

        with self.assertRaises(TypeError):
            linter("some-executable",
                   output_format="corrected",
                   result_message=list())

        with self.assertRaises(TypeError) as cm:
            linter("some-executable",
                   output_format="corrected",
                   diff_severity=999888777)
        self.assertEqual(str(cm.exception),
                         "Invalid value for `diff_severity`: 999888777")

        with self.assertRaises(TypeError):
            linter("some-executable",
                   prerequisite_check_command=("command",),
                   prerequisite_check_fail_message=382983)

    def test_get_executable(self):
        """get_executable() returns the executable passed to the decorator."""
        uut = linter("some-executable")(self.ManualProcessingTestLinter)
        self.assertEqual(uut.get_executable(), "some-executable")

    def test_check_prerequisites(self):
        """
        check_prerequisites() reports missing executables and failing
        prerequisite commands, honouring the custom info/fail messages.
        """
        # sys.executable certainly exists.
        uut = linter(sys.executable)(self.ManualProcessingTestLinter)
        self.assertTrue(uut.check_prerequisites())

        uut = (linter("invalid_nonexisting_programv412")
               (self.ManualProcessingTestLinter))
        self.assertEqual(uut.check_prerequisites(),
                         "'invalid_nonexisting_programv412' is not installed.")

        # executable_check_fail_info is appended to the default message.
        uut = (linter("invalid_nonexisting_programv412",
                      executable_check_fail_info="You can't install it.")
               (self.ManualProcessingTestLinter))
        self.assertEqual(uut.check_prerequisites(),
                         "'invalid_nonexisting_programv412' is not installed. "
                         "You can't install it.")

        uut = (linter(sys.executable,
                      prerequisite_check_command=(sys.executable, "--version"))
               (self.ManualProcessingTestLinter))
        self.assertTrue(uut.check_prerequisites())

        uut = (linter(sys.executable,
                      prerequisite_check_command=("invalid_programv413",))
               (self.ManualProcessingTestLinter))
        self.assertEqual(uut.check_prerequisites(),
                         "Prerequisite check failed.")

        # A custom fail message replaces the default one.
        uut = (linter(sys.executable,
                      prerequisite_check_command=("invalid_programv413",),
                      prerequisite_check_fail_message="NOPE")
               (self.ManualProcessingTestLinter))
        self.assertEqual(uut.check_prerequisites(), "NOPE")

    def test_output_stream(self):
        """
        use_stdout/use_stderr select which stream(s) reach process_output;
        with both enabled, output arrives as a (stdout, stderr) tuple.
        """
        process_output_mock = Mock()

        class TestLinter:

            @staticmethod
            def process_output(output, filename, file):
                process_output_mock(output, filename, file)

            @staticmethod
            def create_arguments(filename, file, config_file):
                # Small program that writes to both streams.
                code = "\n".join(["import sys",
                                  "print('hello stdout')",
                                  "print('hello stderr', file=sys.stderr)"])
                return "-c", code

        uut = (linter(sys.executable, use_stdout=True)
               (TestLinter)
               (self.section, None))
        uut.run("", [])

        process_output_mock.assert_called_once_with("hello stdout\n", "", [])
        process_output_mock.reset_mock()

        uut = (linter(sys.executable, use_stdout=False, use_stderr=True)
               (TestLinter)
               (self.section, None))
        uut.run("", [])

        process_output_mock.assert_called_once_with("hello stderr\n", "", [])
        process_output_mock.reset_mock()

        uut = (linter(sys.executable, use_stdout=True, use_stderr=True)
               (TestLinter)
               (self.section, None))

        uut.run("", [])

        process_output_mock.assert_called_once_with(("hello stdout\n",
                                                     "hello stderr\n"), "", [])

    def test_process_output_corrected(self):
        """
        The 'corrected' output-format diffs tool output against the original
        file and yields one Result per split diff.
        """
        uut = (linter(sys.executable, output_format="corrected")
               (self.EmptyTestLinter)
               (self.section, None))

        original = ["void main() {\n", "return 09;\n", "}\n"]
        fixed = ["void main()\n", "{\n", "return 9;\n", "}\n"]
        fixed_string = "".join(fixed)

        results = list(uut.process_output(fixed_string,
                                          "some-file.c",
                                          original))

        diffs = list(Diff.from_string_arrays(original, fixed).split_diff())
        expected = [Result.from_values(uut,
                                       "Inconsistency found.",
                                       "some-file.c",
                                       1, None, 2, None,
                                       RESULT_SEVERITY.NORMAL,
                                       diffs={"some-file.c": diffs[0]})]

        self.assertEqual(results, expected)

        # Test when providing a sequence as output.

        results = list(uut.process_output([fixed_string, fixed_string],
                                          "some-file.c",
                                          original))
        self.assertEqual(results, 2 * expected)

        # Test diff_distance

        uut = (linter(sys.executable,
                      output_format="corrected",
                      diff_distance=-1)
               (self.EmptyTestLinter)
               (self.section, None))

        results = list(uut.process_output(fixed_string,
                                          "some-file.c",
                                          original))
        self.assertEqual(len(results), 2)

    def test_process_output_regex(self):
        """
        The 'regex' output-format maps the named regex groups onto Result
        fields, including origin and additional_info.
        """
        # Also test the case when an unknown severity is matched.
        test_output = ("12:4-14:0-Serious issue (error) -> ORIGIN=X -> D\n"
                       "0:0-0:1-This is a warning (warning) -> ORIGIN=Y -> A\n"
                       "813:77-1024:32-Just a note (info) -> ORIGIN=Z -> C\n"
                       "0:0-0:0-Some unknown sev (???) -> ORIGIN=W -> B\n")
        regex = (r"(?P<line>\d+):(?P<column>\d+)-"
                 r"(?P<end_line>\d+):(?P<end_column>\d+)-"
                 r"(?P<message>.*) \((?P<severity>.*)\) -> "
                 r"ORIGIN=(?P<origin>.*) -> (?P<additional_info>.*)")

        uut = (linter(sys.executable,
                      output_format="regex",
                      output_regex=regex)
               (self.EmptyTestLinter)
               (self.section, None))
        uut.warn = Mock()

        sample_file = "some-file.xtx"
        results = list(uut.process_output(test_output, sample_file, [""]))
        expected = [Result.from_values("EmptyTestLinter (X)",
                                       "Serious issue",
                                       sample_file,
                                       12, 4, 14, 0,
                                       RESULT_SEVERITY.MAJOR,
                                       additional_info="D"),
                    Result.from_values("EmptyTestLinter (Y)",
                                       "This is a warning",
                                       sample_file,
                                       0, 0, 0, 1,
                                       RESULT_SEVERITY.NORMAL,
                                       additional_info="A"),
                    Result.from_values("EmptyTestLinter (Z)",
                                       "Just a note",
                                       sample_file,
                                       813, 77, 1024, 32,
                                       RESULT_SEVERITY.INFO,
                                       additional_info="C"),
                    Result.from_values("EmptyTestLinter (W)",
                                       "Some unknown sev",
                                       sample_file,
                                       0, 0, 0, 0,
                                       RESULT_SEVERITY.NORMAL,
                                       additional_info="B")]

        self.assertEqual(results, expected)
        # Unknown severities fall back to NORMAL and emit a warning.
        uut.warn.assert_called_once_with(
            "'???' not found in severity-map. Assuming "
            "`RESULT_SEVERITY.NORMAL`.")

        # Test when providing a sequence as output.
        test_output = ["",
                       "12:4-14:0-Serious issue (error) -> ORIGIN=X -> XYZ\n"]
        results = list(uut.process_output(test_output, sample_file, [""]))
        expected = [Result.from_values("EmptyTestLinter (X)",
                                       "Serious issue",
                                       sample_file,
                                       12, 4, 14, 0,
                                       RESULT_SEVERITY.MAJOR,
                                       additional_info="XYZ")]

        self.assertEqual(results, expected)

        # Test with using `result_message` parameter.
        uut = (linter(sys.executable,
                      output_format="regex",
                      output_regex=regex,
                      result_message="Hello world")
               (self.EmptyTestLinter)
               (self.section, None))

        results = list(uut.process_output(test_output, sample_file, [""]))
        expected = [Result.from_values("EmptyTestLinter (X)",
                                       "Hello world",
                                       sample_file,
                                       12, 4, 14, 0,
                                       RESULT_SEVERITY.MAJOR,
                                       additional_info="XYZ")]

        self.assertEqual(results, expected)

    def test_minimal_regex(self):
        """A regex with no named groups still yields bare Results on match."""
        uut = (linter(sys.executable,
                      output_format="regex",
                      output_regex="an_issue")
               (self.EmptyTestLinter)
               (self.section, None))

        results = list(uut.process_output(['not an issue'], 'file', [""]))
        self.assertEqual(results, [])

        results = list(uut.process_output(['an_issue'], 'file', [""]))
        self.assertEqual(results, [Result.from_values("EmptyTestLinter", "",
                                                      file="file")])

    def test_get_non_optional_settings(self):
        """
        Settings are collected from both create_arguments and
        generate_config; docstring param descriptions are picked up.
        """
        class Handler(self.ManualProcessingTestLinter):

            @staticmethod
            def create_arguments(filename, file, config_file, param_x: int):
                pass

            @staticmethod
            def generate_config(filename, file, superparam):
                """
                :param superparam: A superparam!
                """
                return None

        uut = linter(sys.executable)(Handler)

        self.assertEqual(uut.get_non_optional_settings(),
                         {"param_x": ("No description given.", int),
                          "superparam": ("A superparam!", None)})

    def test_process_output_metadata_omits_on_builtin_formats(self):
        """
        Built-in output-format parameters are hidden from metadata, while
        parameters of a user-defined process_output are exposed.
        """
        uut = (linter(executable='', output_format='corrected')
               (self.EmptyTestLinter))
        # diff_severity and result_message should now not occur inside the
        # metadata definition.
        self.assertNotIn("diff_severity", uut.get_metadata().optional_params)
        self.assertNotIn("result_message", uut.get_metadata().optional_params)
        self.assertNotIn("diff_severity",
                         uut.get_metadata().non_optional_params)
        self.assertNotIn("result_message",
                         uut.get_metadata().non_optional_params)

        # But every parameter manually defined in process_output shall appear
        # inside the metadata signature.
        class Handler:

            @staticmethod
            def create_arguments(filename, file, config_file):
                pass

            @staticmethod
            def process_output(output, filename, file, diff_severity):
                pass

        uut = linter(executable='')(Handler)
        self.assertIn("diff_severity", uut.get_metadata().non_optional_params)

    def test_section_settings_forwarding(self):
        """
        Section values are converted to the annotated types and forwarded to
        create_arguments, generate_config and process_output.
        """
        create_arguments_mock = Mock()
        generate_config_mock = Mock()
        process_output_mock = Mock()

        class Handler(self.ManualProcessingTestLinter):

            @staticmethod
            def create_arguments(filename, file, config_file, my_param: int):
                create_arguments_mock(filename, file, config_file, my_param)
                # Execute python and do nothing.
                return "-c", "print('coala!')"

            @staticmethod
            def generate_config(filename, file, my_config_param: int):
                generate_config_mock(filename, file, my_config_param)
                return None

            def process_output(self, output, filename, file, makman2: str):
                process_output_mock(output, filename, file, makman2)

        self.section["my_param"] = "109"
        self.section["my_config_param"] = "88"
        self.section["makman2"] = "is cool"

        uut = linter(sys.executable)(Handler)(self.section, None)

        self.assertIsNotNone(list(uut.execute(filename="some_file.cs",
                                              file=[])))
        # "109" from the section arrives as int 109, etc.
        create_arguments_mock.assert_called_once_with(
            "some_file.cs", [], None, 109)
        generate_config_mock.assert_called_once_with("some_file.cs", [], 88)
        process_output_mock.assert_called_once_with(
            "coala!\n", "some_file.cs", [], "is cool")

    def test_section_settings_defaults_forwarding(self):
        """
        Defaults apply when the section lacks a setting; section values
        override the defaults when present.
        """
        create_arguments_mock = Mock()
        generate_config_mock = Mock()
        process_output_mock = Mock()

        class Handler:

            @staticmethod
            def generate_config(filename, file, some_default: str="x"):
                generate_config_mock(filename, file, some_default)
                return None

            @staticmethod
            def create_arguments(filename, file, config_file, default: int=3):
                create_arguments_mock(
                    filename, file, config_file, default)
                return "-c", "print('hello')"

            @staticmethod
            def process_output(output, filename, file, xxx: int=64):
                process_output_mock(output, filename, file, xxx)

        uut = linter(sys.executable)(Handler)(self.section, None)

        # First run: section is empty, so all defaults are used.
        self.assertIsNotNone(list(uut.execute(filename="abc.py", file=[])))
        create_arguments_mock.assert_called_once_with("abc.py", [], None, 3)
        generate_config_mock.assert_called_once_with("abc.py", [], "x")
        process_output_mock.assert_called_once_with(
            "hello\n", "abc.py", [], 64)

        create_arguments_mock.reset_mock()
        generate_config_mock.reset_mock()
        process_output_mock.reset_mock()

        # Second run: section values take precedence over defaults.
        self.section["default"] = "1000"
        self.section["some_default"] = "xyz"
        self.section["xxx"] = "-50"
        self.assertIsNotNone(list(uut.execute(filename="def.py", file=[])))
        create_arguments_mock.assert_called_once_with("def.py", [], None, 1000)
        generate_config_mock.assert_called_once_with("def.py", [], "xyz")
        process_output_mock.assert_called_once_with(
            "hello\n", "def.py", [], -50)

    def test_invalid_arguments(self):
        """create_arguments returning None makes run() yield None."""

        class InvalidArgumentsLinter(self.ManualProcessingTestLinter):

            @staticmethod
            def create_arguments(filename, file, config_file):
                return None

        uut = (linter(sys.executable)(InvalidArgumentsLinter)
               (self.section, None))
        self.assertEqual(uut.run("", []), None)

    def test_generate_config(self):
        """
        _create_config yields None without generate_config, and otherwise
        writes a temp file (with config_suffix) that is removed afterwards.
        """
        uut = linter("")(self.ManualProcessingTestLinter)
        with uut._create_config("filename", []) as config_file:
            self.assertIsNone(config_file)

        class ConfigurationTestLinter(self.ManualProcessingTestLinter):

            @staticmethod
            def generate_config(filename, file, val):
                return "config_value = " + str(val)

        uut = linter("", config_suffix=".xml")(ConfigurationTestLinter)
        with uut._create_config("filename", [], val=88) as config_file:
            self.assertTrue(os.path.isfile(config_file))
            self.assertEqual(config_file[-4:], ".xml")
            with open(config_file, mode="r") as fl:
                self.assertEqual(fl.read(), "config_value = 88")
        # The temporary config file is deleted on context exit.
        self.assertFalse(os.path.isfile(config_file))

    def test_metaclass_repr(self):
        """repr of the generated class mentions class name and executable."""
        uut = linter("my-tool")(self.ManualProcessingTestLinter)
        self.assertEqual(
            repr(uut),
            "<ManualProcessingTestLinter linter class (wrapping 'my-tool')>")

        # Test also whether derivatives change the class name accordingly.
        class DerivedLinter(uut):
            pass
        self.assertEqual(repr(DerivedLinter),
                         "<DerivedLinter linter class (wrapping 'my-tool')>")

    def test_repr(self):
        """repr of an instance mentions the wrapped executable and its id."""
        uut = (linter(sys.executable)
               (self.ManualProcessingTestLinter)
               (self.section, None))

        self.assertRegex(
            repr(uut),
            "<ManualProcessingTestLinter linter object \\(wrapping " +
            re.escape(repr(sys.executable)) + "\\) at 0x[a-fA-F0-9]+>")
|
579
|
|
|
|
|
580
|
|
|
class LinterReallifeTest(unittest.TestCase):
    """
    End-to-end tests that run a real helper program
    (``linter_test_files/test_linter.py``) through the ``linter`` decorator,
    covering stdin/stderr usage, config files and the corrected format.
    """

    def setUp(self):
        self.section = Section("REALLIFE_TEST_SECTION")

        # The fake linter executable and the regex/severity-map matching the
        # output it produces.
        self.test_program_path = get_testfile_name("test_linter.py")
        self.test_program_regex = (
            r"L(?P<line>\d+)C(?P<column>\d+)-"
            r"L(?P<end_line>\d+)C(?P<end_column>\d+):"
            r" (?P<message>.*) \| (?P<severity>.+) SEVERITY")
        self.test_program_severity_map = {"MAJOR": RESULT_SEVERITY.MAJOR}

        # Two input files the fake linter is run against.
        self.testfile_path = get_testfile_name("test_file.txt")
        with open(self.testfile_path, mode="r") as fl:
            self.testfile_content = fl.read().splitlines(keepends=True)

        self.testfile2_path = get_testfile_name("test_file2.txt")
        with open(self.testfile2_path, mode="r") as fl:
            self.testfile2_content = fl.read().splitlines(keepends=True)

    def test_nostdin_nostderr_noconfig_nocorrection(self):
        """Plain stdout/regex run: results parsed from the tool's stdout."""
        create_arguments_mock = Mock()

        class Handler:

            @staticmethod
            def create_arguments(filename, file, config_file):
                create_arguments_mock(filename, file, config_file)
                return self.test_program_path, filename

        uut = (linter(sys.executable,
                      output_format="regex",
                      output_regex=self.test_program_regex,
                      severity_map=self.test_program_severity_map)
               (Handler)
               (self.section, None))

        results = list(uut.run(self.testfile_path, self.testfile_content))
        expected = [Result.from_values(uut,
                                       "Invalid char ('0')",
                                       self.testfile_path,
                                       3, 0, 3, 1,
                                       RESULT_SEVERITY.MAJOR),
                    Result.from_values(uut,
                                       "Invalid char ('.')",
                                       self.testfile_path,
                                       5, 0, 5, 1,
                                       RESULT_SEVERITY.MAJOR),
                    Result.from_values(uut,
                                       "Invalid char ('p')",
                                       self.testfile_path,
                                       9, 0, 9, 1,
                                       RESULT_SEVERITY.MAJOR)]

        self.assertEqual(results, expected)
        create_arguments_mock.assert_called_once_with(
            self.testfile_path, self.testfile_content, None)

    def test_stdin_stderr_noconfig_nocorrection(self):
        """File content fed via stdin; results parsed from stderr."""
        create_arguments_mock = Mock()

        class Handler:

            @staticmethod
            def create_arguments(filename, file, config_file):
                create_arguments_mock(filename, file, config_file)
                return (self.test_program_path,
                        "--use_stderr",
                        "--use_stdin",
                        filename)

        uut = (linter(sys.executable,
                      use_stdin=True,
                      use_stdout=False,
                      use_stderr=True,
                      output_format="regex",
                      output_regex=self.test_program_regex,
                      severity_map=self.test_program_severity_map)
               (Handler)
               (self.section, None))

        results = list(uut.run(self.testfile2_path, self.testfile2_content))
        expected = [Result.from_values(uut,
                                       "Invalid char ('X')",
                                       self.testfile2_path,
                                       0, 0, 0, 1,
                                       RESULT_SEVERITY.MAJOR),
                    Result.from_values(uut,
                                       "Invalid char ('i')",
                                       self.testfile2_path,
                                       4, 0, 4, 1,
                                       RESULT_SEVERITY.MAJOR)]

        self.assertEqual(results, expected)
        create_arguments_mock.assert_called_once_with(
            self.testfile2_path, self.testfile2_content, None)

    def test_nostdin_nostderr_noconfig_correction(self):
        """
        'corrected' format: corrected output is diffed against the original
        file, honouring diff_severity and result_message.
        """
        create_arguments_mock = Mock()

        class Handler:

            @staticmethod
            def create_arguments(filename, file, config_file):
                create_arguments_mock(filename, file, config_file)
                return self.test_program_path, "--correct", filename

        uut = (linter(sys.executable,
                      output_format="corrected",
                      diff_severity=RESULT_SEVERITY.INFO,
                      result_message="Custom message")
               (Handler)
               (self.section, None))

        results = list(uut.run(self.testfile_path, self.testfile_content))

        expected_correction = [s + "\n"
                               for s in ["+", "-", "*", "++", "-", "-", "+"]]

        diffs = list(Diff.from_string_arrays(
            self.testfile_content,
            expected_correction).split_diff())

        expected = [Result(uut, "Custom message",
                           affected_code=(
                               SourceRange.from_values(self.testfile_path, 4),
                               SourceRange.from_values(self.testfile_path, 6)),
                           severity=RESULT_SEVERITY.INFO,
                           diffs={self.testfile_path: diffs[0]}),
                    Result.from_values(uut,
                                       "Custom message",
                                       self.testfile_path,
                                       10, None, 10, None,
                                       RESULT_SEVERITY.INFO,
                                       diffs={self.testfile_path: diffs[1]})]

        self.assertEqual(results, expected)
        create_arguments_mock.assert_called_once_with(
            self.testfile_path, self.testfile_content, None)

    def test_stdin_stdout_stderr_config_nocorrection(self):
        """
        generate_config drives the tool via a temp config file; extra run()
        kwargs are delegated to both generate_config and create_arguments.
        """
        create_arguments_mock = Mock()
        generate_config_mock = Mock()

        class Handler:

            @staticmethod
            def generate_config(filename, file, some_val):
                # some_val shall only test the argument delegation from run().
                generate_config_mock(filename, file, some_val)
                return "\n".join(["use_stdin", "use_stderr"])

            @staticmethod
            def create_arguments(filename, file, config_file, some_val):
                create_arguments_mock(filename, file, config_file, some_val)
                return self.test_program_path, "--config", config_file

        uut = (linter(sys.executable,
                      use_stdin=True,
                      use_stderr=True,
                      output_format="regex",
                      output_regex=self.test_program_regex,
                      severity_map=self.test_program_severity_map,
                      result_message="Invalid char provided!")
               (Handler)
               (self.section, None))

        results = list(uut.run(self.testfile_path,
                               self.testfile_content,
                               some_val=33))
        expected = [Result.from_values(uut,
                                       "Invalid char provided!",
                                       self.testfile_path,
                                       3, 0, 3, 1,
                                       RESULT_SEVERITY.MAJOR),
                    Result.from_values(uut,
                                       "Invalid char provided!",
                                       self.testfile_path,
                                       5, 0, 5, 1,
                                       RESULT_SEVERITY.MAJOR),
                    Result.from_values(uut,
                                       "Invalid char provided!",
                                       self.testfile_path,
                                       9, 0, 9, 1,
                                       RESULT_SEVERITY.MAJOR)]

        self.assertEqual(results, expected)
        # The config-file path is generated, so only check it was non-None.
        create_arguments_mock.assert_called_once_with(
            self.testfile_path, self.testfile_content, ANY, 33)
        self.assertIsNotNone(create_arguments_mock.call_args[0][2])
        generate_config_mock.assert_called_once_with(
            self.testfile_path, self.testfile_content, 33)

    def test_stdin_stderr_config_correction(self):
        """
        'corrected' format combined with a config file and stdin/stderr;
        distinct kwargs go to generate_config vs. create_arguments.
        """
        create_arguments_mock = Mock()
        generate_config_mock = Mock()

        # `some_value_A` and `some_value_B` are used to test the different
        # delegation to `generate_config()` and `create_arguments()`
        # accordingly.
        class Handler:

            @staticmethod
            def generate_config(filename, file, some_value_A):
                generate_config_mock(filename, file, some_value_A)
                return "\n".join(["use_stdin", "use_stderr", "correct"])

            @staticmethod
            def create_arguments(filename, file, config_file, some_value_B):
                create_arguments_mock(filename, file, config_file,
                                      some_value_B)
                return self.test_program_path, "--config", config_file

        uut = (linter(sys.executable,
                      use_stdin=True,
                      use_stdout=False,
                      use_stderr=True,
                      output_format="corrected",
                      config_suffix=".conf")
               (Handler)
               (self.section, None))

        results = list(uut.run(self.testfile2_path,
                               self.testfile2_content,
                               some_value_A=124,
                               some_value_B=-78))

        expected_correction = [s + "\n" for s in ["+", "/", "/", "-"]]

        diffs = list(Diff.from_string_arrays(
            self.testfile2_content,
            expected_correction).split_diff())

        expected = [Result.from_values(uut,
                                       "Inconsistency found.",
                                       self.testfile2_path,
                                       1, None, 1, None,
                                       RESULT_SEVERITY.NORMAL,
                                       diffs={self.testfile2_path: diffs[0]}),
                    Result.from_values(uut,
                                       "Inconsistency found.",
                                       self.testfile2_path,
                                       5, None, 5, None,
                                       RESULT_SEVERITY.NORMAL,
                                       diffs={self.testfile2_path: diffs[1]})]

        self.assertEqual(results, expected)
        create_arguments_mock.assert_called_once_with(
            self.testfile2_path, self.testfile2_content, ANY, -78)
        # The generated config file carries the configured suffix.
        self.assertEqual(create_arguments_mock.call_args[0][2][-5:], ".conf")
        generate_config_mock.assert_called_once_with(
            self.testfile2_path, self.testfile2_content, 124)