| Total Lines | 575 |
| Duplicated Lines | 13.22 % |
| Changes | 1 | ||
| Bugs | 0 | Features | 0 |
Duplicate code is one of the most pungent code smells. A rule that is often used is to re-structure code once it is duplicated in three or more places.
Common duplication problems, and corresponding solutions are:
| 1 | import platform |
||
| 29 | class LinterComponentTest(unittest.TestCase): |
||
| 30 | |||
| 31 | # Using `object` instead of an empty class results in inheritance problems |
||
| 32 | # inside the linter decorator. |
||
| 33 | class EmptyTestLinter: |
||
| 34 | pass |
||
| 35 | |||
| 36 | class RootDirTestLinter: |
||
| 37 | |||
| 38 | def create_arguments(self, *args, **kwargs): |
||
|
|
|||
| 39 | return tuple() |
||
| 40 | |||
| 41 | def get_config_dir(self): |
||
| 42 | View Code Duplication | return '/' |
|
| 43 | |||
| 44 | def process_output(self, output, *args, **kwargs): |
||
| 45 | assert output == '/\n', ("The linter doesn't run the command in " |
||
| 46 | "the right directory!") |
||
| 47 | |||
| 48 | class ManualProcessingTestLinter: |
||
| 49 | |||
| 50 | def process_output(self, *args, **kwargs): |
||
| 51 | pass |
||
| 52 | |||
| 53 | def setUp(self): |
||
| 54 | self.section = Section("TEST_SECTION") |
||
| 55 | |||
| 56 | def test_decorator_invalid_parameters(self): |
||
| 57 | with self.assertRaises(ValueError) as cm: |
||
| 58 | linter("some-executable", invalid_arg=88, ABC=2000) |
||
| 59 | self.assertEqual( |
||
| 60 | str(cm.exception), |
||
| 61 | "Invalid keyword arguments provided: 'ABC', 'invalid_arg'") |
||
| 62 | |||
| 63 | with self.assertRaises(ValueError) as cm: |
||
| 64 | linter("some-executable", diff_severity=RESULT_SEVERITY.MAJOR) |
||
| 65 | self.assertEqual(str(cm.exception), |
||
| 66 | "Invalid keyword arguments provided: 'diff_severity'") |
||
| 67 | |||
| 68 | with self.assertRaises(ValueError) as cm: |
||
| 69 | linter("some-executable", result_message="Custom message") |
||
| 70 | self.assertEqual(str(cm.exception), |
||
| 71 | "Invalid keyword arguments provided: " |
||
| 72 | "'result_message'") |
||
| 73 | |||
| 74 | with self.assertRaises(ValueError) as cm: |
||
| 75 | linter("some-executable", |
||
| 76 | output_format="corrected", |
||
| 77 | output_regex=".*") |
||
| 78 | self.assertEqual(str(cm.exception), |
||
| 79 | "Invalid keyword arguments provided: 'output_regex'") |
||
| 80 | |||
| 81 | View Code Duplication | with self.assertRaises(ValueError) as cm: |
|
| 82 | linter("some-executable", |
||
| 83 | output_format="corrected", |
||
| 84 | severity_map={}) |
||
| 85 | self.assertEqual(str(cm.exception), |
||
| 86 | "Invalid keyword arguments provided: 'severity_map'") |
||
| 87 | |||
| 88 | with self.assertRaises(ValueError) as cm: |
||
| 89 | linter("some-executable", |
||
| 90 | prerequisite_check_fail_message="some_message") |
||
| 91 | self.assertEqual(str(cm.exception), |
||
| 92 | "Invalid keyword arguments provided: " |
||
| 93 | "'prerequisite_check_fail_message'") |
||
| 94 | |||
| 95 | def test_decorator_invalid_states(self): |
||
| 96 | with self.assertRaises(ValueError) as cm: |
||
| 97 | linter("some-executable", use_stdout=False, use_stderr=False) |
||
| 98 | self.assertEqual(str(cm.exception), |
||
| 99 | "No output streams provided at all.") |
||
| 100 | |||
| 101 | with self.assertRaises(ValueError) as cm: |
||
| 102 | linter("some-executable", output_format="INVALID") |
||
| 103 | self.assertEqual(str(cm.exception), |
||
| 104 | "Invalid `output_format` specified.") |
||
| 105 | |||
| 106 | with self.assertRaises(ValueError) as cm: |
||
| 107 | linter("some-executable", output_format="regex") |
||
| 108 | self.assertEqual( |
||
| 109 | str(cm.exception), |
||
| 110 | "`output_regex` needed when specified output-format 'regex'.") |
||
| 111 | |||
| 112 | with self.assertRaises(ValueError) as cm: |
||
| 113 | linter("some-executable", |
||
| 114 | output_format="regex", |
||
| 115 | output_regex="", |
||
| 116 | severity_map={}) |
||
| 117 | self.assertEqual( |
||
| 118 | str(cm.exception), |
||
| 119 | "Provided `severity_map` but named group `severity` is not used " |
||
| 120 | "in `output_regex`.") |
||
| 121 | |||
| 122 | with self.assertRaises(ValueError) as cm: |
||
| 123 | linter("some-executable")(object) |
||
| 124 | self.assertEqual( |
||
| 125 | str(cm.exception), |
||
| 126 | "`process_output` not provided by given class 'object'.") |
||
| 127 | |||
| 128 | with self.assertRaises(ValueError) as cm: |
||
| 129 | (linter("some-executable", output_format="regex", output_regex="") |
||
| 130 | (self.ManualProcessingTestLinter)) |
||
| 131 | self.assertEqual( |
||
| 132 | str(cm.exception), |
||
| 133 | "Found `process_output` already defined by class " |
||
| 134 | "'ManualProcessingTestLinter', but 'regex' output-format is " |
||
| 135 | "specified.") |
||
| 136 | |||
| 137 | def test_decorator_generated_default_interface(self): |
||
| 138 | uut = linter("some-executable")(self.ManualProcessingTestLinter) |
||
| 139 | with self.assertRaises(NotImplementedError): |
||
| 140 | uut.create_arguments("filename", "content", None) |
||
| 141 | |||
| 142 | def test_decorator_invalid_parameter_types(self): |
||
| 143 | # Provide some invalid severity maps. |
||
| 144 | with self.assertRaises(TypeError): |
||
| 145 | linter("some-executable", |
||
| 146 | output_format="regex", |
||
| 147 | output_regex="(?P<severity>)", |
||
| 148 | severity_map=list()) |
||
| 149 | |||
| 150 | with self.assertRaises(TypeError): |
||
| 151 | linter("some-executable", |
||
| 152 | output_format="regex", |
||
| 153 | output_regex="(?P<severity>)", |
||
| 154 | severity_map={3: 0}) |
||
| 155 | |||
| 156 | with self.assertRaises(TypeError) as cm: |
||
| 157 | linter("some-executable", |
||
| 158 | output_format="regex", |
||
| 159 | output_regex="(?P<severity>)", |
||
| 160 | severity_map={"critical": "invalid"}) |
||
| 161 | self.assertEqual(str(cm.exception), |
||
| 162 | "The value 'invalid' for key 'critical' inside given " |
||
| 163 | "severity-map is no valid severity value.") |
||
| 164 | |||
| 165 | with self.assertRaises(TypeError) as cm: |
||
| 166 | linter("some-executable", |
||
| 167 | output_format="regex", |
||
| 168 | output_regex="(?P<severity>)", |
||
| 169 | severity_map={"critical-error": 389274234}) |
||
| 170 | self.assertEqual(str(cm.exception), |
||
| 171 | "Invalid severity value 389274234 for key " |
||
| 172 | "'critical-error' inside given severity-map.") |
||
| 173 | |||
| 174 | # Other type-error test cases. |
||
| 175 | |||
| 176 | with self.assertRaises(TypeError): |
||
| 177 | linter("some-executable", |
||
| 178 | output_format="regex", |
||
| 179 | output_regex="(?P<message>)", |
||
| 180 | result_message=None) |
||
| 181 | |||
| 182 | with self.assertRaises(TypeError): |
||
| 183 | linter("some-executable", |
||
| 184 | output_format="corrected", |
||
| 185 | result_message=list()) |
||
| 186 | |||
| 187 | with self.assertRaises(TypeError) as cm: |
||
| 188 | linter("some-executable", |
||
| 189 | output_format="corrected", |
||
| 190 | diff_severity=999888777) |
||
| 191 | self.assertEqual(str(cm.exception), |
||
| 192 | "Invalid value for `diff_severity`: 999888777") |
||
| 193 | |||
| 194 | with self.assertRaises(TypeError): |
||
| 195 | linter("some-executable", |
||
| 196 | prerequisite_check_command=("command",), |
||
| 197 | prerequisite_check_fail_message=382983) |
||
| 198 | |||
| 199 | def test_get_executable(self): |
||
| 200 | uut = linter("some-executable")(self.ManualProcessingTestLinter) |
||
| 201 | self.assertEqual(uut.get_executable(), "some-executable") |
||
| 202 | |||
| 203 | def test_check_prerequisites(self): |
||
| 204 | uut = linter(sys.executable)(self.ManualProcessingTestLinter) |
||
| 205 | self.assertTrue(uut.check_prerequisites()) |
||
| 206 | |||
| 207 | uut = (linter("invalid_nonexisting_programv412") |
||
| 208 | (self.ManualProcessingTestLinter)) |
||
| 209 | self.assertEqual(uut.check_prerequisites(), |
||
| 210 | "'invalid_nonexisting_programv412' is not installed.") |
||
| 211 | |||
| 212 | uut = (linter("invalid_nonexisting_programv412", |
||
| 213 | executable_check_fail_info="You can't install it.") |
||
| 214 | (self.ManualProcessingTestLinter)) |
||
| 215 | self.assertEqual(uut.check_prerequisites(), |
||
| 216 | "'invalid_nonexisting_programv412' is not installed. " |
||
| 217 | "You can't install it.") |
||
| 218 | |||
| 219 | uut = (linter(sys.executable, |
||
| 220 | prerequisite_check_command=(sys.executable, "--version")) |
||
| 221 | (self.ManualProcessingTestLinter)) |
||
| 222 | self.assertTrue(uut.check_prerequisites()) |
||
| 223 | |||
| 224 | uut = (linter(sys.executable, |
||
| 225 | prerequisite_check_command=("invalid_programv413",)) |
||
| 226 | (self.ManualProcessingTestLinter)) |
||
| 227 | self.assertEqual(uut.check_prerequisites(), |
||
| 228 | "Prerequisite check failed.") |
||
| 229 | |||
| 230 | uut = (linter(sys.executable, |
||
| 231 | prerequisite_check_command=("invalid_programv413",), |
||
| 232 | prerequisite_check_fail_message="NOPE") |
||
| 233 | (self.ManualProcessingTestLinter)) |
||
| 234 | self.assertEqual(uut.check_prerequisites(), "NOPE") |
||
| 235 | |||
| 236 | def test_output_stream(self): |
||
| 237 | process_output_mock = Mock() |
||
| 238 | |||
| 239 | class TestLinter: |
||
| 240 | |||
| 241 | @staticmethod |
||
| 242 | def process_output(output, filename, file): |
||
| 243 | process_output_mock(output, filename, file) |
||
| 244 | |||
| 245 | @staticmethod |
||
| 246 | def create_arguments(filename, file, config_file): |
||
| 247 | code = "\n".join(["import sys", |
||
| 248 | "print('hello stdout')", |
||
| 249 | "print('hello stderr', file=sys.stderr)"]) |
||
| 250 | return "-c", code |
||
| 251 | |||
| 252 | uut = (linter(sys.executable, use_stdout=True) |
||
| 253 | (TestLinter) |
||
| 254 | (self.section, None)) |
||
| 255 | uut.run("", []) |
||
| 256 | |||
| 257 | process_output_mock.assert_called_once_with("hello stdout\n", "", []) |
||
| 258 | process_output_mock.reset_mock() |
||
| 259 | |||
| 260 | uut = (linter(sys.executable, use_stdout=False, use_stderr=True) |
||
| 261 | (TestLinter) |
||
| 262 | (self.section, None)) |
||
| 263 | uut.run("", []) |
||
| 264 | |||
| 265 | process_output_mock.assert_called_once_with("hello stderr\n", "", []) |
||
| 266 | process_output_mock.reset_mock() |
||
| 267 | |||
| 268 | uut = (linter(sys.executable, use_stdout=True, use_stderr=True) |
||
| 269 | (TestLinter) |
||
| 270 | (self.section, None)) |
||
| 271 | |||
| 272 | uut.run("", []) |
||
| 273 | |||
| 274 | process_output_mock.assert_called_once_with(("hello stdout\n", |
||
| 275 | "hello stderr\n"), "", []) |
||
| 276 | |||
| 277 | def test_process_output_corrected(self): |
||
| 278 | uut = (linter(sys.executable, output_format="corrected") |
||
| 279 | (self.EmptyTestLinter) |
||
| 280 | (self.section, None)) |
||
| 281 | |||
| 282 | original = ["void main() {\n", "return 09;\n", "}\n"] |
||
| 283 | fixed = ["void main()\n", "{\n", "return 9;\n", "}\n"] |
||
| 284 | fixed_string = "".join(fixed) |
||
| 285 | |||
| 286 | results = list(uut.process_output(fixed_string, |
||
| 287 | "some-file.c", |
||
| 288 | original)) |
||
| 289 | |||
| 290 | diffs = list(Diff.from_string_arrays(original, fixed).split_diff()) |
||
| 291 | expected = [Result.from_values(uut, |
||
| 292 | "Inconsistency found.", |
||
| 293 | "some-file.c", |
||
| 294 | 1, None, 2, None, |
||
| 295 | RESULT_SEVERITY.NORMAL, |
||
| 296 | diffs={"some-file.c": diffs[0]})] |
||
| 297 | |||
| 298 | self.assertEqual(results, expected) |
||
| 299 | |||
| 300 | # Test when providing a sequence as output. |
||
| 301 | |||
| 302 | results = list(uut.process_output([fixed_string, fixed_string], |
||
| 303 | "some-file.c", |
||
| 304 | original)) |
||
| 305 | self.assertEqual(results, 2 * expected) |
||
| 306 | |||
| 307 | # Test diff_distance |
||
| 308 | |||
| 309 | uut = (linter(sys.executable, |
||
| 310 | output_format="corrected", |
||
| 311 | diff_distance=-1) |
||
| 312 | (self.EmptyTestLinter) |
||
| 313 | (self.section, None)) |
||
| 314 | |||
| 315 | results = list(uut.process_output(fixed_string, |
||
| 316 | "some-file.c", |
||
| 317 | original)) |
||
| 318 | self.assertEqual(len(results), 2) |
||
| 319 | |||
| 320 | def test_process_output_regex(self): |
||
| 321 | # Also test the case when an unknown severity is matched. |
||
| 322 | test_output = ("12:4-14:0-Serious issue (error) -> ORIGIN=X -> D\n" |
||
| 323 | "0:0-0:1-This is a warning (warning) -> ORIGIN=Y -> A\n" |
||
| 324 | "813:77-1024:32-Just a note (info) -> ORIGIN=Z -> C\n" |
||
| 325 | "0:0-0:0-Some unknown sev (???) -> ORIGIN=W -> B\n") |
||
| 326 | regex = (r"(?P<line>\d+):(?P<column>\d+)-" |
||
| 327 | r"(?P<end_line>\d+):(?P<end_column>\d+)-" |
||
| 328 | r"(?P<message>.*) \((?P<severity>.*)\) -> " |
||
| 329 | r"ORIGIN=(?P<origin>.*) -> (?P<additional_info>.*)") |
||
| 330 | |||
| 331 | uut = (linter(sys.executable, |
||
| 332 | output_format="regex", |
||
| 333 | output_regex=regex) |
||
| 334 | (self.EmptyTestLinter) |
||
| 335 | (self.section, None)) |
||
| 336 | uut.warn = Mock() |
||
| 337 | |||
| 338 | sample_file = "some-file.xtx" |
||
| 339 | results = list(uut.process_output(test_output, sample_file, [""])) |
||
| 340 | expected = [Result.from_values("EmptyTestLinter (X)", |
||
| 341 | "Serious issue", |
||
| 342 | sample_file, |
||
| 343 | 12, 4, 14, 0, |
||
| 344 | RESULT_SEVERITY.MAJOR, |
||
| 345 | additional_info="D"), |
||
| 346 | Result.from_values("EmptyTestLinter (Y)", |
||
| 347 | "This is a warning", |
||
| 348 | sample_file, |
||
| 349 | 0, 0, 0, 1, |
||
| 350 | RESULT_SEVERITY.NORMAL, |
||
| 351 | additional_info="A"), |
||
| 352 | Result.from_values("EmptyTestLinter (Z)", |
||
| 353 | "Just a note", |
||
| 354 | sample_file, |
||
| 355 | 813, 77, 1024, 32, |
||
| 356 | RESULT_SEVERITY.INFO, |
||
| 357 | additional_info="C"), |
||
| 358 | Result.from_values("EmptyTestLinter (W)", |
||
| 359 | "Some unknown sev", |
||
| 360 | sample_file, |
||
| 361 | 0, 0, 0, 0, |
||
| 362 | RESULT_SEVERITY.NORMAL, |
||
| 363 | additional_info="B")] |
||
| 364 | |||
| 365 | self.assertEqual(results, expected) |
||
| 366 | uut.warn.assert_called_once_with( |
||
| 367 | "'???' not found in severity-map. Assuming " |
||
| 368 | "`RESULT_SEVERITY.NORMAL`.") |
||
| 369 | |||
| 370 | # Test when providing a sequence as output. |
||
| 371 | test_output = ["", |
||
| 372 | "12:4-14:0-Serious issue (error) -> ORIGIN=X -> XYZ\n"] |
||
| 373 | results = list(uut.process_output(test_output, sample_file, [""])) |
||
| 374 | expected = [Result.from_values("EmptyTestLinter (X)", |
||
| 375 | "Serious issue", |
||
| 376 | sample_file, |
||
| 377 | 12, 4, 14, 0, |
||
| 378 | RESULT_SEVERITY.MAJOR, |
||
| 379 | additional_info="XYZ")] |
||
| 380 | |||
| 381 | self.assertEqual(results, expected) |
||
| 382 | |||
| 383 | # Test with using `result_message` parameter. |
||
| 384 | uut = (linter(sys.executable, |
||
| 385 | output_format="regex", |
||
| 386 | output_regex=regex, |
||
| 387 | result_message="Hello world") |
||
| 388 | (self.EmptyTestLinter) |
||
| 389 | (self.section, None)) |
||
| 390 | |||
| 391 | results = list(uut.process_output(test_output, sample_file, [""])) |
||
| 392 | expected = [Result.from_values("EmptyTestLinter (X)", |
||
| 393 | "Hello world", |
||
| 394 | sample_file, |
||
| 395 | 12, 4, 14, 0, |
||
| 396 | RESULT_SEVERITY.MAJOR, |
||
| 397 | additional_info="XYZ")] |
||
| 398 | |||
| 399 | self.assertEqual(results, expected) |
||
| 400 | |||
| 401 | def test_minimal_regex(self): |
||
| 402 | uut = (linter(sys.executable, |
||
| 403 | output_format="regex", |
||
| 404 | output_regex="an_issue") |
||
| 405 | (self.EmptyTestLinter) |
||
| 406 | (self.section, None)) |
||
| 407 | |||
| 408 | results = list(uut.process_output(['not an issue'], 'file', [""])) |
||
| 409 | self.assertEqual(results, []) |
||
| 410 | |||
| 411 | results = list(uut.process_output(['an_issue'], 'file', [""])) |
||
| 412 | self.assertEqual(results, [Result.from_values("EmptyTestLinter", "", |
||
| 413 | file="file")]) |
||
| 414 | |||
| 415 | def test_get_non_optional_settings(self): |
||
| 416 | class Handler(self.ManualProcessingTestLinter): |
||
| 417 | |||
| 418 | @staticmethod |
||
| 419 | def create_arguments(filename, file, config_file, param_x: int): |
||
| 420 | pass |
||
| 421 | |||
| 422 | @staticmethod |
||
| 423 | def generate_config(filename, file, superparam): |
||
| 424 | """ |
||
| 425 | :param superparam: A superparam! |
||
| 426 | """ |
||
| 427 | return None |
||
| 428 | |||
| 429 | uut = linter(sys.executable)(Handler) |
||
| 430 | |||
| 431 | self.assertEqual(uut.get_non_optional_settings(), |
||
| 432 | {"param_x": ("No description given.", int), |
||
| 433 | "superparam": ("A superparam!", None)}) |
||
| 434 | |||
| 435 | def test_process_output_metadata_omits_on_builtin_formats(self): |
||
| 436 | uut = (linter(executable='', output_format='corrected') |
||
| 437 | (self.EmptyTestLinter)) |
||
| 438 | # diff_severity and result_message should now not occur inside the |
||
| 439 | # metadata definition. |
||
| 440 | self.assertNotIn("diff_severity", uut.get_metadata().optional_params) |
||
| 441 | self.assertNotIn("result_message", uut.get_metadata().optional_params) |
||
| 442 | self.assertNotIn("diff_severity", |
||
| 443 | uut.get_metadata().non_optional_params) |
||
| 444 | self.assertNotIn("result_message", |
||
| 445 | uut.get_metadata().non_optional_params) |
||
| 446 | |||
| 447 | # But every parameter manually defined in process_output shall appear |
||
| 448 | # inside the metadata signature. |
||
| 449 | class Handler: |
||
| 450 | |||
| 451 | @staticmethod |
||
| 452 | def create_arguments(filename, file, config_file): |
||
| 453 | pass |
||
| 454 | |||
| 455 | @staticmethod |
||
| 456 | def process_output(output, filename, file, diff_severity): |
||
| 457 | pass |
||
| 458 | |||
| 459 | uut = linter(executable='')(Handler) |
||
| 460 | self.assertIn("diff_severity", uut.get_metadata().non_optional_params) |
||
| 461 | |||
| 462 | def test_section_settings_forwarding(self): |
||
| 463 | create_arguments_mock = Mock() |
||
| 464 | generate_config_mock = Mock() |
||
| 465 | process_output_mock = Mock() |
||
| 466 | |||
| 467 | class Handler(self.ManualProcessingTestLinter): |
||
| 468 | |||
| 469 | @staticmethod |
||
| 470 | def create_arguments(filename, file, config_file, my_param: int): |
||
| 471 | create_arguments_mock(filename, file, config_file, my_param) |
||
| 472 | # Execute python and do nothing. |
||
| 473 | return "-c", "print('coala!')" |
||
| 474 | |||
| 475 | @staticmethod |
||
| 476 | def generate_config(filename, file, my_config_param: int): |
||
| 477 | generate_config_mock(filename, file, my_config_param) |
||
| 478 | return None |
||
| 479 | |||
| 480 | def process_output(self, output, filename, file, makman2: str): |
||
| 481 | process_output_mock(output, filename, file, makman2) |
||
| 482 | |||
| 483 | self.section["my_param"] = "109" |
||
| 484 | self.section["my_config_param"] = "88" |
||
| 485 | self.section["makman2"] = "is cool" |
||
| 486 | |||
| 487 | uut = linter(sys.executable)(Handler)(self.section, None) |
||
| 488 | |||
| 489 | self.assertIsNotNone(list(uut.execute(filename="some_file.cs", |
||
| 490 | file=[]))) |
||
| 491 | create_arguments_mock.assert_called_once_with( |
||
| 492 | "some_file.cs", [], None, 109) |
||
| 493 | generate_config_mock.assert_called_once_with("some_file.cs", [], 88) |
||
| 494 | process_output_mock.assert_called_once_with( |
||
| 495 | "coala!\n", "some_file.cs", [], "is cool") |
||
| 496 | |||
| 497 | def test_section_settings_defaults_forwarding(self): |
||
| 498 | create_arguments_mock = Mock() |
||
| 499 | generate_config_mock = Mock() |
||
| 500 | process_output_mock = Mock() |
||
| 501 | |||
| 502 | class Handler: |
||
| 503 | |||
| 504 | @staticmethod |
||
| 505 | def generate_config(filename, file, some_default: str="x"): |
||
| 506 | generate_config_mock(filename, file, some_default) |
||
| 507 | return None |
||
| 508 | |||
| 509 | @staticmethod |
||
| 510 | def create_arguments(filename, file, config_file, default: int=3): |
||
| 511 | create_arguments_mock( |
||
| 512 | filename, file, config_file, default) |
||
| 513 | return "-c", "print('hello')" |
||
| 514 | |||
| 515 | @staticmethod |
||
| 516 | def process_output(output, filename, file, xxx: int=64): |
||
| 517 | process_output_mock(output, filename, file, xxx) |
||
| 518 | |||
| 519 | uut = linter(sys.executable)(Handler)(self.section, None) |
||
| 520 | |||
| 521 | self.assertIsNotNone(list(uut.execute(filename="abc.py", file=[]))) |
||
| 522 | create_arguments_mock.assert_called_once_with("abc.py", [], None, 3) |
||
| 523 | generate_config_mock.assert_called_once_with("abc.py", [], "x") |
||
| 524 | process_output_mock.assert_called_once_with( |
||
| 525 | "hello\n", "abc.py", [], 64) |
||
| 526 | |||
| 527 | create_arguments_mock.reset_mock() |
||
| 528 | generate_config_mock.reset_mock() |
||
| 529 | process_output_mock.reset_mock() |
||
| 530 | |||
| 531 | self.section["default"] = "1000" |
||
| 532 | self.section["some_default"] = "xyz" |
||
| 533 | self.section["xxx"] = "-50" |
||
| 534 | self.assertIsNotNone(list(uut.execute(filename="def.py", file=[]))) |
||
| 535 | create_arguments_mock.assert_called_once_with("def.py", [], None, 1000) |
||
| 536 | generate_config_mock.assert_called_once_with("def.py", [], "xyz") |
||
| 537 | process_output_mock.assert_called_once_with( |
||
| 538 | "hello\n", "def.py", [], -50) |
||
| 539 | |||
| 540 | def test_invalid_arguments(self): |
||
| 541 | |||
| 542 | class InvalidArgumentsLinter(self.ManualProcessingTestLinter): |
||
| 543 | |||
| 544 | @staticmethod |
||
| 545 | def create_arguments(filename, file, config_file): |
||
| 546 | return None |
||
| 547 | |||
| 548 | uut = (linter(sys.executable)(InvalidArgumentsLinter) |
||
| 549 | (self.section, None)) |
||
| 550 | self.assertEqual(uut.run("", []), None) |
||
| 551 | |||
| 552 | def test_generate_config(self): |
||
| 553 | uut = linter("")(self.ManualProcessingTestLinter) |
||
| 554 | with uut._create_config("filename", []) as config_file: |
||
| 555 | self.assertIsNone(config_file) |
||
| 556 | |||
| 557 | class ConfigurationTestLinter(self.ManualProcessingTestLinter): |
||
| 558 | |||
| 559 | @staticmethod |
||
| 560 | def generate_config(filename, file, val): |
||
| 561 | return "config_value = " + str(val) |
||
| 562 | |||
| 563 | uut = linter("", config_suffix=".xml")(ConfigurationTestLinter) |
||
| 564 | with uut._create_config("filename", [], val=88) as config_file: |
||
| 565 | self.assertTrue(os.path.isfile(config_file)) |
||
| 566 | self.assertEqual(config_file[-4:], ".xml") |
||
| 567 | with open(config_file, mode="r") as fl: |
||
| 568 | self.assertEqual(fl.read(), "config_value = 88") |
||
| 569 | self.assertFalse(os.path.isfile(config_file)) |
||
| 570 | |||
| 571 | def test_metaclass_repr(self): |
||
| 572 | uut = linter("my-tool")(self.ManualProcessingTestLinter) |
||
| 573 | self.assertEqual( |
||
| 574 | repr(uut), |
||
| 575 | "<ManualProcessingTestLinter linter class (wrapping 'my-tool')>") |
||
| 576 | |||
| 577 | # Test also whether derivatives change the class name accordingly. |
||
| 578 | class DerivedLinter(uut): |
||
| 579 | pass |
||
| 580 | self.assertEqual(repr(DerivedLinter), |
||
| 581 | "<DerivedLinter linter class (wrapping 'my-tool')>") |
||
| 582 | |||
| 583 | def test_repr(self): |
||
| 584 | uut = (linter(sys.executable) |
||
| 585 | (self.ManualProcessingTestLinter) |
||
| 586 | (self.section, None)) |
||
| 587 | |||
| 588 | self.assertRegex( |
||
| 589 | repr(uut), |
||
| 590 | "<ManualProcessingTestLinter linter object \\(wrapping " + |
||
| 591 | re.escape(repr(sys.executable)) + "\\) at 0x[a-fA-F0-9]+>") |
||
| 592 | |||
| 593 | @skipIf(platform.system() == "Windows", |
||
| 594 | "Nobody can sanely test things on windows") |
||
| 595 | def test_process_directory(self): |
||
| 596 | """ |
||
| 597 | The linter shall run the process in the right directory so tools can |
||
| 598 | use the current working directory to resolve import like things. |
||
| 599 | """ |
||
| 600 | uut = (linter("pwd") |
||
| 601 | (self.RootDirTestLinter) |
||
| 602 | (self.section, None)) |
||
| 603 | uut.run('', []) # Does an assert in the output processing |
||
| 604 | |||
| 858 |
If a method does not access any attributes of the class, it could also be implemented as a function or a static method; this can help improve readability. For example, the method shown above
could be written as follows: