| Metric      | Value |
| ----------- | ----- |
| Conditions  | 9     |
| Total Lines | 65    |
| Lines       | 0     |
| Ratio       | 0 %   |
Small methods make your code easier to understand, especially when combined with a good name. And when a method is small, finding a good name is usually much easier.

For example, if you find yourself adding comments inside a method's body, that is usually a sign that the commented part should be extracted into a new method, with the comment serving as a starting point for its name.
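As a minimal sketch of that idea (the checkout/tax names below are invented for illustration, not taken from any particular codebase), the comment describing a block becomes the name of the extracted method:

```python
# Before: a comment explains what the next few lines do.
def checkout(items, tax_rate):
    # calculate the total price including tax
    total = sum(price * quantity for price, quantity in items)
    total *= 1 + tax_rate
    return "Total due: %.2f" % total


# After: the commented block is its own method, named after the comment.
def total_price_including_tax(items, tax_rate):
    return sum(price * quantity for price, quantity in items) * (1 + tax_rate)


def checkout_refactored(items, tax_rate):
    return "Total due: %.2f" % total_price_including_tax(items, tax_rate)


print(checkout([(10.0, 2), (5.0, 1)], 0.19))             # Total due: 29.75
print(checkout_refactored([(10.0, 2), (5.0, 1)], 0.19))  # Total due: 29.75
```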
Commonly applied refactorings include:

- Extract Method: move a cohesive block of the body into its own, well-named method.
- If many parameters/temporary variables are present: consider Replace Temp with Query, Introduce Parameter Object, or Replace Method with Method Object.

The following `__init__` method is a typical example of a method that has grown too long (a refactoring sketch follows after the listing):
```python
from __future__ import division

# ... (intervening lines of the original file omitted in this excerpt) ...

def __init__(self, config):
    self.verbose = config.getoption("benchmark_verbose")
    self.logger = Logger(self.verbose, config)
    self.config = config
    self.performance_regressions = []
    self.benchmarks = []
    self.machine_id = get_machine_id()
    self.machine_info = config.hook.pytest_benchmark_generate_machine_info(config=self.config)
    self.config.hook.pytest_benchmark_update_machine_info(
        config=self.config,
        machine_info=self.machine_info
    )

    self.options = dict(
        min_time=SecondsDecimal(config.getoption("benchmark_min_time")),
        min_rounds=config.getoption("benchmark_min_rounds"),
        max_time=SecondsDecimal(config.getoption("benchmark_max_time")),
        timer=load_timer(config.getoption("benchmark_timer")),
        calibration_precision=config.getoption("benchmark_calibration_precision"),
        disable_gc=config.getoption("benchmark_disable_gc"),
        warmup=config.getoption("benchmark_warmup"),
        warmup_iterations=config.getoption("benchmark_warmup_iterations"),
    )
    self.skip = config.getoption("benchmark_skip")
    self.disable = config.getoption("benchmark_disable")

    if config.getoption("dist", "no") != "no" and not self.skip:
        self.logger.warn(
            "BENCHMARK-U2",
            "Benchmarks are automatically disabled because xdist plugin is active."
            "Benchmarks cannot be performed reliably in a parallelized environment.",
            fslocation="::"
        )
        self.disable = True
    if hasattr(config, "slaveinput"):
        self.disable = True
    if not statistics:
        self.logger.warn(
            "BENCHMARK-U3",
            "Benchmarks are automatically disabled because we could not import `statistics`\n\n%s" %
            statistics_error,
            fslocation="::"
        )
        self.disable = True

    self.only = config.getoption("benchmark_only")
    self.sort = config.getoption("benchmark_sort")
    self.columns = config.getoption("benchmark_columns")
    if self.skip and self.only:
        raise pytest.UsageError("Can't have both --benchmark-only and --benchmark-skip options.")
    if self.disable and self.only:
        raise pytest.UsageError(
            "Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
            "automatically activated if xdist is on or you're missing the statistics dependency.")
    self.group_by = config.getoption("benchmark_group_by")
    self.save = config.getoption("benchmark_save")
    self.autosave = config.getoption("benchmark_autosave")
    self.save_data = config.getoption("benchmark_save_data")
    self.json = config.getoption("benchmark_json")
    self.compare = config.getoption("benchmark_compare")
    self.compare_fail = config.getoption("benchmark_compare_fail")

    self.storage = Storage(config.getoption("benchmark_storage"),
                           default_machine_id=self.machine_id, logger=self.logger)
    self.histogram = first_or_value(config.getoption("benchmark_histogram"), False)
```
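To sketch how such a constructor can be shortened with Extract Method (the `Session` class, the plain-dict `config`, and all helper names below are invented to mirror the shape of the code above; they are not the library's actual API), each cohesive block of option handling moves into its own well-named method, so the constructor reads like a summary:

```python
class Session:
    """Invented example mirroring the shape of the long constructor above."""

    def __init__(self, config):
        self.config = config
        self.options = self._read_timing_options(config)
        self.skip = config.get("skip", False)
        self.only = config.get("only", False)
        self.disable = self._should_disable(config)
        self._validate_flag_combinations()

    def _read_timing_options(self, config):
        # Extract Method: one place knows how the timing knobs are read.
        return {
            "min_time": config.get("min_time", 0.000005),
            "max_time": config.get("max_time", 1.0),
            "min_rounds": config.get("min_rounds", 5),
            "warmup": config.get("warmup", False),
        }

    def _should_disable(self, config):
        # Extract Method: the "why benchmarks get disabled" rules live together.
        return bool(config.get("disable", False) or config.get("dist", "no") != "no")

    def _validate_flag_combinations(self):
        # Extract Method: option-conflict checks are separate from option reading.
        if self.skip and self.only:
            raise ValueError("Can't have both 'only' and 'skip'.")
        if self.disable and self.only:
            raise ValueError("Can't have both 'only' and 'disable'.")


session = Session({"min_rounds": 10, "dist": "no"})
print(session.options["min_rounds"], session.disable)  # -> 10 False
```

Each helper in this sketch is short enough to name precisely, and the conflict checks can be exercised in isolation rather than only through the full constructor.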