| Conditions | Total Lines | Lines | Ratio | Changes | Bugs | Features |
|------------|-------------|-------|-------|---------|------|----------|
| 9          | 55          | 0     | 0 %   | 5       | 0    | 0        |
Small methods make your code easier to understand, especially when combined with a good name. Conversely, when a method is small, finding a good name for it is usually much easier.
For example, if you find yourself adding comments inside a method's body, that is usually a sign that the commented part should be extracted into a new method; the comment itself is a good starting point for naming it.
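A minimal sketch of this comment-to-method-name move; all names here (`process_order`, `validate_order`, `charge`) are invented for illustration, not taken from the code under review:

```python
def charge(order):
    """Payment step; stubbed so the sketch runs."""

# Before: a comment marks a block that wants to be its own method.
def process_order(order):
    # validate the order before charging
    if not order.items:
        raise ValueError("empty order")
    if order.total < 0:
        raise ValueError("negative total")
    charge(order)

# After: the comment has become the method name.
def validate_order(order):
    if not order.items:
        raise ValueError("empty order")
    if order.total < 0:
        raise ValueError("negative total")

def process_order_v2(order):
    validate_order(order)
    charge(order)
```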
Commonly applied refactorings include:

- Extract Method: move a coherent block of statements into its own, well-named method.
- If many parameters or temporary variables get in the way of extraction: Replace Temp with Query, Introduce Parameter Object, or Replace Method with Method Object.

The flagged method (9 conditions, 55 lines) looks like this:
```python
from __future__ import division

# ...

def __init__(self, config, report_backend):
    self.verbose = config.getoption("benchmark_verbose")
    self.logger = Logger(self.verbose, config)
    self.config = config
    self.performance_regressions = []
    self.benchmarks = []
    self.machine_id = get_machine_id()
    self.report_backend = report_backend

    self.options = dict(
        min_time=SecondsDecimal(config.getoption("benchmark_min_time")),
        min_rounds=config.getoption("benchmark_min_rounds"),
        max_time=SecondsDecimal(config.getoption("benchmark_max_time")),
        timer=load_timer(config.getoption("benchmark_timer")),
        calibration_precision=config.getoption("benchmark_calibration_precision"),
        disable_gc=config.getoption("benchmark_disable_gc"),
        warmup=config.getoption("benchmark_warmup"),
        warmup_iterations=config.getoption("benchmark_warmup_iterations"),
    )
    self.skip = config.getoption("benchmark_skip")
    self.disabled = config.getoption("benchmark_disable") and not config.getoption("benchmark_enable")

    if config.getoption("dist", "no") != "no" and not self.skip:
        self.logger.warn(
            "BENCHMARK-U2",
            "Benchmarks are automatically disabled because xdist plugin is active."
            "Benchmarks cannot be performed reliably in a parallelized environment.",
            fslocation="::"
        )
        self.disabled = True
    if hasattr(config, "slaveinput"):
        self.disabled = True
    if not statistics:
        self.logger.warn(
            "BENCHMARK-U3",
            "Benchmarks are automatically disabled because we could not import `statistics`\n\n%s" %
            statistics_error,
            fslocation="::"
        )
        self.disabled = True

    self.only = config.getoption("benchmark_only")
    self.sort = config.getoption("benchmark_sort")
    self.columns = config.getoption("benchmark_columns")
    if self.skip and self.only:
        raise pytest.UsageError("Can't have both --benchmark-only and --benchmark-skip options.")
    if self.disabled and self.only:
        raise pytest.UsageError(
            "Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
            "automatically activated if xdist is on or you're missing the statistics dependency.")
    self.group_by = config.getoption("benchmark_group_by")
    self.compare_fail = config.getoption("benchmark_compare_fail")
    self.name_format = NAME_FORMATTERS[config.getoption("benchmark_name")]

    self.histogram = first_or_value(config.getoption("benchmark_histogram"), False)
```
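Applied to the excerpt above, Extract Method would carve `__init__` into named steps. The sketch below reuses the collaborators from the excerpt (`Logger`, `get_machine_id`, `SecondsDecimal`, ...) and introduces hypothetical helpers (`_read_timing_options`, `_detect_disabled`) that do not exist in pytest-benchmark; it illustrates the technique under those assumptions, not the project's actual refactoring:

```python
# Sketch only: _read_timing_options and _detect_disabled are invented names,
# not part of pytest-benchmark's API.
def __init__(self, config, report_backend):
    self.verbose = config.getoption("benchmark_verbose")
    self.logger = Logger(self.verbose, config)
    self.config = config
    self.performance_regressions = []
    self.benchmarks = []
    self.machine_id = get_machine_id()
    self.report_backend = report_backend

    self.options = self._read_timing_options(config)  # was ~10 inline lines
    self.skip = config.getoption("benchmark_skip")
    self.disabled = self._detect_disabled(config)      # was three inline if-blocks
    ...                                                # remaining simple assignments

def _read_timing_options(self, config):
    # The former inline dict(...): one entry per timing-related command-line option.
    return dict(
        min_time=SecondsDecimal(config.getoption("benchmark_min_time")),
        min_rounds=config.getoption("benchmark_min_rounds"),
        max_time=SecondsDecimal(config.getoption("benchmark_max_time")),
        timer=load_timer(config.getoption("benchmark_timer")),
        calibration_precision=config.getoption("benchmark_calibration_precision"),
        disable_gc=config.getoption("benchmark_disable_gc"),
        warmup=config.getoption("benchmark_warmup"),
        warmup_iterations=config.getoption("benchmark_warmup_iterations"),
    )

def _detect_disabled(self, config):
    # The three former disabling checks, now readable as one named question.
    if config.getoption("benchmark_disable") and not config.getoption("benchmark_enable"):
        return True
    if config.getoption("dist", "no") != "no" and not self.skip:
        ...  # emit the BENCHMARK-U2 warning, as in the original
        return True
    if hasattr(config, "slaveinput") or not statistics:
        ...  # emit the BENCHMARK-U3 warning for the statistics case
        return True
    return False
```

With this shape, `__init__` reads like a table of contents, and most of the conditions counted in the report above move into `_detect_disabled`, where they can be named and tested in isolation.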