Status: Completed
Push — master ( 5dce03...efda8a ) by Ionel Cristian
Created 01:05

src.pytest_benchmark.BenchmarkSession — Rating: B

Complexity

Total Complexity 45

Size/Duplication

Total Lines 211
Duplicated Lines 0%

Metric                             Value
dl (duplicated lines)              0
loc (lines of code)                211
rs                                 8.3673
wmc (weighted methods per class)   45

8 Methods

Rating   Name                   Duplication   Size   Complexity
A        display()              0             13     2
C        handle_loading()       0             29     7
D        handle_saving()        0             42     8
D        prepare_benchmarks()   0             23     9
D        __init__()             0             65     9
A        next_num()             0             11     4
A        finish()               0             7      1
B        check_regressions()    0             9      5
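
The report does not say which engine produced the per-method Size and Complexity figures above, but comparable numbers (cyclomatic complexity per method with an A-F rank, plus raw line counts) can be reproduced locally with the open-source radon library. A minimal sketch, assuming radon is installed and using a placeholder path for the module under review:

from radon.complexity import cc_rank, cc_visit
from radon.raw import analyze

# Placeholder path; point this at the module that defines BenchmarkSession.
with open("path/to/benchmark_session.py") as fh:
    source = fh.read()

raw = analyze(source)
print("loc:", raw.loc)  # total line count, comparable to the loc metric above

for block in sorted(cc_visit(source), key=lambda b: b.complexity, reverse=True):
    # Each block carries a name and a cyclomatic complexity score; cc_rank()
    # maps that score to a letter grade similar to the Rating column.
    print("%s  %-22s complexity=%d" % (cc_rank(block.complexity), block.name, block.complexity))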

How to fix: Complexity

Complex Class

Complex classes like src.pytest_benchmark.BenchmarkSession often do a lot of different things. To break such a class down, you need to identify a cohesive component within it. A common way to find one is to look for fields and methods that share the same prefixes or suffixes.

Once you have determined which fields and methods belong together, you can apply the Extract Class refactoring, as sketched below. If the component makes sense as a subclass, Extract Subclass is also a candidate, and is often quicker to carry out.
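
For illustration only (this sketch is not taken from the report or from the pytest-benchmark code base): the saving/comparison state that handle_saving(), handle_loading() and check_regressions() share (storage, compared_mapping and the logger) is one plausible seam. The class name ResultsHandler and its method names below are assumptions made for this example.

import json


class ResultsHandler(object):
    """Hypothetical component owning the persistence/comparison state that
    handle_saving(), handle_loading() and check_regressions() currently mix
    with test-session concerns. Names here are illustrative assumptions."""

    def __init__(self, storage, logger):
        self.storage = storage            # the existing Storage instance
        self.logger = logger
        self.compared_mapping = {}        # path -> {fullname: benchmark dict}

    def load(self, compare):
        # Roughly the body of handle_loading(), minus session bookkeeping.
        pattern = '[0-9][0-9][0-9][0-9]_' if compare is True else compare
        for path, data in self.storage.load(pattern):
            self.compared_mapping[path] = dict(
                (bench['fullname'], bench) for bench in data['benchmarks']
            )
            self.logger.info("Comparing against benchmark %s" % path)

    def save(self, name, output_json):
        # Roughly the tail of handle_saving().
        output_file = self.storage.get(name)
        with output_file.open('wb') as fh:
            fh.write(json.dumps(output_json, ensure_ascii=True, indent=4).encode())
        self.logger.info("Saved benchmark data in %s" % output_file)

BenchmarkSession would then hold one ResultsHandler and delegate to it, trimming __init__() and the D- and C-rated handle_saving()/handle_loading() methods along a field boundary they already share. The full listing under review follows.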

from __future__ import division
from __future__ import print_function

import json
import traceback

import pytest

from .logger import Logger
from .storage import Storage
from .table import ResultsTable
from .utils import SecondsDecimal
from .utils import annotate_source
from .utils import first_or_value
from .utils import get_machine_id
from .utils import load_timer
from .utils import short_filename

try:
    import statistics
except (ImportError, SyntaxError):
    statistics = False
    statistics_error = traceback.format_exc()
else:
    from .stats import Stats


class PerformanceRegression(pytest.UsageError):
    pass


class BenchmarkSession(object):
    compared_mapping = None
    groups = None

    def __init__(self, config):
        self.verbose = config.getoption("benchmark_verbose")
        self.logger = Logger(self.verbose, config)
        self.config = config
        self.performance_regressions = []
        self.benchmarks = []
        self.machine_id = get_machine_id()
        self.machine_info = config.hook.pytest_benchmark_generate_machine_info(config=self.config)
        self.config.hook.pytest_benchmark_update_machine_info(
            config=self.config,
            machine_info=self.machine_info
        )

        self.options = dict(
            min_time=SecondsDecimal(config.getoption("benchmark_min_time")),
            min_rounds=config.getoption("benchmark_min_rounds"),
            max_time=SecondsDecimal(config.getoption("benchmark_max_time")),
            timer=load_timer(config.getoption("benchmark_timer")),
            calibration_precision=config.getoption("benchmark_calibration_precision"),
            disable_gc=config.getoption("benchmark_disable_gc"),
            warmup=config.getoption("benchmark_warmup"),
            warmup_iterations=config.getoption("benchmark_warmup_iterations"),
        )
        self.skip = config.getoption("benchmark_skip")
        self.disable = config.getoption("benchmark_disable")

        if config.getoption("dist", "no") != "no" and not self.skip:
            self.logger.warn(
                "BENCHMARK-U2",
                "Benchmarks are automatically disabled because xdist plugin is active. "
                "Benchmarks cannot be performed reliably in a parallelized environment.",
                fslocation="::"
            )
            self.disable = True
        if hasattr(config, "slaveinput"):
            self.disable = True
        if not statistics:
            self.logger.warn(
                "BENCHMARK-U3",
                "Benchmarks are automatically disabled because we could not import `statistics`\n\n%s" %
                statistics_error,
                fslocation="::"
            )
            self.disable = True

        self.only = config.getoption("benchmark_only")
        self.sort = config.getoption("benchmark_sort")
        self.columns = config.getoption("benchmark_columns")
        if self.skip and self.only:
            raise pytest.UsageError("Can't have both --benchmark-only and --benchmark-skip options.")
        if self.disable and self.only:
            raise pytest.UsageError(
                "Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
                "automatically activated if xdist is on or you're missing the statistics dependency.")
        self.group_by = config.getoption("benchmark_group_by")
        self.save = config.getoption("benchmark_save")
        self.autosave = config.getoption("benchmark_autosave")
        self.save_data = config.getoption("benchmark_save_data")
        self.json = config.getoption("benchmark_json")
        self.compare = config.getoption("benchmark_compare")
        self.compare_fail = config.getoption("benchmark_compare_fail")

        self.storage = Storage(config.getoption("benchmark_storage"),
                               default_machine_id=self.machine_id, logger=self.logger)
        self.histogram = first_or_value(config.getoption("benchmark_histogram"), False)

    def prepare_benchmarks(self):
        for bench in self.benchmarks:
            if bench:
                compared = False
                for path, compared_mapping in self.compared_mapping.items():
                    if bench.fullname in compared_mapping:
                        compared = compared_mapping[bench.fullname]
                        source = short_filename(path, self.machine_id)
                        flat_bench = bench.as_dict(include_data=False, stats=False)
                        flat_bench.update(compared["stats"])
                        flat_bench["path"] = str(path)
                        annotate_source(flat_bench, source)
                        if self.compare_fail:
                            for check in self.compare_fail:
                                fail = check.fails(bench, flat_bench)
                                if fail:
                                    self.performance_regressions.append((flat_bench["fullname"], fail))
                        yield flat_bench
                flat_bench = bench.as_dict(include_data=False, flat=True)
                flat_bench["path"] = None
                if compared:
                    annotate_source(flat_bench, "NOW")
                yield flat_bench

    @property
    def next_num(self):
        files = self.storage.query("[0-9][0-9][0-9][0-9]_*")
        files.sort(reverse=True)
        if not files:
            return "0001"
        for f in files:
            try:
                return "%04i" % (int(str(f.name).split('_')[0]) + 1)
            except ValueError:
                raise

    def handle_saving(self):
        save = self.save or self.autosave
        if save or self.json:
            commit_info = self.config.hook.pytest_benchmark_generate_commit_info(config=self.config)
            self.config.hook.pytest_benchmark_update_commit_info(config=self.config, commit_info=commit_info)

        if self.json:
            output_json = self.config.hook.pytest_benchmark_generate_json(
                config=self.config,
                benchmarks=self.benchmarks,
                include_data=True,
                machine_info=self.machine_info,
                commit_info=commit_info,
            )
            self.config.hook.pytest_benchmark_update_json(
                config=self.config,
                benchmarks=self.benchmarks,
                output_json=output_json,
            )
            with self.json as fh:
                fh.write(json.dumps(output_json, ensure_ascii=True, indent=4).encode())
            self.logger.info("Wrote benchmark data in %s" % self.json, purple=True)

        if save:
            output_json = self.config.hook.pytest_benchmark_generate_json(
                config=self.config,
                benchmarks=self.benchmarks,
                include_data=self.save_data,
                machine_info=self.machine_info,
                commit_info=commit_info,
            )
            self.config.hook.pytest_benchmark_update_json(
                config=self.config,
                benchmarks=self.benchmarks,
                output_json=output_json,
            )
            output_file = self.storage.get("%s_%s.json" % (self.next_num, save))
            assert not output_file.exists()

            with output_file.open('wb') as fh:
                fh.write(json.dumps(output_json, ensure_ascii=True, indent=4).encode())
            self.logger.info("Saved benchmark data in %s" % output_file)

    def handle_loading(self):
        self.compared_mapping = {}
        if self.compare:
            if self.compare is True:
                compared_benchmarks = list(self.storage.load('[0-9][0-9][0-9][0-9]_'))[-1:]
            else:
                compared_benchmarks = list(self.storage.load(self.compare))

            if not compared_benchmarks:
                msg = "Can't compare. No benchmark files in %r" % str(self.storage)
                if self.compare is True:
                    msg += ". Can't load the previous benchmark."
                    code = "BENCHMARK-C2"
                else:
                    msg += " match %r." % self.compare
                    code = "BENCHMARK-C1"
                self.logger.warn(code, msg, fslocation=self.storage.location)

            for path, compared_benchmark in compared_benchmarks:
                self.config.hook.pytest_benchmark_compare_machine_info(
                    config=self.config,
                    benchmarksession=self,
                    machine_info=self.machine_info,
                    compared_benchmark=compared_benchmark,
                )
                self.compared_mapping[path] = dict(
                    (bench['fullname'], bench) for bench in compared_benchmark['benchmarks']
                )
                self.logger.info("Comparing against benchmark %s" % path)

    def finish(self):
        self.handle_saving()
        self.handle_loading()
        self.groups = self.config.hook.pytest_benchmark_group_stats(
            config=self.config,
            benchmarks=self.prepare_benchmarks(),
            group_by=self.group_by
        )

    def display(self, tr):
        if not self.groups:
            return

        tr.ensure_newline()
        results_table = ResultsTable(
            columns=self.columns,
            sort=self.sort,
            histogram=self.histogram,
            logger=self.logger
        )
        results_table.display(tr, self.groups)
        self.check_regressions()

    def check_regressions(self):
        if self.compare_fail and not self.compared_mapping:
            raise pytest.UsageError("--benchmark-compare-fail requires valid --benchmark-compare.")

        if self.performance_regressions:
            self.logger.error("Performance has regressed:\n%s" % "\n".join(
                "\t%s - %s" % line for line in self.performance_regressions
            ))
            raise PerformanceRegression("Performance has regressed.")
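
A usage note on the config.hook.* calls above: pytest-benchmark exposes these as pluggy hooks, so a test suite can adjust the JSON that handle_saving() writes from its own conftest.py. A minimal sketch, reusing the hook name and keyword arguments exactly as they are invoked in handle_saving(); the custom_metadata payload added here is an arbitrary example, not a field the plugin requires:

# conftest.py
import platform


def pytest_benchmark_update_json(config, benchmarks, output_json):
    # Called after pytest_benchmark_generate_json and before the JSON is
    # written, both for the --benchmark-json file and for saved/autosaved runs.
    output_json["custom_metadata"] = {
        "python_implementation": platform.python_implementation(),
        "benchmark_count": len(benchmarks),
    }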
243