Completed: push to master ( 7b504c...b6f606 ) by Ionel Cristian, created at 01:16

BenchmarkSession.handle_saving() - rated D

Complexity: Conditions 8
Size: Total Lines 42
Duplication: Lines 35, Ratio 83.33 %
Importance: Changes 5, Bugs 0, Features 0

Metric  Value
cc      8
c       5
b       0
f       0
dl      35
loc     42
rs      4
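The abbreviated metrics (cc, loc, dl, and so on) are the analyzer's per-function figures for handle_saving(). As a rough cross-check, the complexity and raw-size numbers can be reproduced locally with the radon package; this is only a sketch, assuming radon is installed, and the file path below is illustrative rather than taken from the report:

# Sketch: reproduce complexity and raw-size metrics with radon (pip install radon).
# The path below is an assumption used for illustration.
from radon.complexity import cc_visit
from radon.raw import analyze

with open("src/pytest_benchmark/session.py") as fh:
    source = fh.read()

# Per-function cyclomatic complexity, comparable to the "cc" / Conditions figure.
for block in cc_visit(source):
    print(block.name, block.complexity)

# Raw size counts (loc, lloc, sloc, comments, blank), comparable to Total Lines.
print(analyze(source))

The annotated source listing follows.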
from __future__ import division
from __future__ import print_function

import pytest

from .fixture import statistics
from .fixture import statistics_error
from .logger import Logger
from .storage import Storage
from .table import TableResults
from .utils import NAME_FORMATTERS
from .utils import SecondsDecimal
from .utils import cached_property
from .utils import first_or_value
from .utils import get_machine_id
from .utils import load_timer
from .utils import safe_dumps
from .utils import short_filename


class PerformanceRegression(pytest.UsageError):
    pass


class BenchmarkSession(object):
    compared_mapping = None
    groups = None

    def __init__(self, config):
        self.verbose = config.getoption("benchmark_verbose")
        self.logger = Logger(self.verbose, config)
        self.config = config
        self.performance_regressions = []
        self.benchmarks = []
        self.machine_id = get_machine_id()

        self.options = dict(
            min_time=SecondsDecimal(config.getoption("benchmark_min_time")),
            min_rounds=config.getoption("benchmark_min_rounds"),
            max_time=SecondsDecimal(config.getoption("benchmark_max_time")),
            timer=load_timer(config.getoption("benchmark_timer")),
            calibration_precision=config.getoption("benchmark_calibration_precision"),
            disable_gc=config.getoption("benchmark_disable_gc"),
            warmup=config.getoption("benchmark_warmup"),
            warmup_iterations=config.getoption("benchmark_warmup_iterations"),
        )
        self.skip = config.getoption("benchmark_skip")
        self.disabled = config.getoption("benchmark_disable") and not config.getoption("benchmark_enable")

        if config.getoption("dist", "no") != "no" and not self.skip:
            self.logger.warn(
                "BENCHMARK-U2",
                "Benchmarks are automatically disabled because xdist plugin is active."
                "Benchmarks cannot be performed reliably in a parallelized environment.",
                fslocation="::"
            )
            self.disabled = True
        if hasattr(config, "slaveinput"):
            self.disabled = True
        if not statistics:
            self.logger.warn(
                "BENCHMARK-U3",
                "Benchmarks are automatically disabled because we could not import `statistics`\n\n%s" %
                statistics_error,
                fslocation="::"
            )
            self.disabled = True

        self.only = config.getoption("benchmark_only")
        self.sort = config.getoption("benchmark_sort")
        self.columns = config.getoption("benchmark_columns")
        if self.skip and self.only:
            raise pytest.UsageError("Can't have both --benchmark-only and --benchmark-skip options.")
        if self.disabled and self.only:
            raise pytest.UsageError(
                "Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
                "automatically activated if xdist is on or you're missing the statistics dependency.")
        self.group_by = config.getoption("benchmark_group_by")
        self.save = config.getoption("benchmark_save")
        self.autosave = config.getoption("benchmark_autosave")
        self.save_data = config.getoption("benchmark_save_data")
        self.json = config.getoption("benchmark_json")
        self.compare = config.getoption("benchmark_compare")
        self.compare_fail = config.getoption("benchmark_compare_fail")
        self.name_format = NAME_FORMATTERS[config.getoption("benchmark_name")]

        self.storage = Storage(config.getoption("benchmark_storage"),
                               default_machine_id=self.machine_id, logger=self.logger)
        self.histogram = first_or_value(config.getoption("benchmark_histogram"), False)

    @cached_property
    def machine_info(self):
        obj = self.config.hook.pytest_benchmark_generate_machine_info(config=self.config)
        self.config.hook.pytest_benchmark_update_machine_info(
            config=self.config,
            machine_info=obj
        )
        return obj

    def prepare_benchmarks(self):
        for bench in self.benchmarks:
            if bench:
                compared = False
                for path, compared_mapping in self.compared_mapping.items():
                    if bench.fullname in compared_mapping:
                        compared = compared_mapping[bench.fullname]
                        source = short_filename(path, self.machine_id)
                        flat_bench = bench.as_dict(include_data=False, stats=False)
                        flat_bench.update(compared["stats"])
                        flat_bench["path"] = str(path)
                        flat_bench["source"] = source
                        if self.compare_fail:
                            for check in self.compare_fail:
                                fail = check.fails(bench, flat_bench)
                                if fail:
                                    self.performance_regressions.append((self.name_format(flat_bench), fail))
                        yield flat_bench
                flat_bench = bench.as_dict(include_data=False, flat=True)
                flat_bench["path"] = None
                flat_bench["source"] = compared and "NOW"
                yield flat_bench

    @property
    def next_num(self):
        files = self.storage.query("[0-9][0-9][0-9][0-9]_*")
        files.sort(reverse=True)
        if not files:
            return "0001"
        for f in files:
            try:
                return "%04i" % (int(str(f.name).split('_')[0]) + 1)
            except ValueError:
                raise

    def handle_saving(self):
        save = self.benchmarks and self.save or self.autosave
        if save or self.json:
            commit_info = self.config.hook.pytest_benchmark_generate_commit_info(config=self.config)
            self.config.hook.pytest_benchmark_update_commit_info(config=self.config, commit_info=commit_info)

        # [Duplication flagged by the report: this block seems to be duplicated in the project.]
        if self.json:
            output_json = self.config.hook.pytest_benchmark_generate_json(
                config=self.config,
                benchmarks=self.benchmarks,
                include_data=True,
                machine_info=self.machine_info,
                commit_info=commit_info,
            )
            self.config.hook.pytest_benchmark_update_json(
                config=self.config,
                benchmarks=self.benchmarks,
                output_json=output_json,
            )
            with self.json as fh:
                fh.write(safe_dumps(output_json, ensure_ascii=True, indent=4).encode())
            self.logger.info("Wrote benchmark data in: %s" % self.json, purple=True)

        # [Duplication flagged by the report: this block mirrors the self.json branch above.]
        if save:
            output_json = self.config.hook.pytest_benchmark_generate_json(
                config=self.config,
                benchmarks=self.benchmarks,
                include_data=self.save_data,
                machine_info=self.machine_info,
                commit_info=commit_info,
            )
            self.config.hook.pytest_benchmark_update_json(
                config=self.config,
                benchmarks=self.benchmarks,
                output_json=output_json,
            )
            output_file = self.storage.get("%s_%s.json" % (self.next_num, save))
            assert not output_file.exists()

            with output_file.open('wb') as fh:
                fh.write(safe_dumps(output_json, ensure_ascii=True, indent=4).encode())
            self.logger.info("Saved benchmark data in: %s" % output_file)

    def handle_loading(self):
        self.compared_mapping = {}
        if self.compare:
            if self.compare is True:
                compared_benchmarks = list(self.storage.load('[0-9][0-9][0-9][0-9]_'))[-1:]
            else:
                compared_benchmarks = list(self.storage.load(self.compare))

            if not compared_benchmarks:
                msg = "Can't compare. No benchmark files in %r" % str(self.storage)
                if self.compare is True:
                    msg += ". Can't load the previous benchmark."
                    code = "BENCHMARK-C2"
                else:
                    msg += " match %r." % self.compare
                    code = "BENCHMARK-C1"
                self.logger.warn(code, msg, fslocation=self.storage.location)

            for path, compared_benchmark in compared_benchmarks:
                self.config.hook.pytest_benchmark_compare_machine_info(
                    config=self.config,
                    benchmarksession=self,
                    machine_info=self.machine_info,
                    compared_benchmark=compared_benchmark,
                )
                self.compared_mapping[path] = dict(
                    (bench['fullname'], bench) for bench in compared_benchmark['benchmarks']
                )
                self.logger.info("Comparing against benchmarks from: %s" % path)

    def finish(self):
        self.handle_saving()
        self.handle_loading()
        prepared_benchmarks = list(self.prepare_benchmarks())
        if prepared_benchmarks:
            self.groups = self.config.hook.pytest_benchmark_group_stats(
                config=self.config,
                benchmarks=prepared_benchmarks,
                group_by=self.group_by
            )

    def display(self, tr):
        if not self.groups:
            return

        tr.ensure_newline()
        results_table = TableResults(
            columns=self.columns,
            sort=self.sort,
            histogram=self.histogram,
            name_format=self.name_format,
            logger=self.logger
        )
        results_table.display(tr, self.groups)
        self.check_regressions()

    def check_regressions(self):
        if self.compare_fail and not self.compared_mapping:
            raise pytest.UsageError("--benchmark-compare-fail requires valid --benchmark-compare.")

        if self.performance_regressions:
            self.logger.error("Performance has regressed:\n%s" % "\n".join(
                "\t%s - %s" % line for line in self.performance_regressions
            ))
            raise PerformanceRegression("Performance has regressed.")
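The duplication flagged inside handle_saving() comes from its two branches running the same generate-json / update-json / serialize sequence, differing only in include_data and the output target. Below is a minimal sketch of one way that could be folded into a shared helper; the _write_json name and signature are hypothetical and not part of the plugin, and it reuses only the hooks and the safe_dumps helper that already appear in the listing:

    def _write_json(self, commit_info, include_data, fh):
        # Hypothetical helper, for illustration only: the hook sequence that both
        # branches of handle_saving() currently repeat.
        output_json = self.config.hook.pytest_benchmark_generate_json(
            config=self.config,
            benchmarks=self.benchmarks,
            include_data=include_data,
            machine_info=self.machine_info,
            commit_info=commit_info,
        )
        self.config.hook.pytest_benchmark_update_json(
            config=self.config,
            benchmarks=self.benchmarks,
            output_json=output_json,
        )
        # Serialize exactly as both branches do today.
        fh.write(safe_dumps(output_json, ensure_ascii=True, indent=4).encode())

With such a helper, the if self.json: branch would shrink to opening self.json and calling it with include_data=True, and the if save: branch to opening the numbered storage file with include_data=self.save_data, which is roughly what the 83.33 % duplication ratio is pointing at.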