Completed
Push — master (719867...30d2f4) by Ionel Cristian
created 01:05

BenchmarkSession.save_json()  rating: A

Complexity:   Conditions: 2
Size:         Total Lines: 4
Duplication:  Lines: 0, Ratio: 0%
Importance:   Changes: 0

Metric  Value
cc      2
c       0
b       0
f       0
dl      0
loc     4
rs      10
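The per-function metrics above (cc is cyclomatic complexity, loc is lines of code) are the kind a radon-style analyzer reports. A minimal sketch of reproducing the cc figure for save_json(), assuming the tool behind this report counts complexity the way radon does (the actual analyzer may differ):

# Sketch: reproduce the `cc 2` figure with radon (pip install radon).
# Assumption: the report uses radon-style cyclomatic complexity counting.
from radon.complexity import cc_visit

SOURCE = '''
def save_json(self, output_json):
    with self.json as fh:
        fh.write(safe_dumps(output_json, ensure_ascii=True, indent=4).encode())
    self.logger.info("Wrote benchmark data in: %s" % self.json, purple=True)
'''

for block in cc_visit(SOURCE):
    # Expect complexity 2: the base score of 1 plus 1 for the `with` block.
    print(block.name, block.complexity)

The full file under review follows.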
from __future__ import division
from __future__ import print_function

import pytest

from .fixture import statistics
from .fixture import statistics_error
from .logger import Logger
from .table import TableResults
from .utils import NAME_FORMATTERS
from .utils import SecondsDecimal
from .utils import cached_property
from .utils import first_or_value
from .utils import get_machine_id
from .utils import load_storage
from .utils import load_timer
from .utils import short_filename
from .utils import safe_dumps


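# Raised when a --benchmark-compare-fail check trips (see check_regressions()
# below); subclassing pytest.UsageError means pytest reports it as a usage
# error and aborts the run.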
class PerformanceRegression(pytest.UsageError):
    pass


class BenchmarkSession(object):
    compared_mapping = None
    groups = None

    def __init__(self, config):
        self.verbose = config.getoption("benchmark_verbose")
        self.logger = Logger(self.verbose, config)
        self.config = config
        self.performance_regressions = []
        self.benchmarks = []
        self.machine_id = get_machine_id()
        self.storage = load_storage(
            config.getoption("benchmark_storage"),
            logger=self.logger,
            default_machine_id=self.machine_id
        )
        self.options = dict(
            min_time=SecondsDecimal(config.getoption("benchmark_min_time")),
            min_rounds=config.getoption("benchmark_min_rounds"),
            max_time=SecondsDecimal(config.getoption("benchmark_max_time")),
            timer=load_timer(config.getoption("benchmark_timer")),
            calibration_precision=config.getoption("benchmark_calibration_precision"),
            disable_gc=config.getoption("benchmark_disable_gc"),
            warmup=config.getoption("benchmark_warmup"),
            warmup_iterations=config.getoption("benchmark_warmup_iterations"),
            use_cprofile=bool(config.getoption("benchmark_cprofile")),
        )
        self.skip = config.getoption("benchmark_skip")
        self.disabled = config.getoption("benchmark_disable") and not config.getoption("benchmark_enable")
        self.cprofile_sort_by = config.getoption("benchmark_cprofile")

        if config.getoption("dist", "no") != "no" and not self.skip:
            self.logger.warn(
                "BENCHMARK-U2",
                "Benchmarks are automatically disabled because xdist plugin is active. "
                "Benchmarks cannot be performed reliably in a parallelized environment.",
                fslocation="::"
            )
            self.disabled = True
        if hasattr(config, "slaveinput"):
            self.disabled = True
        if not statistics:
            self.logger.warn(
                "BENCHMARK-U3",
                "Benchmarks are automatically disabled because we could not import `statistics`\n\n%s" %
                statistics_error,
                fslocation="::"
            )
            self.disabled = True

        self.only = config.getoption("benchmark_only")
        self.sort = config.getoption("benchmark_sort")
        self.columns = config.getoption("benchmark_columns")
        if self.skip and self.only:
            raise pytest.UsageError("Can't have both --benchmark-only and --benchmark-skip options.")
        if self.disabled and self.only:
            raise pytest.UsageError(
                "Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
                "automatically activated if xdist is on or you're missing the statistics dependency.")
        self.group_by = config.getoption("benchmark_group_by")
        self.save = config.getoption("benchmark_save")
        self.autosave = config.getoption("benchmark_autosave")
        self.save_data = config.getoption("benchmark_save_data")
        self.json = config.getoption("benchmark_json")
        self.compare = config.getoption("benchmark_compare")
        self.compare_fail = config.getoption("benchmark_compare_fail")
        self.name_format = NAME_FORMATTERS[config.getoption("benchmark_name")]

        self.histogram = first_or_value(config.getoption("benchmark_histogram"), False)

    @cached_property
    def machine_info(self):
        obj = self.config.hook.pytest_benchmark_generate_machine_info(config=self.config)
        self.config.hook.pytest_benchmark_update_machine_info(
            config=self.config,
            machine_info=obj
        )
        return obj

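    # Yields one flat dict per benchmark: a row for each saved run it was
    # compared against (with "source" naming the originating file), then a
    # row for the current run, whose "source" is "NOW" when a comparison
    # happened and False otherwise.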
    def prepare_benchmarks(self):
        for bench in self.benchmarks:
            if bench:
                compared = False
                for path, compared_mapping in self.compared_mapping.items():
                    if bench.fullname in compared_mapping:
                        compared = compared_mapping[bench.fullname]
                        source = short_filename(path, self.machine_id)
                        flat_bench = bench.as_dict(include_data=False, stats=False, cprofile=self.cprofile_sort_by)
                        flat_bench.update(compared["stats"])
                        flat_bench["path"] = str(path)
                        flat_bench["source"] = source
                        if self.compare_fail:
                            for check in self.compare_fail:
                                fail = check.fails(bench, flat_bench)
                                if fail:
                                    self.performance_regressions.append((self.name_format(flat_bench), fail))
                        yield flat_bench
                flat_bench = bench.as_dict(include_data=False, flat=True, cprofile=self.cprofile_sort_by)
                flat_bench["path"] = None
                flat_bench["source"] = compared and "NOW"
                yield flat_bench

    def save_json(self, output_json):
        with self.json as fh:
            fh.write(safe_dumps(output_json, ensure_ascii=True, indent=4).encode())
        self.logger.info("Wrote benchmark data in: %s" % self.json, purple=True)

    def handle_saving(self):
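        # Note: `and` binds tighter than `or`, so this reads as
        # (self.benchmarks and self.save) or self.autosave -- autosave takes
        # effect even when --benchmark-save was not given or no benchmarks ran.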
        save = self.benchmarks and self.save or self.autosave
        if save or self.json:
            commit_info = self.config.hook.pytest_benchmark_generate_commit_info(config=self.config)
            self.config.hook.pytest_benchmark_update_commit_info(config=self.config, commit_info=commit_info)

        if self.json:
            output_json = self.config.hook.pytest_benchmark_generate_json(
                config=self.config,
                benchmarks=self.benchmarks,
                include_data=True,
                machine_info=self.machine_info,
                commit_info=commit_info,
            )
            self.config.hook.pytest_benchmark_update_json(
                config=self.config,
                benchmarks=self.benchmarks,
                output_json=output_json,
            )
            self.save_json(output_json)

        if save:
            output_json = self.config.hook.pytest_benchmark_generate_json(
                config=self.config,
                benchmarks=self.benchmarks,
                include_data=self.save_data,
                machine_info=self.machine_info,
                commit_info=commit_info,
            )
            self.config.hook.pytest_benchmark_update_json(
                config=self.config,
                benchmarks=self.benchmarks,
                output_json=output_json,
            )
            self.storage.save(output_json, save)

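    # Loads previously saved runs for --benchmark-compare: just the most
    # recent run when the flag is bare, otherwise every run matching the
    # given name/glob. Each file's benchmarks are indexed by fullname for
    # prepare_benchmarks() above.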
    def handle_loading(self):
        compared_mapping = {}
        if self.compare:
            if self.compare is True:
                compared_benchmarks = list(self.storage.load())[-1:]
            else:
                compared_benchmarks = list(self.storage.load(self.compare))

            if not compared_benchmarks:
                msg = "Can't compare. No benchmark files in %r" % str(self.storage)
                if self.compare is True:
                    msg += ". Can't load the previous benchmark."
                    code = "BENCHMARK-C2"
                else:
                    msg += " match %r." % self.compare
                    code = "BENCHMARK-C1"
                self.logger.warn(code, msg, fslocation=self.storage.location)

            for path, compared_benchmark in compared_benchmarks:
                self.config.hook.pytest_benchmark_compare_machine_info(
                    config=self.config,
                    benchmarksession=self,
                    machine_info=self.machine_info,
                    compared_benchmark=compared_benchmark,
                )
                compared_mapping[path] = dict(
                    (bench['fullname'], bench) for bench in compared_benchmark['benchmarks']
                )
                self.logger.info("Comparing against benchmarks from: %s" % path)
        self.compared_mapping = compared_mapping

    def finish(self):
        self.handle_saving()
        self.handle_loading()
        prepared_benchmarks = list(self.prepare_benchmarks())
        if prepared_benchmarks:
            self.groups = self.config.hook.pytest_benchmark_group_stats(
                config=self.config,
                benchmarks=prepared_benchmarks,
                group_by=self.group_by
            )

    def display(self, tr):
        if not self.groups:
            return

        tr.ensure_newline()
        results_table = TableResults(
            columns=self.columns,
            sort=self.sort,
            histogram=self.histogram,
            name_format=self.name_format,
            logger=self.logger
        )
        results_table.display(tr, self.groups)
        self.check_regressions()
        self.display_cprofile(tr)

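    # --benchmark-compare-fail is meaningless without loaded comparison data,
    # hence the UsageError guard; any recorded regressions abort the run via
    # PerformanceRegression.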
    def check_regressions(self):
        if self.compare_fail and not self.compared_mapping:
            raise pytest.UsageError("--benchmark-compare-fail requires valid --benchmark-compare.")

        if self.performance_regressions:
            self.logger.error("Performance has regressed:\n%s" % "\n".join(
                "\t%s - %s" % line for line in self.performance_regressions
            ))
            raise PerformanceRegression("Performance has regressed.")

    def display_cprofile(self, tr):
        if self.options["use_cprofile"]:
            tr.section("cProfile information")
            tr.write_line("Time in s")
            for group in self.groups:
                group_name, benchmarks = group
                for benchmark in benchmarks:
                    tr.write(benchmark["fullname"], yellow=True)
                    if benchmark["source"]:
                        tr.write_line(" ({})".format(benchmark["source"]))
                    else:
                        tr.write("\n")
                    tr.write_line("ncalls\ttottime\tpercall\tcumtime\tpercall\tfilename:lineno(function)")
                    for function_info in benchmark["cprofile"]:
                        line = ("{ncalls_recursion}\t{tottime:.{prec}f}\t{tottime_per:.{prec}f}\t{cumtime:.{prec}f}"
                                "\t{cumtime_per:.{prec}f}\t{function_name}").format(
                            prec=4, **function_info)
                        tr.write_line(line)
                    tr.write("\n")
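For context on the pytest_benchmark_generate_json / pytest_benchmark_update_json hook calls in handle_saving() above: a plugin or a project's conftest.py can implement these hooks to reshape the JSON before it is written. A minimal sketch, assuming the hook arguments mirror the keyword arguments passed above and that each saved entry keeps its per-round timings under stats["data"] (as the include_data flags suggest):

# conftest.py -- illustrative sketch, not part of the file under review.
def pytest_benchmark_update_json(config, benchmarks, output_json):
    # Drop raw per-round timings to keep saved files small. The "data" key
    # is an assumption based on the include_data flags passed to
    # pytest_benchmark_generate_json in handle_saving() above.
    for bench in output_json["benchmarks"]:
        bench.get("stats", {}).pop("data", None)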