from __future__ import division
from __future__ import print_function

import pytest

from .fixture import statistics
from .fixture import statistics_error
from .logger import Logger
from .table import TableResults
from .utils import NAME_FORMATTERS
from .utils import SecondsDecimal
from .utils import cached_property
from .utils import first_or_value
from .utils import get_machine_id
from .utils import load_storage
from .utils import load_timer
from .utils import safe_dumps
from .utils import short_filename


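# Raised by BenchmarkSession.check_regressions() when a
# --benchmark-compare-fail check fails; subclassing pytest.UsageError
# makes pytest abort the run with a proper error.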
class PerformanceRegression(pytest.UsageError):
    pass


class BenchmarkSession(object):
    compared_mapping = None
    groups = None

    def __init__(self, config):
        self.verbose = config.getoption("benchmark_verbose")
        self.logger = Logger(self.verbose, config)
        self.config = config
        self.performance_regressions = []
        self.benchmarks = []
        self.machine_id = get_machine_id()
        self.storage = load_storage(
            config.getoption("benchmark_storage"),
            logger=self.logger,
            default_machine_id=self.machine_id,
            netrc=config.getoption("benchmark_netrc")
        )
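        # Runtime options for the benchmark fixture, each backed by a
        # --benchmark-* command line option.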
        self.options = dict(
            min_time=SecondsDecimal(config.getoption("benchmark_min_time")),
            min_rounds=config.getoption("benchmark_min_rounds"),
            max_time=SecondsDecimal(config.getoption("benchmark_max_time")),
            timer=load_timer(config.getoption("benchmark_timer")),
            calibration_precision=config.getoption("benchmark_calibration_precision"),
            disable_gc=config.getoption("benchmark_disable_gc"),
            warmup=config.getoption("benchmark_warmup"),
            warmup_iterations=config.getoption("benchmark_warmup_iterations"),
            use_cprofile=bool(config.getoption("benchmark_cprofile")),
        )
        self.skip = config.getoption("benchmark_skip")
        self.disabled = config.getoption("benchmark_disable") and not config.getoption("benchmark_enable")
        self.cprofile_sort_by = config.getoption("benchmark_cprofile")

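        # Auto-disable benchmarks where they cannot run reliably: under
        # pytest-xdist (parallel workers skew timings) or when the
        # `statistics` module could not be imported.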
        if config.getoption("dist", "no") != "no" and not self.skip:
            self.logger.warn(
                "BENCHMARK-U2",
                "Benchmarks are automatically disabled because xdist plugin is active. "
                "Benchmarks cannot be performed reliably in a parallelized environment.",
                fslocation="::"
            )
            self.disabled = True
        if hasattr(config, "slaveinput"):
            self.disabled = True
        if not statistics:
            self.logger.warn(
                "BENCHMARK-U3",
                "Benchmarks are automatically disabled because we could not import `statistics`\n\n%s" %
                statistics_error,
                fslocation="::"
            )
            self.disabled = True

        self.only = config.getoption("benchmark_only")
        self.sort = config.getoption("benchmark_sort")
        self.columns = config.getoption("benchmark_columns")
        if self.skip and self.only:
            raise pytest.UsageError("Can't have both --benchmark-only and --benchmark-skip options.")
        if self.disabled and self.only:
            raise pytest.UsageError(
                "Can't have both --benchmark-only and --benchmark-disable options. Note that --benchmark-disable is "
                "automatically activated if xdist is on or you're missing the statistics dependency.")
        self.group_by = config.getoption("benchmark_group_by")
        self.save = config.getoption("benchmark_save")
        self.autosave = config.getoption("benchmark_autosave")
        self.save_data = config.getoption("benchmark_save_data")
        self.json = config.getoption("benchmark_json")
        self.compare = config.getoption("benchmark_compare")
        self.compare_fail = config.getoption("benchmark_compare_fail")
        self.name_format = NAME_FORMATTERS[config.getoption("benchmark_name")]

        self.histogram = first_or_value(config.getoption("benchmark_histogram"), False)

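    # Computed once per session; hooks let other plugins generate and
    # amend the machine info that gets saved alongside the results.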
    @cached_property
    def machine_info(self):
        obj = self.config.hook.pytest_benchmark_generate_machine_info(config=self.config)
        self.config.hook.pytest_benchmark_update_machine_info(
            config=self.config,
            machine_info=obj
        )
        return obj

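    # Yields one flattened dict per saved run a benchmark was compared
    # against (with the stored stats merged in), then one for the current
    # run, whose "source" is "NOW" if any comparison happened.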
    def prepare_benchmarks(self):
        for bench in self.benchmarks:
            if bench:
                compared = False
                for path, compared_mapping in self.compared_mapping.items():
                    if bench.fullname in compared_mapping:
                        compared = compared_mapping[bench.fullname]
                        source = short_filename(path, self.machine_id)
                        flat_bench = bench.as_dict(include_data=False, stats=False, cprofile=self.cprofile_sort_by)
                        flat_bench.update(compared["stats"])
                        flat_bench["path"] = str(path)
                        flat_bench["source"] = source
                        if self.compare_fail:
                            for check in self.compare_fail:
                                fail = check.fails(bench, flat_bench)
                                if fail:
                                    self.performance_regressions.append((self.name_format(flat_bench), fail))
                        yield flat_bench
                flat_bench = bench.as_dict(include_data=False, flat=True, cprofile=self.cprofile_sort_by)
                flat_bench["path"] = None
                flat_bench["source"] = compared and "NOW"
                yield flat_bench

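    # Note: the dump is encoded to bytes, so self.json (the file object
    # behind --benchmark-json) is presumably opened in binary mode.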
    def save_json(self, output_json):
        with self.json as fh:
            fh.write(safe_dumps(output_json, ensure_ascii=True, indent=4).encode())
        self.logger.info("Wrote benchmark data in: %s" % self.json, purple=True)

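    # Two independent outputs: --benchmark-json always includes the raw
    # timing data, while saves to storage only include it when
    # --benchmark-save-data was given.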
    def handle_saving(self):
        save = self.save or self.autosave
        if save or self.json:
            if not self.benchmarks:
                self.logger.warn("BENCHMARK-U2", "Not saving anything, no benchmarks have been run!")
                return
            commit_info = self.config.hook.pytest_benchmark_generate_commit_info(config=self.config)
            self.config.hook.pytest_benchmark_update_commit_info(config=self.config, commit_info=commit_info)

            if self.json:
                output_json = self.config.hook.pytest_benchmark_generate_json(
                    config=self.config,
                    benchmarks=self.benchmarks,
                    include_data=True,
                    machine_info=self.machine_info,
                    commit_info=commit_info,
                )
                self.config.hook.pytest_benchmark_update_json(
                    config=self.config,
                    benchmarks=self.benchmarks,
                    output_json=output_json,
                )
                self.save_json(output_json)

            if save:
                output_json = self.config.hook.pytest_benchmark_generate_json(
                    config=self.config,
                    benchmarks=self.benchmarks,
                    include_data=self.save_data,
                    machine_info=self.machine_info,
                    commit_info=commit_info,
                )
                self.config.hook.pytest_benchmark_update_json(
                    config=self.config,
                    benchmarks=self.benchmarks,
                    output_json=output_json,
                )
                self.storage.save(output_json, save)

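    # A bare --benchmark-compare compares against the most recent saved
    # run; --benchmark-compare=<glob-or-id> loads all matching runs.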
    def handle_loading(self):
        compared_mapping = {}
        if self.compare:
            if self.compare is True:
                compared_benchmarks = list(self.storage.load())[-1:]
            else:
                compared_benchmarks = list(self.storage.load(self.compare))

            if not compared_benchmarks:
                msg = "Can't compare. No benchmark files in %r" % str(self.storage)
                if self.compare is True:
                    msg += ". Can't load the previous benchmark."
                    code = "BENCHMARK-C2"
                else:
                    msg += " match %r." % self.compare
                    code = "BENCHMARK-C1"
                self.logger.warn(code, msg, fslocation=self.storage.location)

            for path, compared_benchmark in compared_benchmarks:
                self.config.hook.pytest_benchmark_compare_machine_info(
                    config=self.config,
                    benchmarksession=self,
                    machine_info=self.machine_info,
                    compared_benchmark=compared_benchmark,
                )
                compared_mapping[path] = dict(
                    (bench['fullname'], bench) for bench in compared_benchmark['benchmarks']
                )
                self.logger.info("Comparing against benchmarks from: %s" % path, newline=False)
        self.compared_mapping = compared_mapping

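    # Runs once the test session is over: persist results first, then
    # group the flattened stats for display().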
    def finish(self):
        self.handle_saving()
        prepared_benchmarks = list(self.prepare_benchmarks())
        if prepared_benchmarks:
            self.groups = self.config.hook.pytest_benchmark_group_stats(
                config=self.config,
                benchmarks=prepared_benchmarks,
                group_by=self.group_by
            )

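    # Renders the results table to the terminal reporter `tr`, then
    # raises if any comparison check recorded a regression.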
    def display(self, tr):
        if not self.groups:
            return

        tr.ensure_newline()
        results_table = TableResults(
            columns=self.columns,
            sort=self.sort,
            histogram=self.histogram,
            name_format=self.name_format,
            logger=self.logger
        )
        results_table.display(tr, self.groups)
        self.check_regressions()
        self.display_cprofile(tr)

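    # The regressions themselves are collected in prepare_benchmarks();
    # this just reports them and fails the run.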
    def check_regressions(self):
        if self.compare_fail and not self.compared_mapping:
            raise pytest.UsageError("--benchmark-compare-fail requires valid --benchmark-compare.")

        if self.performance_regressions:
            self.logger.error("Performance has regressed:\n%s" % "\n".join(
                "\t%s - %s" % line for line in self.performance_regressions
            ))
            raise PerformanceRegression("Performance has regressed.")

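    # Prints a per-benchmark profile table when --benchmark-cprofile was
    # used; columns mirror the stdlib pstats output.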
    def display_cprofile(self, tr):
        if self.options["use_cprofile"]:
            tr.section("cProfile information")
            tr.write_line("Time in s")
            for group in self.groups:
                group_name, benchmarks = group
                for benchmark in benchmarks:
                    tr.write(benchmark["fullname"], yellow=True)
                    if benchmark["source"]:
                        tr.write_line(" ({0})".format(benchmark["source"]))
                    else:
                        tr.write("\n")
                    tr.write_line("ncalls\ttottime\tpercall\tcumtime\tpercall\tfilename:lineno(function)")
                    for function_info in benchmark["cprofile"]:
                        line = ("{ncalls_recursion}\t{tottime:.{prec}f}\t{tottime_per:.{prec}f}\t{cumtime:.{prec}f}"
                                "\t{cumtime_per:.{prec}f}\t{function_name}").format(
                            prec=4, **function_info)
                        tr.write_line(line)
                    tr.write("\n")