import json
import logging
import os
import sys
from io import BytesIO
from io import StringIO
from pathlib import Path

import py
import pytest
from freezegun import freeze_time

from pytest_benchmark import plugin
from pytest_benchmark.plugin import BenchmarkSession
from pytest_benchmark.plugin import pytest_benchmark_compare_machine_info
from pytest_benchmark.plugin import pytest_benchmark_generate_json
from pytest_benchmark.plugin import pytest_benchmark_group_stats
from pytest_benchmark.session import PerformanceRegression
from pytest_benchmark.stats import normalize_stats
from pytest_benchmark.storage.file import FileStorage
from pytest_benchmark.utils import NAME_FORMATTERS
from pytest_benchmark.utils import DifferenceRegressionCheck
from pytest_benchmark.utils import PercentageRegressionCheck
from pytest_benchmark.utils import get_machine_id

pytest_plugins = "pytester"


THIS = py.path.local(__file__)
STORAGE = THIS.dirpath(THIS.purebasename)

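# Expected JSON payload for the save tests: a canned run stored next to this
# module, with machine/commit info stubbed out so comparisons are deterministic.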
JSON_DATA = json.load(STORAGE.listdir('0030_*.json')[0].open())
JSON_DATA["machine_info"] = {'foo': 'bar'}
JSON_DATA["commit_info"] = {'foo': 'bar'}
for bench in JSON_DATA["benchmarks"]:
    normalize_stats(bench['stats'])


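# Bare-bones stand-in for pytest's config/namespace objects; it only offers
# the attribute, item and getoption() access that the session code touches.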
class Namespace(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def __getitem__(self, item):
        return self.__dict__[item]

    def getoption(self, item, default=None):
        try:
            return self[item]
        except KeyError:
            return default


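# A BytesIO whose contents remain readable after close(), so tests can
# inspect what the session wrote once it has closed the stream.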
class LooseFileLike(BytesIO):
    def close(self):
        value = self.getvalue()
        super(LooseFileLike, self).close()
        self.getvalue = lambda: value


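# A hand-wired BenchmarkSession (no real pytest config): hooks are stubbed
# with lambdas and benchmarks come from the newest saved run in STORAGE.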
class MockSession(BenchmarkSession):
    def __init__(self, name_format):
        self.histogram = True
        self.verbose = False
        self.benchmarks = []
        self.performance_regressions = []
        self.sort = u"min"
        self.compare = '0001'
        logger = logging.getLogger(__name__)
        self.logger = Namespace(
            debug=lambda *args, **_kwargs: logger.debug(*args),
            info=lambda *args, **_kwargs: logger.info(*args),
            warn=lambda *args, **_kwargs: logger.warning(*args),
            error=lambda *args, **_kwargs: logger.error(*args),
        )
        self.machine_id = "FoobarOS"
        self.machine_info = {'foo': 'bar'}
        self.save = self.autosave = self.json = False
        self.name_format = NAME_FORMATTERS[name_format]
        self.options = {
            'min_rounds': 123,
            'min_time': 234,
            'max_time': 345,
            'use_cprofile': False,
        }
        self.cprofile_sort_by = 'cumtime'
        self.compare_fail = []
        self.config = Namespace(hook=Namespace(
            pytest_benchmark_group_stats=pytest_benchmark_group_stats,
            pytest_benchmark_generate_machine_info=lambda **kwargs: {'foo': 'bar'},
            pytest_benchmark_update_machine_info=lambda **kwargs: None,
            pytest_benchmark_compare_machine_info=pytest_benchmark_compare_machine_info,
            pytest_benchmark_generate_json=pytest_benchmark_generate_json,
            pytest_benchmark_update_json=lambda **kwargs: None,
            pytest_benchmark_generate_commit_info=lambda **kwargs: {'foo': 'bar'},
            pytest_benchmark_update_commit_info=lambda **kwargs: None,
        ))
        self.storage = FileStorage(str(STORAGE), default_machine_id=get_machine_id(), logger=self.logger)
        self.group_by = 'group'
        self.columns = ['min', 'max', 'mean', 'stddev', 'median', 'iqr',
                        'outliers', 'rounds', 'iterations', 'ops']
        # Only the most recent saved run is loaded (note the ``break``);
        # ``_bench=bench`` binds each benchmark into its lambda's default.
        for bench_file, data in reversed(list(self.storage.load("[0-9][0-9][0-9][0-9]_*"))):
            self.benchmarks.extend(
                Namespace(
                    as_dict=lambda include_data=False, stats=True, flat=False, _bench=bench, cprofile='cumtime':
                        dict(_bench, **_bench["stats"]) if flat else dict(_bench),
                    name=bench['name'],
                    fullname=bench['fullname'],
                    group=bench['group'],
                    options=bench['options'],
                    has_error=False,
                    params=None,
                    **bench['stats']
                )
                for bench in data['benchmarks']
            )
            break


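# Python 2/3 compatibility shims for text handling.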
try:
    text_type = unicode
except NameError:
    text_type = str


def force_text(text):
    if isinstance(text, text_type):
        return text
    else:
        return text.decode('utf-8')


def force_bytes(text):
    if isinstance(text, text_type):
        return text.encode('utf-8')
    else:
        return text


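# Run every test once per name formatter so all three display styles
# ('short', 'normal', 'long') are exercised.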
@pytest.fixture(params=['short', 'normal', 'long'])
def name_format(request):
    return request.param


@pytest.fixture
def sess(request, name_format):
    return MockSession(name_format)


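# Swap the session's (and its storage's) loggers for ones that capture
# everything into a StringIO, so tests can assert on the logged output.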
def make_logger(sess):
    output = StringIO()
    sess.logger = Namespace(
        warn=lambda code, text, **opts: output.write(u"%s: %s %s\n" % (code, force_text(text), opts)),
        info=lambda text, **opts: output.write(force_text(text) + u'\n'),
        error=lambda text: output.write(force_text(text) + u'\n'),
    )
    sess.storage.logger = Namespace(
        warn=lambda code, text, **opts: output.write(u"%s: %s %s\n" % (code, force_text(text), opts)),
        info=lambda text, **opts: output.write(force_text(text) + u'\n'),
        error=lambda text: output.write(force_text(text) + u'\n'),
    )
    return output


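# Smoke test: load, compare against every saved run and render the display
# (with the histogram path set) without asserting on the output.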
def test_rendering(sess):
    output = make_logger(sess)
    sess.histogram = os.path.join('docs', 'sample')
    sess.compare = '*/*'
    sess.sort = 'name'
    sess.handle_loading()
    sess.finish()
    sess.display(Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))


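# Regression thresholds: fail when 'stddev' regresses by more than 5%
# (relative) or 'max' by more than 0.000001 (absolute).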
def test_regression_checks(sess, name_format):
    output = make_logger(sess)
    sess.handle_loading()
    sess.performance_regressions = []
    sess.compare_fail = [
        PercentageRegressionCheck("stddev", 5),
        DifferenceRegressionCheck("max", 0.000001)
    ]
    sess.finish()
    pytest.raises(PerformanceRegression, sess.display, Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    assert sess.performance_regressions == {
        'normal': [
            ('test_xfast_parametrized[0] (0001_b87b9aa)',
             "Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000"),
            ('test_xfast_parametrized[0] (0001_b87b9aa)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000")
        ],
        'short': [
            ('xfast_parametrized[0] (0001)',
             "Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000"),
            ('xfast_parametrized[0] (0001)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000")
        ],
        'long': [
            ('tests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes)',
             "Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000"),
            ('tests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000")
        ],
    }[name_format]
    output = make_logger(sess)
    pytest.raises(PerformanceRegression, sess.check_regressions)
    print(output.getvalue())
    assert output.getvalue() == {
        'short': """Performance has regressed:
\txfast_parametrized[0] (0001) - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\txfast_parametrized[0] (0001) - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
""",
        'normal': """Performance has regressed:
\ttest_xfast_parametrized[0] (0001_b87b9aa) - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\ttest_xfast_parametrized[0] (0001_b87b9aa) - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
""",
        'long': """Performance has regressed:
\ttests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes) - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\ttests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes) - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
"""
    }[name_format]


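# Same checks against run 0002, whose saved stddev is zero, so the relative
# change comes out as infinity ('inf' in the expected output).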
@pytest.mark.skipif(sys.version_info[:2] < (2, 7),
                    reason="Something weird going on, see: https://bugs.python.org/issue4482")
def test_regression_checks_inf(sess, name_format):
    output = make_logger(sess)
    sess.compare = '0002'
    sess.handle_loading()
    sess.performance_regressions = []
    sess.compare_fail = [
        PercentageRegressionCheck("stddev", 5),
        DifferenceRegressionCheck("max", 0.000001)
    ]
    sess.finish()
    pytest.raises(PerformanceRegression, sess.display, Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    assert sess.performance_regressions == {
        'normal': [
            ('test_xfast_parametrized[0] (0002_b87b9aa)',
             "Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000"),
            ('test_xfast_parametrized[0] (0002_b87b9aa)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000")
        ],
        'short': [
            ('xfast_parametrized[0] (0002)',
             "Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000"),
            ('xfast_parametrized[0] (0002)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000")
        ],
        'long': [
            ('tests/test_normal.py::test_xfast_parametrized[0] '
             '(0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes)',
             "Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000"),
            ('tests/test_normal.py::test_xfast_parametrized[0] '
             '(0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000")
        ]
    }[name_format]
    output = make_logger(sess)
    pytest.raises(PerformanceRegression, sess.check_regressions)
    print(output.getvalue())
    assert output.getvalue() == {
        'short': """Performance has regressed:
\txfast_parametrized[0] (0002) - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\txfast_parametrized[0] (0002) - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
""",
        'normal': """Performance has regressed:
\ttest_xfast_parametrized[0] (0002_b87b9aa) - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\ttest_xfast_parametrized[0] (0002_b87b9aa) - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
""",
        'long': """Performance has regressed:
\ttests/test_normal.py::test_xfast_parametrized[0] (0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes) - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\ttests/test_normal.py::test_xfast_parametrized[0] (0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes) - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
"""
    }[name_format]


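# Compare the current (mocked) run against saved run 0001 and check the
# rendered table, including the machine_info mismatch warning (BENCHMARK-C6).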
def test_compare_1(sess, LineMatcher):
    output = make_logger(sess)
    sess.handle_loading()
    sess.finish()
    sess.display(Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    LineMatcher(output.getvalue().splitlines()).fnmatch_lines([
        'BENCHMARK-C6: Benchmark machine_info is different. Current: {foo: "bar"} VS saved: {machine: "x86_64", node: "minibox", processor: "x86_64", python_compiler: "GCC 4.6.3", python_implementation: "CPython", python_version: "2.7.3", release: "3.13.0-55-generic", system: "Linux"}. {\'fslocation\': \'tests*test_storage\'}',
        'Comparing against benchmarks from: 0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes.json',
        '',
        '*------------------------------------------------------------------------ benchmark: 2 tests -----------------------------------------------------------------------*',
        'Name (time in ns) * Min *Max Mean StdDev Median IQR Outliers Rounds Iterations OPS (Mops/s) *',
        '-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        '*xfast_parametrized[[]0[]] (0001*) 217.3145 (1.0) 11*447.3891 (1.0) 262.2408 (1.00) 214.0442 (1.0) 220.1664 (1.00) 38.2154 (2.03) 90;1878 9987 418 3.8133 (1.00)*',
        '*xfast_parametrized[[]0[]] (NOW) * 217.9511 (1.00) 13*290.0380 (1.16) 261.2051 (1.0) 263.9842 (1.23) 220.1638 (1.0) 18.8080 (1.0) 160;1726 9710 431 3.8284 (1.0)*',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        'Legend:',
        ' Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.',
        ' OPS: Operations Per Second, computed as 1 / Mean',
    ])


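# As above but against run 0002, where the zero saved stddev shows up as an
# 'inf' ratio in the rendered table.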
def test_compare_2(sess, LineMatcher):
    output = make_logger(sess)
    sess.compare = '0002'
    sess.handle_loading()
    sess.finish()
    sess.display(Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        section=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    LineMatcher(output.getvalue().splitlines()).fnmatch_lines([
        'BENCHMARK-C6: Benchmark machine_info is different. Current: {foo: "bar"} VS saved: {machine: "x86_64", node: "minibox", processor: "x86_64", python_compiler: "GCC 4.6.3", python_implementation: "CPython", python_version: "2.7.3", release: "3.13.0-55-generic", system: "Linux"}. {\'fslocation\': \'tests*test_storage\'}',
        'Comparing against benchmarks from: 0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes.json',
        '',
        '*------------------------------------------------------------------------ benchmark: 2 tests -----------------------------------------------------------------------*',
        'Name (time in ns) * Min *Max Mean StdDev Median IQR Outliers Rounds Iterations OPS (Mops/s)*',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        '*xfast_parametrized[[]0[]] (0002*) 216.9028 (1.0) 7*739.2997 (1.0) 254.0585 (1.0) 0.0000 (1.0) 219.8103 (1.0) 27.3309 (1.45) 235;1688 11009 410 3.9361 (1.0)*',
        '*xfast_parametrized[[]0[]] (NOW) * 217.9511 (1.00) 13*290.0380 (1.72) 261.2051 (1.03) 263.9842 (inf) 220.1638 (1.00) 18.8080 (1.0) 160;1726 9710 431 3.8284 (0.97)*',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        'Legend:',
        ' Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.',
        ' OPS: Operations Per Second, computed as 1 / Mean',
    ])


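# Saving behavior. Time is frozen and the plugin version pinned so the
# generated JSON matches JSON_DATA exactly.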
@freeze_time("2015-08-15T00:04:18.687119")
def test_save_json(sess, tmpdir, monkeypatch):
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = False
    sess.autosave = False
    sess.json = LooseFileLike()
    sess.save_data = False
    sess.handle_saving()
    assert tmpdir.listdir() == []
    assert json.loads(sess.json.getvalue().decode()) == JSON_DATA


@freeze_time("2015-08-15T00:04:18.687119")
def test_save_with_name(sess, tmpdir, monkeypatch):
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = 'foobar'
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    print(files)
    assert len(files) == 1
    assert json.load(files[0].open('rU')) == JSON_DATA


@freeze_time("2015-08-15T00:04:18.687119")
def test_save_no_name(sess, tmpdir, monkeypatch):
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = True
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    assert json.load(files[0].open('rU')) == JSON_DATA


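# Benchmarks flagged with has_error are excluded from the saved JSON, leaving
# an empty benchmark list but intact metadata.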
@freeze_time("2015-08-15T00:04:18.687119")
def test_save_with_error(sess, tmpdir, monkeypatch):
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = True
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    for bench in sess.benchmarks:
        bench.has_error = True
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    assert json.load(files[0].open('rU')) == {
        'benchmarks': [],
        'commit_info': {'foo': 'bar'},
        'datetime': '2015-08-15T00:04:18.687119',
        'machine_info': {'foo': 'bar'},
        'version': '2.5.0'
    }


@freeze_time("2015-08-15T00:04:18.687119")
def test_autosave(sess, tmpdir, monkeypatch):
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = False
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    assert json.load(files[0].open('rU')) == JSON_DATA