1
|
|
|
import json |
2
|
|
|
import logging |
3
|
|
|
import os |
4
|
|
|
import sys |
5
|
|
|
from io import BytesIO |
6
|
|
|
from io import StringIO |
7
|
|
|
|
8
|
|
|
import py |
9
|
|
|
import pytest |
10
|
|
|
from freezegun import freeze_time |
11
|
|
|
from pathlib import Path |
12
|
|
|
|
13
|
|
|
from pytest_benchmark import plugin |
14
|
|
|
from pytest_benchmark.plugin import BenchmarkSession |
15
|
|
|
from pytest_benchmark.plugin import pytest_benchmark_compare_machine_info |
16
|
|
|
from pytest_benchmark.plugin import pytest_benchmark_generate_json |
17
|
|
|
from pytest_benchmark.plugin import pytest_benchmark_group_stats |
18
|
|
|
from pytest_benchmark.session import PerformanceRegression |
19
|
|
|
from pytest_benchmark.storage import Storage |
20
|
|
|
from pytest_benchmark.utils import NAME_FORMATTERS |
21
|
|
|
from pytest_benchmark.utils import DifferenceRegressionCheck |
22
|
|
|
from pytest_benchmark.utils import PercentageRegressionCheck |
23
|
|
|
from pytest_benchmark.utils import get_machine_id |
24
|
|
|
|
25
|
|
|
|
26
|
|
|
# Enable the ``pytester``/``testdir`` fixtures used by these integration tests.
pytest_plugins = "pytester"


# Directory of canned benchmark runs that sits next to this file and shares
# its basename (e.g. tests/test_storage/ for test_storage.py).
THIS = py.path.local(__file__)
STORAGE = THIS.dirpath(THIS.purebasename)

# Expected save/json payloads: two independent copies of the newest stored run
# (0030_*.json), each with machine/commit info overridden to match the stub
# hooks installed by MockSession ({'foo': 'bar'}).
SAVE_DATA = json.load(STORAGE.listdir('0030_*.json')[0].open())
JSON_DATA = json.load(STORAGE.listdir('0030_*.json')[0].open())
SAVE_DATA["machine_info"] = JSON_DATA["machine_info"] = {'foo': 'bar'}
SAVE_DATA["commit_info"] = JSON_DATA["commit_info"] = {'foo': 'bar'}
36
|
|
|
|
37
|
|
|
|
38
|
|
|
class Namespace(object):
    """A bag of attributes that can also be indexed like a dict.

    Used throughout these tests as a stand-in for config/hook/terminal
    objects: ``Namespace(foo=1).foo`` and ``Namespace(foo=1)['foo']``
    both yield ``1``.  Missing keys raise ``KeyError``, matching dict
    semantics.
    """

    def __init__(self, **kwargs):
        # Every keyword argument becomes an instance attribute.
        vars(self).update(kwargs)

    def __getitem__(self, item):
        # Index straight into the instance dict so a missing name raises
        # KeyError (not AttributeError, as getattr would).
        return vars(self)[item]
44
|
|
|
|
45
|
|
|
|
46
|
|
|
class LooseFileLike(BytesIO):
    """A BytesIO whose contents stay readable after ``close()``.

    ``BytesIO.getvalue()`` raises ``ValueError`` once the buffer is
    closed; this subclass snapshots the bytes first so tests can still
    inspect what was written to the "file".
    """

    def close(self):
        # Capture the buffer before the base class discards it.
        snapshot = self.getvalue()
        super(LooseFileLike, self).close()
        # Shadow the bound method with a closure over the snapshot.
        self.getvalue = lambda: snapshot
51
|
|
|
|
52
|
|
|
|
53
|
|
|
class MockSession(BenchmarkSession):
    """A ``BenchmarkSession`` preloaded with canned data from ``STORAGE``.

    Deliberately does NOT call ``BenchmarkSession.__init__``: instead of a
    real pytest config it installs plain attributes plus ``Namespace``
    stand-ins for ``config.hook``, then loads the newest ``NNNN_*`` json
    file from storage as the "current" run's benchmarks.

    :param name_format: key into ``NAME_FORMATTERS`` ('short'/'normal'/'long').
    """

    def __init__(self, name_format):
        self.histogram = True
        self.storage = Storage(str(STORAGE), default_machine_id=get_machine_id(), logger=None)
        self.benchmarks = []
        self.performance_regressions = []
        self.sort = u"min"
        self.compare = '0001'
        self.logger = logging.getLogger(__name__)
        self.machine_id = "FoobarOS"
        self.machine_info = {'foo': 'bar'}
        self.name_format = NAME_FORMATTERS[name_format]
        self.save = self.autosave = self.json = False
        self.options = {
            'min_rounds': 123,
            'min_time': 234,
            'max_time': 345,
        }
        self.compare_fail = []
        # Stub out every hook the session calls; generate_* return the fixed
        # {'foo': 'bar'} payload the module-level SAVE_DATA/JSON_DATA expect.
        self.config = Namespace(hook=Namespace(
            pytest_benchmark_group_stats=pytest_benchmark_group_stats,
            pytest_benchmark_generate_machine_info=lambda **kwargs: {'foo': 'bar'},
            pytest_benchmark_update_machine_info=lambda **kwargs: None,
            pytest_benchmark_compare_machine_info=pytest_benchmark_compare_machine_info,
            pytest_benchmark_generate_json=pytest_benchmark_generate_json,
            pytest_benchmark_update_json=lambda **kwargs: None,
            pytest_benchmark_generate_commit_info=lambda **kwargs: {'foo': 'bar'},
            pytest_benchmark_update_commit_info=lambda **kwargs: None,
        ))
        self.group_by = 'group'
        self.columns = ['min', 'max', 'mean', 'stddev', 'median', 'iqr',
                        'outliers', 'rounds', 'iterations']
        # Only the newest stored run is loaded (note the ``break``); each of
        # its benchmarks is exposed as a lightweight Namespace that mimics
        # the BenchmarkStats interface the session expects.
        for bench_file in reversed(self.storage.query("[0-9][0-9][0-9][0-9]_*")):
            # BUGFIX: mode was 'rU'; the 'U' flag was removed in Python 3.11
            # (and universal newlines are the default anyway), so use 'r'.
            with bench_file.open('r') as fh:
                data = json.load(fh)
            self.benchmarks.extend(
                Namespace(
                    # ``_bench=bench`` binds the loop variable at definition
                    # time so every lambda keeps its own benchmark dict.
                    as_dict=lambda include_data=False, stats=True, flat=False, _bench=bench:
                        dict(_bench, **_bench["stats"]) if flat else dict(_bench),
                    name=bench['name'],
                    fullname=bench['fullname'],
                    group=bench['group'],
                    options=bench['options'],
                    has_error=False,
                    params=None,
                    **bench['stats']
                )
                for bench in data['benchmarks']
            )
            break
103
|
|
|
|
104
|
|
|
|
105
|
|
|
# Py2/Py3 text-type shim: ``unicode`` exists only on Python 2, where it is
# the text type; on Python 3 the lookup raises NameError and ``str`` (which
# is already unicode) is used instead.
try:
    text_type = unicode
except NameError:
    text_type = str
109
|
|
|
|
110
|
|
|
|
111
|
|
|
def force_text(text):
    """Return *text* as the native text type, decoding bytes as UTF-8."""
    if not isinstance(text, text_type):
        text = text.decode('utf-8')
    return text
116
|
|
|
|
117
|
|
|
|
118
|
|
|
def force_bytes(text):
    """Return *text* as bytes, encoding text input as UTF-8."""
    if not isinstance(text, text_type):
        return text
    return text.encode('utf-8')
123
|
|
|
|
124
|
|
|
|
125
|
|
|
@pytest.fixture(params=['short', 'normal', 'long'])
def name_format(request):
    """Run each test once per supported benchmark-name format."""
    fmt = request.param
    return fmt
128
|
|
|
|
129
|
|
|
|
130
|
|
|
@pytest.fixture
def sess(request, name_format):
    """Provide a MockSession preloaded with the stored benchmark data."""
    session = MockSession(name_format)
    return session
133
|
|
|
|
134
|
|
|
|
135
|
|
|
def make_logger(sess):
    """Replace *sess*'s logger with one that records into a StringIO.

    Returns the buffer so the caller can assert on everything logged.
    """
    buffer = StringIO()

    def _warn(code, text, **opts):
        buffer.write(u"%s: %s %s\n" % (code, force_text(text), opts))

    def _info(text, **opts):
        buffer.write(force_text(text) + u'\n')

    def _error(text):
        buffer.write(force_text(text) + u'\n')

    sess.logger = Namespace(warn=_warn, info=_info, error=_error)
    return buffer
143
|
|
|
|
144
|
|
|
|
145
|
|
|
def test_rendering(sess):
    """Smoke-test finish()/display() with histogram generation enabled."""
    log = make_logger(sess)
    sess.histogram = os.path.join('docs', 'sample')
    sess.compare = '*/*'
    sess.sort = 'name'
    sess.finish()
    fake_terminal = Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: log.write(force_text(line) + u'\n'),
        write=lambda text, **opts: log.write(force_text(text)),
        rewrite=lambda text, **opts: log.write(force_text(text)),
    )
    sess.display(fake_terminal)
157
|
|
|
|
158
|
|
|
|
159
|
|
View Code Duplication |
def test_regression_checks(sess, name_format):
    """Failed --benchmark-compare-fail checks must raise PerformanceRegression
    and record one (formatted benchmark name, reason) pair per failed field,
    both via display() and via check_regressions().
    """
    output = make_logger(sess)
    sess.handle_loading()
    sess.performance_regressions = []
    # Fail when stddev regresses by more than 5% or max by more than 1e-6s.
    sess.compare_fail = [
        PercentageRegressionCheck("stddev", 5),
        DifferenceRegressionCheck("max", 0.000001)
    ]
    sess.finish()
    # display() itself raises once the comparison detects the regressions.
    pytest.raises(PerformanceRegression, sess.display, Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    # Expected pairs depend on the active name formatter fixture.
    assert sess.performance_regressions == {
        'normal': [
            ('test_xfast_parametrized[0] (0001_b87b9aa)',
             "Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000"),
            ('test_xfast_parametrized[0] (0001_b87b9aa)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000")
        ],
        'short': [
            ('xfast_parametrized[0] (0001)',
             "Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000"),
            ('xfast_parametrized[0] (0001)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000")
        ],
        'long': [
            ('tests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes)',
             "Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000"),
            ('tests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000")
        ],
    }[name_format]
    # check_regressions() raises too and logs a human-readable summary.
    output = make_logger(sess)
    pytest.raises(PerformanceRegression, sess.check_regressions)
    print(output.getvalue())
    assert output.getvalue() == {
        'short': """Performance has regressed:
\txfast_parametrized[0] (0001) - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\txfast_parametrized[0] (0001) - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
""",
        'normal': """Performance has regressed:
\ttest_xfast_parametrized[0] (0001_b87b9aa) - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\ttest_xfast_parametrized[0] (0001_b87b9aa) - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
""",
        'long': """Performance has regressed:
\ttests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes) - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\ttests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes) - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
"""
    }[name_format]
212
|
|
|
|
213
|
|
|
|
214
|
|
View Code Duplication |
@pytest.mark.skipif(sys.version_info[:2] < (2, 7),
                    reason="Something weird going on, see: https://bugs.python.org/issue4482")
def test_regression_checks_inf(sess, name_format):
    """Same as test_regression_checks but compared against run 0002, whose
    saved stddev is 0.0 — the percentage regression is therefore ``inf`` and
    must be formatted as such in names, reasons, and the logged summary.
    """
    output = make_logger(sess)
    sess.compare = '0002'
    sess.handle_loading()
    sess.performance_regressions = []
    sess.compare_fail = [
        PercentageRegressionCheck("stddev", 5),
        DifferenceRegressionCheck("max", 0.000001)
    ]
    sess.finish()
    # display() raises once the comparison detects the regressions.
    pytest.raises(PerformanceRegression, sess.display, Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    # Expected pairs depend on the active name formatter fixture.
    assert sess.performance_regressions == {
        'normal': [
            ('test_xfast_parametrized[0] (0002_b87b9aa)',
             "Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000"),
            ('test_xfast_parametrized[0] (0002_b87b9aa)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000")
        ],
        'short': [
            ('xfast_parametrized[0] (0002)',
             "Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000"),
            ('xfast_parametrized[0] (0002)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000")
        ],
        'long': [
            ('tests/test_normal.py::test_xfast_parametrized[0] '
             '(0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes)',
             "Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000"),
            ('tests/test_normal.py::test_xfast_parametrized[0] '
             '(0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > "
             '0.000001000')
        ]
    }[name_format]
    # check_regressions() raises too and logs a human-readable summary.
    output = make_logger(sess)
    pytest.raises(PerformanceRegression, sess.check_regressions)
    print(output.getvalue())
    assert output.getvalue() == {
        'short': """Performance has regressed:
\txfast_parametrized[0] (0002) - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\txfast_parametrized[0] (0002) - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
""",
        'normal': """Performance has regressed:
\ttest_xfast_parametrized[0] (0002_b87b9aa) - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\ttest_xfast_parametrized[0] (0002_b87b9aa) - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
""",
        'long': """Performance has regressed:
\ttests/test_normal.py::test_xfast_parametrized[0] (0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes) - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\ttests/test_normal.py::test_xfast_parametrized[0] (0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes) - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
"""
    }[name_format]
273
|
|
|
|
274
|
|
|
|
275
|
|
View Code Duplication |
def test_compare_1(sess, LineMatcher):
    """Comparing against run 0001 renders the expected table plus a C6
    warning because the current machine_info differs from the saved one.
    """
    output = make_logger(sess)
    sess.handle_loading()
    sess.finish()
    sess.display(Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    # fnmatch-style patterns: '*' wildcards, '[[]0[]]' escapes literal brackets.
    LineMatcher(output.getvalue().splitlines()).fnmatch_lines([
        'BENCHMARK-C6: Benchmark machine_info is different. Current: {foo: "bar"} VS saved: {machine: "x86_64", node: "minibox", processor: "x86_64", python_compiler: "GCC 4.6.3", python_implementation: "CPython", python_version: "2.7.3", release: "3.13.0-55-generic", system: "Linux"}. {\'fslocation\': \'tests*test_storage\'}',
        'Comparing against benchmarks from: 0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted'
        '-changes.json',
        '',
        '*------------------------------------------------------------------------ benchmark: 2 tests -----------------------------------------------------------------------*',
        'Name (time in ns) * Min *Max Mean StdDev Median IQR Outliers(*) Rounds Iterations',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        '*xfast_parametrized[[]0[]] (0001*) 217.3145 (1.0) 11*447.3891 (1.0) 262.2408 (1.00) 214.0442 (1.0) 220.1664 (1.00) 38.2154 (2.03) 90;1878 9987 418',
        '*xfast_parametrized[[]0[]] (NOW) * 217.9511 (1.00) 13*290.0380 (1.16) 261.2051 (1.0) 263.9842 (1.23) 220.1638 (1.0) 18.8080 (1.0) 160;1726 9710 431',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.',
    ])
299
|
|
|
|
300
|
|
|
|
301
|
|
View Code Duplication |
def test_compare_2(sess, LineMatcher):
    """Comparing against run 0002 (zero saved stddev) renders '(inf)' in the
    StdDev ratio column, alongside the usual C6 machine_info warning.
    """
    output = make_logger(sess)
    sess.compare = '0002'
    sess.handle_loading()
    sess.finish()
    sess.display(Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        section=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    # fnmatch-style patterns: '*' wildcards, '[[]0[]]' escapes literal brackets.
    LineMatcher(output.getvalue().splitlines()).fnmatch_lines([
        'BENCHMARK-C6: Benchmark machine_info is different. Current: {foo: "bar"} VS saved: {machine: "x86_64", node: "minibox", processor: "x86_64", python_compiler: "GCC 4.6.3", python_implementation: "CPython", python_version: "2.7.3", release: "3.13.0-55-generic", system: "Linux"}. {\'fslocation\': \'tests*test_storage\'}',
        'Comparing against benchmarks from: 0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes.json',
        '',
        '*------------------------------------------------------------------------ benchmark: 2 tests -----------------------------------------------------------------------*',
        'Name (time in ns) * Min *Max Mean StdDev Median IQR Outliers(*) Rounds Iterations',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        '*xfast_parametrized[[]0[]] (0002*) 216.9028 (1.0) 7*739.2997 (1.0) 254.0585 (1.0) 0.0000 (1.0) 219.8103 (1.0) 27.3309 (1.45) 235;1688 11009 410',
        '*xfast_parametrized[[]0[]] (NOW) * 217.9511 (1.00) 13*290.0380 (1.72) 261.2051 (1.03) 263.9842 (inf) 220.1638 (1.00) 18.8080 (1.0) 160;1726 9710 431',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.',
    ])
326
|
|
|
|
327
|
|
|
@freeze_time("2015-08-15T00:04:18.687119")
def test_save_json(sess, tmpdir, monkeypatch):
    """--benchmark-json alone writes to the stream and never touches storage."""
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = False
    sess.autosave = False
    sess.save_data = False
    sess.json = LooseFileLike()
    sess.handle_saving()
    # Nothing may have been written to disk...
    assert tmpdir.listdir() == []
    # ...while the json stream received the full expected report.
    written = json.loads(sess.json.getvalue().decode())
    assert written == JSON_DATA
337
|
|
|
|
338
|
|
|
|
339
|
|
View Code Duplication |
@freeze_time("2015-08-15T00:04:18.687119")
def test_save_with_name(sess, tmpdir, monkeypatch):
    """--benchmark-save=foobar (+autosave) stores exactly one json file
    whose content matches the canned SAVE_DATA payload."""
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = 'foobar'
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    # BUGFIX: mode was 'rU' — the 'U' flag was removed in Python 3.11; the
    # 'with' block also closes the handle the original left open.
    with files[0].open('r') as fh:
        assert json.load(fh) == SAVE_DATA
351
|
|
|
|
352
|
|
|
|
353
|
|
View Code Duplication |
@freeze_time("2015-08-15T00:04:18.687119")
def test_save_no_name(sess, tmpdir, monkeypatch):
    """--benchmark-save without a name (+autosave) still stores exactly one
    json file matching the canned SAVE_DATA payload."""
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = True
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    # BUGFIX: mode was 'rU' — the 'U' flag was removed in Python 3.11; the
    # 'with' block also closes the handle the original left open.
    with files[0].open('r') as fh:
        assert json.load(fh) == SAVE_DATA
365
|
|
|
|
366
|
|
|
|
367
|
|
View Code Duplication |
@freeze_time("2015-08-15T00:04:18.687119")
def test_save_with_error(sess, tmpdir, monkeypatch):
    """Benchmarks flagged ``has_error`` are excluded from the saved payload,
    leaving an empty 'benchmarks' list but intact metadata."""
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = True
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    # Mark every loaded benchmark as errored so none is serialized.
    for bench in sess.benchmarks:
        bench.has_error = True
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    # BUGFIX: mode was 'rU' — the 'U' flag was removed in Python 3.11; the
    # 'with' block also closes the handle the original left open.
    with files[0].open('r') as fh:
        assert json.load(fh) == {
            'benchmarks': [],
            'commit_info': {'foo': 'bar'},
            'datetime': '2015-08-15T00:04:18.687119',
            'machine_info': {'foo': 'bar'},
            'version': '2.5.0'
        }
387
|
|
|
|
388
|
|
|
|
389
|
|
View Code Duplication |
@freeze_time("2015-08-15T00:04:18.687119")
def test_autosave(sess, tmpdir, monkeypatch):
    """--benchmark-autosave alone stores exactly one json file matching the
    canned SAVE_DATA payload."""
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = False
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    # BUGFIX: mode was 'rU' — the 'U' flag was removed in Python 3.11; the
    # 'with' block also closes the handle the original left open.
    with files[0].open('r') as fh:
        assert json.load(fh) == SAVE_DATA
401
|
|
|
|