tests/test_storage.py

import json
import logging
import os
import sys
from io import BytesIO
from io import StringIO
from pathlib import Path

import py
import pytest
from freezegun import freeze_time

from pytest_benchmark import plugin
from pytest_benchmark.plugin import BenchmarkSession
from pytest_benchmark.plugin import pytest_benchmark_compare_machine_info
from pytest_benchmark.plugin import pytest_benchmark_generate_json
from pytest_benchmark.plugin import pytest_benchmark_group_stats
from pytest_benchmark.session import PerformanceRegression
from pytest_benchmark.stats import normalize_stats
from pytest_benchmark.storage.file import FileStorage
from pytest_benchmark.utils import NAME_FORMATTERS
from pytest_benchmark.utils import DifferenceRegressionCheck
from pytest_benchmark.utils import PercentageRegressionCheck
from pytest_benchmark.utils import get_machine_id

pytest_plugins = "pytester"


THIS = py.path.local(__file__)
STORAGE = THIS.dirpath(THIS.purebasename)

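# Fixture data: one saved benchmark run shipped next to this module, adjusted to
# match the stub machine/commit info produced by the fake hooks below, and with
# its stats normalized so freshly generated JSON can be compared to it verbatim.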
JSON_DATA = json.load(STORAGE.listdir('0030_*.json')[0].open())
JSON_DATA["machine_info"] = {'foo': 'bar'}
JSON_DATA["commit_info"] = {'foo': 'bar'}
for bench in JSON_DATA["benchmarks"]:
    normalize_stats(bench['stats'])


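# Simple attribute bag used to stand in for config, terminal and benchmark objects.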
class Namespace(object):
    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    def __getitem__(self, item):
        return self.__dict__[item]

    def getoption(self, item, default=None):
        try:
            return self[item]
        except KeyError:
            return default


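# BytesIO whose contents stay readable after close(), so JSON written and closed
# by the session can still be inspected by the tests.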
class LooseFileLike(BytesIO):
    def close(self):
        value = self.getvalue()
        super(LooseFileLike, self).close()
        self.getvalue = lambda: value


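# A BenchmarkSession with the normal pytest plumbing replaced by stubs: the hooks
# return canned machine/commit info, storage points at the STORAGE directory, and
# the benchmark list is populated from one saved run so loading, comparing,
# reporting and saving can be exercised without running any real benchmarks.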
class MockSession(BenchmarkSession):
    def __init__(self, name_format):
        self.histogram = True
        self.verbose = False
        self.benchmarks = []
        self.performance_regressions = []
        self.sort = u"min"
        self.compare = '0001'
        self.logger = logging.getLogger(__name__)
        self.machine_id = "FoobarOS"
        self.machine_info = {'foo': 'bar'}
        self.save = self.autosave = self.json = False
        self.name_format = NAME_FORMATTERS[name_format]
        self.options = {
            'min_rounds': 123,
            'min_time': 234,
            'max_time': 345,
            'use_cprofile': False,
        }
        self.cprofile_sort_by = 'cumtime'
        self.compare_fail = []
        self.config = Namespace(hook=Namespace(
            pytest_benchmark_group_stats=pytest_benchmark_group_stats,
            pytest_benchmark_generate_machine_info=lambda **kwargs: {'foo': 'bar'},
            pytest_benchmark_update_machine_info=lambda **kwargs: None,
            pytest_benchmark_compare_machine_info=pytest_benchmark_compare_machine_info,
            pytest_benchmark_generate_json=pytest_benchmark_generate_json,
            pytest_benchmark_update_json=lambda **kwargs: None,
            pytest_benchmark_generate_commit_info=lambda **kwargs: {'foo': 'bar'},
            pytest_benchmark_update_commit_info=lambda **kwargs: None,
        ))
        self.storage = FileStorage(str(STORAGE), default_machine_id=get_machine_id(), logger=self.logger)
        self.group_by = 'group'
        self.columns = ['min', 'max', 'mean', 'stddev', 'median', 'iqr',
                        'outliers', 'rounds', 'iterations', 'ops']
        for bench_file, data in reversed(list(self.storage.load("[0-9][0-9][0-9][0-9]_*"))):
            self.benchmarks.extend(
                Namespace(
                    as_dict=lambda include_data=False, stats=True, flat=False, _bench=bench, cprofile='cumtime':
                        dict(_bench, **_bench["stats"]) if flat else dict(_bench),
                    name=bench['name'],
                    fullname=bench['fullname'],
                    group=bench['group'],
                    options=bench['options'],
                    has_error=False,
                    params=None,
                    **bench['stats']
                )
                for bench in data['benchmarks']
            )
            break


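# Python 2/3 text compatibility helpers (``unicode`` only exists on Python 2).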
try:
    text_type = unicode
except NameError:
    text_type = str


def force_text(text):
    if isinstance(text, text_type):
        return text
    else:
        return text.decode('utf-8')


def force_bytes(text):
    if isinstance(text, text_type):
        return text.encode('utf-8')
    else:
        return text


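# ``name_format`` runs the tests below once per display-name style;
# ``sess`` provides a fresh MockSession for each of them.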
@pytest.fixture(params=['short', 'normal', 'long'])
def name_format(request):
    return request.param


@pytest.fixture
def sess(request, name_format):
    return MockSession(name_format)


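# Route both the session logger and the storage logger into a single StringIO so
# tests can assert on the emitted warnings, info and error messages.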
def make_logger(sess):
    output = StringIO()
    sess.logger = Namespace(
        warn=lambda code, text, **opts: output.write(u"%s: %s %s\n" % (code, force_text(text), opts)),
        info=lambda text, **opts: output.write(force_text(text) + u'\n'),
        error=lambda text: output.write(force_text(text) + u'\n'),
    )
    sess.storage.logger = Namespace(
        warn=lambda code, text, **opts: output.write(u"%s: %s %s\n" % (code, force_text(text), opts)),
        info=lambda text, **opts: output.write(force_text(text) + u'\n'),
        error=lambda text: output.write(force_text(text) + u'\n'),
    )
    return output


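# Smoke test: with histogram output pointed at docs/sample and compare='*/*',
# loading, finishing and displaying must not raise.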
def test_rendering(sess):
    output = make_logger(sess)
    sess.histogram = os.path.join('docs', 'sample')
    sess.compare = '*/*'
    sess.sort = 'name'
    sess.handle_loading()
    sess.finish()
    sess.display(Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))


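# Comparing against run 0001 with tight stddev/max thresholds must record the
# expected regressions (per name format) and raise PerformanceRegression both
# from display() and from check_regressions().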
def test_regression_checks(sess, name_format):
    output = make_logger(sess)
    sess.handle_loading()
    sess.performance_regressions = []
    sess.compare_fail = [
        PercentageRegressionCheck("stddev", 5),
        DifferenceRegressionCheck("max", 0.000001)
    ]
    sess.finish()
    pytest.raises(PerformanceRegression, sess.display, Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    assert sess.performance_regressions == {
        'normal': [
            ('test_xfast_parametrized[0] (0001_b87b9aa)',
             "Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000"),
            ('test_xfast_parametrized[0] (0001_b87b9aa)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000")
        ],
        'short': [
            ('xfast_parametrized[0] (0001)',
             "Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000"),
            ('xfast_parametrized[0] (0001)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000")
        ],
        'long': [
            ('tests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes)',
             "Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000"),
            ('tests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000")
        ],
    }[name_format]
    output = make_logger(sess)
    pytest.raises(PerformanceRegression, sess.check_regressions)
    print(output.getvalue())
    assert output.getvalue() == {
        'short': """Performance has regressed:
\txfast_parametrized[0] (0001) - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\txfast_parametrized[0] (0001) - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
""",
        'normal': """Performance has regressed:
\ttest_xfast_parametrized[0] (0001_b87b9aa) - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\ttest_xfast_parametrized[0] (0001_b87b9aa) - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
""",
        'long': """Performance has regressed:
\ttests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes) - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\ttests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes) - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
"""
    }[name_format]


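# Same as above but against run 0002, whose saved stddev of 0 makes the
# percentage regression come out as ``inf``.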
@pytest.mark.skipif(sys.version_info[:2] < (2, 7),
                    reason="Something weird going on, see: https://bugs.python.org/issue4482")
def test_regression_checks_inf(sess, name_format):
    output = make_logger(sess)
    sess.compare = '0002'
    sess.handle_loading()
    sess.performance_regressions = []
    sess.compare_fail = [
        PercentageRegressionCheck("stddev", 5),
        DifferenceRegressionCheck("max", 0.000001)
    ]
    sess.finish()
    pytest.raises(PerformanceRegression, sess.display, Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    assert sess.performance_regressions == {
        'normal': [
            ('test_xfast_parametrized[0] (0002_b87b9aa)',
             "Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000"),
            ('test_xfast_parametrized[0] (0002_b87b9aa)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000")
        ],
        'short': [
            ('xfast_parametrized[0] (0002)',
             "Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000"),
            ('xfast_parametrized[0] (0002)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000")
        ],
        'long': [
            ('tests/test_normal.py::test_xfast_parametrized[0] '
             '(0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes)',
             "Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000"),
            ('tests/test_normal.py::test_xfast_parametrized[0] '
             '(0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > "
             '0.000001000')
        ]
    }[name_format]
    output = make_logger(sess)
    pytest.raises(PerformanceRegression, sess.check_regressions)
    print(output.getvalue())
    assert output.getvalue() == {
        'short': """Performance has regressed:
\txfast_parametrized[0] (0002) - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\txfast_parametrized[0] (0002) - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
""",
        'normal': """Performance has regressed:
\ttest_xfast_parametrized[0] (0002_b87b9aa) - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\ttest_xfast_parametrized[0] (0002_b87b9aa) - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
""",
        'long': """Performance has regressed:
\ttests/test_normal.py::test_xfast_parametrized[0] (0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes) - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\ttests/test_normal.py::test_xfast_parametrized[0] (0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes) - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
"""
    }[name_format]


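# Comparison table against run 0001: check the machine_info warning and the
# rendered columns/ratios line by line.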
def test_compare_1(sess, LineMatcher):
    output = make_logger(sess)
    sess.handle_loading()
    sess.finish()
    sess.display(Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    LineMatcher(output.getvalue().splitlines()).fnmatch_lines([
        'BENCHMARK-C6: Benchmark machine_info is different. Current: {foo: "bar"} VS saved: {machine: "x86_64", node: "minibox", processor: "x86_64", python_compiler: "GCC 4.6.3", python_implementation: "CPython", python_version: "2.7.3", release: "3.13.0-55-generic", system: "Linux"}. {\'fslocation\': \'tests*test_storage\'}',
        'Comparing against benchmarks from: 0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted'
        '-changes.json',
        '',
        '*------------------------------------------------------------------------ benchmark: 2 tests -----------------------------------------------------------------------*',
        'Name (time in ns)               *      Min                 *Max                Mean              StdDev              Median                IQR            Outliers  Rounds  Iterations  OPS (Mops/s) *',
        '-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        '*xfast_parametrized[[]0[]] (0001*)     217.3145 (1.0)      11*447.3891 (1.0)      262.2408 (1.00)     214.0442 (1.0)      220.1664 (1.00)     38.2154 (2.03)      90;1878    9987         418        3.8133 (1.00)*',
        '*xfast_parametrized[[]0[]] (NOW) *     217.9511 (1.00)     13*290.0380 (1.16)     261.2051 (1.0)      263.9842 (1.23)     220.1638 (1.0)      18.8080 (1.0)      160;1726    9710         431        3.8284 (1.0)*',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        'Legend:',
        '  Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.',
        '  OPS: Operations Per Second, computed as 1 / Mean',
    ])


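# Same comparison table against run 0002 (note the ``inf`` StdDev ratio).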
def test_compare_2(sess, LineMatcher):
    output = make_logger(sess)
    sess.compare = '0002'
    sess.handle_loading()
    sess.finish()
    sess.display(Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        section=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    LineMatcher(output.getvalue().splitlines()).fnmatch_lines([
        'BENCHMARK-C6: Benchmark machine_info is different. Current: {foo: "bar"} VS saved: {machine: "x86_64", node: "minibox", processor: "x86_64", python_compiler: "GCC 4.6.3", python_implementation: "CPython", python_version: "2.7.3", release: "3.13.0-55-generic", system: "Linux"}. {\'fslocation\': \'tests*test_storage\'}',
        'Comparing against benchmarks from: 0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes.json',
        '',
        '*------------------------------------------------------------------------ benchmark: 2 tests -----------------------------------------------------------------------*',
        'Name (time in ns)            *         Min                 *Max                Mean              StdDev              Median                IQR            Outliers  Rounds  Iterations  OPS (Mops/s)*',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        '*xfast_parametrized[[]0[]] (0002*)     216.9028 (1.0)       7*739.2997 (1.0)      254.0585 (1.0)        0.0000 (1.0)      219.8103 (1.0)      27.3309 (1.45)     235;1688   11009         410        3.9361 (1.0)*',
        '*xfast_parametrized[[]0[]] (NOW) *     217.9511 (1.00)     13*290.0380 (1.72)     261.2051 (1.03)     263.9842 (inf)      220.1638 (1.00)     18.8080 (1.0)      160;1726    9710         431        3.8284 (0.97)*',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        'Legend:',
        '  Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.',
        '  OPS: Operations Per Second, computed as 1 / Mean',
    ])


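# JSON output only (sess.json set, no save/autosave): nothing lands in storage
# and the written stream round-trips to JSON_DATA.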
@freeze_time("2015-08-15T00:04:18.687119")
345
def test_save_json(sess, tmpdir, monkeypatch):
346
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
347
    sess.save = False
348
    sess.autosave = False
349
    sess.json = LooseFileLike()
350
    sess.save_data = False
351
    sess.handle_saving()
352
    assert tmpdir.listdir() == []
353
    assert json.loads(sess.json.getvalue().decode()) == JSON_DATA
354
355
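# Saving with an explicit name: exactly one JSON file is written and it
# round-trips to JSON_DATA.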
@freeze_time("2015-08-15T00:04:18.687119")
def test_save_with_name(sess, tmpdir, monkeypatch):
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = 'foobar'
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    print(files)
    assert len(files) == 1
    assert json.load(files[0].open('rU')) == JSON_DATA


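# Saving without an explicit name behaves the same way.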
@freeze_time("2015-08-15T00:04:18.687119")
def test_save_no_name(sess, tmpdir, monkeypatch):
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = True
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    assert json.load(files[0].open('rU')) == JSON_DATA


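# Benchmarks flagged with has_error are excluded from the saved JSON.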
@freeze_time("2015-08-15T00:04:18.687119")
def test_save_with_error(sess, tmpdir, monkeypatch):
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = True
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    for bench in sess.benchmarks:
        bench.has_error = True
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    assert json.load(files[0].open('rU')) == {
        'benchmarks': [],
        'commit_info': {'foo': 'bar'},
        'datetime': '2015-08-15T00:04:18.687119',
        'machine_info': {'foo': 'bar'},
        'version': '2.5.0'
    }


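# Autosave alone (save=False) still writes the JSON file.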
@freeze_time("2015-08-15T00:04:18.687119")
def test_autosave(sess, tmpdir, monkeypatch):
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.save = False
    sess.autosave = True
    sess.json = None
    sess.save_data = False
    sess.storage.path = Path(str(tmpdir))
    sess.handle_saving()
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    assert json.load(files[0].open('rU')) == JSON_DATA