Completed
Pull Request — master (#58)
by
unknown
01:10
created

make_logger()   B

Complexity

Conditions 7

Size

Total Lines 13

Duplication

Lines 13
Ratio 100 %

Importance

Changes 3
Bugs 0 Features 0
Metric Value
cc 7
c 3
b 0
f 0
dl 13
loc 13
rs 7.3333
1
import json
2
import logging
3
import os
4
import sys
5
from io import BytesIO
6
from io import StringIO
7
8
import py
9
import pytest
10
from freezegun import freeze_time
11
from pathlib import Path
12
13
from pytest_benchmark import plugin
14
from pytest_benchmark.plugin import BenchmarkSession
15
from pytest_benchmark.plugin import pytest_benchmark_compare_machine_info
16
from pytest_benchmark.plugin import pytest_benchmark_generate_json
17
from pytest_benchmark.plugin import pytest_benchmark_group_stats
18
from pytest_benchmark.session import PerformanceRegression
19
from pytest_benchmark.storage import Storage
20
from pytest_benchmark.utils import NAME_FORMATTERS
21
from pytest_benchmark.utils import DifferenceRegressionCheck
22
from pytest_benchmark.utils import PercentageRegressionCheck
23
from pytest_benchmark.utils import get_machine_id
24
from pytest_benchmark.report_backend import FileReportBackend
25
26
27
pytest_plugins = "pytester"


THIS = py.path.local(__file__)
# Fixture directory named after this test module (e.g. ``.../test_storage/``).
STORAGE = THIS.dirpath(THIS.purebasename)

# Load the stored benchmark twice on purpose so SAVE_DATA and JSON_DATA are
# independent objects (tests mutate/compare them separately without aliasing).
# Use ``with`` so the file handles are closed deterministically instead of
# leaking until garbage collection.
with STORAGE.listdir('0030_*.json')[0].open() as _fh:
    SAVE_DATA = json.load(_fh)
with STORAGE.listdir('0030_*.json')[0].open() as _fh:
    JSON_DATA = json.load(_fh)
SAVE_DATA["machine_info"] = JSON_DATA["machine_info"] = {'foo': 'bar'}
SAVE_DATA["commit_info"] = JSON_DATA["commit_info"] = {'foo': 'bar'}
37
38
39
class Namespace(object):
    """Lightweight attribute bag that also supports item access.

    Used throughout these tests to stub out config, hook, logger and
    terminal-writer objects with plain keyword arguments.
    """

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)

    def __getitem__(self, item):
        # Mirrors attribute storage; unknown names raise KeyError, exactly
        # like the dict-backed lookup this replaces.
        return vars(self)[item]
46
47
class LooseFileLike(BytesIO):
    """A ``BytesIO`` whose contents stay readable after ``close()``.

    ``close()`` snapshots the buffer and patches ``getvalue`` so tests can
    inspect what was written even after the code under test closed the
    stream.
    """

    def close(self):
        snapshot = self.getvalue()
        super(LooseFileLike, self).close()
        self.getvalue = lambda: snapshot
52
53
54
class MockFileReportBackend(FileReportBackend):
    """File report backend preconfigured for this module's on-disk fixtures.

    Bypasses the real constructor entirely: points the storage at the
    fixture directory next to this file, compares against run '0001', and
    disables every saving mode until a test flips the flags explicitly.
    """

    def __init__(self, config):
        self.config = config
        self.verbose = False
        self.machine_id = "FoobarOS"
        self.logger = logging.getLogger(__name__)
        self.benchmarks = []
        self.performance_regressions = []
        # Storage rooted at the fixture files shipped next to this module.
        self.storage = Storage(str(STORAGE), default_machine_id=get_machine_id(), logger=None)
        self.compare = '0001'
        # All saving modes off by default.
        self.save = self.autosave = self.json = False
65
66
67
class MockSession(BenchmarkSession):
    """A BenchmarkSession wired up entirely from test fixtures.

    Bypasses the real constructor: installs stub hooks, a
    :class:`MockFileReportBackend`, and preloads ``self.benchmarks`` from the
    newest ``NNNN_*`` fixture file in storage (only the first file — see the
    ``break`` below).

    :param name_format: key into ``NAME_FORMATTERS`` ('short'/'normal'/'long').
    """

    def __init__(self, name_format):
        self.histogram = True
        self.benchmarks = []
        self.performance_regressions = []
        self.sort = u"min"
        self.logger = logging.getLogger(__name__)
        self.machine_id = "FoobarOS"
        self.machine_info = {'foo': 'bar'}
        self.name_format = NAME_FORMATTERS[name_format]
        self.options = {
            'min_rounds': 123,
            'min_time': 234,
            'max_time': 345,
        }
        self.compare_fail = []
        # Stub hook relay: real plugin hooks where the tests exercise them,
        # constant-returning lambdas everywhere else.
        self.config = Namespace(hook=Namespace(
            pytest_benchmark_group_stats=pytest_benchmark_group_stats,
            pytest_benchmark_generate_machine_info=lambda **kwargs: {'foo': 'bar'},
            pytest_benchmark_update_machine_info=lambda **kwargs: None,
            pytest_benchmark_compare_machine_info=pytest_benchmark_compare_machine_info,
            pytest_benchmark_generate_json=pytest_benchmark_generate_json,
            pytest_benchmark_update_json=lambda **kwargs: None,
            pytest_benchmark_generate_commit_info=lambda **kwargs: {'foo': 'bar'},
            pytest_benchmark_update_commit_info=lambda **kwargs: None,
        ))
        self.report_backend = MockFileReportBackend(self.config)
        self.group_by = 'group'
        self.columns = ['min', 'max', 'mean', 'stddev', 'median', 'iqr',
                        'outliers', 'rounds', 'iterations']
        # ``reversed`` + ``break``: load benchmarks from only the NEWEST
        # fixture file.
        for bench_file in reversed(self.report_backend.storage.query("[0-9][0-9][0-9][0-9]_*")):
            # 'r' instead of 'rU': the 'U' mode is deprecated since Python 3
            # and removed in 3.11 (raises ValueError); universal newlines is
            # the default for text mode anyway.
            with bench_file.open('r') as fh:
                data = json.load(fh)
            self.benchmarks.extend(
                Namespace(
                    # ``_bench=bench`` binds the loop variable early so each
                    # lambda keeps its own benchmark dict.
                    as_dict=lambda include_data=False, stats=True, flat=False, _bench=bench:
                        dict(_bench, **_bench["stats"]) if flat else dict(_bench),
                    name=bench['name'],
                    fullname=bench['fullname'],
                    group=bench['group'],
                    options=bench['options'],
                    has_error=False,
                    params=None,
                    **bench['stats']
                )
                for bench in data['benchmarks']
            )
            break
115
116
117
# Python 2/3 text-type compatibility: ``unicode`` on 2, ``str`` on 3.
if str is bytes:  # Python 2
    text_type = unicode  # noqa: F821
else:  # Python 3: no separate ``unicode`` builtin
    text_type = str


def force_text(text):
    """Return *text* as native text, decoding UTF-8 bytes when necessary."""
    if not isinstance(text, text_type):
        return text.decode('utf-8')
    return text


def force_bytes(text):
    """Return *text* as bytes, encoding text via UTF-8 when necessary."""
    if not isinstance(text, text_type):
        return text
    return text.encode('utf-8')
135
136
137
@pytest.fixture(params=['short', 'normal', 'long'])
def name_format(request):
    """Parametrized fixture: run dependent tests once per name-formatter style."""
    return request.param
140
141
142
@pytest.fixture
def sess(request, name_format):
    """A fresh :class:`MockSession` wired for the requested name format."""
    return MockSession(name_format)
145
146
147 View Code Duplication
def make_logger(sess):
    """Replace the session's and backend's loggers with buffer-backed stubs.

    Builds ONE stub logger and installs it on both ``sess`` and
    ``sess.report_backend`` (previously two identical copies were
    constructed — the duplication flagged by the review tool).  All
    warn/info/error output lands in the returned ``StringIO`` so tests can
    assert on it.

    :param sess: a (mock) benchmark session.
    :return: the ``StringIO`` buffer receiving all log output.
    """
    output = StringIO()
    logger = Namespace(
        warn=lambda code, text, **opts: output.write(u"%s: %s %s\n" % (code, force_text(text), opts)),
        info=lambda text, **opts: output.write(force_text(text) + u'\n'),
        error=lambda text: output.write(force_text(text) + u'\n'),
    )
    sess.logger = logger
    sess.report_backend.logger = logger
    return output
160
161
162
def test_rendering(sess):
    """Smoke test: finish + display with histogram generation must not raise."""
    log = make_logger(sess)
    sess.sort = 'name'
    sess.report_backend.compare = '*/*'
    sess.histogram = os.path.join('docs', 'sample')
    sess.finish()
    # Fake terminal writer funnelling everything into the log buffer.
    fake_tw = Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: log.write(force_text(line) + u'\n'),
        write=lambda text, **opts: log.write(force_text(text)),
        rewrite=lambda text, **opts: log.write(force_text(text)),
    )
    sess.display(fake_tw)
174
175
176
def test_regression_checks(sess, name_format):
    """Failing compare checks are recorded by display() and raised twice.

    ``display()`` collects each tripped PercentageRegressionCheck /
    DifferenceRegressionCheck into ``performance_regressions`` and raises
    PerformanceRegression; ``check_regressions()`` then renders them through
    the logger and raises again.  Expected text varies only with the name
    formatter (short/normal/long).
    """
    output = make_logger(sess)
    sess.report_backend.handle_loading(sess.machine_info)
    sess.performance_regressions = []
    sess.compare_fail = [
        PercentageRegressionCheck("stddev", 5),
        DifferenceRegressionCheck("max", 0.000001)
    ]
    sess.finish()
    # display() must raise once any compare_fail check trips.
    pytest.raises(PerformanceRegression, sess.display, Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    # Recorded (formatted name, failure message) pairs, keyed by formatter.
    assert sess.performance_regressions == {
        'normal': [
            ('test_xfast_parametrized[0] (0001_b87b9aa)',
             "Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000"),
            ('test_xfast_parametrized[0] (0001_b87b9aa)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000")
        ],
        'short': [
            ('xfast_parametrized[0] (0001)',
             "Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000"),
            ('xfast_parametrized[0] (0001)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000")
        ],
        'long': [
            ('tests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes)',
             "Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000"),
            ('tests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000")
        ],
    }[name_format]
    # Fresh buffer: check_regressions() renders AND raises again.
    output = make_logger(sess)
    pytest.raises(PerformanceRegression, sess.check_regressions)
    print(output.getvalue())
    assert output.getvalue() == {
        'short': """Performance has regressed:
\txfast_parametrized[0] (0001) - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\txfast_parametrized[0] (0001) - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
""",
        'normal': """Performance has regressed:
\ttest_xfast_parametrized[0] (0001_b87b9aa) - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\ttest_xfast_parametrized[0] (0001_b87b9aa) - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
""",
        'long': """Performance has regressed:
\ttests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes) - Field 'stddev' has failed PercentageRegressionCheck: 23.331641765 > 5.000000000
\ttests/test_normal.py::test_xfast_parametrized[0] (0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted-changes) - Field 'max' has failed DifferenceRegressionCheck: 0.000001843 > 0.000001000
"""
    }[name_format]
229
230
231
@pytest.mark.skipif(sys.version_info[:2] < (2, 7),
                    reason="Something weird going on, see: https://bugs.python.org/issue4482")
def test_regression_checks_inf(sess, name_format):
    """Like test_regression_checks, but against run 0002 whose saved stddev
    is zero, so the percentage regression evaluates to ``inf``.
    """
    output = make_logger(sess)
    sess.report_backend.compare = '0002'
    sess.report_backend.handle_loading(sess.machine_info)
    sess.performance_regressions = []
    sess.compare_fail = [
        PercentageRegressionCheck("stddev", 5),
        DifferenceRegressionCheck("max", 0.000001)
    ]
    sess.finish()
    # display() must raise once any compare_fail check trips.
    pytest.raises(PerformanceRegression, sess.display, Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    # Recorded (formatted name, failure message) pairs, keyed by formatter.
    assert sess.performance_regressions == {
        'normal': [
            ('test_xfast_parametrized[0] (0002_b87b9aa)',
             "Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000"),
            ('test_xfast_parametrized[0] (0002_b87b9aa)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000")
        ],
        'short': [
            ('xfast_parametrized[0] (0002)',
             "Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000"),
            ('xfast_parametrized[0] (0002)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000")
        ],
        'long': [
            ('tests/test_normal.py::test_xfast_parametrized[0] '
             '(0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes)',
             "Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000"),
            ('tests/test_normal.py::test_xfast_parametrized[0] '
             '(0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes)',
             "Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > "
             '0.000001000')
        ]
    }[name_format]
    # Fresh buffer: check_regressions() renders AND raises again.
    output = make_logger(sess)
    pytest.raises(PerformanceRegression, sess.check_regressions)
    print(output.getvalue())
    assert output.getvalue() == {
        'short': """Performance has regressed:
\txfast_parametrized[0] (0002) - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\txfast_parametrized[0] (0002) - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
""",
        'normal': """Performance has regressed:
\ttest_xfast_parametrized[0] (0002_b87b9aa) - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\ttest_xfast_parametrized[0] (0002_b87b9aa) - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
""",
        'long': """Performance has regressed:
\ttests/test_normal.py::test_xfast_parametrized[0] (0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes) - Field 'stddev' has failed PercentageRegressionCheck: inf > 5.000000000
\ttests/test_normal.py::test_xfast_parametrized[0] (0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes) - Field 'max' has failed DifferenceRegressionCheck: 0.000005551 > 0.000001000
"""
    }[name_format]
290
291
292 View Code Duplication
def test_compare_1(sess, LineMatcher):
    """Comparing against run 0001 renders the expected table plus the
    machine_info-mismatch warning (fixture machine_info differs from the
    session's stub ``{'foo': 'bar'}``).
    """
    output = make_logger(sess)
    sess.report_backend.handle_loading(sess.machine_info)
    sess.finish()
    sess.display(Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    # fnmatch patterns: '*' wildcards absorb environment-dependent spacing.
    LineMatcher(output.getvalue().splitlines()).fnmatch_lines([
        'BENCHMARK-C6: Benchmark machine_info is different. Current: {foo: "bar"} VS saved: {machine: "x86_64", node: "minibox", processor: "x86_64", python_compiler: "GCC 4.6.3", python_implementation: "CPython", python_version: "2.7.3", release: "3.13.0-55-generic", system: "Linux"}. {\'fslocation\': \'tests*test_storage\'}',
        'Comparing against benchmarks from: 0001_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190343_uncommitted'
        '-changes.json',
        '',
        '*------------------------------------------------------------------------ benchmark: 2 tests -----------------------------------------------------------------------*',
        'Name (time in ns)               *      Min                 *Max                Mean              StdDev              Median                IQR            Outliers(*)  Rounds  Iterations',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        '*xfast_parametrized[[]0[]] (0001*)     217.3145 (1.0)      11*447.3891 (1.0)      262.2408 (1.00)     214.0442 (1.0)      220.1664 (1.00)     38.2154 (2.03)         90;1878    9987         418',
        '*xfast_parametrized[[]0[]] (NOW) *     217.9511 (1.00)     13*290.0380 (1.16)     261.2051 (1.0)      263.9842 (1.23)     220.1638 (1.0)      18.8080 (1.0)         160;1726    9710         431',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.',
    ])
316
317
318 View Code Duplication
def test_compare_2(sess, LineMatcher):
    """Comparing against run 0002 (zero saved stddev) renders '(inf)' in the
    StdDev ratio column.
    """
    output = make_logger(sess)
    sess.report_backend.compare = '0002'
    sess.report_backend.handle_loading(sess.machine_info)
    sess.finish()
    sess.display(Namespace(
        ensure_newline=lambda: None,
        write_line=lambda line, **opts: output.write(force_text(line) + u'\n'),
        section=lambda line, **opts: output.write(force_text(line) + u'\n'),
        write=lambda text, **opts: output.write(force_text(text)),
        rewrite=lambda text, **opts: output.write(force_text(text)),
    ))
    print(output.getvalue())
    # fnmatch patterns: '*' wildcards absorb environment-dependent spacing.
    LineMatcher(output.getvalue().splitlines()).fnmatch_lines([
        'BENCHMARK-C6: Benchmark machine_info is different. Current: {foo: "bar"} VS saved: {machine: "x86_64", node: "minibox", processor: "x86_64", python_compiler: "GCC 4.6.3", python_implementation: "CPython", python_version: "2.7.3", release: "3.13.0-55-generic", system: "Linux"}. {\'fslocation\': \'tests*test_storage\'}',
        'Comparing against benchmarks from: 0002_b87b9aae14ff14a7887a6bbaa9731b9a8760555d_20150814_190348_uncommitted-changes.json',
        '',
        '*------------------------------------------------------------------------ benchmark: 2 tests -----------------------------------------------------------------------*',
        'Name (time in ns)            *         Min                 *Max                Mean              StdDev              Median                IQR            Outliers(*)  Rounds  Iterations',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        '*xfast_parametrized[[]0[]] (0002*)     216.9028 (1.0)       7*739.2997 (1.0)      254.0585 (1.0)        0.0000 (1.0)      219.8103 (1.0)      27.3309 (1.45)        235;1688   11009         410',
        '*xfast_parametrized[[]0[]] (NOW) *     217.9511 (1.00)     13*290.0380 (1.72)     261.2051 (1.03)     263.9842 (inf)      220.1638 (1.00)     18.8080 (1.0)         160;1726    9710         431',
        '--------------------------------------------------------------------------------------------------------------------------------------------------------------------*',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.',
    ])
343
344
@freeze_time("2015-08-15T00:04:18.687119")
def test_save_json(sess, tmpdir, monkeypatch):
    """With only ``json`` set, the report goes to the stream and not to disk."""
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    backend = sess.report_backend
    backend.save = False
    backend.autosave = False
    backend.save_data = False
    backend.json = LooseFileLike()
    backend.handle_saving(sess.benchmarks, sess.machine_info)
    # Nothing written to disk; the whole report lands in the stream.
    assert tmpdir.listdir() == []
    assert json.loads(backend.json.getvalue().decode()) == JSON_DATA
354
355
356 View Code Duplication
@freeze_time("2015-08-15T00:04:18.687119")
def test_save_with_name(sess, tmpdir, monkeypatch):
    """Saving with an explicit name produces exactly one report file."""
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    # BUG FIX: the save flag lives on the report backend (like every other
    # flag set below and like handle_saving itself) -- ``sess.save`` was a
    # leftover from before the report-backend refactor and was silently
    # ignored: the file was only written because autosave is True.
    sess.report_backend.save = 'foobar'
    sess.report_backend.autosave = True
    sess.report_backend.json = None
    sess.report_backend.save_data = False
    sess.report_backend.storage.path = Path(str(tmpdir))
    sess.report_backend.handle_saving(sess.benchmarks, sess.machine_info)
    files = list(Path(str(tmpdir)).rglob('*.json'))
    print(files)
    assert len(files) == 1
    # 'r' instead of 'rU' (the 'U' mode was removed in Python 3.11; universal
    # newlines is the text-mode default); close the handle deterministically.
    with files[0].open('r') as fh:
        assert json.load(fh) == SAVE_DATA
369
370
371 View Code Duplication
@freeze_time("2015-08-15T00:04:18.687119")
def test_save_no_name(sess, tmpdir, monkeypatch):
    """``save=True`` (no explicit name) still writes exactly one report file."""
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.report_backend.save = True
    sess.report_backend.autosave = True
    sess.report_backend.json = None
    sess.report_backend.save_data = False
    sess.report_backend.storage.path = Path(str(tmpdir))
    sess.report_backend.handle_saving(sess.benchmarks, sess.machine_info)
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    # 'r' instead of 'rU' (the 'U' mode was removed in Python 3.11); close
    # the handle deterministically.
    with files[0].open('r') as fh:
        assert json.load(fh) == SAVE_DATA
383
384
385 View Code Duplication
@freeze_time("2015-08-15T00:04:18.687119")
def test_save_with_error(sess, tmpdir, monkeypatch):
    """Benchmarks flagged ``has_error`` are excluded from the saved report."""
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.report_backend.save = True
    sess.report_backend.autosave = True
    sess.report_backend.json = None
    sess.report_backend.save_data = False
    sess.report_backend.storage.path = Path(str(tmpdir))
    # Mark every loaded benchmark as errored -> saved 'benchmarks' is empty.
    for bench in sess.benchmarks:
        bench.has_error = True
    sess.report_backend.handle_saving(sess.benchmarks, sess.machine_info)
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    # 'r' instead of 'rU' (the 'U' mode was removed in Python 3.11); close
    # the handle deterministically.
    with files[0].open('r') as fh:
        assert json.load(fh) == {
            'benchmarks': [],
            'commit_info': {'foo': 'bar'},
            'datetime': '2015-08-15T00:04:18.687119',
            'machine_info': {'foo': 'bar'},
            'version': '2.5.0'
        }
405
406
407 View Code Duplication
@freeze_time("2015-08-15T00:04:18.687119")
def test_autosave(sess, tmpdir, monkeypatch):
    """``autosave=True`` alone is enough to write exactly one report file."""
    monkeypatch.setattr(plugin, '__version__', '2.5.0')
    sess.report_backend.save = False
    sess.report_backend.autosave = True
    sess.report_backend.json = None
    sess.report_backend.save_data = False
    sess.report_backend.storage.path = Path(str(tmpdir))
    sess.report_backend.handle_saving(sess.benchmarks, sess.machine_info)
    files = list(Path(str(tmpdir)).rglob('*.json'))
    assert len(files) == 1
    # 'r' instead of 'rU' (the 'U' mode was removed in Python 3.11); close
    # the handle deterministically.
    with files[0].open('r') as fh:
        assert json.load(fh) == SAVE_DATA
419