Completed: push to master (319e30...e8e7e8) by Ionel Cristian, analyzed in 32s.

testdir()    Grade: A

Complexity:   Conditions 3
Size:         Total Lines 5
Duplication:  Lines 0, Ratio 0 %
Importance:   Changes 1, Bugs 0, Features 0

Metric  Value
cc      3
c       1
b       0
f       0
dl      0
loc     5
rs      9.4285
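
The abbreviated metrics appear to map onto the summary above (cc = Conditions, c = Changes, b = Bugs, f = Features, dl = duplicated Lines, loc = Total Lines). As a minimal sketch, assuming a radon-style analyzer (the report does not name the tool it uses), comparable numbers could be produced like this:

    # Hypothetical reproduction of the figures above with radon.
    from radon.complexity import cc_visit
    from radon.metrics import mi_visit
    from radon.raw import analyze

    source = open('test_cli.py').read()      # filename assumed
    print(analyze(source).loc)               # raw line count, cf. "loc"
    for block in cc_visit(source):           # per-function cyclomatic
        print(block.name, block.complexity)  # complexity, cf. "cc"
    print(mi_visit(source, multi=True))      # a maintainability-style score;
                                             # whether it matches "rs" is a guess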
from collections import namedtuple
import sys

import py
import pytest
from _pytest.pytester import LineMatcher

pytest_plugins = 'pytester',

THIS = py.path.local(__file__)
STORAGE = THIS.dirpath('test_storage')


@pytest.fixture
def testdir(testdir, monkeypatch):
    return namedtuple('testdir', 'tmpdir,run')(
        testdir.tmpdir,
        lambda bin, *args: testdir.run(bin + ".exe" if sys.platform == "win32" else bin, *args))
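
# The fixture above shadows pytester's built-in `testdir`: tests receive a
# small namedtuple exposing only `tmpdir` and a `run` wrapper that appends
# ".exe" on Windows, so the `py.test-benchmark` console script resolves on
# every platform. (`monkeypatch` is requested but not used in this snippet.)
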
def test_help(testdir):
    result = testdir.run('py.test-benchmark', '--help')
    result.stdout.fnmatch_lines([
        "usage: py.test-benchmark [-h [COMMAND]] [--storage URI] [--verbose]",
        "                         {help,list,compare} ...",
        "",
        "pytest_benchmark's management commands.",
        "",
        "optional arguments:",
        "  -h [COMMAND], --help [COMMAND]",
        "                        Display help and exit.",
        "  --storage URI, -s URI",
        "                        Specify a path to store the runs as uri in form",
        "                        file://path or elasticsearch+http[s]://host1,host2/[in",
        "                        dex/doctype?project_name=Project] (when --benchmark-",
        "                        save or --benchmark-autosave are used). For backwards",
        "                        compatibility unexpected values are converted to",
        "                        file://<value>. Default: 'file://./.benchmarks'.",
        "  --verbose, -v         Dump diagnostic and progress information.",
        "",
        "commands:",
        "  {help,list,compare}",
        "    help                Display help and exit.",
        "    list                List saved runs.",
        "    compare             Compare saved runs.",
    ])
    assert result.ret == 0

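
# (fnmatch_lines, used on the captured output throughout, asserts that each
# pattern matches a line of output, in the given order: "*" is a wildcard and
# "[...]" a character class, hence the "[[]0[]]" escapes in test_compare below.)
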
def test_help_command(testdir):  # distinct name; a second test_help would shadow the first
    result = testdir.run('py.test-benchmark', 'help')
    result.stdout.fnmatch_lines([
        'usage: py.test-benchmark help [-h] [command]',
        '',
        'Display help and exit.',
        '',
        'positional arguments:',
        '  command',
        '',
        'optional arguments:',
        '  -h, --help  show this help message and exit',
    ])
    assert result.ret == 0


@pytest.mark.parametrize('args', ['list --help', 'help list'])
def test_help_list(testdir, args):
    result = testdir.run('py.test-benchmark', *args.split())
    result.stdout.fnmatch_lines([
        "usage: py.test-benchmark list [-h]",
        "",
        "List saved runs.",
        "",
        "optional arguments:",
        "  -h, --help  show this help message and exit",
    ])
    assert result.ret == 0


@pytest.mark.parametrize('args', ['compare --help', 'help compare'])
def test_help_compare(testdir, args):
    result = testdir.run('py.test-benchmark', *args.split())
    result.stdout.fnmatch_lines([
        "usage: py.test-benchmark compare [-h] [--sort COL] [--group-by LABEL]",
        "                                 [--columns LABELS] [--name FORMAT]",
        "                                 [--histogram [FILENAME-PREFIX]]",
        "                                 [--csv [FILENAME]]",
        "                                 [glob_or_file [glob_or_file ...]]",
        "",
        "Compare saved runs.",
        "",
        "positional arguments:",
        "  glob_or_file          Glob or exact path for json files. If not specified",
        "                        all runs are loaded.",
        "",
        "optional arguments:",
        "  -h, --help            show this help message and exit",
        "  --sort COL            Column to sort on. Can be one of: 'min', 'max',",
        "                        'mean', 'stddev', 'name', 'fullname'. Default: 'min'",
        "  --group-by LABEL      How to group tests. Can be one of: 'group', 'name',",
        "                        'fullname', 'func', 'fullfunc', 'param' or",
        "                        'param:NAME', where NAME is the name passed to",
        "                        @pytest.parametrize. Default: 'group'",
        "  --columns LABELS      Comma-separated list of columns to show in the result",
        "                        table. Default: 'min, max, mean, stddev, median, iqr,",
        "                        outliers, rounds, iterations'",
        "  --name FORMAT         How to format names in results. Can be one of 'short',",
        "                        'normal', 'long'. Default: 'normal'",
        "  --histogram [FILENAME-PREFIX]",
        "                        Plot graphs of min/max/avg/stddev over time in",
        "                        FILENAME-PREFIX-test_name.svg. If FILENAME-PREFIX",
        "                        contains slashes ('/') then directories will be",
        "                        created. Default: 'benchmark_*'",
        "  --csv [FILENAME]      Save a csv report. If FILENAME contains slashes ('/')",
        "                        then directories will be created. Default:",
        "                        'benchmark_*'",
        "",
        "examples:",
        "",
        "    pytest-benchmark compare 'Linux-CPython-3.5-64bit/*'",
        "",
        "        Loads all benchmarks ran with that interpreter. Note the special quoting that disables your shell's glob",
        "        expansion.",
        "",
        "    pytest-benchmark compare 0001",
        "",
        "        Loads first run from all the interpreters.",
        "",
        "    pytest-benchmark compare /foo/bar/0001_abc.json /lorem/ipsum/0001_sir_dolor.json",
        "",
        "        Loads runs from exactly those files.",
    ])
    assert result.ret == 0


def test_list(testdir):
    result = testdir.run('py.test-benchmark', '--storage', STORAGE, 'list')
    assert result.stderr.lines == []
    result.stdout.fnmatch_lines([
        '*0001_*.json',
        '*0002_*.json',
        '*0003_*.json',
        '*0004_*.json',
        '*0005_*.json',
        '*0006_*.json',
        '*0007_*.json',
        '*0008_*.json',
        '*0009_*.json',
        '*0010_*.json',
        '*0011_*.json',
        '*0012_*.json',
        '*0013_*.json',
        '*0014_*.json',
        '*0015_*.json',
        '*0016_*.json',
        '*0017_*.json',
        '*0018_*.json',
        '*0019_*.json',
        '*0020_*.json',
        '*0021_*.json',
        '*0022_*.json',
        '*0023_*.json',
        '*0024_*.json',
        '*0025_*.json',
        '*0026_*.json',
        '*0027_*.json',
        '*0028_*.json',
        '*0029_*.json',
        '*0030_*.json',
    ])
    assert result.ret == 0


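# The test_storage directory checked above holds thirty saved runs; their
# zero-padded ids (0001..0030) are the shorthand the `compare` command accepts
# below (per the help text, `compare 0001` loads the first run from every
# interpreter).
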
@pytest.mark.parametrize('name', ['short', 'long', 'normal'])
def test_compare(testdir, name):
    result = testdir.run('py.test-benchmark', '--storage', STORAGE, 'compare', '0001', '0002', '0003',
                         '--sort', 'min',
                         '--columns', 'min,max',
                         '--name', name,
                         '--histogram', 'foobar',
                         '--csv', 'foobar')
    result.stderr.fnmatch_lines([
        'Generated csv: *foobar.csv'
    ])
    LineMatcher(testdir.tmpdir.join('foobar.csv').readlines(cr=0)).fnmatch_lines([
        "name,min,max",
        "tests/test_normal.py::test_xfast_parametrized[[]0[]],2.15628567*e-07,1.03186158*e-05",
        "tests/test_normal.py::test_xfast_parametrized[[]0[]],2.16902756*e-07,7.73929968*e-06",
        "tests/test_normal.py::test_xfast_parametrized[[]0[]],2.17314542*e-07,1.14473891*e-05",
        ""
    ])
    result.stdout.fnmatch_lines([
        'Computing stats ...',
        '---*--- benchmark: 3 tests ---*---',
        'Name (time in ns) *                   Min    *    Max          ',
        '---*---',
        '*xfast_parametrized[[]0[]] (0003*)     215.6286 (1.0)      10*318.6159 (1.33)   ',
        '*xfast_parametrized[[]0[]] (0002*)     216.9028 (1.01)      7*739.2997 (1.0)    ',
        '*xfast_parametrized[[]0[]] (0001*)     217.3145 (1.01)     11*447.3891 (1.48)   ',
        '---*---',
        '',
        '(*) Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.',
    ])
    assert result.ret == 0
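
# A standalone check of the bracket escaping used in the patterns above
# (fnmatch is the stdlib module that fnmatch_lines builds on): "[[]" is a
# one-character class matching a literal "[", and "[]]" matches a literal "]".
if __name__ == '__main__':
    from fnmatch import fnmatch
    assert fnmatch('test_xfast_parametrized[0]', '*xfast_parametrized[[]0[]]')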