import argparse

import py

from pytest_benchmark.csv import CSVResults

from . import plugin
from .logger import Logger
from .plugin import add_csv_options
from .plugin import add_display_options
from .plugin import add_global_options
from .plugin import add_histogram_options
from .table import TableResults
from .utils import NAME_FORMATTERS
from .utils import first_or_value
from .utils import load_storage
from .utils import report_noprogress

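# Help epilog shared by commands that select saved runs; '{0}' is substituted
# with the command name (see make_parser()).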
COMPARE_HELP = '''examples:

    pytest-benchmark {0} 'Linux-CPython-3.5-64bit/*'

        Loads all benchmarks run with that interpreter. Note the special quoting that disables your shell's glob
        expansion.

    pytest-benchmark {0} 0001

        Loads first run from all the interpreters.

    pytest-benchmark {0} /foo/bar/0001_abc.json /lorem/ipsum/0001_sir_dolor.json

        Loads runs from exactly those files.'''


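# argparse action behind both '-h/--help' and the 'help' command: it records
# that help was requested and for which command, so parse_args() below can
# re-dispatch to that command's own '--help'.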
class HelpAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        namespace.help = True
        namespace.command = values or 'help'


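# ArgumentParser with a small subcommand registry: add_command() lazily creates
# the subparsers group and records each subparser in commands_dispatch, while
# parse_args() turns '--help COMMAND' into that command's help output and
# errors out when no command was given at all.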
class CommandArgumentParser(argparse.ArgumentParser):
    commands = None
    commands_dispatch = None

    def __init__(self, *args, **kwargs):
        kwargs['add_help'] = False

        super(CommandArgumentParser, self).__init__(*args,
                                                    formatter_class=argparse.RawDescriptionHelpFormatter,
                                                    **kwargs)
        self.add_argument(
            '-h', '--help',
            metavar='COMMAND',
            nargs='?', action=HelpAction, help='Display help and exit.'
        )
        self.add_command(
            'help',
            description='Display help and exit.'
        ).add_argument(
            'command',
            nargs='?', action=HelpAction
        )

    def add_command(self, name, **opts):
        if self.commands is None:
            self.commands = self.add_subparsers(
                title='commands', dest='command', parser_class=argparse.ArgumentParser,
            )
            self.commands_dispatch = {}
        if 'description' in opts and 'help' not in opts:
            opts['help'] = opts['description']

        command = self.commands.add_parser(
            name, formatter_class=argparse.RawDescriptionHelpFormatter, **opts
        )
        self.commands_dispatch[name] = command
        return command

    def parse_args(self):
        args = super(CommandArgumentParser, self).parse_args()
        if args.help:
            if args.command:
                return super(CommandArgumentParser, self).parse_args([args.command, '--help'])
            else:
                self.print_help()
                self.exit()
        elif not args.command:
            self.error('the following arguments are required: COMMAND (choose from %s)' % ', '.join(
                map(repr, self.commands.choices)))
        return args


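# Positional selector shared by commands that load saved runs: zero or more
# globs or exact paths to JSON files; no arguments means "load everything".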
def add_glob_or_file(addoption):
    addoption(
        'glob_or_file',
        nargs='*', help='Glob or exact path for json files. If not specified, all runs are loaded.'
    )


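# Builds the top-level parser: global options plus the 'list' and 'compare'
# commands.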
def make_parser():
    parser = CommandArgumentParser('py.test-benchmark', description="pytest_benchmark's management commands.")
    add_global_options(parser.add_argument, prefix="")

    parser.add_command('list', description='List saved runs.')

    compare_command = parser.add_command(
        'compare',
        description='Compare saved runs.',
        epilog=COMPARE_HELP.format('compare'),
    )
    add_display_options(compare_command.add_argument, prefix="")
    add_histogram_options(compare_command.add_argument, prefix="")
    add_glob_or_file(compare_command.add_argument)
    add_csv_options(compare_command.add_argument, prefix="")

    return parser


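# Loads and groups benchmarks from storage. If a conftest.py is present in the
# current directory it is imported so that a local pytest_benchmark_group_stats
# hook can override the plugin's default grouping.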
def load(storage, glob_or_file, group_by):
    conftest = py.path.local('conftest.py')
    if conftest.check():
        conftest = conftest.pyimport()
    else:
        conftest = None

    groups = getattr(conftest, 'pytest_benchmark_group_stats', plugin.pytest_benchmark_group_stats)(
        benchmarks=storage.load_benchmarks(*glob_or_file),
        group_by=group_by,
        config=None,
    )
    return groups


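# Entry point for the console script: parse arguments, open the storage and
# dispatch to the selected command.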
def main():
    parser = make_parser()
    args = parser.parse_args()
    logger = Logger(args.verbose)
    storage = load_storage(args.storage, logger=logger, netrc=args.netrc)

    if args.command == 'list':
        for file in storage.query():
            print(file)
    elif args.command == 'compare':
        results_table = TableResults(
            args.columns, args.sort, first_or_value(args.histogram, False), NAME_FORMATTERS[args.name], logger
        )
        groups = load(storage, args.glob_or_file, args.group_by)

        results_table.display(TerminalReporter(), groups, progress_reporter=report_noprogress)
        if args.csv:
            results_csv = CSVResults(args.columns, args.sort, logger)
            output_file, = args.csv

            results_csv.render(output_file, groups)
    elif args.command is None:
        parser.error("missing command (available commands: %s)" % ', '.join(map(repr, parser.commands.choices)))


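# Minimal replacement for pytest's terminal reporter so TableResults.display()
# can be reused outside a pytest session; it writes through py.io.TerminalWriter.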
class TerminalReporter(object):
    def __init__(self):
        self._tw = py.io.TerminalWriter()

    def ensure_newline(self):
        pass

    def write(self, content, **markup):
        self._tw.write(content, **markup)

    def write_line(self, line, **markup):
        if not py.builtin._istext(line):
            line = py.builtin.text(line, errors="replace")
        self._tw.line(line, **markup)

    def rewrite(self, line, **markup):
        line = str(line)
        self._tw.write("\r" + line, **markup)

    def write_sep(self, sep, title=None, **markup):
        self._tw.sep(sep, title, **markup)

    def section(self, title, sep="=", **kw):
        self._tw.sep(sep, title, **kw)

    def line(self, msg, **kw):
        self._tw.line(msg, **kw)
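
# Example invocations (a sketch; assumes this module is exposed as the
# 'pytest-benchmark' console script and that the unprefixed option names
# registered above include '--sort' and '--csv'):
#
#   pytest-benchmark list
#   pytest-benchmark compare 0001 0002 --sort=min --csv=comparison.csv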