1
|
|
|
from __future__ import division |
2
|
|
|
from __future__ import print_function |
3
|
|
|
|
4
|
|
|
import operator |
5
|
|
|
import sys |
6
|
|
|
from math import isinf |
7
|
|
|
|
8
|
|
|
from .utils import report_progress |
9
|
|
|
from .utils import time_unit |
10
|
|
|
|
11
|
|
|
# The "," thousands separator in format specs needs Python 2.7+ (PEP 378);
# on 2.6 we fall back to plain fixed-point formatting.
_HAS_THOUSANDS_SEP = sys.version_info[:2] > (2, 6)

if _HAS_THOUSANDS_SEP:
    # Plain 4-decimal number, and a number right-aligned to a dynamic width
    # followed by a left-aligned suffix (the relative-scale annotation).
    NUMBER_FMT = "{0:,.4f}"
    ALIGNED_NUMBER_FMT = "{0:>{1},.4f}{2:<{3}}"
else:
    NUMBER_FMT = "{0:.4f}"
    ALIGNED_NUMBER_FMT = "{0:>{1}.4f}{2:<{3}}"
13
|
|
|
|
14
|
|
|
|
15
|
|
|
class ResultsTable(object):
    """Render benchmark statistics as aligned terminal tables, one per group.

    Parameters:
        columns: ordered list of stat names to show as columns
            (e.g. "min", "max", "mean", "stddev", "median", "iqr",
            "outliers", "rounds", "iterations").
        sort: benchmark property used to order the rows (a stat name,
            or "name"/"fullname").
        histogram: output path prefix for histogram files; falsy disables plotting.
        logger: project logger used for warning/info messages.
    """

    def __init__(self, columns, sort, histogram, logger):
        self.columns = columns
        self.sort = sort
        self.histogram = histogram
        self.logger = logger

    def display(self, tr, groups):
        """Write one formatted results table per benchmark group.

        Parameters:
            tr: a pytest terminal reporter (``write``/``write_line``/``rewrite``).
            groups: iterable of ``(group_name, benchmarks)`` pairs; each benchmark
                is a mapping exposing at least "name" plus the stats in
                ``self.columns``.  ``group_name`` may be None for ungrouped runs.
        """
        tr.write_line("")
        tr.rewrite("Computing stats ...", black=True, bold=True)
        for line, (group, benchmarks) in report_progress(groups, tr, "Computing stats ... group {pos}/{total}"):
            benchmarks = sorted(benchmarks, key=operator.itemgetter(self.sort))

            # Best/worst value per stat: drives the green/red highlighting and
            # the relative-scale annotation next to each number.
            worst = {}
            best = {}
            solo = len(benchmarks) == 1
            for line, prop in report_progress(("min", "max", "mean", "median", "iqr", "stddev"), tr, "{line}: {value}",
                                              line=line):
                worst[prop] = max(bench[prop] for _, bench in report_progress(
                    benchmarks, tr, "{line} ({pos}/{total})", line=line))
                best[prop] = min(bench[prop] for _, bench in report_progress(
                    benchmarks, tr, "{line} ({pos}/{total})", line=line))
            # For the integer columns only the maximum matters (column width).
            for line, prop in report_progress(("outliers", "rounds", "iterations"), tr, "{line}: {value}", line=line):
                worst[prop] = max(benchmark[prop] for _, benchmark in report_progress(
                    benchmarks, tr, "{line} ({pos}/{total})", line=line))

            # Pick the time unit/scale from the sort column; fall back to "min"
            # when sorting by a non-numeric property (name/fullname).
            time_unit_key = self.sort
            if self.sort in ("name", "fullname"):
                time_unit_key = "min"
            unit, adjustment = time_unit(best.get(self.sort, benchmarks[0][time_unit_key]))
            labels = {
                "name": "Name (time in {0}s)".format(unit),
                "min": "Min",
                "max": "Max",
                "mean": "Mean",
                "stddev": "StdDev",
                "rounds": "Rounds",
                "iterations": "Iterations",
                "iqr": "IQR",
                "median": "Median",
                "outliers": "Outliers(*)",
            }
            # Column widths: wide enough for the header and the widest value.
            widths = {
                "name": 3 + max(len(labels["name"]), max(len(benchmark["name"]) for benchmark in benchmarks)),
                "rounds": 2 + max(len(labels["rounds"]), len(str(worst["rounds"]))),
                "iterations": 2 + max(len(labels["iterations"]), len(str(worst["iterations"]))),
                "outliers": 2 + max(len(labels["outliers"]), len(str(worst["outliers"]))),
            }
            for prop in "min", "max", "mean", "stddev", "median", "iqr":
                widths[prop] = 2 + max(len(labels[prop]), max(
                    len(NUMBER_FMT.format(bench[prop] * adjustment))
                    for bench in benchmarks
                ))

            # Room for the " (N.NN)" relative-scale suffix; pointless with one row.
            rpadding = 0 if solo else 10
            labels_line = labels["name"].ljust(widths["name"]) + "".join(
                labels[prop].rjust(widths[prop]) + (
                    " " * rpadding
                    if prop not in ["outliers", "rounds", "iterations"]
                    else ""
                )
                for prop in self.columns
            )
            tr.rewrite("")
            tr.write_line(
                " benchmark{name}: {count} tests ".format(
                    count=len(benchmarks),
                    name="" if group is None else " {0!r}".format(group),
                ).center(len(labels_line), "-"),
                yellow=True,
            )
            tr.write_line(labels_line)
            tr.write_line("-" * len(labels_line), yellow=True)

            for bench in benchmarks:
                has_error = bench.get("has_error")
                tr.write(bench["name"].ljust(widths["name"]), red=has_error, invert=has_error)
                for prop in self.columns:
                    if prop in ("min", "max", "mean", "stddev", "median", "iqr"):
                        tr.write(
                            ALIGNED_NUMBER_FMT.format(
                                bench[prop] * adjustment,
                                widths[prop],
                                compute_baseline_scale(best[prop], bench[prop], rpadding),
                                rpadding
                            ),
                            green=not solo and bench[prop] == best.get(prop),
                            red=not solo and bench[prop] == worst.get(prop),
                            bold=True,
                        )
                    else:
                        # Integer columns: plain right-aligned value, no scale suffix.
                        tr.write("{0:>{1}}".format(bench[prop], widths[prop]))
                tr.write("\n")
            tr.write_line("-" * len(labels_line), yellow=True)
            tr.write_line("")
            if self.histogram:
                from .histogram import make_histogram
                # Cap the plot size so the histogram stays readable.
                if len(benchmarks) > 75:
                    self.logger.warn("BENCHMARK-H1",
                                     "Group {0!r} has too many benchmarks. Only plotting 75 benchmarks.".format(group))
                    benchmarks = benchmarks[:75]

                output_file = make_histogram(self.histogram, group, benchmarks, unit, adjustment)

                self.logger.info("Generated histogram {0}".format(output_file), bold=True)

        tr.write_line("(*) Outliers: 1 Standard Deviation from Mean; "
                      "1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.", bold=True, black=True)
124
|
|
|
|
125
|
|
|
|
126
|
|
|
def compute_baseline_scale(baseline, value, width):
    """Return the relative-to-baseline annotation, e.g. " (2.34)", left-justified to *width*.

    A zero *width* disables the annotation (returns ""). A zero *baseline* yields
    " (inf)"; ratios above 1000 collapse to " (>1000.0)".
    """
    if not width:
        return ""

    if value == baseline:
        text = " (1.0)"
    else:
        ratio = abs(value / baseline) if baseline else float("inf")
        if ratio > 1000:
            text = " (inf)" if isinf(ratio) else " (>1000.0)"
        else:
            text = " ({0:.2f})".format(ratio)
    return text.ljust(width)
140
|
|
|
|