TableResults.display()   F
last analyzed

Complexity

Conditions 26

Size

Total Lines 126

Duplication

Lines 0
Ratio 0 %

Importance

Changes 0
Metric Value
cc 26
c 0
b 0
f 0
dl 0
loc 126
rs 2

How to fix   Long Method    Complexity   

Long Method

Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.

For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.

Commonly applied refactorings include the Extract Method refactoring described above: move each commented or otherwise self-contained part of the body into its own well-named method.

Complexity

Complex methods like TableResults.display() often do a lot of different things. To break such a method down, we need to identify a cohesive component within it. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.

Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.

1
from __future__ import division
2
from __future__ import print_function
3
4
import operator
5
import sys
6
from math import isinf
7
8
from .utils import operations_unit
9
from .utils import report_progress
10
from .utils import time_unit
11
12
NUMBER_FMT = "{0:,.4f}" if sys.version_info[:2] > (2, 6) else "{0:.4f}"
13
ALIGNED_NUMBER_FMT = "{0:>{1},.4f}{2:<{3}}" if sys.version_info[:2] > (2, 6) else "{0:>{1}.4f}{2:<{3}}"
14
15
16
class TableResults(object):
    """Render benchmark statistics as aligned, colorized comparison tables.

    One table is written per benchmark group; the best/worst value of each
    statistic is highlighted, an optional histogram is generated per group,
    and a legend is appended after all groups.
    """

    def __init__(self, columns, sort, histogram, name_format, logger):
        self.columns = columns          # ordered stat names rendered as table columns
        self.sort = sort                # benchmark property used to sort the rows
        self.histogram = histogram      # histogram output prefix; falsy disables plotting
        self.name_format = name_format  # callable(bench) -> display name string
        self.logger = logger            # project logger exposing warn()/info()

    def display(self, tr, groups, progress_reporter=report_progress):
        """Write one statistics table per group to the terminal reporter.

        :param tr: terminal reporter (``write``/``write_line``/``rewrite``).
        :param groups: iterable of ``(group_name, benchmarks)`` pairs.
        :param progress_reporter: wrapper yielding ``(status_line, item)``
            pairs while updating a progress message on *tr*.
        """
        tr.write_line("")
        tr.rewrite("Computing stats ...", black=True, bold=True)
        for line, (group, benchmarks) in progress_reporter(groups, tr, "Computing stats ... group {pos}/{total}"):
            benchmarks = sorted(benchmarks, key=operator.itemgetter(self.sort))
            for bench in benchmarks:
                bench["name"] = self.name_format(bench)

            # Collect the best/worst value of every stat so extremes can be
            # highlighted (green/red) and used for unit scaling below.
            worst = {}
            best = {}
            solo = len(benchmarks) == 1
            for line, prop in progress_reporter(("min", "max", "mean", "median", "iqr", "stddev", "ops"),
                                                tr, "{line}: {value}", line=line):
                if prop == "ops":
                    # For operations/second higher is better, so best/worst
                    # are inverted relative to the timing stats below.
                    worst[prop] = min(bench[prop] for _, bench in progress_reporter(
                        benchmarks, tr, "{line} ({pos}/{total})", line=line))
                    best[prop] = max(bench[prop] for _, bench in progress_reporter(
                        benchmarks, tr, "{line} ({pos}/{total})", line=line))
                else:
                    worst[prop] = max(bench[prop] for _, bench in progress_reporter(
                        benchmarks, tr, "{line} ({pos}/{total})", line=line))
                    best[prop] = min(bench[prop] for _, bench in progress_reporter(
                        benchmarks, tr, "{line} ({pos}/{total})", line=line))
            for line, prop in progress_reporter(("outliers", "rounds", "iterations"), tr, "{line}: {value}", line=line):
                worst[prop] = max(benchmark[prop] for _, benchmark in progress_reporter(
                    benchmarks, tr, "{line} ({pos}/{total})", line=line))

            # Choose human-friendly display units from a representative value.
            # When sorting by name there is no numeric sort key, so fall back
            # to "min" for the time unit.
            time_unit_key = self.sort
            if self.sort in ("name", "fullname"):
                time_unit_key = "min"
            unit, adjustment = time_unit(best.get(self.sort, benchmarks[0][time_unit_key]))
            ops_unit, ops_adjustment = operations_unit(worst.get('ops', benchmarks[0]['ops']))
            labels = {
                "name": "Name (time in {0}s)".format(unit),
                "min": "Min",
                "max": "Max",
                "mean": "Mean",
                "stddev": "StdDev",
                "rounds": "Rounds",
                "iterations": "Iterations",
                "iqr": "IQR",
                "median": "Median",
                "outliers": "Outliers",
                "ops": "OPS ({0}ops/s)".format(ops_unit) if ops_unit else "OPS",
            }
            # Column widths: wide enough for the label and the widest value.
            widths = {
                "name": 3 + max(len(labels["name"]), max(len(benchmark["name"]) for benchmark in benchmarks)),
                "rounds": 2 + max(len(labels["rounds"]), len(str(worst["rounds"]))),
                "iterations": 2 + max(len(labels["iterations"]), len(str(worst["iterations"]))),
                "outliers": 2 + max(len(labels["outliers"]), len(str(worst["outliers"]))),
                "ops": 2 + max(len(labels["ops"]), len(NUMBER_FMT.format(best["ops"] * ops_adjustment))),
            }
            for prop in "min", "max", "mean", "stddev", "median", "iqr":
                widths[prop] = 2 + max(len(labels[prop]), max(
                    len(NUMBER_FMT.format(bench[prop] * adjustment))
                    for bench in benchmarks
                ))

            # With a single benchmark there is nothing to compare against, so
            # skip the "(ratio)" suffix padding entirely.
            rpadding = 0 if solo else 10
            labels_line = labels["name"].ljust(widths["name"]) + "".join(
                labels[prop].rjust(widths[prop]) + (
                    " " * rpadding
                    if prop not in ["outliers", "rounds", "iterations"]
                    else ""
                )
                for prop in self.columns
            )
            tr.rewrite("")
            tr.write_line(
                " benchmark{name}: {count} tests ".format(
                    count=len(benchmarks),
                    name="" if group is None else " {0!r}".format(group),
                ).center(len(labels_line), "-"),
                yellow=True,
            )
            tr.write_line(labels_line)
            tr.write_line("-" * len(labels_line), yellow=True)

            # One row per benchmark; extremes are colored unless solo.
            for bench in benchmarks:
                has_error = bench.get("has_error")
                tr.write(bench["name"].ljust(widths["name"]), red=has_error, invert=has_error)
                for prop in self.columns:
                    if prop in ("min", "max", "mean", "stddev", "median", "iqr"):
                        tr.write(
                            ALIGNED_NUMBER_FMT.format(
                                bench[prop] * adjustment,
                                widths[prop],
                                compute_baseline_scale(best[prop], bench[prop], rpadding),
                                rpadding
                            ),
                            green=not solo and bench[prop] == best.get(prop),
                            red=not solo and bench[prop] == worst.get(prop),
                            bold=True,
                        )
                    elif prop == "ops":
                        tr.write(
                            ALIGNED_NUMBER_FMT.format(
                                bench[prop] * ops_adjustment,
                                widths[prop],
                                compute_baseline_scale(best[prop], bench[prop], rpadding),
                                rpadding
                            ),
                            green=not solo and bench[prop] == best.get(prop),
                            red=not solo and bench[prop] == worst.get(prop),
                            bold=True,
                        )
                    else:
                        tr.write("{0:>{1}}".format(bench[prop], widths[prop]))
                tr.write("\n")
            tr.write_line("-" * len(labels_line), yellow=True)
            tr.write_line("")
            if self.histogram:
                from .histogram import make_histogram
                if len(benchmarks) > 75:
                    # NOTE: message previously claimed 50 while the code
                    # truncates to 75 — keep the code's behavior and make the
                    # message agree with it.
                    self.logger.warn("BENCHMARK-H1",
                                     "Group {0!r} has too many benchmarks. Only plotting 75 benchmarks.".format(group))
                    benchmarks = benchmarks[:75]

                output_file = make_histogram(self.histogram, group, benchmarks, unit, adjustment)

                self.logger.info("Generated histogram: {0}".format(output_file), bold=True)

        tr.write_line("Legend:")
        tr.write_line("  Outliers: 1 Standard Deviation from Mean; "
                      "1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.")
        tr.write_line("  OPS: Operations Per Second, computed as 1 / Mean")
150
151
152
def compute_baseline_scale(baseline, value, width):
    """Return a left-justified ``" (ratio)"`` suffix comparing *value* to *baseline*.

    The ratio is ``abs(value / baseline)``; a zero baseline yields ``(inf)``
    and anything above 1000 is capped as ``(>1000.0)``. A falsy *width*
    (solo benchmark) yields the empty string.
    """
    if not width:
        return ""
    if value == baseline:
        return " (1.0)".ljust(width)

    ratio = abs(value / baseline) if baseline else float("inf")
    if isinf(ratio):
        return " (inf)".ljust(width)
    if ratio > 1000:
        return " (>1000.0)".ljust(width)
    return " ({0:.2f})".format(ratio).ljust(width)
166