| Metric | Value |
| --- | --- |
| Conditions | 21 |
| Total Lines | 102 |
| Duplicated Lines | 0 |
| Duplication Ratio | 0 % |
Small methods make your code easier to understand, especially when combined with a good name. And if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, that is usually a sign that the commented part should be extracted into a new method, with the comment as a starting point for naming it. A minimal sketch of this move follows.
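As a hedged illustration (the `render_total` code below is hypothetical, not from pytest-benchmark):

```python
# Before: the comment marks a hidden abstraction.
def render_total(order):
    # compute the total including tax
    total = sum(item.price * item.quantity for item in order.items)
    total *= 1 + order.tax_rate
    print("Total: {0:.2f}".format(total))

# After: Extract Method turns the comment into a method name.
def total_with_tax(order):
    total = sum(item.price * item.quantity for item in order.items)
    return total * (1 + order.tax_rate)

def render_total(order):
    print("Total: {0:.2f}".format(total_with_tax(order)))
```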
Commonly applied refactorings include:

- Extract Method

If many parameters/temporary variables are present:

- Replace Temp with Query
- Introduce Parameter Object
- Preserve Whole Object
- Replace Method with Method Object

A sketch of Replace Temp with Query follows this list.
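As a hedged sketch of Replace Temp with Query (hypothetical code; `base_price` is an invented helper, not part of pytest-benchmark):

```python
# Before: the temporary variable gets in the way of Extract Method.
def discounted_price(order):
    base_price = order.quantity * order.item_price  # temp variable
    if base_price > 1000:
        return base_price * 0.95
    return base_price * 0.98

# After: the temp becomes a small, named query.
def base_price(order):
    return order.quantity * order.item_price

def discounted_price(order):
    if base_price(order) > 1000:
        return base_price(order) * 0.95
    return base_price(order) * 0.98
```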
Complex methods like src.pytest_benchmark.ResultsTable.display() often do a lot of different things. To break the enclosing class down, we need to identify a cohesive component within it. A common approach to finding such a component is to look for fields/methods that share the same prefixes or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a subclass, Extract Subclass is also a candidate, and is often faster to apply. A sketch of Extract Class follows.
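A hedged sketch of Extract Class on hypothetical code (the `ReportView` and `HistogramOptions` classes are invented for illustration; only the shared `histogram_` prefix mirrors the method below):

```python
# Before: the shared "histogram_" prefix hints at a cohesive component.
class ReportView(object):
    def __init__(self, columns, histogram_path, histogram_limit):
        self.columns = columns
        self.histogram_path = histogram_path
        self.histogram_limit = histogram_limit

# After: Extract Class moves the component behind a single field.
class HistogramOptions(object):
    def __init__(self, path, limit):
        self.path = path
        self.limit = limit

class ReportView(object):
    def __init__(self, columns, histogram):
        self.columns = columns
        self.histogram = histogram  # a HistogramOptions instance
```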
The method in question:

```python
from __future__ import division

# ... (elided in the original listing) ...

def display(self, tr, groups):
    tr.write_line("")
    tr.rewrite("Computing stats ...", black=True, bold=True)
    for line, (group, benchmarks) in report_progress(groups, tr, "Computing stats ... group {pos}/{total}"):
        benchmarks = sorted(benchmarks, key=operator.itemgetter(self.sort))

        worst = {}
        best = {}
        solo = len(benchmarks) == 1
        for line, prop in report_progress(("min", "max", "mean", "median", "iqr", "stddev"), tr, "{line}: {value}",
                                          line=line):
            worst[prop] = max(bench[prop] for _, bench in report_progress(
                benchmarks, tr, "{line} ({pos}/{total})", line=line))
            best[prop] = min(bench[prop] for _, bench in report_progress(
                benchmarks, tr, "{line} ({pos}/{total})", line=line))
        for line, prop in report_progress(("outliers", "rounds", "iterations"), tr, "{line}: {value}", line=line):
            worst[prop] = max(benchmark[prop] for _, benchmark in report_progress(
                benchmarks, tr, "{line} ({pos}/{total})", line=line))

        time_unit_key = self.sort
        if self.sort in ("name", "fullname"):
            time_unit_key = "min"
        unit, adjustment = time_unit(best.get(self.sort, benchmarks[0][time_unit_key]))
        labels = {
            "name": "Name (time in {0}s)".format(unit),
            "min": "Min",
            "max": "Max",
            "mean": "Mean",
            "stddev": "StdDev",
            "rounds": "Rounds",
            "iterations": "Iterations",
            "iqr": "IQR",
            "median": "Median",
            "outliers": "Outliers(*)",
        }
        widths = {
            "name": 3 + max(len(labels["name"]), max(len(benchmark["name"]) for benchmark in benchmarks)),
            "rounds": 2 + max(len(labels["rounds"]), len(str(worst["rounds"]))),
            "iterations": 2 + max(len(labels["iterations"]), len(str(worst["iterations"]))),
            "outliers": 2 + max(len(labels["outliers"]), len(str(worst["outliers"]))),
        }
        for prop in "min", "max", "mean", "stddev", "median", "iqr":
            widths[prop] = 2 + max(len(labels[prop]), max(
                len(NUMBER_FMT.format(bench[prop] * adjustment))
                for bench in benchmarks
            ))

        rpadding = 0 if solo else 10
        labels_line = labels["name"].ljust(widths["name"]) + "".join(
            labels[prop].rjust(widths[prop]) + (
                " " * rpadding
                if prop not in ["outliers", "rounds", "iterations"]
                else ""
            )
            for prop in self.columns
        )
        tr.rewrite("")
        tr.write_line(
            " benchmark{name}: {count} tests ".format(
                count=len(benchmarks),
                name="" if group is None else " {0!r}".format(group),
            ).center(len(labels_line), "-"),
            yellow=True,
        )
        tr.write_line(labels_line)
        tr.write_line("-" * len(labels_line), yellow=True)

        for bench in benchmarks:
            has_error = bench.get("has_error")
            tr.write(bench["name"].ljust(widths["name"]), red=has_error, invert=has_error)
            for prop in self.columns:
                if prop in ("min", "max", "mean", "stddev", "median", "iqr"):
                    tr.write(
                        ALIGNED_NUMBER_FMT.format(
                            bench[prop] * adjustment,
                            widths[prop],
                            compute_baseline_scale(best[prop], bench[prop], rpadding),
                            rpadding
                        ),
                        green=not solo and bench[prop] == best.get(prop),
                        red=not solo and bench[prop] == worst.get(prop),
                        bold=True,
                    )
                else:
                    tr.write("{0:>{1}}".format(bench[prop], widths[prop]))
            tr.write("\n")
        tr.write_line("-" * len(labels_line), yellow=True)
        tr.write_line("")
        if self.histogram:
            from .histogram import make_histogram
            print(["{0[name]}".format(row) for row in benchmarks])
            if len(benchmarks) > 75:
                self.logger.warn("BENCHMARK-H1",
                                 "Group {0!r} has too many benchmarks. Only plotting 50 benchmarks.".format(group))
                benchmarks = benchmarks[:75]

            output_file = make_histogram(self.histogram, group, benchmarks, unit, adjustment)

            self.logger.info("Generated histogram {0}".format(output_file), bold=True)

    tr.write_line("(*) Outliers: 1 Standard Deviation from Mean; "
                  "1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.", bold=True, black=True)
```
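To make the Extract Method direction concrete for this code, here is a hedged sketch of pulling the stats pass out of display(). `compute_group_stats` is a hypothetical helper name; `report_progress`, `tr`, and the benchmark dicts are as in the listing above, and the per-benchmark progress reporting of the original is simplified away:

```python
def compute_group_stats(self, tr, benchmarks, line):
    """Hypothetical Extract Method target: per-property extremes for one group."""
    worst = {}
    best = {}
    # Timing properties need both the best and the worst value for coloring.
    for line, prop in report_progress(("min", "max", "mean", "median", "iqr", "stddev"), tr,
                                      "{line}: {value}", line=line):
        values = [bench[prop] for bench in benchmarks]
        worst[prop] = max(values)
        best[prop] = min(values)
    # Count-like properties only need the worst value, for column widths.
    for line, prop in report_progress(("outliers", "rounds", "iterations"), tr,
                                      "{line}: {value}", line=line):
        worst[prop] = max(bench[prop] for bench in benchmarks)
    return worst, best
```

With similar extractions for the label/width layout, the table writing, and the histogram plotting, display() shrinks to a short loop over named steps, and each helper becomes independently understandable and testable.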