#!/usr/bin/python3
"""Compare and present build times to the user and generate an interactive HTML graph"""

import sys
import argparse


def load_log_file(file) -> dict:
    """Loads targets and their durations from ninja logfile `file` and returns them in a dict"""

    with open(file, 'r') as file_pointer:
        lines = file_pointer.read().splitlines()

    # {target: duration} dict
    target_duration_dict = {}

    # An issue appeared with newer versions of cmake where targets are duplicated with a relative
    # and an absolute path, so the absolute-path duplicates must be filtered out here; this also
    # drops comment lines
    lines = [line for line in lines if not line.startswith('#') and not
             line.strip().split()[3].startswith('/')]
    split_lines = [line.strip().split() for line in lines]
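    # Each remaining entry is assumed to hold at least four whitespace-separated fields:
    # start time, end time, a hash, and the target/output path (indices 0-3 below).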

    # calculate each target's compilation duration and add it to the dict
    hash_target_duration_dict = {}
    for line in split_lines:
        duration = int(line[1]) - int(line[0])
        target_hash = line[2]
        target = line[3]

        # filter out lines with duplicate times and concatenate target names
        if target_hash not in hash_target_duration_dict:
            target_duration_dict[target] = duration
            # add target, duration under the new hash
            hash_target_duration_dict[target_hash] = (target, duration)
        else:
            previous_target = hash_target_duration_dict[target_hash][0]
            # concatenate the previous target with the same hash to this one
            concated_target = previous_target + ";" + target
            # remove the old target entry and add concatenated target = duration
            target_duration_dict[concated_target] = \
                target_duration_dict.pop(previous_target)
            # update the target name in the hash dict
            hash_target_duration_dict[target_hash] = (concated_target, duration)

    return target_duration_dict


def format_time(time):
    """Converts a duration in milliseconds into a human-readable string"""

    time /= 1000
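    # time is now in seconds; the ninja log records start/end times in milliseconds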
    if time < 60:
        return '%.1fs' % time
    if time < 60 * 60:
        return '%dm%.1fs' % (time / 60, time % 60)
    return '%dh%dm%.1fs' % (time / (60 * 60), time % (60 * 60) / 60, time % 60)


def generate_webtreemap(current_dict: dict, baseline_dict: dict, logfile: str) -> None:
    """Create file for webtreemap to generate an HTML from; if target is new, append _NEW"""
    with open(logfile + ".webtreemap", 'w') as file_pointer:
        for target in current_dict.keys():
            new_tag = ''
            if baseline_dict and target not in baseline_dict.keys():
                new_tag = '_NEW'

            file_pointer.write(str(current_dict[target]) + ' ' + target + '_'
                               + format_time(current_dict[target]) + new_tag + '\n')


def get_total_time(target_duration_dict: dict) -> int:
    """Return the sum of durations for all targets in the dict"""
    return sum(target_duration_dict.values())


def get_total_time_intersect(target_duration_dict_a: dict, target_duration_dict_b: dict) -> int:
    """Return the sum of durations for targets in A that are also in B"""
    return sum(duration for target, duration in target_duration_dict_a.items()
               if target in target_duration_dict_b)


def print_report(current_dict: dict, baseline_dict: dict = None) -> None:
    """Print a report with the profiling results to stdout"""

    # If the targets/output files have changed between baseline and current, use the intersect
    # totals (durations of targets present in both logs) instead of total_time for the deltas
    if baseline_dict and baseline_dict.keys() != current_dict.keys():
        msg = ("Warning: the targets in the current logfile differ from those in the baseline "
               "logfile; therefore the time and time percentage deltas TD and %TD for each target "
               "as well as for the entire build are calculated without taking the added/removed "
               "targets into account, but the total build time at the end does take them into "
               "account. If the added/removed targets modify the behavior of targets in both "
               "logfiles, the D delta may not make sense.\n-----\n")
        print(msg)
        target_mismatch = True
        total_time_current_intersect = get_total_time_intersect(current_dict, baseline_dict)
        total_time_baseline_intersect = get_total_time_intersect(baseline_dict, current_dict)
    else:
        target_mismatch = False

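    # Report columns: % = share of the total build time, D = change in that share vs the baseline,
    # T = the target's build time, TD = time delta vs the baseline, %TD = percentage time delta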
    header = [f'{"Target:":60}', f"{'%':4}", f"{'D':5}", f"{'T':8}",
              f"{'TD':8}", f"{'%TD':5}", "Note"]
    print(' | '.join(header))

    total_time_current = get_total_time(current_dict)
    if baseline_dict:
        total_time_baseline = get_total_time(baseline_dict)

    # sort targets/output files by the percentage of build time they took
    current_dict = dict(sorted(current_dict.items(), key=lambda item: item[1], reverse=True))

    for target in current_dict.keys():
        # percentage of the build time that the target took
        perc = current_dict[target]/total_time_current * 100

        # difference between perc in current and in baseline
        delta = 0
        if baseline_dict:
            if target_mismatch:
                if target in baseline_dict.keys():
                    delta = current_dict[target]/total_time_current_intersect * 100 - \
                        baseline_dict[target]/total_time_baseline_intersect * 100
            else:
                delta = perc - (baseline_dict[target]/total_time_baseline * 100)
            if abs(delta) < 0.1:
                delta = 0

        # time is the formatted build time of the target
        time = format_time(current_dict[target])

        # time_delta is the formatted time difference between current and baseline
        if baseline_dict and target in baseline_dict.keys():
            time_delta = current_dict[target] - baseline_dict[target]
            if abs(time_delta) < 60:
                time_delta = 0
            time_delta = format_time(time_delta)
        else:
            time_delta = 0

        # perc_time_delta is the percentage difference between the baseline and current build times
        if baseline_dict and target in baseline_dict.keys():
            perc_time_delta = (current_dict[target]/baseline_dict[target]) * 100 - 100
        else:
            perc_time_delta = 0

        line = [f'{target:60}', f"{perc:4.1f}", f"{delta:5.1f}", f"{time:>8}",
                f"{time_delta:>8}", f"{perc_time_delta:5.1f}"]
        # if the target was not in the baseline, append a note
        if baseline_dict and target not in baseline_dict.keys():
            line.append("Not in baseline")

        # if the target has multiple output files, print them on separate lines
        # (times only on the last line)
        if ';' in target:
            print("\n".join(target.rsplit(';', 1)[0].split(';')))
            split_target = target.rsplit(';', 1)[1]
            line[0] = f'{split_target:60}'
        print(' | '.join(line))

    # Print the time and % delta of the whole build
    if baseline_dict:
        # total_perc_time_delta is the percentage change of build times between current and baseline
        total_time_delta = total_time_current - total_time_baseline
        if abs(total_time_delta) < 60:
            total_time_delta = 0
        total_time_delta = format_time(total_time_delta)
        total_perc_time_delta = (total_time_current / total_time_baseline) * 100 - 100
        line = ["-----\nTotal time:", format_time(total_time_current),
                "| TD", f'{total_time_delta:>8}', "| %TD", f'{total_perc_time_delta:+5.1f}']
        # if the targets differ between the current and baseline logs, add the intersect deltas,
        # which compare build times while omitting targets that are not present in both logs
        if target_mismatch:
            intersect_time_delta = total_time_current_intersect - total_time_baseline_intersect
            if abs(intersect_time_delta) < 60:
                intersect_time_delta = 0
            intersect_time_delta = format_time(intersect_time_delta)
            line.append(f'| intersect TD {intersect_time_delta:>8}')
            intersect_perc_time_delta = (total_time_current_intersect /
                                         total_time_baseline_intersect) * 100 - 100
            line.append(f'| intersect %TD {intersect_perc_time_delta:+5.1f}')
        print(' '.join(line))
    else:
        print("-----\nTotal time:", format_time(total_time_current))

    # Print targets which are present in the baseline but not in the current log
    if baseline_dict:
        removed = [target for target in baseline_dict.keys() if target not in current_dict.keys()]
        print("-----\nTargets omitted from baseline:\n", '\n'.join(removed))


def main() -> None:
    """Parse args, check the python version, then generate the webtreemap file and print the report"""

    # Dict key order relied on by print_report is only specified in 3.7.0+
    if sys.version_info < (3, 7, 0):
        sys.stderr.write("You need python 3.7 or later to run this script\n")
        sys.exit(1)

    # Parse arguments
    parser = argparse.ArgumentParser(description='Create .webtreemap and print profiling report',
                                     usage='./build_profiler_report.py <logfile> [--baseline]')
    parser.add_argument('logfile', type=str, help='Ninja logfile to compare against the baseline')
    parser.add_argument('--baseline', dest='skipBaseline', action='store_true',
                        help='Do not compare the logfile with the baseline log '
                             '(default: compare the baseline logfile with the current logfile)')
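    # Example invocations (the logfile path is illustrative):
    #   ./build_profiler_report.py .ninja_log              # compare against 0.ninja_log
    #   ./build_profiler_report.py .ninja_log --baseline   # report without a baseline comparison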

    args = parser.parse_args()

    if args.skipBaseline:
        baseline_dict = None
    else:
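        # the baseline log is expected at the hardcoded relative path "0.ninja_log"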
        baseline_dict = load_log_file("0.ninja_log")

    logfile = args.logfile
    current_dict = load_log_file(logfile)

    generate_webtreemap(current_dict, baseline_dict, logfile)
    print_report(current_dict, baseline_dict)


if __name__ == "__main__":
    main()