Total Complexity | 60 |
Total Lines | 259 |
Duplicated Lines | 14.29 % |
Coverage | 0% |
Changes | 0 |
Duplicate code is one of the most pungent code smells. A rule that is often used is to restructure code once it is duplicated in three or more places.
Common duplication problems, and corresponding solutions are:
Complex classes like fix_file_ocilclause often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
1 | import sys |
||
2 | import os |
||
3 | import argparse |
||
4 | import subprocess |
||
5 | import jinja2 |
||
6 | import yaml |
||
7 | |||
8 | import ssg |
||
9 | |||
10 | |||
def _create_profile_cache(ssg_root):
    """Build a map from rule/variable name to the set of products selecting it.

    Walks ``<ssg_root>/<product>/profiles`` for every known product, parses
    each profile YAML file, and records which products reference each
    selection.

    Args:
        ssg_root: path to the root of the SSG git checkout.

    Returns:
        dict mapping object name (str) -> set of product names (str).
    """
    profile_cache = {}

    product_list = ['debian8', 'fedora', 'ol7', 'opensuse', 'rhel6', 'rhel7',
                    'sle11', 'sle12', 'ubuntu1404', 'ubuntu1604', 'wrlinux']

    for product in product_list:
        prod_profiles_dir = os.path.join(ssg_root, product, "profiles")
        for _, _, files in os.walk(prod_profiles_dir):
            for filename in files:
                profile_path = os.path.join(prod_profiles_dir, filename)
                # Close the file promptly and avoid arbitrary Python object
                # construction from YAML (yaml.load without a Loader is
                # deprecated and unsafe).
                with open(profile_path, 'r') as profile_file:
                    parsed_profile = yaml.safe_load(profile_file)
                for _obj in parsed_profile['selections']:
                    obj = _obj
                    if '=' in obj:
                        # Selection is a variable with a non-default value,
                        # e.g. "var_foo=bar"; keep only the variable name.
                        obj = _obj[:_obj.index('=')]
                    if not obj[0].isalpha():
                        # Strip a leading refinement/negation marker
                        # character before the real name.
                        obj = obj[1:]

                    profile_cache.setdefault(obj, set()).add(product)

    return profile_cache
||
38 | |||
39 | |||
def read_file(path):
    """Return the contents of *path* as a list of lines without newlines.

    The empty trailing element produced by a file ending in a newline is
    dropped, so round-tripping with write_file preserves the contents.
    """
    # Context manager guarantees the handle is closed (the original leaked it).
    with open(path, 'r') as handle:
        file_contents = handle.read().split("\n")
    if file_contents[-1] == '':
        file_contents = file_contents[:-1]
    return file_contents
||
45 | |||
46 | |||
def write_file(path, contents):
    """Write *contents* (a list of lines) to *path*.

    Each entry is written followed by a newline; any previous file contents
    are replaced.
    """
    # Context manager flushes and closes even on error (the original relied
    # on explicit flush()/close() and leaked the handle on exceptions).
    with open(path, 'w') as handle:
        for line in contents:
            handle.write(line + "\n")
||
54 | |||
55 | |||
def find_section_lines(file_contents, sec):
    """Locate every occurrence of a global key ("section") in a YAML-like
    list of lines.

    All blank or indented lines following the key line, up to (but not
    including) the next global key, belong to the section. For example:

        0: not_it:
        1:  - value
        2: this_one:
        3:  - 2
        4:  - 5
        5:
        6: nor_this:

    for the section "this_one" the result [(2, 5)] is returned. Multiple
    occurrences of *sec* in the file are each identified and returned.

    Args:
        file_contents: list of lines (no trailing newlines).
        sec: section name without the trailing colon.

    Returns:
        list of (begin, end) inclusive 0-based line-index tuples.
    """
    sec_ranges = []
    sec_id = sec + ":"
    end_num = len(file_contents)
    line_num = 0

    while line_num < end_num:
        if file_contents[line_num].startswith(sec_id):
            begin = line_num
            line_num += 1
            # Consume blank lines and indented continuation lines.
            while line_num < end_num:
                line = file_contents[line_num]
                if line and line[0] != ' ':
                    break
                line_num += 1
            sec_ranges.append((begin, line_num - 1))
            # BUG FIX: do not advance past the terminating line here. The
            # original incremented once more, so a second occurrence of
            # sec_id immediately following a section was silently skipped.
        else:
            line_num += 1
    return sec_ranges
||
93 | |||
94 | |||
def update_key_value(contents, key, old_value, new_value):
    """Return a copy of *contents* with the first line exactly equal to
    "<key>: <old_value>" replaced by "<key>: <new_value>".

    The input list is not modified.

    Raises:
        ValueError: if no matching line exists.
    """
    new_contents = contents[:]
    old_line = key + ": " + old_value

    for line_num, line in enumerate(new_contents):
        if line == old_line:
            new_contents[line_num] = key + ": " + new_value
            return new_contents

    # The original used assert(False), which is silently stripped under
    # "python -O"; raise an explicit error instead.
    raise ValueError("key %r with value %r not found" % (key, old_value))
||
111 | |||
112 | |||
def update_subkey_value(contents, key, subkey, old_value, new_value):
    """Return a copy of *contents* where, inside the first section matching
    *key*, every line " <subkey>: <old_value>" is replaced by
    " <subkey>: <new_value>".

    The input list is not modified.

    Raises:
        ValueError: if no matching subkey line exists inside the section.
    """
    new_contents = contents[:]
    old_line = " " + subkey + ": " + old_value
    key_range = find_section_lines(contents, key)[0]
    updated = False

    for line_num in range(key_range[0], key_range[1] + 1):
        if new_contents[line_num] == old_line:
            # BUG FIX: the replacement previously omitted new_value, erasing
            # the subkey's value instead of updating it (compare the sibling
            # update_key_value, which appends the new value).
            new_contents[line_num] = " " + subkey + ": " + new_value
            updated = True

    if not updated:
        # Carry the diagnostic context (formerly bare prints + assert(False),
        # which is stripped under "python -O") in the exception itself.
        raise ValueError(
            "subkey %r of key %r with value %r not found; section: %r"
            % (subkey, key, old_value,
               contents[key_range[0]:key_range[1] + 1]))

    return new_contents
||
134 | |||
135 | |||
def add_key_subkey(contents, key, subkey, value):
    """Insert " <subkey>: <value>" directly beneath the header line of the
    first section matching *key*, returning a new list of lines.

    The input list is not modified.
    """
    # The section header line always exists (begin == end at minimum), so
    # inserting immediately after it is always safe.
    insert_at = find_section_lines(contents, key)[0][0] + 1
    new_line = " " + subkey + ": " + value
    return contents[:insert_at] + [new_line] + contents[insert_at:]
||
147 | |||
148 | |||
def get_key(line):
    """Return the global key opening *line*, or None.

    A global key is "name:" at column 0 where the first character is
    alphabetic and the remainder consists of alphabetic characters or
    underscores.
    """
    # Guard clauses: no colon anywhere, or not starting at column 0 with a
    # letter, means this is not a global key line.
    if ':' not in line or not line[0].isalpha():
        return None
    idx = 0
    last = len(line) - 1
    while idx < last and (line[idx].isalpha() or line[idx] == '_'):
        idx += 1
    return line[:idx] if line[idx] == ':' else None
||
159 | |||
160 | |||
def get_sections(file_contents):
    """Return the set of all global keys present in *file_contents*."""
    # get_key yields None for non-key lines; filter those out.
    return {key for key in map(get_key, file_contents) if key}
||
168 | |||
169 | |||
def range_has_jinja(file_contents, range):
    """Return True if the inclusive line range contains jinja2 markup
    (both an opening '{{' and a closing '}}').

    NOTE: the parameter name "range" shadows the builtin; it is kept for
    interface compatibility with existing callers.
    """
    text = "\n".join(file_contents[range[0]:range[1] + 1])
    # BUG FIX: the original expression `'{{' and '}}' in text` only tested
    # for '}}', because the non-empty string '{{' is always truthy and
    # `and` returned the right-hand operand's result.
    return '{{' in text and '}}' in text
||
172 | |||
173 | |||
def find_profiles(ssg_root, path, obj_name):
    # Strip the 'prodtype' section from a rule/var file whose object is used
    # widely enough (3+ products, any wrlinux use, or use across two distinct
    # distro families) that restricting prodtype no longer makes sense.
    # Relies on the module-level profile_cache built by
    # _create_profile_cache(); rewrites the file in place and prints what
    # was removed. Signature matches the callback expected by walk_dir.
    global profile_cache

    if not obj_name in profile_cache:
        return

    used_products = profile_cache[obj_name]

    more_than_two = len(used_products) >= 3
    uses_wrlinux = 'wrlinux' in used_products
    uses_debian_like_distro = 'debian8' in used_products or 'ubuntu1404' in used_products or 'ubuntu1604' in used_products
    uses_rhel_like_distro = 'rhel6' in used_products or 'rhel7' in used_products or 'ol7' in used_products
    uses_sles_like_distro = 'opensuse' in used_products or 'sle11' in used_products or 'sle12' in used_products
    # True when at least two different distro families use the object.
    uses_two_distros = (uses_debian_like_distro and uses_rhel_like_distro) or (uses_debian_like_distro and uses_sles_like_distro) or (uses_rhel_like_distro and uses_sles_like_distro)

    if more_than_two or uses_wrlinux or uses_two_distros:
        loaded_file = read_file(path)
        sections = get_sections(loaded_file)
        if 'prodtype' in sections:
            # A file is expected to contain exactly one prodtype section.
            sec_range = find_section_lines(loaded_file, 'prodtype')
            assert(len(sec_range) == 1)
            sec_range = sec_range[0]

            # Log what is about to be removed, then rewrite the file with
            # the prodtype section excised.
            print(path)
            print(obj_name)
            print(used_products)
            print(loaded_file[sec_range[0]])
            new_file = loaded_file[:sec_range[0]]
            new_file.extend(loaded_file[sec_range[1]+1:])
            write_file(path, new_file)
||
204 | |||
205 | |||
206 | |||
def parse_from_yaml(file_contents, lines):
    """Parse the inclusive line range *lines* of *file_contents* as YAML.

    Args:
        file_contents: list of lines.
        lines: (begin, end) inclusive 0-based indices, e.g. from
            find_section_lines().

    Returns:
        the object produced by parsing the joined lines as YAML.
    """
    section_text = "\n".join(file_contents[lines[0]:lines[1] + 1])
    # safe_load: no need for arbitrary Python object construction here, and
    # yaml.load without an explicit Loader is deprecated.
    return yaml.safe_load(section_text)
||
211 | |||
212 | |||
def print_file(file_contents):
    """Print each line of *file_contents* prefixed with its 0-based index."""
    # enumerate replaces the range(len(...)) index-loop anti-idiom.
    for line_num, line in enumerate(file_contents):
        print("%d: %s" % (line_num, line))
||
216 | |||
217 | |||
def walk_dir(ssg_root, function):
    """Apply *function* to every .rule and .var file under linux_os/guide.

    Args:
        ssg_root: path to the root of the SSG git checkout.
        function: callable invoked as function(ssg_root, path, obj_name),
            where obj_name is the filename without its .rule/.var suffix.
            Its return value is ignored.
    """
    product_guide = os.path.join(ssg_root, 'linux_os', 'guide')

    # Unused locals from the original (_pgl, dirs, data) are dropped.
    for root, _dirs, files in os.walk(product_guide):
        for filename in files:
            if filename.endswith('.rule'):
                obj_name = filename[:-len('.rule')]
            elif filename.endswith('.var'):
                obj_name = filename[:-len('.var')]
            else:
                continue
            path = os.path.join(root, filename)
            function(ssg_root, path, obj_name)
||
238 | |||
239 | |||
def parse_args():
    """Parse command-line arguments.

    Returns:
        argparse.Namespace with a single attribute, ssg_root.
    """
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="Utility for finding similar guide rules",
    )
    parser.add_argument("ssg_root", help="Path to root of ssg git directory")
    return parser.parse_args()
||
245 | |||
246 | |||
def __main__():
    """Script entry point.

    Builds the module-level profile cache, then scans every rule and
    variable file under linux_os/guide with find_profiles.
    """
    global profile_cache

    args = parse_args()
    profile_cache = _create_profile_cache(args.ssg_root)
    walk_dir(args.ssg_root, find_profiles)
||
255 | |||
256 | |||
# Run the entry point only when executed as a script, not on import.
if __name__ == "__main__":
    __main__()
||
259 |