| Metric | Value |
| --- | --- |
| Conditions | 73 |
| Total Lines | 233 |
| Code Lines | 156 |
| Lines | 0 |
| Ratio | 0 % |
| Tests | 103 |
| CRAP Score | 73.0047 |
| Changes | 0 |
Small methods make your code easier to understand, especially when combined with a good name. And if a method is small, finding a good name for it is usually much easier.
For example, if you find yourself adding comments to a method's body, that is usually a sign that you should extract the commented part into a new method and use the comment as a starting point for the new method's name.
Commonly applied refactorings include:

- Extract Method (as sketched below);
- if many parameters/temporary variables are present: Replace Method with Method Object or Introduce Parameter Object.
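As a minimal, hypothetical illustration of the comment-to-method-name idea (the class `NameScorer`, the helper `_strip_punct_and_split()`, and the scoring logic are invented for this sketch and are not part of abydos):

```python
import string


class NameScorer:
    def score(self, src, tar):
        # strip punctuation and split into words   <- this comment hints at a
        # helper method named _strip_punct_and_split()
        table = str.maketrans('', '', string.punctuation)
        src_words = src.translate(table).split()
        tar_words = tar.translate(table).split()
        return len(set(src_words) & set(tar_words)) / max(
            len(src_words), len(tar_words), 1
        )


class NameScorerRefactored:
    """After Extract Method: the commented block became a named helper."""

    def _strip_punct_and_split(self, name):
        # The comment's wording is now the method's name.
        table = str.maketrans('', '', string.punctuation)
        return name.translate(table).split()

    def score(self, src, tar):
        src_words = self._strip_punct_and_split(src)
        tar_words = self._strip_punct_and_split(tar)
        return len(set(src_words) & set(tar_words)) / max(
            len(src_words), len(tar_words), 1
        )
```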
Complex methods like abydos.distance._synoname.Synoname._synoname_word_approximation() often do a lot of different things. To break such a method (and the class that hosts it) down, we need to identify a cohesive component within it. A common way to find such a component is to look for fields, methods, or local variables that share the same prefixes or suffixes.
Once you have determined which fields belong together, you can apply the Extract Class refactoring. If the component makes sense as a subclass, Extract Subclass is also a candidate, and is often faster.
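In the listing below, the parallel `src_*`/`tar_*` locals (`full_src1`, `full_tar1`, `src1_words`, `tar1_words`, and so on) form exactly such a prefix-sharing group. The following is only a sketch of what an extracted component could look like; the class `_NameForms` and its fields are hypothetical and do not exist in abydos:

```python
from dataclasses import dataclass, field


@dataclass
class _NameForms:
    """Hypothetical extracted component bundling one name's parts.

    Each src_*/tar_* pair in _synoname_word_approximation() could be
    represented by one of these instead of parallel local variables.
    """

    last: str
    first: str = ''
    specials: list = field(default_factory=list)

    def full(self):
        # Mirrors the ' '.join((ln, fn)).replace('-', ' ').strip() step
        # in the original method.
        return ' '.join((self.last, self.first)).replace('-', ' ').strip()


# The word-approximation logic would then operate on two _NameForms
# objects rather than on six parallel src_/tar_ variables.
src = _NameForms('Smith Waterman', 'Tom Joe Bob')
tar = _NameForms('Waterman', 'Tom Joe')
print(src.full(), '|', tar.full())
```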
```python
# -*- coding: utf-8 -*-
# Method of the Synoname class in abydos/distance/_synoname.py:

def _synoname_word_approximation(
    self, src_ln, tar_ln, src_fn='', tar_fn='', features=None
):
    """Return the Synoname word approximation score for two names.

    :param str src_ln: last name of the source
    :param str tar_ln: last name of the target
    :param str src_fn: first name of the source (optional)
    :param str tar_fn: first name of the target (optional)
    :param features: a dict containing special features calculated using
        fingerprint.SynonameToolcode (optional)
    :returns: The word approximation score
    :rtype: float

    >>> pe = Synoname()
    >>> pe._synoname_word_approximation('Smith Waterman', 'Waterman',
    ... 'Tom Joe Bob', 'Tom Joe')
    0.6
    """
    if features is None:
        features = {}
    if 'src_specials' not in features:
        features['src_specials'] = []
    if 'tar_specials' not in features:
        features['tar_specials'] = []

    src_len_specials = len(features['src_specials'])
    tar_len_specials = len(features['tar_specials'])

    # 1
    if ('gen_conflict' in features and features['gen_conflict']) or (
        'roman_conflict' in features and features['roman_conflict']
    ):
        return 0

    # 3 & 7
    full_tar1 = ' '.join((tar_ln, tar_fn)).replace('-', ' ').strip()
    for s_pos, s_type in features['tar_specials']:
        if s_type == 'a':
            full_tar1 = full_tar1[
                : -(1 + len(self.stc.synoname_special_table[s_pos][1]))
            ]
        elif s_type == 'b':
            loc = (
                full_tar1.find(
                    ' ' + self.stc.synoname_special_table[s_pos][1] + ' '
                )
                + 1
            )
            full_tar1 = (
                full_tar1[:loc]
                + full_tar1[
                    loc + len(self.stc.synoname_special_table[s_pos][1]) :
                ]
            )
        elif s_type == 'c':
            full_tar1 = full_tar1[
                1 + len(self.stc.synoname_special_table[s_pos][1]) :
            ]

    full_src1 = ' '.join((src_ln, src_fn)).replace('-', ' ').strip()
    for s_pos, s_type in features['src_specials']:
        if s_type == 'a':
            full_src1 = full_src1[
                : -(1 + len(self.stc.synoname_special_table[s_pos][1]))
            ]
        elif s_type == 'b':
            loc = (
                full_src1.find(
                    ' ' + self.stc.synoname_special_table[s_pos][1] + ' '
                )
                + 1
            )
            full_src1 = (
                full_src1[:loc]
                + full_src1[
                    loc + len(self.stc.synoname_special_table[s_pos][1]) :
                ]
            )
        elif s_type == 'c':
            full_src1 = full_src1[
                1 + len(self.stc.synoname_special_table[s_pos][1]) :
            ]

    full_tar2 = full_tar1
    for s_pos, s_type in features['tar_specials']:
        if s_type == 'd':
            full_tar2 = full_tar2[
                len(self.stc.synoname_special_table[s_pos][1]) :
            ]
        elif (
            s_type == 'X'
            and self.stc.synoname_special_table[s_pos][1] in full_tar2
        ):
            loc = full_tar2.find(
                ' ' + self.stc.synoname_special_table[s_pos][1]
            )
            full_tar2 = (
                full_tar2[:loc]
                + full_tar2[
                    loc + len(self.stc.synoname_special_table[s_pos][1]) :
                ]
            )

    full_src2 = full_src1
    for s_pos, s_type in features['src_specials']:
        if s_type == 'd':
            full_src2 = full_src2[
                len(self.stc.synoname_special_table[s_pos][1]) :
            ]
        elif (
            s_type == 'X'
            and self.stc.synoname_special_table[s_pos][1] in full_src2
        ):
            loc = full_src2.find(
                ' ' + self.stc.synoname_special_table[s_pos][1]
            )
            full_src2 = (
                full_src2[:loc]
                + full_src2[
                    loc + len(self.stc.synoname_special_table[s_pos][1]) :
                ]
            )

    full_tar1 = self._synoname_strip_punct(full_tar1)
    tar1_words = full_tar1.split()
    tar1_num_words = len(tar1_words)

    full_src1 = self._synoname_strip_punct(full_src1)
    src1_words = full_src1.split()
    src1_num_words = len(src1_words)

    full_tar2 = self._synoname_strip_punct(full_tar2)
    tar2_words = full_tar2.split()
    tar2_num_words = len(tar2_words)

    full_src2 = self._synoname_strip_punct(full_src2)
    src2_words = full_src2.split()
    src2_num_words = len(src2_words)

    # 2
    if (
        src1_num_words < 2
        and src_len_specials == 0
        and src2_num_words < 2
        and tar_len_specials == 0
    ):
        return 0

    # 4
    if (
        tar1_num_words == 1
        and src1_num_words == 1
        and tar1_words[0] == src1_words[0]
    ):
        return 1
    if tar1_num_words < 2 and tar_len_specials == 0:
        return 0

    # 5
    last_found = False
    for word in tar1_words:
        if src_ln.endswith(word) or word + ' ' in src_ln:
            last_found = True

    if not last_found:
        for word in src1_words:
            if tar_ln.endswith(word) or word + ' ' in tar_ln:
                last_found = True

    # 6
    matches = 0
    if last_found:
        for i, s_word in enumerate(src1_words):
            for j, t_word in enumerate(tar1_words):
                if s_word == t_word:
                    src1_words[i] = '@'
                    tar1_words[j] = '@'
                    matches += 1
    w_ratio = matches / max(tar1_num_words, src1_num_words)
    if matches > 1 or (
        matches == 1
        and src1_num_words == 1
        and tar1_num_words == 1
        and (tar_len_specials > 0 or src_len_specials > 0)
    ):
        return w_ratio

    # 8
    if (
        tar2_num_words == 1
        and src2_num_words == 1
        and tar2_words[0] == src2_words[0]
    ):
        return 1
    # I see no way that the following can be True if the equivalent in
    # #4 was False.
    if tar2_num_words < 2 and tar_len_specials == 0:  # pragma: no cover
        return 0

    # 9
    last_found = False
    for word in tar2_words:
        if src_ln.endswith(word) or word + ' ' in src_ln:
            last_found = True

    if not last_found:
        for word in src2_words:
            if tar_ln.endswith(word) or word + ' ' in tar_ln:
                last_found = True

    if not last_found:
        return 0

    # 10
    matches = 0
    if last_found:
        for i, s_word in enumerate(src2_words):
            for j, t_word in enumerate(tar2_words):
                if s_word == t_word:
                    src2_words[i] = '@'
                    tar2_words[j] = '@'
                    matches += 1
    w_ratio = matches / max(tar2_num_words, src2_num_words)
    if matches > 1 or (
        matches == 1
        and src2_num_words == 1
        and tar2_num_words == 1
        and (tar_len_specials > 0 or src_len_specials > 0)
    ):
        return w_ratio

    return 0
```
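The four near-identical loops that strip special tokens from `full_tar1`, `full_src1`, `full_tar2`, and `full_src2` are the most obvious Extract Method candidates. Below is only a sketch of how they could be collapsed into one helper; the name `_strip_specials` and its signature are invented here, while `self.stc.synoname_special_table` is the same table the original method already reads:

```python
def _strip_specials(self, name, specials, types):
    """Remove the special tokens listed in `specials` from `name`.

    Hypothetical helper (not part of abydos): reproduces the per-type
    slicing behaviour of the original loops for the types requested.
    """
    for s_pos, s_type in specials:
        if s_type not in types:
            continue
        token = self.stc.synoname_special_table[s_pos][1]
        if s_type == 'a':  # token appended at the end of the name
            name = name[: -(1 + len(token))]
        elif s_type == 'b':  # token embedded between two spaces
            loc = name.find(' ' + token + ' ') + 1
            name = name[:loc] + name[loc + len(token) :]
        elif s_type == 'c':  # token (plus a separator) at the start
            name = name[1 + len(token) :]
        elif s_type == 'd':  # token is a bare prefix
            name = name[len(token) :]
        elif s_type == 'X' and token in name:
            loc = name.find(' ' + token)
            name = name[:loc] + name[loc + len(token) :]
    return name
```

The body of `_synoname_word_approximation()` could then build its four name forms in four short calls, for example `full_tar1 = self._strip_specials(full_tar1, features['tar_specials'], 'abc')` followed by `full_tar2 = self._strip_specials(full_tar1, features['tar_specials'], 'dX')`, which would cut both the condition count and the duplicated slicing logic that drive the CRAP score above.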