| Conditions | 73 |
| Total Lines | 237 |
| Code Lines | 156 |
| Lines | 0 |
| Ratio | 0 % |
| Tests | 103 |
| CRAP Score | 73.0047 |
| Changes | 0 | ||
Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.
Commonly applied refactorings include Extract Method. If many parameters or temporary variables are present, Introduce Parameter Object and Replace Temp with Query can also help.
Complex methods like abydos.distance._synoname.Synoname._synoname_word_approximation() often do a lot of different things. To break such a method down, we need to identify a cohesive component within its enclosing class. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
| 1 | # -*- coding: utf-8 -*- |
||
def _synoname_word_approximation(
    self, src_ln, tar_ln, src_fn='', tar_fn='', features=None
):
    """Return the Synoname word approximation score for two names.

    Args:
        src_ln (str): Last name of the source
        tar_ln (str): Last name of the target
        src_fn (str): First name of the source (optional)
        tar_fn (str): First name of the target (optional)
        features (dict): A dict containing special features calculated
            using :py:class:`fingerprint.SynonameToolcode` (optional)

    Returns:
        float: The word approximation score

    Examples:
        >>> pe = Synoname()
        >>> pe._synoname_word_approximation('Smith Waterman', 'Waterman',
        ... 'Tom Joe Bob', 'Tom Joe')
        0.6

    """
    if features is None:
        features = {}
    if 'src_specials' not in features:
        features['src_specials'] = []
    if 'tar_specials' not in features:
        features['tar_specials'] = []

    src_len_specials = len(features['src_specials'])
    tar_len_specials = len(features['tar_specials'])

    # 1: generation or Roman-numeral conflicts rule out a match outright.
    if features.get('gen_conflict') or features.get('roman_conflict'):
        return 0

    # Hoist the repeated attribute chain; each special is a (position,
    # type) pair indexing into this table, whose [1] column is the term.
    table = self.stc.synoname_special_table

    def strip_abc(name, specials):
        # Remove special terms of type 'a' (at the end of the name),
        # 'b' (space-delimited in the middle), and 'c' (at the start).
        for s_pos, s_type in specials:
            term = table[s_pos][1]
            if s_type == 'a':
                name = name[: -(1 + len(term))]
            elif s_type == 'b':
                loc = name.find(' ' + term + ' ') + 1
                name = name[:loc] + name[loc + len(term) :]
            elif s_type == 'c':
                name = name[1 + len(term) :]
        return name

    def strip_dx(name, specials):
        # Remove special terms of type 'd' (at the start of the name)
        # and type 'X' (embedded anywhere, if present at all).
        for s_pos, s_type in specials:
            term = table[s_pos][1]
            if s_type == 'd':
                name = name[len(term) :]
            elif s_type == 'X' and term in name:
                loc = name.find(' ' + term)
                name = name[:loc] + name[loc + len(term) :]
        return name

    def overlaps_last_names(t_words, s_words):
        # True if any target word ends (or appears medially in) the
        # source last name, or any source word does so in the target's.
        for word in t_words:
            if src_ln.endswith(word) or word + ' ' in src_ln:
                return True
        for word in s_words:
            if tar_ln.endswith(word) or word + ' ' in tar_ln:
                return True
        return False

    def count_shared(s_words, t_words):
        # Count words common to both lists, blanking each matched slot
        # so it cannot be re-matched. Deliberately no break: this
        # reproduces the original counting, in which one source word
        # may match several duplicate target words.
        matches = 0
        for i, s_word in enumerate(s_words):
            for j, t_word in enumerate(t_words):
                if s_word == t_word:
                    s_words[i] = '@'
                    t_words[j] = '@'
                    matches += 1
        return matches

    # 3 & 7: join last+first names, normalize hyphens, drop special terms.
    full_tar1 = strip_abc(
        ' '.join((tar_ln, tar_fn)).replace('-', ' ').strip(),
        features['tar_specials'],
    )
    full_src1 = strip_abc(
        ' '.join((src_ln, src_fn)).replace('-', ' ').strip(),
        features['src_specials'],
    )
    full_tar2 = strip_dx(full_tar1, features['tar_specials'])
    full_src2 = strip_dx(full_src1, features['src_specials'])

    full_tar1 = self._synoname_strip_punct(full_tar1)
    tar1_words = full_tar1.split()
    tar1_num_words = len(tar1_words)

    full_src1 = self._synoname_strip_punct(full_src1)
    src1_words = full_src1.split()
    src1_num_words = len(src1_words)

    full_tar2 = self._synoname_strip_punct(full_tar2)
    tar2_words = full_tar2.split()
    tar2_num_words = len(tar2_words)

    full_src2 = self._synoname_strip_punct(full_src2)
    src2_words = full_src2.split()
    src2_num_words = len(src2_words)

    # 2: a one-word source name with no special terms cannot score.
    if (
        src1_num_words < 2
        and src_len_specials == 0
        and src2_num_words < 2
        and tar_len_specials == 0
    ):
        return 0

    # 4: identical single-word names are a full match.
    if (
        tar1_num_words == 1
        and src1_num_words == 1
        and tar1_words[0] == src1_words[0]
    ):
        return 1
    if tar1_num_words < 2 and tar_len_specials == 0:
        return 0

    # 5 & 6: if the names' words overlap the opposite last name, score
    # the shared words against the longer name's word count.
    last_found = overlaps_last_names(tar1_words, src1_words)
    matches = count_shared(src1_words, tar1_words) if last_found else 0
    w_ratio = matches / max(tar1_num_words, src1_num_words)
    if matches > 1 or (
        matches == 1
        and src1_num_words == 1
        and tar1_num_words == 1
        and (tar_len_specials > 0 or src_len_specials > 0)
    ):
        return w_ratio

    # 8: repeat the single-word check on the type-d/X-stripped forms.
    if (
        tar2_num_words == 1
        and src2_num_words == 1
        and tar2_words[0] == src2_words[0]
    ):
        return 1
    # I see no way that the following can be True if the equivalent in
    # #4 was False.
    if tar2_num_words < 2 and tar_len_specials == 0:  # pragma: no cover
        return 0

    # 9 & 10: repeat the overlap test and scoring on the stripped forms;
    # this time a lack of last-name overlap is disqualifying.
    if not overlaps_last_names(tar2_words, src2_words):
        return 0
    matches = count_shared(src2_words, tar2_words)
    w_ratio = matches / max(tar2_num_words, src2_num_words)
    if matches > 1 or (
        matches == 1
        and src2_num_words == 1
        and tar2_num_words == 1
        and (tar_len_specials > 0 or src_len_specials > 0)
    ):
        return w_ratio

    return 0
| 341 | |||
| 701 |