| Metric | Value |
| --- | --- |
| Conditions | 32 |
| Total Lines | 200 |
| Code Lines | 94 |
| Lines | 0 |
| Ratio | 0 % |
| Tests | 81 |
| CRAP Score | 32 |
| Changes | 0 |
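For context, the CRAP score combines cyclomatic complexity with test coverage. Assuming the standard formula (Savoia/Evans), with comp(m) the complexity ("Conditions" above) and cov(m) the coverage expressed as a fraction:

```latex
% Standard CRAP formula; comp = cyclomatic complexity, cov = coverage in [0, 1]
\mathrm{CRAP}(m) = \mathrm{comp}(m)^2 \cdot \bigl(1 - \mathrm{cov}(m)\bigr)^3 + \mathrm{comp}(m)
% With comp = 32 and full coverage (cov = 1): 32^2 \cdot 0^3 + 32 = 32
```

A CRAP score equal to the complexity (32 = 32 here) is exactly what this formula yields at 100 % coverage; the same method with no coverage would score 32² + 32 = 1056.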
Small methods make your code easier to understand, especially when combined with a good name. And if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, that is usually a good sign that the commented part should be extracted into a new method, with the comment serving as a starting point for naming it.
Commonly applied refactorings include Extract Method. If many parameters or temporary variables are present, refactorings such as Introduce Parameter Object or Replace Temp with Query also apply.
Complex classes like the one behind abydos.phonetic._dolby.Dolby.encode() often do a lot of different things. To break such a class down, we need to identify a cohesive component within it. A common approach to finding such a component is to look for fields and methods that share the same prefixes or suffixes.
Once you have determined which fields and methods belong together, you can apply the Extract Class refactoring. If the component makes sense as a subclass, Extract Subclass is also a candidate, and is often faster.
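As a rough illustration of Extract Method, two of the comment-delimited rule blocks from encode() (shown in full below) could become small, named helpers. The class and helper names here are invented for this sketch; abydos itself does not define them.

```python
# Hypothetical sketch of Extract Method applied to two rule blocks of
# Dolby.encode(); names are illustrative only, not part of abydos.


class DolbyExtractMethodSketch:
    """Illustrative fragment only; not the real abydos.phonetic.Dolby."""

    def _apply_mc_prefix_rule(self, word):
        """Rule 1 (FL2): rewrite an initial MCG/MAG/MAC or MC as MK."""
        if word[:3] in {'MCG', 'MAG', 'MAC'}:
            return 'MK' + word[3:]
        if word[:2] == 'MC':
            return 'MK' + word[2:]
        return word

    def _substitute_letter_groups(self, word):
        """Part of Rule 3 (FL4): unconditional letter-group substitutions."""
        for src, tgt in (('X', 'KS'), ('CE', 'SE'), ('CI', 'SI'), ('CY', 'SI')):
            word = word.replace(src, tgt)
        return word

    def encode_prefix_rules(self, word):
        # Each former comment-delimited block now reads as one named step.
        word = self._apply_mc_prefix_rule(word.upper())
        word = self._substitute_letter_groups(word)
        return word


print(DolbyExtractMethodSketch().encode_prefix_rules('McCarthy'))
# -> 'MKCARTHY' (only these two rule blocks applied)
```

Each extracted helper carries the former comment as its name and docstring, which is exactly the pattern described above.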
Covered source of `abydos.phonetic._dolby.Dolby.encode()` (report line numbers and hit counts omitted):

```python
# -*- coding: utf-8 -*-
# ... (module docstring, imports, and the start of the Dolby class are
#      omitted from this report) ...

    def encode(self, word, max_length=-1, keep_vowels=False, vowel_char='*'):
        r"""Return the Dolby Code of a name.

        Parameters
        ----------
        word : str
            The word to transform
        max_length : int
            Maximum length of the returned Dolby code -- this also activates
            the fixed-length code mode if it is greater than 0
        keep_vowels : bool
            If True, retains all vowel markers
        vowel_char : str
            The vowel marker character (default to \*)

        Returns
        -------
        str
            The Dolby Code

        Examples
        --------
        >>> pe = Dolby()
        >>> pe.encode('Hansen')
        'H*NSN'
        >>> pe.encode('Larsen')
        'L*RSN'
        >>> pe.encode('Aagaard')
        '*GR'
        >>> pe.encode('Braaten')
        'BR*DN'
        >>> pe.encode('Sandvik')
        'S*NVK'
        >>> pe.encode('Hansen', max_length=6)
        'H*NS*N'
        >>> pe.encode('Larsen', max_length=6)
        'L*RS*N'
        >>> pe.encode('Aagaard', max_length=6)
        '*G*R  '
        >>> pe.encode('Braaten', max_length=6)
        'BR*D*N'
        >>> pe.encode('Sandvik', max_length=6)
        'S*NF*K'

        >>> pe.encode('Smith')
        'SM*D'
        >>> pe.encode('Waters')
        'W*DRS'
        >>> pe.encode('James')
        'J*MS'
        >>> pe.encode('Schmidt')
        'SM*D'
        >>> pe.encode('Ashcroft')
        '*SKRFD'
        >>> pe.encode('Smith', max_length=6)
        'SM*D  '
        >>> pe.encode('Waters', max_length=6)
        'W*D*RS'
        >>> pe.encode('James', max_length=6)
        'J*M*S '
        >>> pe.encode('Schmidt', max_length=6)
        'SM*D  '
        >>> pe.encode('Ashcroft', max_length=6)
        '*SKRFD'

        """
        # uppercase, normalize, decompose, and filter non-A-Z out
        word = unicode_normalize('NFKD', text_type(word.upper()))
        word = word.replace('ß', 'SS')
        word = ''.join(c for c in word if c in self._uc_set)

        # Rule 1 (FL2)
        if word[:3] in {'MCG', 'MAG', 'MAC'}:
            word = 'MK' + word[3:]
        elif word[:2] == 'MC':
            word = 'MK' + word[2:]

        # Rule 2 (FL3)
        pos = len(word) - 2
        while pos > -1:
            if word[pos : pos + 2] in {
                'DT',
                'LD',
                'ND',
                'NT',
                'RC',
                'RD',
                'RT',
                'SC',
                'SK',
                'ST',
            }:
                word = word[: pos + 1] + word[pos + 2 :]
                pos += 1
            pos -= 1

        # Rule 3 (FL4)
        # Although the rule indicates "after the first letter", the test cases
        # make it clear that these apply to the first letter also.
        word = word.replace('X', 'KS')
        word = word.replace('CE', 'SE')
        word = word.replace('CI', 'SI')
        word = word.replace('CY', 'SI')

        # not in the rule set, but they seem to have intended it
        word = word.replace('TCH', 'CH')

        pos = word.find('CH', 1)
        while pos != -1:
            if word[pos - 1 : pos] not in self._uc_vy_set:
                word = word[:pos] + 'S' + word[pos + 1 :]
            pos = word.find('CH', pos + 1)

        word = word.replace('C', 'K')
        word = word.replace('Z', 'S')

        word = word.replace('WR', 'R')
        word = word.replace('DG', 'G')
        word = word.replace('QU', 'K')
        word = word.replace('T', 'D')
        word = word.replace('PH', 'F')

        # Rule 4 (FL5)
        # Although the rule indicates "after the first letter", the test cases
        # make it clear that these apply to the first letter also.
        pos = word.find('K', 0)
        while pos != -1:
            if pos > 1 and word[pos - 1 : pos] not in self._uc_vy_set | {
                'L',
                'N',
                'R',
            }:
                word = word[: pos - 1] + word[pos:]
                pos -= 1
            pos = word.find('K', pos + 1)

        # Rule FL6
        if max_length > 0 and word[-1:] == 'E':
            word = word[:-1]

        # Rule 5 (FL7)
        word = self._delete_consecutive_repeats(word)

        # Rule 6 (FL8)
        if word[:2] == 'PF':
            word = word[1:]
        if word[-2:] == 'PF':
            word = word[:-1]
        elif word[-2:] == 'GH':
            if word[-3:-2] in self._uc_vy_set:
                word = word[:-2] + 'F'
            else:
                word = word[:-2] + 'G'
        word = word.replace('GH', '')

        # Rule FL9
        if max_length > 0:
            word = word.replace('V', 'F')

        # Rules 7-9 (FL10-FL12)
        first = 1 + (1 if max_length > 0 else 0)
        code = ''
        for pos, char in enumerate(word):
            if char in self._uc_vy_set:
                if first or keep_vowels:
                    code += vowel_char
                    first -= 1
            elif pos > 0 and char in {'W', 'H'}:
                continue
            else:
                code += char

        if max_length > 0:
            # Rule FL13
            if len(code) > max_length and code[-1:] == 'S':
                code = code[:-1]
            if keep_vowels:
                code = code[:max_length]
            else:
                # Rule FL14
                code = code[: max_length + 2]
                # Rule FL15
                while len(code) > max_length:
                    vowels = len(code) - max_length
                    excess = vowels - 1
                    word = code
                    code = ''
                    for char in word:
                        if char == vowel_char:
                            if vowels:
                                code += char
                                vowels -= 1
                        else:
                            code += char
                    code = code[: max_length + excess]

            # Rule FL16
            code += ' ' * (max_length - len(code))

        return code
```
| 247 | |||
| 323 |
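For a quick sanity check, the doctest examples above can be exercised directly. This assumes the Dolby class is importable from abydos.phonetic, as in current abydos releases:

```python
# Exercise a few of the documented examples, including fixed-length padding.
from abydos.phonetic import Dolby

pe = Dolby()
assert pe.encode('Hansen') == 'H*NSN'
assert pe.encode('Hansen', max_length=6) == 'H*NS*N'
assert pe.encode('Aagaard', max_length=6) == '*G*R  '  # padded to 6 characters
```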