| Metric | Value |
| --- | --- |
| Conditions | 29 |
| Total Lines | 106 |
| Code Lines | 69 |
| Lines | 0 |
| Ratio | 0 % |
| Tests | 56 |
| CRAP Score | 29 |
| Changes | 0 |
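For reference, the CRAP score relates a routine's cyclomatic complexity to its test coverage. The sketch below assumes the standard Savoia/Copeland formula and uses the 29 conditions above as a stand-in for complexity; the helper function name is ours, not part of any report tooling:

```python
def crap_score(complexity: int, coverage: float) -> float:
    """CRAP(m) = comp(m)**2 * (1 - cov(m))**3 + comp(m), with coverage in [0, 1]."""
    return complexity ** 2 * (1.0 - coverage) ** 3 + complexity


print(crap_score(29, 1.0))  # 29.0 -- at full coverage the score equals the complexity
print(crap_score(29, 0.0))  # 870.0 -- the same method with no covering tests
```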
Small methods make your code easier to understand, especially when combined with a good name; and when a method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments inside a method's body, that is usually a sign that the commented part should be extracted into a new method, with the comment serving as a starting point for naming it.
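A minimal sketch of that comment-to-method step (Extract Method), using invented names; nothing below comes from the abydos code under review:

```python
import datetime


def is_december() -> bool:
    return datetime.date.today().month == 12


# Before: a comment labels a chunk of work buried inside a longer method.
def invoice_total_before(amounts):
    total = sum(amounts)
    # apply seasonal discount
    if is_december():
        total *= 0.9
    return total


# After: Extract Method turns the comment into a method name.
def apply_seasonal_discount(total):
    return total * 0.9 if is_december() else total


def invoice_total(amounts):
    return apply_seasonal_discount(sum(amounts))


print(invoice_total([10.0, 15.0]))
```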
Commonly applied refactorings include:

- Extract Method

If many parameters or temporary variables are present:

- Replace Temp with Query
- Introduce Parameter Object (see the sketch after this list)
- Preserve Whole Object
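A sketch of Introduce Parameter Object with hypothetical names (none of the functions or classes below exist in abydos):

```python
from dataclasses import dataclass


# Before: several related parameters always travel together.
def shipping_cost_before(weight_kg, length_cm, width_cm, height_cm, express):
    volume = length_cm * width_cm * height_cm
    return (0.5 * weight_kg + 0.001 * volume) * (2.0 if express else 1.0)


# After: Introduce Parameter Object groups them into a single value object.
@dataclass
class Parcel:
    weight_kg: float
    length_cm: float
    width_cm: float
    height_cm: float

    @property
    def volume_cm3(self) -> float:
        return self.length_cm * self.width_cm * self.height_cm


def shipping_cost(parcel: Parcel, express: bool) -> float:
    factor = 2.0 if express else 1.0
    return (0.5 * parcel.weight_kg + 0.001 * parcel.volume_cm3) * factor


print(shipping_cost(Parcel(2.0, 30, 20, 10), express=True))
```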
Complex code such as abydos.distance._discounted_levenshtein.DiscountedLevenshtein._alignment_matrix() often does a lot of different things. To break the surrounding class down, we need to identify a cohesive component within it. A common way to find such a component is to look for fields and methods that share the same prefixes or suffixes.
Once you have determined which fields belong together, you can apply the Extract Class refactoring. If the component makes sense as a subclass, Extract Subclass is also a candidate, and is often faster.
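A before/after sketch of Extract Class with invented names; the `billing_*` fields are purely illustrative and unrelated to DiscountedLevenshtein:

```python
from dataclasses import dataclass


# Before: the 'billing_' prefix hints at a cohesive component hiding inside.
@dataclass
class CustomerBefore:
    name: str
    billing_street: str
    billing_city: str
    billing_postcode: str


# After: Extract Class moves the prefixed fields into their own type.
@dataclass
class BillingAddress:
    street: str
    city: str
    postcode: str


@dataclass
class Customer:
    name: str
    billing: BillingAddress


customer = Customer('Ada', BillingAddress('1 Main St', 'Springfield', '12345'))
print(customer.billing.city)
```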
```python
# Copyright 2019-2020 by Christopher C. Little.

def _alignment_matrix(
    self, src: str, tar: str, backtrace: bool = True
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
    """Return the Levenshtein alignment matrix.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    backtrace : bool
        Return the backtrace matrix as well

    Returns
    -------
    numpy.ndarray or tuple(numpy.ndarray, numpy.ndarray)
        The alignment matrix and (optionally) the backtrace matrix


    .. versionadded:: 0.4.1

    """
    src_len = len(src)
    tar_len = len(tar)

    if self._discount_from == 'coda':
        discount_from = [0, 0]

        src_voc = src.lower()
        for i in range(len(src_voc)):
            if src_voc[i] in self._vowels:
                discount_from[0] = i
                break
        for i in range(discount_from[0], len(src_voc)):
            if src_voc[i] not in self._vowels:
                discount_from[0] = i
                break
        else:
            discount_from[0] += 1

        tar_voc = tar.lower()
        for i in range(len(tar_voc)):
            if tar_voc[i] in self._vowels:
                discount_from[1] = i
                break
        for i in range(discount_from[1], len(tar_voc)):
            if tar_voc[i] not in self._vowels:
                discount_from[1] = i
                break
        else:
            discount_from[1] += 1

    elif isinstance(self._discount_from, int):
        discount_from = [self._discount_from, self._discount_from]
    else:
        discount_from = [1, 1]

    d_mat = np.zeros((src_len + 1, tar_len + 1), dtype=np.float_)
    if backtrace:
        trace_mat = np.zeros((src_len + 1, tar_len + 1), dtype=np.int8)
    for i in range(1, src_len + 1):
        d_mat[i, 0] = d_mat[i - 1, 0] + self._discount_func(
            max(0, i - discount_from[0])
        )
        if backtrace:
            trace_mat[i, 0] = 1
    for j in range(1, tar_len + 1):
        d_mat[0, j] = d_mat[0, j - 1] + self._discount_func(
            max(0, j - discount_from[1])
        )
        if backtrace:
            trace_mat[0, j] = 0
    for i in range(src_len):
        i_extend = self._discount_func(max(0, i - discount_from[0]))
        for j in range(tar_len):
            traces = ((i + 1, j), (i, j + 1), (i, j))
            cost = min(
                i_extend, self._discount_func(max(0, j - discount_from[1]))
            )
            opts = (
                d_mat[traces[0]] + cost,  # ins
                d_mat[traces[1]] + cost,  # del
                d_mat[traces[2]]
                + (cost if src[i] != tar[j] else 0),  # sub/==
            )
            d_mat[i + 1, j + 1] = min(opts)
            if backtrace:
                trace_mat[i + 1, j + 1] = int(np.argmin(opts))

            if self._mode == 'osa':
                if (
                    i + 1 > 1
                    and j + 1 > 1
                    and src[i] == tar[j - 1]
                    and src[i - 1] == tar[j]
                ):
                    # transposition
                    d_mat[i + 1, j + 1] = min(
                        d_mat[i + 1, j + 1], d_mat[i - 1, j - 1] + cost
                    )
                    if backtrace:
                        trace_mat[i + 1, j + 1] = 2
    if backtrace:
        return d_mat, trace_mat
    return d_mat
```
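For context, a usage sketch of the class this method belongs to. It assumes the `dist_abs`/`dist` interface shared by abydos distance classes and that the constructor accepts `mode` and `discount_from` options corresponding to the `self._mode` and `self._discount_from` attributes used above:

```python
from abydos.distance import DiscountedLevenshtein

cmp = DiscountedLevenshtein()
print(cmp.dist_abs('Niall', 'Neil'))  # discounted absolute edit distance
print(cmp.dist('Niall', 'Neil'))      # normalized to [0, 1]

# The 'osa' branch in the inner loop is only taken when the (assumed)
# mode option enables adjacent transpositions.
cmp_osa = DiscountedLevenshtein(mode='osa')
print(cmp_osa.dist_abs('caht', 'chat'))
```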