| Metric | Value |
| --- | --- |
| Conditions | 14 |
| Total Lines | 90 |
| Code Lines | 47 |
| Lines | 0 |
| Ratio | 0 % |
| Tests | 42 |
| CRAP Score | 14 |
| Changes | 0 |
Small methods make your code easier to understand, especially when combined with a good name. And if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, that is usually a sign that you should extract the commented part into a new method and use the comment as a starting point for its name.
Commonly applied refactorings include Extract Method. If many parameters or temporary variables are present, refactorings that target exactly those symptoms, such as Introduce Parameter Object and Replace Temp with Query, are also worth considering.
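As a hedged illustration of Extract Method (summarize and total_price_including_tax are invented names, not code from abydos), the comment itself can supply the name of the extracted method:

```python
# Before: an inline, commented block inside a longer method.
def summarize(orders):
    # compute the total price including tax
    total = 0.0
    for order in orders:
        total += order['price'] * (1 + order['tax_rate'])
    return 'Total due: {:.2f}'.format(total)


# After Extract Method: the comment becomes the new method's name.
def total_price_including_tax(orders):
    return sum(order['price'] * (1 + order['tax_rate']) for order in orders)


def summarize(orders):
    return 'Total due: {:.2f}'.format(total_price_including_tax(orders))
```

After the extraction the comment is no longer needed; the method name carries the same information.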
Complex methods like abydos.distance._meta_levenshtein.MetaLevenshtein.dist_abs() often do several different things at once. To break such a method down, we need to identify a cohesive component within it. A common approach is to look for statements and temporary variables that share the same prefixes or suffixes, or that operate on the same data.
Once you have determined which pieces belong together, you can apply the Extract Method refactoring. If the extracted code and its data form a cohesive unit of their own, Extract Class is also a candidate; and if that unit makes sense as a subclass, Extract Subclass is another, often faster, option.
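A minimal sketch of the class-level variant, assuming an invented Customer class whose billing_ fields share a prefix (none of this is abydos code):

```python
# Before: the billing_* fields and method share a prefix, hinting at a
# cohesive component hiding inside Customer.
class Customer:
    def __init__(self, name, billing_street, billing_city):
        self.name = name
        self.billing_street = billing_street
        self.billing_city = billing_city

    def billing_label(self):
        return '{}, {}'.format(self.billing_street, self.billing_city)


# After Extract Class: the shared-prefix fields move into their own class.
class BillingAddress:
    def __init__(self, street, city):
        self.street = street
        self.city = city

    def label(self):
        return '{}, {}'.format(self.street, self.city)


class ExtractedCustomer:
    def __init__(self, name, billing_address):
        self.name = name
        self.billing_address = billing_address
```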
Source of MetaLevenshtein.dist_abs():

```python
# Copyright 2019-2020 by Christopher C. Little.
# ... (the module's remaining header and imports are omitted from this report)

def dist_abs(self, src: str, tar: str) -> float:
    """Return the Meta-Levenshtein distance of two strings.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison

    Returns
    -------
    float
        Meta-Levenshtein distance

    Examples
    --------
    >>> cmp = MetaLevenshtein()
    >>> cmp.dist_abs('cat', 'hat')
    0.6155602628882225
    >>> cmp.dist_abs('Niall', 'Neil')
    2.538900657220556
    >>> cmp.dist_abs('aluminum', 'Catalan')
    6.940747163450747
    >>> cmp.dist_abs('ATCG', 'TAGC')
    3.2311205257764453


    .. versionadded:: 0.4.0

    """
    if src == tar:
        return 0.0
    if not src:
        return float(len(tar))
    if not tar:
        return float(len(src))

    src_tok = self.params['tokenizer'].tokenize(src)
    src_ordered = src_tok.get_list()
    src_tok = src_tok.get_counter()

    tar_tok = self.params['tokenizer'].tokenize(tar)
    tar_ordered = tar_tok.get_list()
    tar_tok = tar_tok.get_counter()

    if self._corpus is None:
        corpus = UnigramCorpus(word_tokenizer=self.params['tokenizer'])
        corpus.add_document(src)
        corpus.add_document(tar)
    else:
        corpus = self._corpus

    dists = defaultdict(float)  # type: DefaultDict[Tuple[str, str], float]
    s_toks = set(src_tok.keys())
    t_toks = set(tar_tok.keys())
    for s_tok in s_toks:
        for t_tok in t_toks:
            dists[(s_tok, t_tok)] = (
                self._metric.dist(s_tok, t_tok) if s_tok != t_tok else 0
            )

    vws_dict = {}
    vwt_dict = {}
    for token in src_tok.keys():
        vws_dict[token] = log1p(src_tok[token]) * corpus.idf(token)
    for token in tar_tok.keys():
        vwt_dict[token] = log1p(tar_tok[token]) * corpus.idf(token)

    def _dist(s_tok: str, t_tok: str) -> float:
        return dists[(s_tok, t_tok)] * vws_dict[s_tok] * vwt_dict[t_tok]

    d_mat = np_zeros(
        (len(src_ordered) + 1, len(tar_ordered) + 1), dtype=np_float
    )
    for i in range(len(src_ordered) + 1):
        d_mat[i, 0] = i
    for j in range(len(tar_ordered) + 1):
        d_mat[0, j] = j

    for i in range(len(src_ordered)):
        for j in range(len(tar_ordered)):
            d_mat[i + 1, j + 1] = min(
                d_mat[i + 1, j] + 1,  # ins
                d_mat[i, j + 1] + 1,  # del
                d_mat[i, j]
                + _dist(src_ordered[i], tar_ordered[j]),  # sub/==
            )

    return cast(float, d_mat[len(src_ordered), len(tar_ordered)])
```
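Applying the Extract Method advice to this report's subject, the Levenshtein dynamic program at the end of dist_abs() is one natural seam. The sketch below is only an illustration under assumptions: _weighted_levenshtein is an invented name, it is not part of abydos, and it simply mirrors the DP shown above with the token-distance lookup passed in as a callback.

```python
# Hypothetical helper (not part of abydos): the DP core of dist_abs(),
# with the TF-IDF-weighted token distance supplied as a callback.
from typing import Callable, Sequence

import numpy as np


def _weighted_levenshtein(
    src_ordered: Sequence[str],
    tar_ordered: Sequence[str],
    token_dist: Callable[[str, str], float],
) -> float:
    """Token-level Levenshtein distance with a custom substitution cost."""
    d_mat = np.zeros((len(src_ordered) + 1, len(tar_ordered) + 1), dtype=float)
    for i in range(len(src_ordered) + 1):
        d_mat[i, 0] = i  # cost of deleting i source tokens
    for j in range(len(tar_ordered) + 1):
        d_mat[0, j] = j  # cost of inserting j target tokens
    for i in range(len(src_ordered)):
        for j in range(len(tar_ordered)):
            d_mat[i + 1, j + 1] = min(
                d_mat[i + 1, j] + 1,  # insertion
                d_mat[i, j + 1] + 1,  # deletion
                d_mat[i, j]
                + token_dist(src_ordered[i], tar_ordered[j]),  # substitution / match
            )
    return float(d_mat[len(src_ordered), len(tar_ordered)])
```

With a helper along these lines, dist_abs() would be left with tokenization, corpus handling, and building the token_dist callback, each of which could be extracted and named in the same way.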