| Metric | Value |
| --- | --- |
| Total Complexity | 2 |
| Total Lines | 29 |
| Duplicated Lines | 0 % |
| Changes | 0 |
| 1 | """EstNLTK analyzer for Annif which uses EstNLTK for lemmatization""" |
||
| 2 | |||
| 3 | from __future__ import annotations |
||
| 4 | |||
| 5 | import annif.util |
||
| 6 | from annif.exception import OperationFailedException |
||
| 7 | |||
| 8 | from . import analyzer |
||
| 9 | |||
| 10 | |||
class EstNLTKAnalyzer(analyzer.Analyzer):
    """Analyzer that lemmatizes Estonian text with the EstNLTK library."""

    name = "estnltk"

    def __init__(self, param: str, **kwargs) -> None:
        # Keep the raw parameter string; the base class consumes the
        # remaining keyword arguments.
        self.param = param
        super().__init__(**kwargs)

    def tokenize_words(self, text: str, filter: bool = True) -> list[str]:
        """Tokenize *text* into a list of lemmas.

        When *filter* is True, only lemmas accepted by
        ``self.is_valid_token`` are returned.
        """
        # Imported lazily so the module can be loaded even when the
        # optional EstNLTK dependency is not installed.
        import estnltk

        doc = estnltk.Text(text.strip())
        doc.tag_layer()
        # Each entry of doc.lemma is a list of candidate lemmas for one
        # word; keep only the first candidate of each.
        first_choices = [candidates[0] for candidates in doc.lemma]
        if not filter:
            return first_choices
        return [word for word in first_choices if self.is_valid_token(word)]
||
| 29 |