# -*- coding: utf-8 -*-
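"""Elasticsearch mapping for the geocompletion index.

Declares the Geocomplete document type and the custom analyzers used to
autocomplete French town names and postal codes.
"""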

import elasticsearch_dsl as es

from pyjobsweb.lib.elasticsearch_ import compute_index_name


class Geocomplete(es.DocType):
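    """A single autocompletion entry of the 'geocomplete' index.

    Pairs a town name and postal code with its geolocation and a weight.
    """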

    class Meta:
        index = 'geocomplete'
        doc_type = 'geoloc-entry'
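
    # Strip elided French articles (l', d', qu', ...) so that, e.g.,
    # "l'hay" and "hay" produce the same tokens.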
    french_elision = es.token_filter(
        'french_elision',
        type='elision',
        articles_case=True,
        articles=[
            'l', 'm', 't', 'qu', 'n', 's',
            'j', 'd', 'c', 'jusqu', 'quoiqu',
            'lorsqu', 'puisqu'
        ]
    )
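
    # Expand each token into front edge n-grams (p, pa, par, ...) at index
    # time, so a partial user input can match a full town name.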
    geocompletion_ngram_filter = es.token_filter(
        'geocompletion_ngram',
        type='edgeNGram',
        min_gram=1,
        max_gram=50,
        side='front'
    )
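
    # Join multi-word town names with dashes ('saint denis' ->
    # 'saint-denis') so both spellings normalize to a single form.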
    town_filter = es.token_filter(
        'town_filter',
        type='pattern_replace',
        pattern=' ',
        replacement='-'
    )
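
    # Tokenize on '@' only, so whole town names (spaces included) reach
    # the filter chain as single tokens.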
    geocompletion_index_tokenizer = es.tokenizer(
        'geocompletion_index_tokenizer',
        type='pattern',
        pattern='@'
    )
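
    # Index-time chain: lowercase, fold accents, strip elisions, dash-join
    # names, then expand into edge n-grams.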
    geocompletion_index_analyzer = es.analyzer(
        'geocompletion_index_analyzer',
        type='custom',
        tokenizer=geocompletion_index_tokenizer,
        filter=[
            'lowercase',
            'asciifolding',
            french_elision,
            town_filter,
            geocompletion_ngram_filter
        ]
    )
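
    # Search-time chain mirrors indexing but skips the n-gram expansion:
    # the user's partial input is matched as-is against the indexed grams.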
    geocompletion_search_analyzer = es.analyzer(
        'geocompletion_search_analyzer',
        type='custom',
        tokenizer=geocompletion_index_tokenizer,
        filter=[
            'lowercase',
            'asciifolding',
            town_filter,
            french_elision
        ]
    )
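
    # Analyzed for autocompletion; the 'raw' sub-field keeps the exact
    # value (e.g. for sorting or exact matching).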
    name = es.String(
        index='analyzed',
        analyzer=geocompletion_index_analyzer,
        search_analyzer=geocompletion_search_analyzer,
        fields=dict(raw=es.String(index='not_analyzed'))
    )

    complement = es.String(index='not_analyzed')
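
    # French postal codes are five digits, hence max_gram=5.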
    postal_code_ngram_filter = es.token_filter(
        'postal_code_ngram',
        type='edgeNGram',
        min_gram=1,
        max_gram=5,
        side='front'
    )

    postal_code_index_analyzer = es.analyzer(
        'postal_code_index_analyzer',
        type='custom',
        tokenizer='standard',
        filter=[
            postal_code_ngram_filter
        ]
    )

    postal_code_search_analyzer = es.analyzer(
        'postal_code_search_analyzer',
        type='custom',
        tokenizer='standard'
    )

    postal_code = es.String(
        index='analyzed',
        analyzer=postal_code_index_analyzer,
        search_analyzer=postal_code_search_analyzer,
        fields=dict(raw=es.String(index='not_analyzed'))
    )

    geolocation = es.GeoPoint()

    weight = es.Float()
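
    # On instantiation, swap the base index name for the concrete index
    # computed by compute_index_name() (presumably a timestamped index
    # behind the 'geocomplete' alias).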
    def __init__(self, meta=None, **kwargs):
        super(Geocomplete, self).__init__(meta, **kwargs)

        computed_index = compute_index_name(self.index)
        if self.index in computed_index:
            self._doc_type.index = computed_index

    @property
    def index(self):
        return self._doc_type.index

    @property
    def doc_type(self):
        return self._doc_type.name
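

# Minimal usage sketch (an assumption, not part of this module's API): with
# a connection registered via elasticsearch_dsl.connections and the index
# populated, a lookup for the made-up user input 'par' could read:
#
#     from elasticsearch_dsl import Q
#
#     suggestions = Geocomplete.search() \
#         .query(Q('match', name='par')) \
#         .sort('-weight') \
#         .execute()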