Completed
Push — dev-0.5.2 ( 071035...16c7f4 )
by Felipe A.
59s · created

NodeCacheManager.app()   A

Complexity
  Conditions    2

Size
  Total Lines   6

Duplication
  Lines         0
  Ratio         0 %

Importance
  Changes       1
  Bugs          0
  Features      0

Metric   Value
cc       2
dl       0
loc      6
rs       9.4285
c        1
b        0
f        0
import functools
import threading

from watchdog.observers import Observer as WatchdogObserver
from watchdog.events import FileSystemEventHandler as WatchdogEventHandler


class HashTuple(tuple):
    '''
    Tuple object with cached __hash__ and __repr__.
    '''

    def __hash__(self):
        '''
        Self-caching hash method.

        x.__hash__() <==> hash(x)
        '''
        # Special method lookup goes through the type, so the cached value is
        # stored as an instance attribute instead of shadowing __hash__ with
        # a lambda (which hash() would never call).
        try:
            return self._hash
        except AttributeError:
            self._hash = res = tuple.__hash__(self)
            return res

    def __repr__(self):
        '''
        Self-caching representation method.

        x.__repr__() <==> repr(x)
        '''
        try:
            return self._repr
        except AttributeError:
            self._repr = res = (
                '<{0.__class__.__module__}.{0.__class__.__name__} {1} {2}>'
                ).format(self, hash(self), tuple(self))
            return res


class LRUCache(object):
    '''
    Cache object evicting least recently used objects (LRU) when maximum cache
    size is reached.

    It's way slower than python's :func:`functools.lru_cache` but provides an
    object-oriented implementation and manual cache eviction.
    '''
    NOT_FOUND = object()

    lock_class = threading.RLock

    @property
    def full(self):
        '''
        :returns: True if size reached maxsize, False otherwise
        :rtype: bool
        '''
        return self._full

    @property
    def size(self):
        '''
        :returns: current size of result cache
        :rtype: int
        '''
        return len(self._cache)

    @property
    def maxsize(self):
        '''
        :returns: maximum size of result cache
        :rtype: int
        '''
        return self._maxsize

    def __init__(self, maxsize=1024):
        '''
        :param maxsize: optional maximum cache size (defaults to 1024)
        :type maxsize: int
        '''
        self._cache = {}
        self._full = False
        self._maxsize = maxsize
        self._lock = self.lock_class()
        # Circular doubly-linked list of [PREV, NEXT, KEY, RESULT] links,
        # oldest entry right after root, newest right before it.
        self._root = root = []
        root[:] = [root, root, None, None]

    def _bumpitem(self, key):
        # Move the link for key to the most-recently-used position.
        PREV, NEXT = 0, 1
        root = self._root
        link = self._cache[key]
        # Unlink from its current position.
        link_prev, link_next = link[PREV], link[NEXT]
        link_prev[NEXT] = link_next
        link_next[PREV] = link_prev
        # Relink just before root.
        last = root[PREV]
        last[NEXT] = root[PREV] = link
        link[PREV] = last
        link[NEXT] = root

    def _getitem(self, key, default=NOT_FOUND):
        RESULT = 3
        link = self._cache.get(key)
        if link is not None:
            self._bumpitem(key)
            return link[RESULT]
        if default is self.NOT_FOUND:
            raise KeyError(key)
        return default

    def _setitem(self, key, value):
        PREV, NEXT, KEY, RESULT = 0, 1, 2, 3
        cache = self._cache
        root = self._root
        if key in cache:
            self._bumpitem(key)
            root[PREV][RESULT] = value
        elif self._full:
            # Reuse the old root for the new entry and turn the oldest link
            # into the new (empty) root.
            oldroot = root
            oldroot[KEY] = key
            oldroot[RESULT] = value
            root = oldroot[NEXT]
            oldkey = root[KEY]
            root[KEY] = root[RESULT] = None
            del cache[oldkey]
            cache[key] = oldroot
            self._root = root
        else:
            last = root[PREV]
            link = [last, root, key, value]
            last[NEXT] = root[PREV] = cache[key] = link
            self._full = (len(cache) >= self._maxsize)
        return value

    def _popitem(self, key, default=NOT_FOUND):
        PREV, NEXT = 0, 1
        link = self._cache.pop(key, None)
        if link is None:
            if default is self.NOT_FOUND:
                raise KeyError(key)
            return default
        link_prev, link_next, key, result = link
        link_prev[NEXT] = link_next
        link_next[PREV] = link_prev
        self._full = False
        return result

    def __getitem__(self, key):
        with self._lock:
            return self._getitem(key)

    def __setitem__(self, key, value):
        with self._lock:
            return self._setitem(key, value)

    def __delitem__(self, key):
        with self._lock:
            self._popitem(key)

    def __contains__(self, key):
        return key in self._cache

    def pop(self, key, default=NOT_FOUND):
        with self._lock:
            return self._popitem(key, default)

    def get(self, key, default=None):
        with self._lock:
            return self._getitem(key, default)

    def clear(self):
        with self._lock:
            self._cache.clear()
            self._root = root = []
            root[:] = [root, root, None, None]
            self._full = False

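# Usage sketch (illustrative only, not part of the pushed module): LRUCache
# behaves like a bounded mapping that evicts its least recently used entry
# once maxsize is reached, while touching a key protects it from eviction.
#
#     >>> cache = LRUCache(maxsize=2)
#     >>> cache['a'] = 1
#     >>> cache['b'] = 2
#     >>> cache['a']              # 'a' becomes the most recently used key
#     1
#     >>> cache['c'] = 3          # evicts 'b', the least recently used key
#     >>> 'b' in cache
#     False
#     >>> cache.get('b', 'gone')
#     'gone'
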
class MemoizeManager(object):
    cache_class = LRUCache
    hashtuple_class = HashTuple
    NOT_FOUND = object()

    def __init__(self, fnc, maxsize=1024):
        self._fnc = fnc
        self._hits = 0
        self._misses = 0
        self._cache = self.cache_class(maxsize)

    @property
    def hits(self):
        '''
        :returns: number of times a cached result was returned
        :rtype: int
        '''
        return self._hits

    @property
    def misses(self):
        '''
        :returns: number of times the result was not yet cached
        :rtype: int
        '''
        return self._misses

    @classmethod
    def make_key(cls, args, kwargs, tuple=tuple):
        '''
        Make a cache key from typed positional and keyword arguments.

        The key is kept as flat as possible rather than built as a nested
        structure, so it takes less memory and hashes faster.
        '''
        key_args = tuple([(arg, arg.__class__) for arg in args])
        key_kwargs = tuple([
            (key, value, value.__class__)
            for key, value in sorted(kwargs.items())
            ])
        return cls.hashtuple_class((key_args, key_kwargs))

    def __call__(self, *args, **kwargs):
        NOT_FOUND = self.NOT_FOUND
        key = self.make_key(args, kwargs)
        result = self._cache.get(key, NOT_FOUND)
        if result is not NOT_FOUND:
            self._hits += 1
            return result
        self._misses += 1
        self._cache[key] = result = self._fnc(*args, **kwargs)
        return result

    def pop(self, *args, **kwargs):
        key = self.make_key(args, kwargs)
        return self._cache.pop(key, None)

    def clear(self):
        '''
        Clear both cache and statistics.
        '''
        self._hits = 0
        self._misses = 0
        self._cache.clear()

    @classmethod
    def wrap(cls, fnc, maxsize=1024):
        '''
        Decorate given function with a :class:`MemoizeManager` instance.

        :param fnc: function to decorate
        :type fnc: collections.abc.Callable
        :param maxsize: optional maximum cache size (defaults to 1024)
        :type maxsize: int
        :returns: wrapped cached function
        :rtype: function
        '''
        manager = cls(fnc, maxsize)

        @functools.wraps(fnc)
        def wrapped(*args, **kwargs):
            return manager(*args, **kwargs)
        wrapped.cache = manager
        return wrapped

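# Usage sketch (illustrative only, not part of the pushed module):
# MemoizeManager wraps a callable and tracks cache hits and misses; repeated
# calls with the same arguments reuse the cached result instead of calling
# the wrapped function again.
#
#     >>> calls = []
#     >>> manager = MemoizeManager(lambda x: calls.append(x) or x * 2)
#     >>> manager(3), manager(3), manager(4)
#     (6, 6, 8)
#     >>> len(calls), manager.hits, manager.misses
#     (2, 1, 2)
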
class NodeCacheExpirator(WatchdogEventHandler):
    # Filesystem event handler stub; node cache expiration is not
    # implemented yet.
    pass


class NodeCacheManager(object):
    cache_class = LRUCache
    NOT_FOUND = object()

    @property
    def app(self):
        return self._app

    @app.setter
    def app(self, app):
        if app != self._app:
            # Drop watches belonging to the previous application before
            # switching; scheduling watches for the new app (through a
            # NodeCacheExpirator) is not wired up yet.
            self._observer.unschedule_all()
            self._app = app

    def __init__(self, app=None):
        self._app = None
        self._cache = self.cache_class()
        self._observer = WatchdogObserver()
        self.app = app

    def __del__(self):
        self._observer.unschedule_all()
        self._observer.stop()
        self._observer.join()


def cached(func_or_size):
    '''
    Decorator caching function results, usable both bare (``@cached``) and
    with an explicit cache size (``@cached(256)``).
    '''
    def inner(func):
        size = 1024 if func is func_or_size else func_or_size
        return MemoizeManager.wrap(func, size)
    return inner(func_or_size) if callable(func_or_size) else inner
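# Usage sketch (illustrative only, not part of the pushed module): cached()
# works both bare and with an explicit size, and the wrapped function exposes
# its MemoizeManager through the `cache` attribute set by wrap().
#
#     >>> @cached
#     ... def double(x):
#     ...     return x * 2
#     >>> double(21), double(21)
#     (42, 42)
#     >>> double.cache.hits, double.cache.misses
#     (1, 1)
#
#     >>> @cached(16)             # explicit maximum cache size
#     ... def triple(x):
#     ...     return x * 3
#     >>> triple(2)
#     6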