@@ -1,13 +1,13 @@
+from functools import lru_cache
 from functools import wraps
 from inspect import FullArgSpec
 from inspect import getfullargspec
 from inspect import isgeneratorfunction
-from threading import RLock
 from typing import Any  # noqa:F401
 from typing import Callable  # noqa:F401
 from typing import Optional  # noqa:F401
 from typing import Type  # noqa:F401
-from typing import TypeVar  # noqa:F401
+from typing import TypeVar
 
 
 miss = object()
@@ -17,78 +17,14 @@
 M = Callable[[Any, T], Any]
 
 
-class LFUCache(dict):
-    """Simple LFU cache implementation.
+def cached(maxsize: int = 256) -> Callable[[Callable], Callable]:
+    def _(f: Callable) -> Callable:
+        return lru_cache(maxsize)(f)
 
-    This cache is designed for memoizing functions with a single hashable
-    argument. The eviction policy is LFU, i.e. the least frequently used values
-    are evicted when the cache is full. The amortized cost of shrinking the
-    cache when it grows beyond the requested size is O(log(size)).
-    """
-
-    def __init__(self, maxsize=256):
-        # type: (int) -> None
-        self.maxsize = maxsize
-        self.lock = RLock()
-        self.count_lock = RLock()
-
-    def get(self, key, f):  # type: ignore[override]
-        # type: (T, F) -> Any
-        """Get a value from the cache.
-
-        If the value with the given key is not in the cache, the expensive
-        function ``f`` is called on the key to generate it. The return value is
-        then stored in the cache and returned to the caller.
-        """
-
-        _ = super(LFUCache, self).get(key, miss)
-        if _ is not miss:
-            with self.count_lock:
-                value, count = _
-                self[key] = (value, count + 1)
-            return value
-
-        with self.lock:
-            _ = super(LFUCache, self).get(key, miss)
-            if _ is not miss:
-                with self.count_lock:
-                    value, count = _
-                    self[key] = (value, count + 1)
-                return value
-
-            # Cache miss: ensure that we have enough space in the cache
-            # by evicting half of the entries when we go over the threshold
-            while len(self) >= self.maxsize:
-                for h in sorted(self, key=lambda h: self[h][1])[: self.maxsize >> 1]:
-                    del self[h]
-
-            value = f(key)
-
-            self[key] = (value, 1)
-
-            return value
-
-
-def cached(maxsize=256):
-    # type: (int) -> Callable[[F], F]
-    """Decorator for memoizing functions of a single argument (LFU policy)."""
-
-    def cached_wrapper(f):
-        # type: (F) -> F
-        cache = LFUCache(maxsize)
-
-        def cached_f(key):
-            # type: (T) -> Any
-            return cache.get(key, f)
-
-        cached_f.invalidate = cache.clear  # type: ignore[attr-defined]
-
-        return cached_f
-
-    return cached_wrapper
+    return _
 
 
-class CachedMethodDescriptor(object):
+class CachedMethodDescriptor:
     def __init__(self, method, maxsize):
        # type: (M, int) -> None
        self._method = method
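For reference, a minimal usage sketch of the new lru_cache-backed decorator. The decorator body is copied from the diff above; the parse_tag function and the call pattern are hypothetical, added only for illustration:

    from functools import lru_cache
    from typing import Callable


    def cached(maxsize: int = 256) -> Callable[[Callable], Callable]:
        # As introduced in this diff: delegate storage and LRU eviction
        # to functools.lru_cache instead of the hand-rolled LFUCache.
        def _(f: Callable) -> Callable:
            return lru_cache(maxsize)(f)

        return _


    @cached(maxsize=2)
    def parse_tag(raw):
        # Hypothetical expensive single-argument function.
        print("computing", raw)
        return raw.strip().lower()


    parse_tag(" Web ")  # first call runs the function and prints
    parse_tag(" Web ")  # repeat call is served from the cache, no print
    print(parse_tag.cache_info())  # CacheInfo(hits=1, misses=1, maxsize=2, currsize=1)

One consequence of the switch: the wrapped function now exposes lru_cache's cache_clear() instead of the invalidate attribute that the removed wrapper attached.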
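A behavioral difference worth noting (my reading of the change, not a claim made in the commit): the removed LFUCache evicted the least frequently used half of its entries in one sweep whenever it filled up, while functools.lru_cache evicts a single least recently used entry per insertion. A standalone rendition of the removed eviction step, run on a hypothetical cache state:

    # Values are (value, count) pairs, as in the removed LFUCache.
    cache = {"a": (1, 5), "b": (2, 1), "c": (3, 9), "d": (4, 2)}
    maxsize = 4

    # Sort keys by hit count and drop the maxsize >> 1 least used
    # entries whenever the cache is at capacity.
    while len(cache) >= maxsize:
        for h in sorted(cache, key=lambda k: cache[k][1])[: maxsize >> 1]:
            del cache[h]

    print(cache)  # {'a': (1, 5), 'c': (3, 9)} -- 'b' and 'd' were evicted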