# lru_cache.py -- Simple LRU cache for dulwich
# Copyright (C) 2006, 2008 Canonical Ltd
# Copyright (C) 2022 Jelmer Vernooij <jelmer@jelmer.uk>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as published by the Free Software Foundation; version
# 2.0 or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
  21. """A simple least-recently-used (LRU) cache."""
  22. from typing import Callable, Dict, Generic, Iterable, Iterator, Optional, TypeVar
  23. _null_key = object()
  24. K = TypeVar("K")
  25. V = TypeVar("V")


class _LRUNode(Generic[K, V]):
    """This maintains the linked-list which is the lru internals."""

    __slots__ = ("prev", "next_key", "key", "value", "cleanup", "size")

    prev: Optional["_LRUNode[K, V]"]
    next_key: K
    size: Optional[int]

    def __init__(self, key: K, value: V, cleanup=None) -> None:
        self.prev = None
        self.next_key = _null_key  # type: ignore
        self.key = key
        self.value = value
        self.cleanup = cleanup
        # TODO: We could compute this 'on-the-fly' like we used to, and remove
        #       one pointer from this object, we just need to decide if it
        #       actually costs us much of anything in normal usage
        self.size = None

    def __repr__(self) -> str:
        if self.prev is None:
            prev_key = None
        else:
            prev_key = self.prev.key
        return f"{self.__class__.__name__}({self.key!r} n:{self.next_key!r} p:{prev_key!r})"

    def run_cleanup(self) -> None:
        if self.cleanup is not None:
            self.cleanup(self.key, self.value)
        self.cleanup = None
        # Just make sure to break any refcycles, etc
        del self.value
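

# How the list is threaded (see LRUCache below): nodes form a doubly-linked
# list through the cache dict. `prev` points toward the most-recently-used
# end, while `next_key` holds the *key* of the next older node, so walking
# toward the tail takes one dict lookup per step; a `next_key` of `_null_key`
# marks the least-recently-used tail.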


class LRUCache(Generic[K, V]):
    """A class which manages a cache of entries, removing unused ones."""

    _least_recently_used: Optional[_LRUNode[K, V]]
    _most_recently_used: Optional[_LRUNode[K, V]]

    def __init__(
        self, max_cache: int = 100, after_cleanup_count: Optional[int] = None
    ) -> None:
        self._cache: Dict[K, _LRUNode[K, V]] = {}
        # The "HEAD" of the lru linked list
        self._most_recently_used = None
        # The "TAIL" of the lru linked list
        self._least_recently_used = None
        self._update_max_cache(max_cache, after_cleanup_count)

    def __contains__(self, key: K) -> bool:
        return key in self._cache

    def __getitem__(self, key: K) -> V:
        cache = self._cache
        node = cache[key]
        # Inlined from _record_access to decrease the overhead of __getitem__
        # We also have more knowledge about structure if __getitem__ is
        # succeeding, then we know that self._most_recently_used must not be
        # None, etc.
        mru = self._most_recently_used
        if node is mru:
            # Nothing to do, this node is already at the head of the queue
            return node.value
        # Remove this node from the old location
        node_prev = node.prev
        next_key = node.next_key
        # benchmarking shows that the lookup of _null_key in globals is faster
        # than the attribute lookup for (node is self._least_recently_used)
        if next_key is _null_key:
            # 'node' is the _least_recently_used, because it doesn't have a
            # 'next' item. So move the current lru to the previous node.
            self._least_recently_used = node_prev
        else:
            node_next = cache[next_key]
            node_next.prev = node_prev
        assert node_prev
        assert mru
        node_prev.next_key = next_key
        # Insert this node at the front of the list
        node.next_key = mru.key
        mru.prev = node
        self._most_recently_used = node
        node.prev = None
        return node.value

    def __len__(self) -> int:
        return len(self._cache)

    def _walk_lru(self) -> Iterator[_LRUNode[K, V]]:
        """Walk the LRU list, only meant to be used in tests."""
        node = self._most_recently_used
        if node is not None:
            if node.prev is not None:
                raise AssertionError(
                    "the _most_recently_used entry is not"
                    " supposed to have a previous entry"
                    f" {node}"
                )
        while node is not None:
            if node.next_key is _null_key:
                if node is not self._least_recently_used:
                    raise AssertionError(
                        f"only the last node should have no next value: {node}"
                    )
                node_next = None
            else:
                node_next = self._cache[node.next_key]
                if node_next.prev is not node:
                    raise AssertionError(
                        f"inconsistency found, node.next.prev != node: {node}"
                    )
            if node.prev is None:
                if node is not self._most_recently_used:
                    raise AssertionError(
                        "only the _most_recently_used should"
                        f" not have a previous node: {node}"
                    )
            else:
                if node.prev.next_key != node.key:
                    raise AssertionError(
                        f"inconsistency found, node.prev.next != node: {node}"
                    )
            yield node
            node = node_next

    def add(
        self, key: K, value: V, cleanup: Optional[Callable[[K, V], None]] = None
    ) -> None:
        """Add a new value to the cache.

        Also, if the entry is ever removed from the cache, call
        cleanup(key, value).

        Args:
          key: The key to store it under
          value: The object to store
          cleanup: None or a function taking (key, value) to indicate
            'value' should be cleaned up.
        """
        if key is _null_key:
            raise ValueError("cannot use _null_key as a key")
        if key in self._cache:
            node = self._cache[key]
            node.run_cleanup()
            node.value = value
            node.cleanup = cleanup
        else:
            node = _LRUNode(key, value, cleanup=cleanup)
            self._cache[key] = node
        self._record_access(node)

        if len(self._cache) > self._max_cache:
            # Trigger the cleanup
            self.cleanup()

    def cache_size(self) -> int:
        """Get the number of entries we will cache."""
        return self._max_cache

    def get(self, key: K, default: Optional[V] = None) -> Optional[V]:
        node = self._cache.get(key, None)
        if node is None:
            return default
        self._record_access(node)
        return node.value

    def keys(self) -> Iterable[K]:
        """Get the list of keys currently cached.

        Note that values returned here may not be available by the time you
        request them later. This is simply meant as a peek into the current
        state.

        Returns: An unordered list of keys that are currently cached.
        """
        return self._cache.keys()

    def items(self) -> Dict[K, V]:
        """Get the key:value pairs as a dict."""
        return {k: n.value for k, n in self._cache.items()}

    def cleanup(self) -> None:
        """Clear the cache until it shrinks to the requested size.

        This does not completely wipe the cache, just makes sure it is under
        the after_cleanup_count.
        """
        # Make sure the cache is shrunk to the correct size
        while len(self._cache) > self._after_cleanup_count:
            self._remove_lru()

    def __setitem__(self, key: K, value: V) -> None:
        """Add a value to the cache, there will be no cleanup function."""
        self.add(key, value, cleanup=None)

    def _record_access(self, node: _LRUNode[K, V]) -> None:
        """Record that key was accessed."""
        # Move 'node' to the front of the queue
        if self._most_recently_used is None:
            self._most_recently_used = node
            self._least_recently_used = node
            return
        elif node is self._most_recently_used:
            # Nothing to do, this node is already at the head of the queue
            return
        # We've taken care of the tail pointer, remove the node, and insert it
        # at the front
        # REMOVE
        if node is self._least_recently_used:
            self._least_recently_used = node.prev
        if node.prev is not None:
            node.prev.next_key = node.next_key
        if node.next_key is not _null_key:
            node_next = self._cache[node.next_key]
            node_next.prev = node.prev
        # INSERT
        node.next_key = self._most_recently_used.key
        self._most_recently_used.prev = node
        self._most_recently_used = node
        node.prev = None

    def _remove_node(self, node: _LRUNode[K, V]) -> None:
        if node is self._least_recently_used:
            self._least_recently_used = node.prev
        self._cache.pop(node.key)
        # If we have removed all entries, remove the head pointer as well
        if self._least_recently_used is None:
            self._most_recently_used = None
        node.run_cleanup()
        # Now remove this node from the linked list
        if node.prev is not None:
            node.prev.next_key = node.next_key
        if node.next_key is not _null_key:
            node_next = self._cache[node.next_key]
            node_next.prev = node.prev
        # And remove this node's pointers
        node.prev = None
        node.next_key = _null_key  # type: ignore

    def _remove_lru(self) -> None:
        """Remove one entry from the lru, and handle consequences.

        If there are no more references to the lru, then this entry should be
        removed from the cache.
        """
        assert self._least_recently_used
        self._remove_node(self._least_recently_used)

    def clear(self) -> None:
        """Clear out all of the cache."""
        # Clean up in LRU order
        while self._cache:
            self._remove_lru()

    def resize(
        self, max_cache: int, after_cleanup_count: Optional[int] = None
    ) -> None:
        """Change the number of entries that will be cached."""
        self._update_max_cache(max_cache, after_cleanup_count=after_cleanup_count)

    def _update_max_cache(self, max_cache, after_cleanup_count=None):
        self._max_cache = max_cache
        if after_cleanup_count is None:
            # Integer division keeps the threshold an int, matching
            # LRUSizeCache._update_max_size below.
            self._after_cleanup_count = self._max_cache * 8 // 10
        else:
            self._after_cleanup_count = min(after_cleanup_count, self._max_cache)
        self.cleanup()
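

# Usage sketch: exercises the add/get/eviction flow and the cleanup callback
# described in the docstrings above. `_demo_lru_cache` is a hypothetical
# helper for illustration only, not part of the dulwich API.
def _demo_lru_cache() -> None:
    evicted = []

    def on_evict(key: str, value: int) -> None:
        # Invoked whenever an entry leaves the cache (eviction or overwrite).
        evicted.append((key, value))

    cache: LRUCache[str, int] = LRUCache(max_cache=3, after_cleanup_count=2)
    cache.add("a", 1, cleanup=on_evict)
    cache.add("b", 2, cleanup=on_evict)
    cache.add("c", 3, cleanup=on_evict)
    assert cache["a"] == 1  # touching "a" makes "b" the least recently used
    cache.add("d", 4, cleanup=on_evict)  # exceeds max_cache: shrink to 2
    assert "b" not in cache
    assert evicted[0] == ("b", 2)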


class LRUSizeCache(LRUCache[K, V]):
    """An LRUCache that removes things based on the size of the values.

    This differs in that it doesn't care how many actual items there are,
    it just restricts the cache to be cleaned up after so much data is
    stored.

    The size of items added will be computed using compute_size(value),
    which defaults to len() if not supplied.
    """

    _compute_size: Callable[[V], int]

    def __init__(
        self,
        max_size: int = 1024 * 1024,
        after_cleanup_size: Optional[int] = None,
        compute_size: Optional[Callable[[V], int]] = None,
    ) -> None:
        """Create a new LRUSizeCache.

        Args:
          max_size: The max number of bytes to store before we start
            clearing out entries.
          after_cleanup_size: After cleaning up, shrink everything to this
            size.
          compute_size: A function to compute the size of the values. We
            use a function here, so that you can pass 'len' if you are just
            using simple strings, or a more complex function if you are using
            something like a list of strings, or even a custom object.
            The function should take the form "compute_size(value) => integer".
            If not supplied, it defaults to 'len()'.
        """
        self._value_size = 0
        if compute_size is None:
            self._compute_size = len  # type: ignore
        else:
            self._compute_size = compute_size
        self._update_max_size(max_size, after_cleanup_size=after_cleanup_size)
        LRUCache.__init__(self, max_cache=max(int(max_size / 512), 1))

    def add(
        self, key: K, value: V, cleanup: Optional[Callable[[K, V], None]] = None
    ) -> None:
        """Add a new value to the cache.

        Also, if the entry is ever removed from the cache, call
        cleanup(key, value).

        Args:
          key: The key to store it under
          value: The object to store
          cleanup: None or a function taking (key, value) to indicate
            'value' should be cleaned up.
        """
        if key is _null_key:
            raise ValueError("cannot use _null_key as a key")
        node = self._cache.get(key, None)
        value_len = self._compute_size(value)
        if value_len >= self._after_cleanup_size:
            # The new value is 'too big to fit', as it would fill up/overflow
            # the cache all by itself
            if node is not None:
                # We won't be replacing the old node, so just remove it
                self._remove_node(node)
            if cleanup is not None:
                cleanup(key, value)
            return
        if node is None:
            node = _LRUNode(key, value, cleanup=cleanup)
            self._cache[key] = node
        else:
            assert node.size is not None
            self._value_size -= node.size
        node.size = value_len
        self._value_size += value_len
        self._record_access(node)

        if self._value_size > self._max_size:
            # Time to cleanup
            self.cleanup()

    def cleanup(self) -> None:
        """Clear the cache until it shrinks to the requested size.

        This does not completely wipe the cache, just makes sure it is under
        the after_cleanup_size.
        """
        # Make sure the cache is shrunk to the correct size
        while self._value_size > self._after_cleanup_size:
            self._remove_lru()

    def _remove_node(self, node: _LRUNode[K, V]) -> None:
        assert node.size is not None
        self._value_size -= node.size
        LRUCache._remove_node(self, node)

    def resize(self, max_size: int, after_cleanup_size: Optional[int] = None) -> None:
        """Change the number of bytes that will be cached."""
        self._update_max_size(max_size, after_cleanup_size=after_cleanup_size)
        max_cache = max(int(max_size / 512), 1)
        self._update_max_cache(max_cache)

    def _update_max_size(
        self, max_size: int, after_cleanup_size: Optional[int] = None
    ) -> None:
        self._max_size = max_size
        if after_cleanup_size is None:
            self._after_cleanup_size = self._max_size * 8 // 10
        else:
            self._after_cleanup_size = min(after_cleanup_size, self._max_size)
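

# Usage sketch: LRUSizeCache evicts based on the total size of stored values
# (len() by default) rather than the entry count. `_demo_lru_size_cache` is a
# hypothetical helper for illustration only, not part of the dulwich API.
def _demo_lru_size_cache() -> None:
    cache: LRUSizeCache[str, bytes] = LRUSizeCache(
        max_size=100, after_cleanup_size=80
    )
    cache["a"] = b"x" * 60
    cache["b"] = b"y" * 30  # total is now 90 <= max_size, nothing evicted
    cache["c"] = b"z" * 30  # total 120 > max_size: "a" is evicted, back to 60
    assert "a" not in cache
    assert sorted(cache.keys()) == ["b", "c"]
    # A single value >= after_cleanup_size is refused rather than cached:
    cache["huge"] = b"!" * 90
    assert "huge" not in cache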