# lru_cache.py -- Simple LRU cache for dulwich
# Copyright (C) 2006, 2008 Canonical Ltd
# Copyright (C) 2022 Jelmer Vernooij <jelmer@jelmer.uk>
#
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as published by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#

"""A simple least-recently-used (LRU) cache."""

from collections.abc import Iterable, Iterator
from typing import Callable, Generic, Optional, TypeVar

_null_key = object()

K = TypeVar("K")
V = TypeVar("V")
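
# Usage sketch (added for illustration; it uses only the API defined below):
#
#     cache = LRUCache(max_cache=10)
#     cache["ref"] = "value"              # __setitem__, no cleanup callback
#     cache.add("pack", "data", cleanup=lambda k, v: print("evicted", k))
#     cache.get("missing")                # returns None instead of raising
#     cache["ref"]                        # returns "value", marks it MRU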


class _LRUNode(Generic[K, V]):
    """This maintains the linked-list which is the lru internals."""

    __slots__ = ("cleanup", "key", "next_key", "prev", "size", "value")

    prev: Optional["_LRUNode[K, V]"]
    next_key: K
    size: Optional[int]

    def __init__(self, key: K, value: V, cleanup=None) -> None:
        self.prev = None
        self.next_key = _null_key  # type: ignore
        self.key = key
        self.value = value
        self.cleanup = cleanup
        # TODO: We could compute this 'on-the-fly' like we used to, and remove
        #       one pointer from this object, we just need to decide if it
        #       actually costs us much of anything in normal usage
        self.size = None

    def __repr__(self) -> str:
        if self.prev is None:
            prev_key = None
        else:
            prev_key = self.prev.key
        return f"{self.__class__.__name__}({self.key!r} n:{self.next_key!r} p:{prev_key!r})"

    def run_cleanup(self) -> None:
        if self.cleanup is not None:
            self.cleanup(self.key, self.value)
        self.cleanup = None
        # Just make sure to break any refcycles, etc
        del self.value
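

# Design note (added): the list is doubly linked, but asymmetrically. Each
# node keeps a direct reference to its predecessor (prev), while its
# successor is stored as a cache key (next_key, with the _null_key sentinel
# marking the tail) and is resolved through the cache dict. Storing a key
# rather than a node reference in the forward direction avoids node-to-node
# reference cycles between neighbouring nodes.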


class LRUCache(Generic[K, V]):
    """A class which manages a cache of entries, removing unused ones."""

    _least_recently_used: Optional[_LRUNode[K, V]]
    _most_recently_used: Optional[_LRUNode[K, V]]

    def __init__(
        self, max_cache: int = 100, after_cleanup_count: Optional[int] = None
    ) -> None:
        self._cache: dict[K, _LRUNode[K, V]] = {}
        # The "HEAD" of the lru linked list
        self._most_recently_used = None
        # The "TAIL" of the lru linked list
        self._least_recently_used = None
        self._update_max_cache(max_cache, after_cleanup_count)

    def __contains__(self, key: K) -> bool:
        return key in self._cache

    def __getitem__(self, key: K) -> V:
        cache = self._cache
        node = cache[key]
        # Inlined from _record_access to decrease the overhead of __getitem__
        # We also have more knowledge about structure if __getitem__ is
        # succeeding, then we know that self._most_recently_used must not be
        # None, etc.
        mru = self._most_recently_used
        if node is mru:
            # Nothing to do, this node is already at the head of the queue
            return node.value
        # Remove this node from the old location
        node_prev = node.prev
        next_key = node.next_key
        # benchmarking shows that the lookup of _null_key in globals is faster
        # than the attribute lookup for (node is self._least_recently_used)
        if next_key is _null_key:
            # 'node' is the _least_recently_used, because it doesn't have a
            # 'next' item. So move the current lru to the previous node.
            self._least_recently_used = node_prev
        else:
            node_next = cache[next_key]
            node_next.prev = node_prev
        assert node_prev
        assert mru
        node_prev.next_key = next_key
        # Insert this node at the front of the list
        node.next_key = mru.key
        mru.prev = node
        self._most_recently_used = node
        node.prev = None
        return node.value
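
    # Example (added for illustration): with max_cache=3 and keys inserted in
    # the order 1, 2, 3, a later cache[1] lookup relinks key 1 to the head of
    # the list, so the next eviction removes key 2 (now the least recently
    # used) rather than key 1.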

    def __len__(self) -> int:
        return len(self._cache)

    def _walk_lru(self) -> Iterator[_LRUNode[K, V]]:
        """Walk the LRU list, only meant to be used in tests."""
        node = self._most_recently_used
        if node is not None:
            if node.prev is not None:
                raise AssertionError(
                    "the _most_recently_used entry is not"
                    " supposed to have a previous entry"
                    f" {node}"
                )
        while node is not None:
            if node.next_key is _null_key:
                if node is not self._least_recently_used:
                    raise AssertionError(
                        f"only the last node should have no next value: {node}"
                    )
                node_next = None
            else:
                node_next = self._cache[node.next_key]
                if node_next.prev is not node:
                    raise AssertionError(
                        f"inconsistency found, node.next.prev != node: {node}"
                    )
            if node.prev is None:
                if node is not self._most_recently_used:
                    raise AssertionError(
                        "only the _most_recently_used should"
                        f" not have a previous node: {node}"
                    )
            else:
                if node.prev.next_key != node.key:
                    raise AssertionError(
                        f"inconsistency found, node.prev.next != node: {node}"
                    )
            yield node
            node = node_next

    def add(
        self, key: K, value: V, cleanup: Optional[Callable[[K, V], None]] = None
    ) -> None:
        """Add a new value to the cache.

        Also, if the entry is ever removed from the cache, call
        cleanup(key, value).

        Args:
          key: The key to store it under
          value: The object to store
          cleanup: None or a function taking (key, value) to indicate
                   'value' should be cleaned up.
        """
        if key is _null_key:
            raise ValueError("cannot use _null_key as a key")
        if key in self._cache:
            node = self._cache[key]
            node.run_cleanup()
            node.value = value
            node.cleanup = cleanup
        else:
            node = _LRUNode(key, value, cleanup=cleanup)
            self._cache[key] = node
        self._record_access(node)
        if len(self._cache) > self._max_cache:
            # Trigger the cleanup
            self.cleanup()
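
    # Note (added): re-adding an existing key runs the old entry's cleanup
    # callback before storing the new value, so at most one live value is
    # ever associated with a key.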

    def cache_size(self) -> int:
        """Get the number of entries we will cache."""
        return self._max_cache

    def get(self, key: K, default: Optional[V] = None) -> Optional[V]:
        node = self._cache.get(key, None)
        if node is None:
            return default
        self._record_access(node)
        return node.value

    def keys(self) -> Iterable[K]:
        """Get the list of keys currently cached.

        Note that values returned here may not be available by the time you
        request them later. This is simply meant as a peek into the current
        state.

        Returns: An unordered list of keys that are currently cached.
        """
        return self._cache.keys()

    def items(self) -> dict[K, V]:
        """Get the key:value pairs as a dict."""
        return {k: n.value for k, n in self._cache.items()}

    def cleanup(self) -> None:
        """Clear the cache until it shrinks to the requested size.

        This does not completely wipe the cache, just makes sure it is under
        the after_cleanup_count.
        """
        # Make sure the cache is shrunk to the correct size
        while len(self._cache) > self._after_cleanup_count:
            self._remove_lru()

    def __setitem__(self, key: K, value: V) -> None:
        """Add a value to the cache, there will be no cleanup function."""
        self.add(key, value, cleanup=None)

    def _record_access(self, node: _LRUNode[K, V]) -> None:
        """Record that key was accessed."""
        # Move 'node' to the front of the queue
        if self._most_recently_used is None:
            self._most_recently_used = node
            self._least_recently_used = node
            return
        elif node is self._most_recently_used:
            # Nothing to do, this node is already at the head of the queue
            return
        # We've taken care of the tail pointer, remove the node, and insert it
        # at the front
        # REMOVE
        if node is self._least_recently_used:
            self._least_recently_used = node.prev
        if node.prev is not None:
            node.prev.next_key = node.next_key
        if node.next_key is not _null_key:
            node_next = self._cache[node.next_key]
            node_next.prev = node.prev
        # INSERT
        node.next_key = self._most_recently_used.key
        self._most_recently_used.prev = node
        self._most_recently_used = node
        node.prev = None

    def _remove_node(self, node: _LRUNode[K, V]) -> None:
        if node is self._least_recently_used:
            self._least_recently_used = node.prev
        self._cache.pop(node.key)
        # If we have removed all entries, remove the head pointer as well
        if self._least_recently_used is None:
            self._most_recently_used = None
        node.run_cleanup()
        # Now remove this node from the linked list
        if node.prev is not None:
            node.prev.next_key = node.next_key
        if node.next_key is not _null_key:
            node_next = self._cache[node.next_key]
            node_next.prev = node.prev
        # And remove this node's pointers
        node.prev = None
        node.next_key = _null_key  # type: ignore

    def _remove_lru(self) -> None:
        """Remove one entry from the lru, and handle consequences.

        If there are no more references to the lru, then this entry should be
        removed from the cache.
        """
        assert self._least_recently_used
        self._remove_node(self._least_recently_used)

    def clear(self) -> None:
        """Clear out all of the cache."""
        # Clean up in LRU order
        while self._cache:
            self._remove_lru()

    def resize(self, max_cache: int, after_cleanup_count: Optional[int] = None) -> None:
        """Change the number of entries that will be cached."""
        self._update_max_cache(max_cache, after_cleanup_count=after_cleanup_count)

    def _update_max_cache(
        self, max_cache: int, after_cleanup_count: Optional[int] = None
    ) -> None:
        self._max_cache = max_cache
        if after_cleanup_count is None:
            self._after_cleanup_count = self._max_cache * 8 // 10
        else:
            self._after_cleanup_count = min(after_cleanup_count, self._max_cache)
        self.cleanup()
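
# Worked example (added): with the default ratio above, LRUCache(max_cache=100)
# gets after_cleanup_count == 100 * 8 // 10 == 80, so adding a 101st entry
# triggers cleanup() and evicts least-recently-used entries until 80 remain.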


class LRUSizeCache(LRUCache[K, V]):
    """An LRUCache that removes things based on the size of the values.

    This differs in that it doesn't care how many actual items there are,
    it just restricts the cache to be cleaned up after so much data is stored.

    The size of items added will be computed using compute_size(value), which
    defaults to len() if not supplied.
    """

    _compute_size: Callable[[V], int]

    def __init__(
        self,
        max_size: int = 1024 * 1024,
        after_cleanup_size: Optional[int] = None,
        compute_size: Optional[Callable[[V], int]] = None,
    ) -> None:
        """Create a new LRUSizeCache.

        Args:
          max_size: The max number of bytes to store before we start
            clearing out entries.
          after_cleanup_size: After cleaning up, shrink everything to this
            size.
          compute_size: A function to compute the size of the values. We
            use a function here, so that you can pass 'len' if you are just
            using simple strings, or a more complex function if you are using
            something like a list of strings, or even a custom object.
            The function should take the form "compute_size(value) => integer".
            If not supplied, it defaults to 'len()'
        """
        self._value_size = 0
        if compute_size is None:
            self._compute_size = len  # type: ignore
        else:
            self._compute_size = compute_size
        self._update_max_size(max_size, after_cleanup_size=after_cleanup_size)
        LRUCache.__init__(self, max_cache=max(int(max_size / 512), 1))
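
    # Note (added): _update_max_size must run before LRUCache.__init__ above,
    # because LRUCache.__init__ ends up calling self.cleanup(), and the
    # override below reads _value_size and _after_cleanup_size. The
    # max(int(max_size / 512), 1) entry cap is a secondary bound, presumably
    # sized on the assumption of roughly 512 bytes per entry.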

    def add(
        self, key: K, value: V, cleanup: Optional[Callable[[K, V], None]] = None
    ) -> None:
        """Add a new value to the cache.

        Also, if the entry is ever removed from the cache, call
        cleanup(key, value).

        Args:
          key: The key to store it under
          value: The object to store
          cleanup: None or a function taking (key, value) to indicate
                   'value' should be cleaned up.
        """
        if key is _null_key:
            raise ValueError("cannot use _null_key as a key")
        node = self._cache.get(key, None)
        value_len = self._compute_size(value)
        if value_len >= self._after_cleanup_size:
            # The new value is 'too big to fit', as it would fill up/overflow
            # the cache all by itself
            if node is not None:
                # We won't be replacing the old node, so just remove it
                self._remove_node(node)
            if cleanup is not None:
                cleanup(key, value)
            return
        if node is None:
            node = _LRUNode(key, value, cleanup=cleanup)
            self._cache[key] = node
        else:
            assert node.size is not None
            self._value_size -= node.size
        node.size = value_len
        self._value_size += value_len
        self._record_access(node)
        if self._value_size > self._max_size:
            # Time to cleanup
            self.cleanup()
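
    # Worked example (added): LRUSizeCache(max_size=10) defaults
    # after_cleanup_size to 10 * 8 // 10 == 8, so an 8-byte value is refused
    # outright (value_len >= _after_cleanup_size) and its cleanup callback,
    # if any, fires immediately instead of on eviction.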

    def cleanup(self) -> None:
        """Clear the cache until it shrinks to the requested size.

        This does not completely wipe the cache, just makes sure it is under
        the after_cleanup_size.
        """
        # Make sure the cache is shrunk to the correct size
        while self._value_size > self._after_cleanup_size:
            self._remove_lru()

    def _remove_node(self, node: _LRUNode[K, V]) -> None:
        assert node.size is not None
        self._value_size -= node.size
        LRUCache._remove_node(self, node)

    def resize(self, max_size: int, after_cleanup_size: Optional[int] = None) -> None:
        """Change the number of bytes that will be cached."""
        self._update_max_size(max_size, after_cleanup_size=after_cleanup_size)
        max_cache = max(int(max_size / 512), 1)
        self._update_max_cache(max_cache)

    def _update_max_size(
        self, max_size: int, after_cleanup_size: Optional[int] = None
    ) -> None:
        self._max_size = max_size
        if after_cleanup_size is None:
            self._after_cleanup_size = self._max_size * 8 // 10
        else:
            self._after_cleanup_size = min(after_cleanup_size, self._max_size)