# lru_cache.py -- Simple LRU cache for dulwich
# Copyright (C) 2006, 2008 Canonical Ltd
# Copyright (C) 2022 Jelmer Vernooij <jelmer@jelmer.uk>
#
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as published by the Free Software Foundation; version
# 2.0 or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#

"""A simple least-recently-used (LRU) cache."""

from typing import Callable, Dict, Generic, Iterable, Iterator, Optional, TypeVar

_null_key = object()


K = TypeVar("K")
V = TypeVar("V")


class _LRUNode(Generic[K, V]):
    """This maintains the linked-list which is the lru internals."""

    __slots__ = ("prev", "next_key", "key", "value", "cleanup", "size")

    prev: Optional["_LRUNode[K, V]"]
    next_key: K
    size: Optional[int]
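
    # Note (added for clarity; an inference from the code, not an upstream
    # comment): the list links backwards by node reference (prev) but forwards
    # by *key* (next_key), so forward traversal goes through the owning
    # cache's dict. This keeps nodes small and avoids direct node-to-node
    # reference cycles between neighbours.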

    def __init__(self, key: K, value: V, cleanup=None) -> None:
        self.prev = None
        self.next_key = _null_key  # type: ignore
        self.key = key
        self.value = value
        self.cleanup = cleanup
        # TODO: We could compute this 'on-the-fly' like we used to, and remove
        #       one pointer from this object; we just need to decide if it
        #       actually costs us much of anything in normal usage
        self.size = None

    def __repr__(self) -> str:
        if self.prev is None:
            prev_key = None
        else:
            prev_key = self.prev.key
        return "{}({!r} n:{!r} p:{!r})".format(
            self.__class__.__name__,
            self.key,
            self.next_key,
            prev_key,
        )

    def run_cleanup(self) -> None:
        if self.cleanup is not None:
            self.cleanup(self.key, self.value)
        self.cleanup = None
        # Just make sure to break any refcycles, etc.
        del self.value


class LRUCache(Generic[K, V]):
    """A class which manages a cache of entries, removing unused ones."""

    _least_recently_used: Optional[_LRUNode[K, V]]
    _most_recently_used: Optional[_LRUNode[K, V]]

    def __init__(
        self, max_cache: int = 100, after_cleanup_count: Optional[int] = None
    ) -> None:
        self._cache: Dict[K, _LRUNode[K, V]] = {}
        # The "HEAD" of the lru linked list
        self._most_recently_used = None
        # The "TAIL" of the lru linked list
        self._least_recently_used = None
        self._update_max_cache(max_cache, after_cleanup_count)

    def __contains__(self, key: K) -> bool:
        return key in self._cache

    def __getitem__(self, key: K) -> V:
        cache = self._cache
        node = cache[key]
        # Inlined from _record_access to decrease the overhead of __getitem__.
        # We also know more about the structure when __getitem__ succeeds:
        # self._most_recently_used must not be None, etc.
        mru = self._most_recently_used
        if node is mru:
            # Nothing to do, this node is already at the head of the queue
            return node.value
        # Remove this node from the old location
        node_prev = node.prev
        next_key = node.next_key
        # Benchmarking shows that the lookup of _null_key in globals is faster
        # than the attribute lookup for (node is self._least_recently_used)
        if next_key is _null_key:
            # 'node' is the _least_recently_used, because it doesn't have a
            # 'next' item. So move the current lru to the previous node.
            self._least_recently_used = node_prev
        else:
            node_next = cache[next_key]
            node_next.prev = node_prev
        assert node_prev
        assert mru
        node_prev.next_key = next_key
        # Insert this node at the front of the list
        node.next_key = mru.key
        mru.prev = node
        self._most_recently_used = node
        node.prev = None
        return node.value

    def __len__(self) -> int:
        return len(self._cache)

    def _walk_lru(self) -> Iterator[_LRUNode[K, V]]:
        """Walk the LRU list, only meant to be used in tests."""
        node = self._most_recently_used
        if node is not None:
            if node.prev is not None:
                raise AssertionError(
                    "the _most_recently_used entry is not"
                    f" supposed to have a previous entry {node}"
                )
        while node is not None:
            if node.next_key is _null_key:
                if node is not self._least_recently_used:
                    raise AssertionError(
                        f"only the last node should have no next value: {node}"
                    )
                node_next = None
            else:
                node_next = self._cache[node.next_key]
                if node_next.prev is not node:
                    raise AssertionError(
                        f"inconsistency found, node.next.prev != node: {node}"
                    )
            if node.prev is None:
                if node is not self._most_recently_used:
                    raise AssertionError(
                        "only the _most_recently_used should"
                        f" not have a previous node: {node}"
                    )
            else:
                if node.prev.next_key != node.key:
                    raise AssertionError(
                        f"inconsistency found, node.prev.next != node: {node}"
                    )
            yield node
            node = node_next

    def add(
        self, key: K, value: V, cleanup: Optional[Callable[[K, V], None]] = None
    ) -> None:
        """Add a new value to the cache.

        Also, if the entry is ever removed from the cache, call
        cleanup(key, value).

        Args:
          key: The key to store it under
          value: The object to store
          cleanup: None or a function taking (key, value) to indicate
            'value' should be cleaned up.
        """
        if key is _null_key:
            raise ValueError("cannot use _null_key as a key")
        if key in self._cache:
            node = self._cache[key]
            node.run_cleanup()
            node.value = value
            node.cleanup = cleanup
        else:
            node = _LRUNode(key, value, cleanup=cleanup)
            self._cache[key] = node
        self._record_access(node)

        if len(self._cache) > self._max_cache:
            # Trigger the cleanup
            self.cleanup()

    def cache_size(self) -> int:
        """Get the number of entries we will cache."""
        return self._max_cache

    def get(self, key: K, default: Optional[V] = None) -> Optional[V]:
        node = self._cache.get(key, None)
        if node is None:
            return default
        self._record_access(node)
        return node.value

    def keys(self) -> Iterable[K]:
        """Get the list of keys currently cached.

        Note that values returned here may not be available by the time you
        request them later. This is simply meant as a peek into the current
        state.

        Returns: An unordered list of keys that are currently cached.
        """
        return self._cache.keys()

    def items(self) -> Dict[K, V]:
        """Get the key:value pairs as a dict."""
        return {k: n.value for k, n in self._cache.items()}

    def cleanup(self):
        """Clear the cache until it shrinks to the requested size.

        This does not completely wipe the cache, just makes sure it is under
        the after_cleanup_count.
        """
        # Make sure the cache is shrunk to the requested size
        while len(self._cache) > self._after_cleanup_count:
            self._remove_lru()

    def __setitem__(self, key: K, value: V) -> None:
        """Add a value to the cache, there will be no cleanup function."""
        self.add(key, value, cleanup=None)

    def _record_access(self, node: _LRUNode[K, V]) -> None:
        """Record that key was accessed."""
        # Move 'node' to the front of the queue
        if self._most_recently_used is None:
            self._most_recently_used = node
            self._least_recently_used = node
            return
        elif node is self._most_recently_used:
            # Nothing to do, this node is already at the head of the queue
            return
        # We've handled the simple cases; now remove the node from its old
        # position and insert it at the front
        # REMOVE
        if node is self._least_recently_used:
            self._least_recently_used = node.prev
        if node.prev is not None:
            node.prev.next_key = node.next_key
        if node.next_key is not _null_key:
            node_next = self._cache[node.next_key]
            node_next.prev = node.prev
        # INSERT
        node.next_key = self._most_recently_used.key
        self._most_recently_used.prev = node
        self._most_recently_used = node
        node.prev = None

    def _remove_node(self, node: _LRUNode[K, V]) -> None:
        if node is self._least_recently_used:
            self._least_recently_used = node.prev
        self._cache.pop(node.key)
        # If we have removed all entries, remove the head pointer as well
        if self._least_recently_used is None:
            self._most_recently_used = None
        node.run_cleanup()
        # Now remove this node from the linked list
        if node.prev is not None:
            node.prev.next_key = node.next_key
        if node.next_key is not _null_key:
            node_next = self._cache[node.next_key]
            node_next.prev = node.prev
        # And remove this node's pointers
        node.prev = None
        node.next_key = _null_key  # type: ignore

    def _remove_lru(self) -> None:
        """Remove one entry from the lru, and handle consequences.

        If there are no more references to the lru, then this entry should be
        removed from the cache.
        """
        assert self._least_recently_used
        self._remove_node(self._least_recently_used)

    def clear(self) -> None:
        """Clear out all of the cache."""
        # Clean up in LRU order
        while self._cache:
            self._remove_lru()

    def resize(self, max_cache: int, after_cleanup_count: Optional[int] = None) -> None:
        """Change the number of entries that will be cached."""
        self._update_max_cache(max_cache, after_cleanup_count=after_cleanup_count)

    def _update_max_cache(self, max_cache, after_cleanup_count=None):
        self._max_cache = max_cache
        if after_cleanup_count is None:
            # Use floor division so the count stays an integer (80% of max),
            # matching the arithmetic in LRUSizeCache._update_max_size
            self._after_cleanup_count = self._max_cache * 8 // 10
        else:
            self._after_cleanup_count = min(after_cleanup_count, self._max_cache)
        self.cleanup()
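

# A minimal usage sketch (an illustrative addition, not part of the dulwich
# API): it exercises the count-based eviction order and the per-entry cleanup
# callback documented on add(). Reading an entry marks it most recently used,
# and adding past max_cache shrinks the cache down to after_cleanup_count.
def _example_lru_cache_usage() -> None:
    evicted = []
    cache: LRUCache[str, bytes] = LRUCache(max_cache=3, after_cleanup_count=2)
    cache["a"] = b"first"
    cache.add("b", b"second", cleanup=lambda key, value: evicted.append(key))
    cache["c"] = b"third"
    cache["a"]  # touch "a" so it becomes the most recently used entry
    cache["d"] = b"fourth"  # len > max_cache: shrink to after_cleanup_count
    assert "a" in cache and "d" in cache  # the two most recent entries survive
    assert evicted == ["b"]  # the cleanup callback ran when "b" was evicted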


class LRUSizeCache(LRUCache[K, V]):
    """An LRUCache that removes things based on the size of the values.

    This differs in that it doesn't care how many actual items there are,
    it just restricts the cache to be cleaned up after so much data is stored.

    The size of items added will be computed using compute_size(value), which
    defaults to len() if not supplied.
    """

    _compute_size: Callable[[V], int]

    def __init__(
        self,
        max_size: int = 1024 * 1024,
        after_cleanup_size: Optional[int] = None,
        compute_size: Optional[Callable[[V], int]] = None,
    ) -> None:
        """Create a new LRUSizeCache.

        Args:
          max_size: The max number of bytes to store before we start
            clearing out entries.
          after_cleanup_size: After cleaning up, shrink everything to this
            size.
          compute_size: A function to compute the size of the values. We
            use a function here, so that you can pass 'len' if you are just
            using simple strings, or a more complex function if you are using
            something like a list of strings, or even a custom object.
            The function should take the form "compute_size(value) => integer".
            If not supplied, it defaults to 'len()'.
        """
        self._value_size = 0
        if compute_size is None:
            self._compute_size = len  # type: ignore
        else:
            self._compute_size = compute_size
        self._update_max_size(max_size, after_cleanup_size=after_cleanup_size)
        LRUCache.__init__(self, max_cache=max(int(max_size / 512), 1))

    def add(
        self, key: K, value: V, cleanup: Optional[Callable[[K, V], None]] = None
    ) -> None:
        """Add a new value to the cache.

        Also, if the entry is ever removed from the cache, call
        cleanup(key, value).

        Args:
          key: The key to store it under
          value: The object to store
          cleanup: None or a function taking (key, value) to indicate
            'value' should be cleaned up.
        """
        if key is _null_key:
            raise ValueError("cannot use _null_key as a key")
        node = self._cache.get(key, None)
        value_len = self._compute_size(value)
        if value_len >= self._after_cleanup_size:
            # The new value is 'too big to fit', as it would fill up/overflow
            # the cache all by itself
            if node is not None:
                # We won't be replacing the old node, so just remove it
                self._remove_node(node)
            if cleanup is not None:
                cleanup(key, value)
            return
        if node is None:
            node = _LRUNode(key, value, cleanup=cleanup)
            self._cache[key] = node
        else:
            assert node.size is not None
            self._value_size -= node.size
        node.size = value_len
        self._value_size += value_len
        self._record_access(node)

        if self._value_size > self._max_size:
            # Time to cleanup
            self.cleanup()

    def cleanup(self) -> None:
        """Clear the cache until it shrinks to the requested size.

        This does not completely wipe the cache, just makes sure it is under
        the after_cleanup_size.
        """
        # Make sure the cache is shrunk to the requested size
        while self._value_size > self._after_cleanup_size:
            self._remove_lru()

    def _remove_node(self, node: _LRUNode[K, V]) -> None:
        assert node.size is not None
        self._value_size -= node.size
        LRUCache._remove_node(self, node)

    def resize(self, max_size: int, after_cleanup_size: Optional[int] = None) -> None:
        """Change the number of bytes that will be cached."""
        self._update_max_size(max_size, after_cleanup_size=after_cleanup_size)
        max_cache = max(int(max_size / 512), 1)
        self._update_max_cache(max_cache)

    def _update_max_size(
        self, max_size: int, after_cleanup_size: Optional[int] = None
    ) -> None:
        self._max_size = max_size
        if after_cleanup_size is None:
            self._after_cleanup_size = self._max_size * 8 // 10
        else:
            self._after_cleanup_size = min(after_cleanup_size, self._max_size)
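

# A minimal usage sketch (an illustrative addition, not part of the dulwich
# API): eviction in LRUSizeCache is driven by the total size of the stored
# values, as measured by compute_size, rather than by the number of entries.
def _example_lru_size_cache_usage() -> None:
    cache: LRUSizeCache[str, bytes] = LRUSizeCache(
        max_size=100, after_cleanup_size=60, compute_size=len
    )
    cache["a"] = b"x" * 40
    cache["b"] = b"y" * 40  # total 80 bytes, still within max_size=100
    cache["c"] = b"z" * 40  # total 120 > 100: shrink until <= 60 bytes remain
    assert "a" not in cache and "b" not in cache  # evicted in LRU order
    assert "c" in cache
    # Values of at least after_cleanup_size bytes are refused outright, since
    # a single such value would fill (or overflow) the cache by itself:
    cache["huge"] = b"h" * 60
    assert "huge" not in cache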