# Copyright (C) 2006, 2008 Canonical Ltd
#
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as published by the Free Software Foundation; version
# 2.0 or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#
- """Tests for the lru_cache module."""
- from dulwich import lru_cache
- from . import TestCase


class TestLRUCache(TestCase):
    """Test that LRU cache properly keeps track of entries."""

    def test_cache_size(self) -> None:
        cache = lru_cache.LRUCache(max_cache=10)
        self.assertEqual(10, cache.cache_size())

        cache = lru_cache.LRUCache(max_cache=256)
        self.assertEqual(256, cache.cache_size())

        cache.resize(512)
        self.assertEqual(512, cache.cache_size())

    def test_missing(self) -> None:
        cache = lru_cache.LRUCache(max_cache=10)

        self.assertNotIn("foo", cache)
        self.assertRaises(KeyError, cache.__getitem__, "foo")

        cache["foo"] = "bar"
        self.assertEqual("bar", cache["foo"])
        self.assertIn("foo", cache)
        self.assertNotIn("bar", cache)

    def test_map_None(self) -> None:
        # Make sure that we can properly map None as a key.
        cache = lru_cache.LRUCache(max_cache=10)
        self.assertNotIn(None, cache)

        cache[None] = 1
        self.assertEqual(1, cache[None])
        cache[None] = 2
        self.assertEqual(2, cache[None])

        # Test the various code paths of __getitem__, to make sure that we can
        # handle None as the key for both the LRU and the MRU entry.
        cache[1] = 3
        cache[None] = 1
        cache[None]
        cache[1]
        cache[None]
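        # _walk_lru() yields cache nodes from most recently used to least
        # recently used, so None (touched last) should come first.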
        self.assertEqual([None, 1], [n.key for n in cache._walk_lru()])

    def test_add__null_key(self) -> None:
        cache = lru_cache.LRUCache(max_cache=10)
        self.assertRaises(ValueError, cache.add, lru_cache._null_key, 1)

    def test_overflow(self) -> None:
        """Adding extra entries will pop out old ones."""
        cache = lru_cache.LRUCache(max_cache=1, after_cleanup_count=1)

        cache["foo"] = "bar"
        # With a max cache of 1, adding 'baz' should pop out 'foo'
        cache["baz"] = "biz"

        self.assertNotIn("foo", cache)
        self.assertIn("baz", cache)
        self.assertEqual("biz", cache["baz"])

    def test_by_usage(self) -> None:
        """Accessing entries bumps them up in priority."""
        cache = lru_cache.LRUCache(max_cache=2)

        cache["baz"] = "biz"
        cache["foo"] = "bar"

        self.assertEqual("biz", cache["baz"])

        # This must kick out 'foo' because it is the least recently accessed
        cache["nub"] = "in"

        self.assertNotIn("foo", cache)

    def test_cleanup(self) -> None:
        """Test that we can use a cleanup function."""
        cleanup_called = []

        def cleanup_func(key, val) -> None:
            cleanup_called.append((key, val))

        cache = lru_cache.LRUCache(max_cache=2, after_cleanup_count=2)
        cache.add("baz", "1", cleanup=cleanup_func)
        cache.add("foo", "2", cleanup=cleanup_func)
        cache.add("biz", "3", cleanup=cleanup_func)

        self.assertEqual([("baz", "1")], cleanup_called)

        # 'foo' is now most recent, so final cleanup will call it last
        cache["foo"]
        cache.clear()

        self.assertEqual([("baz", "1"), ("biz", "3"), ("foo", "2")], cleanup_called)

    def test_cleanup_on_replace(self) -> None:
        """Replacing an object should cleanup the old value."""
        cleanup_called = []

        def cleanup_func(key, val) -> None:
            cleanup_called.append((key, val))

        cache = lru_cache.LRUCache(max_cache=2)
        cache.add(1, 10, cleanup=cleanup_func)
        cache.add(2, 20, cleanup=cleanup_func)
        cache.add(2, 25, cleanup=cleanup_func)

        self.assertEqual([(2, 20)], cleanup_called)
        self.assertEqual(25, cache[2])

        # Replacing via __setitem__ should also call cleanup() for the old value
        cache[2] = 26
        self.assertEqual([(2, 20), (2, 25)], cleanup_called)

    def test_len(self) -> None:
        cache = lru_cache.LRUCache(max_cache=10, after_cleanup_count=10)

        cache[1] = 10
        cache[2] = 20
        cache[3] = 30
        cache[4] = 40

        self.assertEqual(4, len(cache))

        cache[5] = 50
        cache[6] = 60
        cache[7] = 70
        cache[8] = 80

        self.assertEqual(8, len(cache))

        cache[1] = 15  # replacement

        self.assertEqual(8, len(cache))

        cache[9] = 90
        cache[10] = 100
        cache[11] = 110

        # We hit the max
        self.assertEqual(10, len(cache))
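        # Recency order is most recently used first; key 1 sits right after 9
        # because replacing its value above also counted as a use.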
        self.assertEqual(
            [11, 10, 9, 1, 8, 7, 6, 5, 4, 3],
            [n.key for n in cache._walk_lru()],
        )

    def test_cleanup_shrinks_to_after_clean_count(self) -> None:
        cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=3)

        cache.add(1, 10)
        cache.add(2, 20)
        cache.add(3, 25)
        cache.add(4, 30)
        cache.add(5, 35)

        self.assertEqual(5, len(cache))

        # This will bump us over the max, which causes us to shrink down to
        # after_cleanup_count size
        cache.add(6, 40)
        self.assertEqual(3, len(cache))

    def test_after_cleanup_larger_than_max(self) -> None:
        cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=10)
        self.assertEqual(5, cache._after_cleanup_count)

    def test_after_cleanup_none(self) -> None:
        cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=None)
        # By default _after_cleanup_count is 80% of the max cache size
        self.assertEqual(4, cache._after_cleanup_count)

    def test_cleanup_2(self) -> None:
        cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=2)

        # Add these in order
        cache.add(1, 10)
        cache.add(2, 20)
        cache.add(3, 25)
        cache.add(4, 30)
        cache.add(5, 35)

        self.assertEqual(5, len(cache))

        # Force a compaction
        cache.cleanup()
        self.assertEqual(2, len(cache))

    def test_preserve_last_access_order(self) -> None:
        cache = lru_cache.LRUCache(max_cache=5)

        # Add these in order
        cache.add(1, 10)
        cache.add(2, 20)
        cache.add(3, 25)
        cache.add(4, 30)
        cache.add(5, 35)

        self.assertEqual([5, 4, 3, 2, 1], [n.key for n in cache._walk_lru()])

        # Now access some of them out of insertion order
        cache[2]
        cache[5]
        cache[3]
        cache[2]

        self.assertEqual([2, 3, 5, 4, 1], [n.key for n in cache._walk_lru()])

    def test_get(self) -> None:
        cache = lru_cache.LRUCache(max_cache=5)

        cache.add(1, 10)
        cache.add(2, 20)
        self.assertEqual(20, cache.get(2))
        self.assertEqual(None, cache.get(3))
        obj = object()
        self.assertIs(obj, cache.get(3, obj))
        self.assertEqual([2, 1], [n.key for n in cache._walk_lru()])
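        # A successful get() also counts as a use and bumps the entry to
        # most recently used.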
        self.assertEqual(10, cache.get(1))
        self.assertEqual([1, 2], [n.key for n in cache._walk_lru()])

    def test_keys(self) -> None:
        cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=5)

        cache[1] = 2
        cache[2] = 3
        cache[3] = 4
        self.assertEqual([1, 2, 3], sorted(cache.keys()))

        cache[4] = 5
        cache[5] = 6
        cache[6] = 7
        self.assertEqual([2, 3, 4, 5, 6], sorted(cache.keys()))

    def test_resize_smaller(self) -> None:
        cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=4)

        cache[1] = 2
        cache[2] = 3
        cache[3] = 4
        cache[4] = 5
        cache[5] = 6
        self.assertEqual([1, 2, 3, 4, 5], sorted(cache.keys()))

        cache[6] = 7
        self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))

        # Now resize to something smaller, which triggers a cleanup
        cache.resize(max_cache=3, after_cleanup_count=2)
        self.assertEqual([5, 6], sorted(cache.keys()))

        # Adding something will use the new size
        cache[7] = 8
        self.assertEqual([5, 6, 7], sorted(cache.keys()))
        cache[8] = 9
        self.assertEqual([7, 8], sorted(cache.keys()))

    def test_resize_larger(self) -> None:
        cache = lru_cache.LRUCache(max_cache=5, after_cleanup_count=4)

        cache[1] = 2
        cache[2] = 3
        cache[3] = 4
        cache[4] = 5
        cache[5] = 6
        self.assertEqual([1, 2, 3, 4, 5], sorted(cache.keys()))

        cache[6] = 7
        self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))

        cache.resize(max_cache=8, after_cleanup_count=6)
        self.assertEqual([3, 4, 5, 6], sorted(cache.keys()))

        cache[7] = 8
        cache[8] = 9
        cache[9] = 10
        cache[10] = 11
        self.assertEqual([3, 4, 5, 6, 7, 8, 9, 10], sorted(cache.keys()))

        cache[11] = 12  # triggers cleanup back to new after_cleanup_count
        self.assertEqual([6, 7, 8, 9, 10, 11], sorted(cache.keys()))


class TestLRUSizeCache(TestCase):
    def test_basic_init(self) -> None:
        cache = lru_cache.LRUSizeCache()
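        # The 2048 here appears to come from the default max_size of 1MiB
        # divided by a per-entry size estimate of 512 bytes.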
        self.assertEqual(2048, cache._max_cache)
        self.assertEqual(int(cache._max_size * 0.8), cache._after_cleanup_size)
        self.assertEqual(0, cache._value_size)

    def test_add__null_key(self) -> None:
        cache = lru_cache.LRUSizeCache()
        self.assertRaises(ValueError, cache.add, lru_cache._null_key, 1)

    def test_add_tracks_size(self) -> None:
        cache = lru_cache.LRUSizeCache()
        self.assertEqual(0, cache._value_size)
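        # len("my value text") == 13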
- cache.add("my key", "my value text")
- self.assertEqual(13, cache._value_size)

    def test_remove_tracks_size(self) -> None:
        cache = lru_cache.LRUSizeCache()
        self.assertEqual(0, cache._value_size)

        cache.add("my key", "my value text")
        self.assertEqual(13, cache._value_size)

        node = cache._cache["my key"]
        cache._remove_node(node)
        self.assertEqual(0, cache._value_size)

    def test_no_add_over_size(self) -> None:
        """Adding a large value may not be cached at all."""
        cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=5)
        self.assertEqual(0, cache._value_size)
        self.assertEqual({}, cache.items())

        cache.add("test", "key")
        self.assertEqual(3, cache._value_size)
        self.assertEqual({"test": "key"}, cache.items())

        cache.add("test2", "key that is too big")
        self.assertEqual(3, cache._value_size)
        self.assertEqual({"test": "key"}, cache.items())

        # If adding a value would only trigger a cleanup that evicts every
        # entry, including the new one, then the value should not be stored
        cache.add("test3", "bigkey")
        self.assertEqual(3, cache._value_size)
        self.assertEqual({"test": "key"}, cache.items())
- cache.add("test4", "bikey")
- self.assertEqual(3, cache._value_size)
- self.assertEqual({"test": "key"}, cache.items())

    def test_no_add_over_size_cleanup(self) -> None:
        """If a large value is not cached, we will call cleanup right away."""
        cleanup_calls = []

        def cleanup(key, value) -> None:
            cleanup_calls.append((key, value))

        cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=5)
        self.assertEqual(0, cache._value_size)
        self.assertEqual({}, cache.items())

        cache.add("test", "key that is too big", cleanup=cleanup)
        # The key was not added
        self.assertEqual(0, cache._value_size)
        self.assertEqual({}, cache.items())
        # and cleanup was called
        self.assertEqual([("test", "key that is too big")], cleanup_calls)

    def test_adding_clears_cache_based_on_size(self) -> None:
        """The cache is cleared in LRU order until small enough."""
        cache = lru_cache.LRUSizeCache(max_size=20)

        cache.add("key1", "value")  # 5 chars
        cache.add("key2", "value2")  # 6 chars
        cache.add("key3", "value23")  # 7 chars
        self.assertEqual(5 + 6 + 7, cache._value_size)

        cache["key2"]  # reference key2 so it gets a newer reference time

        cache.add("key4", "value234")  # 8 chars, over limit

        # We have to remove 2 keys to get back under limit:
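        # 18 + 8 == 26 > max_size, so entries are evicted in LRU order
        # (key1, then key3) until the total is back under the default
        # after_cleanup_size of 16 (80% of max_size).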
        self.assertEqual(6 + 8, cache._value_size)
        self.assertEqual({"key2": "value2", "key4": "value234"}, cache.items())

    def test_adding_clears_to_after_cleanup_size(self) -> None:
        cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10)

        cache.add("key1", "value")  # 5 chars
        cache.add("key2", "value2")  # 6 chars
        cache.add("key3", "value23")  # 7 chars
        self.assertEqual(5 + 6 + 7, cache._value_size)

        cache["key2"]  # reference key2 so it gets a newer reference time

        cache.add("key4", "value234")  # 8 chars, over limit

        # We have to remove 3 keys to get back under limit
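        # Here even the bumped key2 is evicted: only dropping key1, key3,
        # and key2 brings the total (8) under after_cleanup_size (10).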
        self.assertEqual(8, cache._value_size)
        self.assertEqual({"key4": "value234"}, cache.items())

    def test_custom_sizes(self) -> None:
        def size_of_list(lst):
            return sum(len(x) for x in lst)

        cache = lru_cache.LRUSizeCache(
            max_size=20, after_cleanup_size=10, compute_size=size_of_list
        )

        cache.add("key1", ["val", "ue"])  # 5 chars
        cache.add("key2", ["val", "ue2"])  # 6 chars
        cache.add("key3", ["val", "ue23"])  # 7 chars
        self.assertEqual(5 + 6 + 7, cache._value_size)

        cache["key2"]  # reference key2 so it gets a newer reference time

        cache.add("key4", ["value", "234"])  # 8 chars, over limit

        # We have to remove 3 keys to get back under limit
        self.assertEqual(8, cache._value_size)
        self.assertEqual({"key4": ["value", "234"]}, cache.items())

    def test_cleanup(self) -> None:
        cache = lru_cache.LRUSizeCache(max_size=20, after_cleanup_size=10)

        # Add these in order
        cache.add("key1", "value")  # 5 chars
        cache.add("key2", "value2")  # 6 chars
        cache.add("key3", "value23")  # 7 chars
        self.assertEqual(5 + 6 + 7, cache._value_size)

        cache.cleanup()
        # Only the most recent fits after cleaning up
        self.assertEqual(7, cache._value_size)

    def test_keys(self) -> None:
        cache = lru_cache.LRUSizeCache(max_size=10)

        cache[1] = "a"
        cache[2] = "b"
        cache[3] = "cdef"
        self.assertEqual([1, 2, 3], sorted(cache.keys()))

    def test_resize_smaller(self) -> None:
        cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=9)

        cache[1] = "abc"
        cache[2] = "def"
        cache[3] = "ghi"
        cache[4] = "jkl"
        # Triggers a cleanup
        self.assertEqual([2, 3, 4], sorted(cache.keys()))

        # Resize should also cleanup again
        cache.resize(max_size=6, after_cleanup_size=4)
        self.assertEqual([4], sorted(cache.keys()))

        # Adding should use the new max size
        cache[5] = "mno"
        self.assertEqual([4, 5], sorted(cache.keys()))
        cache[6] = "pqr"
        self.assertEqual([6], sorted(cache.keys()))

    def test_resize_larger(self) -> None:
        cache = lru_cache.LRUSizeCache(max_size=10, after_cleanup_size=9)

        cache[1] = "abc"
        cache[2] = "def"
        cache[3] = "ghi"
        cache[4] = "jkl"
        # Triggers a cleanup
        self.assertEqual([2, 3, 4], sorted(cache.keys()))

        cache.resize(max_size=15, after_cleanup_size=12)
        self.assertEqual([2, 3, 4], sorted(cache.keys()))

        cache[5] = "mno"
        cache[6] = "pqr"
        self.assertEqual([2, 3, 4, 5, 6], sorted(cache.keys()))

        cache[7] = "stu"
        self.assertEqual([4, 5, 6, 7], sorted(cache.keys()))
|