# test_sparse_patterns.py -- Sparse checkout (full and cone mode) pattern handling
# Copyright (C) 2013 Jelmer Vernooij <jelmer@jelmer.uk>
#
# SPDX-License-Identifier: Apache-2.0 OR GPL-2.0-or-later
# Dulwich is dual-licensed under the Apache License, Version 2.0 and the GNU
# General Public License as published by the Free Software Foundation; version 2.0
# or (at your option) any later version. You can redistribute it and/or
# modify it under the terms of either of these two licenses.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# You should have received a copy of the licenses; if not, see
# <http://www.gnu.org/licenses/> for a copy of the GNU General Public License
# and <http://www.apache.org/licenses/LICENSE-2.0> for a copy of the Apache
# License, Version 2.0.
#

"""Tests for dulwich.sparse_patterns."""

import os
import shutil
import sys
import tempfile
import time

from dulwich.index import IndexEntry
from dulwich.objects import Blob
from dulwich.repo import Repo
from dulwich.sparse_patterns import (
    BlobNotFoundError,
    SparseCheckoutConflictError,
    apply_included_paths,
    compute_included_paths_cone,
    compute_included_paths_full,
    determine_included_paths,
    match_sparse_patterns,
    parse_sparse_patterns,
)

from . import TestCase


class ParseSparsePatternsTests(TestCase):
    """Test parse_sparse_patterns function."""

    def test_empty_and_comment_lines(self):
        lines = [
            "",
            "# comment here",
            " ",
            "# another comment",
        ]
        parsed = parse_sparse_patterns(lines)
        self.assertEqual(parsed, [])

    def test_sparse_pattern_combos(self):
        lines = [
            "*.py",  # Python files anywhere
            "!*.md",  # no markdown files anywhere
            "/docs/",  # root docs dir
            "!/docs/images/",  # no root docs/images subdir
            "src/",  # src dir anywhere
            "/*.toml",  # root TOML files
            "!/*.bak",  # no root backup files
            "!data/",  # no data dirs anywhere
        ]
        parsed = parse_sparse_patterns(lines)
        self.assertEqual(len(parsed), 8)
        # Each entry is a 4-tuple: (pattern, negation, dir_only, anchored);
        # the trailing shorthand marks N=negated, D=dir-only, A=anchored.
        self.assertEqual(parsed[0], ("*.py", False, False, False))  # _,_,_
        self.assertEqual(parsed[1], ("*.md", True, False, False))  # N,_,_
        self.assertEqual(parsed[2], ("docs", False, True, True))  # _,D,A
        self.assertEqual(parsed[3], ("docs/images", True, True, True))  # N,D,A
        self.assertEqual(parsed[4], ("src", False, True, False))  # _,D,_
        self.assertEqual(parsed[5], ("*.toml", False, False, True))  # _,_,A
        self.assertEqual(parsed[6], ("*.bak", True, False, True))  # N,_,A
        self.assertEqual(parsed[7], ("data", True, True, False))  # N,D,_
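

# For orientation (not part of the original test suite): a hand-worked example
# of the 4-tuple layout asserted above. The "!/build/" pattern is hypothetical;
# the flag meanings are exactly the ones test_sparse_pattern_combos exercises.
#
#   parse_sparse_patterns(["!/build/"])
#   => [("build", True, True, True)]  # negated, dir-only, anchored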


class MatchSparsePatternsTests(TestCase):
    """Test the match_sparse_patterns function."""

    def test_no_patterns_returns_excluded(self):
        """If no patterns are provided, by default we treat the path as excluded."""
        self.assertFalse(match_sparse_patterns("anyfile.py", []))

    def test_last_match_wins(self):
        """Checks that the last pattern to match determines included vs excluded."""
        parsed = parse_sparse_patterns(
            [
                "*.py",  # include
                "!foo.py",  # exclude
            ]
        )
        # "foo.py" matches the first pattern => included,
        # then matches the second, negated pattern => excluded.
        self.assertFalse(match_sparse_patterns("foo.py", parsed))

    def test_dir_only(self):
        """A pattern with a trailing slash should only match directories and subdirectories."""
        parsed = parse_sparse_patterns(["docs/"])
        # A file under the docs/ directory matches even with path_is_dir=False.
        self.assertTrue(
            match_sparse_patterns("docs/readme.md", parsed, path_is_dir=False)
        )
        self.assertTrue(match_sparse_patterns("docs", parsed, path_is_dir=True))
        # Even if the path name is "docs", if it's a file, it won't match:
        self.assertFalse(match_sparse_patterns("docs", parsed, path_is_dir=False))

    def test_anchored(self):
        """Anchored patterns match from the start of the path only."""
        parsed = parse_sparse_patterns(["/foo"])
        self.assertTrue(match_sparse_patterns("foo", parsed))
        # "some/foo" doesn't match because an anchored pattern must match
        # from the start of the path.
        self.assertFalse(match_sparse_patterns("some/foo", parsed))

    def test_unanchored_uses_fnmatch(self):
        parsed = parse_sparse_patterns(["foo"])
        self.assertTrue(match_sparse_patterns("some/foo", parsed))
        self.assertFalse(match_sparse_patterns("some/bar", parsed))

    def test_anchored_empty_pattern(self):
        """Test handling of an empty pattern with anchoring (e.g. '/')."""
        parsed = parse_sparse_patterns(["/"])
        # Check the structure of the parsed empty pattern first.
        self.assertEqual(parsed, [("", False, False, True)])
        # An empty anchored pattern is skipped for non-empty paths; an empty
        # path, however, compares equal to it (an implementation detail).
        self.assertFalse(match_sparse_patterns("foo", parsed))
        self.assertTrue(match_sparse_patterns("", parsed))

    def test_anchored_dir_only_exact_match(self):
        """Test anchored directory-only patterns with exact matching."""
        parsed = parse_sparse_patterns(["/docs/"])
        # Exact match "docs" with path_is_dir=True.
        self.assertTrue(match_sparse_patterns("docs", parsed, path_is_dir=True))
        # "docs/" (exact match plus trailing slash).
        self.assertTrue(match_sparse_patterns("docs/", parsed, path_is_dir=True))

    def test_complex_anchored_patterns(self):
        """Test more complex anchored pattern matching."""
        parsed = parse_sparse_patterns(["/dir/subdir"])
        # Exact match.
        self.assertTrue(match_sparse_patterns("dir/subdir", parsed))
        # A path below the matched subdirectory.
        self.assertTrue(match_sparse_patterns("dir/subdir/file.txt", parsed))
        # A non-matching path.
        self.assertFalse(match_sparse_patterns("otherdir/subdir", parsed))

    def test_pattern_matching_edge_cases(self):
        """Test various edge cases in pattern matching."""
        # Exact equality with an anchored pattern.
        parsed = parse_sparse_patterns(["/foo"])
        self.assertTrue(match_sparse_patterns("foo", parsed))
        # The same path treated as a directory.
        self.assertTrue(match_sparse_patterns("foo", parsed, path_is_dir=True))
        # Exact match against a dir_only pattern.
        parsed = parse_sparse_patterns(["/bar/"])
        self.assertTrue(match_sparse_patterns("bar", parsed, path_is_dir=True))
        # Prefix (startswith) match for an anchored pattern.
        parsed = parse_sparse_patterns(["/prefix"])
        self.assertTrue(match_sparse_patterns("prefix/subdirectory/file.txt", parsed))


class ComputeIncludedPathsFullTests(TestCase):
    """Test compute_included_paths_full using a real ephemeral repo index."""

    def setUp(self):
        super().setUp()
        self.temp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.temp_dir)
        self.repo = Repo.init(self.temp_dir)

    def _add_file_to_index(self, relpath, content=b"test"):
        full = os.path.join(self.temp_dir, relpath)
        os.makedirs(os.path.dirname(full), exist_ok=True)
        with open(full, "wb") as f:
            f.write(content)
        # Stage in the index.
        self.repo.get_worktree().stage([relpath])

    def test_basic_inclusion_exclusion(self):
        """Given patterns, check the correct set of included paths."""
        self._add_file_to_index("foo.py", b"print(1)")
        self._add_file_to_index("bar.md", b"markdown")
        self._add_file_to_index("docs/readme", b"# docs")
        lines = [
            "*.py",  # include all .py files
            "!bar.*",  # exclude bar.md
            "docs/",  # include the docs dir
        ]
        included = compute_included_paths_full(self.repo.open_index(), lines)
        self.assertEqual(included, {"foo.py", "docs/readme"})

    def test_full_with_utf8_paths(self):
        """Test that UTF-8 encoded paths are handled correctly."""
        self._add_file_to_index("unicode/文件.txt", b"unicode content")
        self._add_file_to_index("unicode/другой.md", b"more unicode")
        # Include only .txt files.
        lines = ["*.txt"]
        included = compute_included_paths_full(self.repo.open_index(), lines)
        self.assertEqual(included, {"unicode/文件.txt"})


class ComputeIncludedPathsConeTests(TestCase):
    """Test compute_included_paths_cone with an ephemeral repo to see included vs excluded paths."""

    def setUp(self):
        super().setUp()
        self.temp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.temp_dir)
        self.repo = Repo.init(self.temp_dir)

    def _add_file_to_index(self, relpath, content=b"test"):
        full = os.path.join(self.temp_dir, relpath)
        os.makedirs(os.path.dirname(full), exist_ok=True)
        with open(full, "wb") as f:
            f.write(content)
        self.repo.get_worktree().stage([relpath])

    def test_cone_mode_patterns(self):
        """Simpler pattern handling in cone mode.

        Lines in 'cone' style typically look like:
          - /*      -> include top-level files
          - !/*/    -> exclude all subdirectories
          - /docs/  -> re-include the 'docs' directory
        """
        self._add_file_to_index("topfile", b"hi")
        self._add_file_to_index("docs/readme.md", b"stuff")
        self._add_file_to_index("lib/code.py", b"stuff")
        lines = [
            "/*",
            "!/*/",
            "/docs/",
        ]
        included = compute_included_paths_cone(self.repo.open_index(), lines)
        # Top level => includes 'topfile'; subdirs => excluded, except docs/.
        self.assertEqual(included, {"topfile", "docs/readme.md"})

    def test_cone_mode_with_empty_pattern(self):
        """Test cone mode with an empty re-include directory."""
        self._add_file_to_index("topfile", b"hi")
        self._add_file_to_index("docs/readme.md", b"stuff")
        lines = [
            "/*",
            "!/*/",
            "/",  # empty pattern, should be skipped
        ]
        included = compute_included_paths_cone(self.repo.open_index(), lines)
        # Only topfile is included, since the empty pattern is skipped.
        self.assertEqual(included, {"topfile"})

    def test_no_exclude_subdirs(self):
        """If lines never specify '!/*/', we include everything by default."""
        self._add_file_to_index("topfile", b"hi")
        self._add_file_to_index("docs/readme.md", b"stuff")
        self._add_file_to_index("lib/code.py", b"stuff")
        lines = [
            "/*",  # top-level files
            "/docs/",  # re-include docs
        ]
        included = compute_included_paths_cone(self.repo.open_index(), lines)
        # Because '!/*/' was never given, everything is included:
        self.assertEqual(
            included,
            {"topfile", "docs/readme.md", "lib/code.py"},
        )

    def test_only_reinclude_dirs(self):
        """Test cone mode when only re-include directories are specified."""
        self._add_file_to_index("topfile", b"hi")
        self._add_file_to_index("docs/readme.md", b"stuff")
        self._add_file_to_index("lib/code.py", b"stuff")
        # Exclude all subdirs, then re-include docs; no top-level include.
        lines = ["!/*/", "/docs/"]
        included = compute_included_paths_cone(self.repo.open_index(), lines)
        # Only docs/* is included, not topfile or lib/*.
        self.assertEqual(included, {"docs/readme.md"})

    def test_exclude_subdirs_no_toplevel(self):
        """Test with exclude_subdirs but without top-level files."""
        self._add_file_to_index("topfile", b"hi")
        self._add_file_to_index("docs/readme.md", b"stuff")
        self._add_file_to_index("lib/code.py", b"stuff")
        # Exclude subdirs and re-include docs only.
        lines = ["!/*/", "/docs/"]
        included = compute_included_paths_cone(self.repo.open_index(), lines)
        # Only docs/* is included since we didn't include the top level.
        self.assertEqual(included, {"docs/readme.md"})
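

# A minimal sketch (not a test) of composing cone-mode lines for a set of
# directories, mirroring the "/*", "!/*/", "/<dir>/" layout exercised above.
# The helper name is illustrative, not dulwich API.
def _example_cone_lines(dirs):  # pragma: no cover
    lines = ["/*", "!/*/"]  # top-level files in, all subdirectories out
    lines += [f"/{d}/" for d in dirs]  # re-include each named directory
    return lines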


class DetermineIncludedPathsTests(TestCase):
    """Test the top-level determine_included_paths function."""

    def setUp(self):
        super().setUp()
        self.temp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.temp_dir)
        self.repo = Repo.init(self.temp_dir)

    def _add_file_to_index(self, relpath):
        path = os.path.join(self.temp_dir, relpath)
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, "wb") as f:
            f.write(b"data")
        self.repo.get_worktree().stage([relpath])

    def test_full_mode(self):
        self._add_file_to_index("foo.py")
        self._add_file_to_index("bar.md")
        lines = ["*.py", "!bar.*"]
        index = self.repo.open_index()
        included = determine_included_paths(index, lines, cone=False)
        self.assertEqual(included, {"foo.py"})

    def test_cone_mode(self):
        self._add_file_to_index("topfile")
        self._add_file_to_index("subdir/anotherfile")
        lines = ["/*", "!/*/"]
        index = self.repo.open_index()
        included = determine_included_paths(index, lines, cone=True)
        self.assertEqual(included, {"topfile"})
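

# A rough end-to-end sketch (not a test) of how determine_included_paths and
# apply_included_paths fit together. The sparse-checkout file path below is an
# assumption borrowed from C Git's conventional .git/info/sparse-checkout
# location; dulwich itself may resolve it differently.
def _example_sparse_checkout_flow(repo_path, cone=False):  # pragma: no cover
    repo = Repo(repo_path)
    # Assumed location of the pattern file, one pattern per line.
    sparse_file = os.path.join(repo_path, ".git", "info", "sparse-checkout")
    with open(sparse_file) as f:
        lines = f.read().splitlines()
    included = determine_included_paths(repo.open_index(), lines, cone=cone)
    apply_included_paths(repo, included_paths=included, force=False)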


class ApplyIncludedPathsTests(TestCase):
    """Integration tests for apply_included_paths, verifying skip-worktree bits and file removal."""

    def setUp(self):
        super().setUp()
        self.temp_dir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, self.temp_dir)
        self.repo = Repo.init(self.temp_dir)
        # For testing local_modifications_exist logic, we'll need the normalizer
        # plus some real content in the object store.

    def _commit_blob(self, relpath, content=b"hello"):
        """Create a blob object in object_store and stage an index entry for it."""
        full = os.path.join(self.temp_dir, relpath)
        os.makedirs(os.path.dirname(full), exist_ok=True)
        with open(full, "wb") as f:
            f.write(content)
        self.repo.get_worktree().stage([relpath])
        # Actually commit so the object is in the store.
        self.repo.get_worktree().commit(
            message=b"Commit " + relpath.encode(),
        )

    def test_set_skip_worktree_bits(self):
        """If a path is not in included_paths, its skip_worktree bit is set."""
        self._commit_blob("keep.py", b"print('keep')")
        self._commit_blob("exclude.md", b"# exclude")
        included = {"keep.py"}
        apply_included_paths(self.repo, included_paths=included, force=False)
        idx = self.repo.open_index()
        self.assertIn(b"keep.py", idx)
        self.assertFalse(idx[b"keep.py"].skip_worktree)
        self.assertIn(b"exclude.md", idx)
        self.assertTrue(idx[b"exclude.md"].skip_worktree)
        # Also check that the exclude.md file was removed from the working tree.
        exclude_path = os.path.join(self.temp_dir, "exclude.md")
        self.assertFalse(os.path.exists(exclude_path))

    def test_conflict_with_local_modifications_no_force(self):
        """If local modifications exist for an excluded path, raise SparseCheckoutConflictError."""
        self._commit_blob("foo.txt", b"original")
        # Modify foo.txt on disk.
        with open(os.path.join(self.temp_dir, "foo.txt"), "ab") as f:
            f.write(b" local changes")
        with self.assertRaises(SparseCheckoutConflictError):
            apply_included_paths(self.repo, included_paths=set(), force=False)

    def test_conflict_with_local_modifications_forced_removal(self):
        """With force=True, we remove local modifications and skip_worktree the file."""
        self._commit_blob("foo.txt", b"original")
        with open(os.path.join(self.temp_dir, "foo.txt"), "ab") as f:
            f.write(b" local changes")
        # This time, pass force=True => the file is removed.
        apply_included_paths(self.repo, included_paths=set(), force=True)
        # Check skip-worktree in the index.
        idx = self.repo.open_index()
        self.assertTrue(idx[b"foo.txt"].skip_worktree)
        # The working tree file is removed.
        self.assertFalse(os.path.exists(os.path.join(self.temp_dir, "foo.txt")))

    def test_materialize_included_file_if_missing(self):
        """If a path is included but missing from disk, we restore it from the blob in the store."""
        self._commit_blob("restored.txt", b"some content")
        # Manually remove the file from the working tree.
        os.remove(os.path.join(self.temp_dir, "restored.txt"))
        apply_included_paths(self.repo, included_paths={"restored.txt"}, force=False)
        # Should have re-created "restored.txt" from the blob.
        self.assertTrue(os.path.exists(os.path.join(self.temp_dir, "restored.txt")))
        with open(os.path.join(self.temp_dir, "restored.txt"), "rb") as f:
            self.assertEqual(f.read(), b"some content")

    def test_blob_not_found_raises(self):
        """If the object store is missing the blob for an included path, raise BlobNotFoundError."""
        # Create an entry in the index that references a nonexistent sha.
        idx = self.repo.open_index()
        fake_sha = b"ab" * 20
        e = IndexEntry(
            ctime=(int(time.time()), 0),  # (seconds, nanoseconds)
            mtime=(int(time.time()), 0),
            dev=0,
            ino=0,
            mode=0o100644,
            uid=0,
            gid=0,
            size=0,
            sha=fake_sha,
            flags=0,
            extended_flags=0,
        )
        e.set_skip_worktree(False)
        idx[b"missing_file"] = e
        idx.write()
        with self.assertRaises(BlobNotFoundError):
            apply_included_paths(
                self.repo, included_paths={"missing_file"}, force=False
            )

    def test_directory_removal(self):
        """Test handling of directories when removing excluded files."""
        # Create a directory with a file.
        dir_path = os.path.join(self.temp_dir, "dir")
        os.makedirs(dir_path, exist_ok=True)
        self._commit_blob("dir/file.txt", b"content")
        # Make sure it exists before we proceed.
        self.assertTrue(os.path.exists(os.path.join(dir_path, "file.txt")))
        # Exclude everything.
        apply_included_paths(self.repo, included_paths=set(), force=True)
        # The file should be removed, but the directory may remain.
        self.assertFalse(os.path.exists(os.path.join(dir_path, "file.txt")))
        # Now arrange for an indexed file path to actually be a directory on
        # disk, which should hit the IsADirectoryError code path.
        another_dir_path = os.path.join(self.temp_dir, "another_dir")
        os.makedirs(another_dir_path, exist_ok=True)
        self._commit_blob("another_dir/subfile.txt", b"content")
        # Replace the file with a same-named directory to trigger IsADirectoryError.
        subfile_dir_path = os.path.join(another_dir_path, "subfile.txt")
        if os.path.exists(subfile_dir_path):
            # Remove any existing file first.
            os.remove(subfile_dir_path)
        os.makedirs(subfile_dir_path, exist_ok=True)
        # Applying the sparse checkout should swallow IsADirectoryError rather than fail.
        apply_included_paths(self.repo, included_paths=set(), force=True)

    def test_handling_removed_files(self):
        """Test that files already removed from disk are handled correctly during exclusion."""
        self._commit_blob("test_file.txt", b"test content")
        # Remove the file manually.
        os.remove(os.path.join(self.temp_dir, "test_file.txt"))
        # Excluding this file should not raise any errors.
        apply_included_paths(self.repo, included_paths=set(), force=True)
        # Verify the skip-worktree bit is set in the index.
        idx = self.repo.open_index()
        self.assertTrue(idx[b"test_file.txt"].skip_worktree)

    def test_local_modifications_ioerror(self):
        """Test handling of PermissionError/OSError when checking for local modifications."""
        self._commit_blob("special_file.txt", b"content")
        file_path = os.path.join(self.temp_dir, "special_file.txt")
        # On Windows, chmod(path, 0) doesn't make files unreadable the same
        # way, so skip this test there: the permission model is different.
        if sys.platform == "win32":
            self.skipTest("File permissions work differently on Windows")
        # Make the file unreadable on Unix-like systems.
        os.chmod(file_path, 0)

        # Restore permissions on cleanup, checking that the file still exists first.
        def safe_chmod_cleanup():
            if os.path.exists(file_path):
                try:
                    os.chmod(file_path, 0o644)
                except (FileNotFoundError, PermissionError):
                    pass

        self.addCleanup(safe_chmod_cleanup)
        # With an unreadable file and force=False, expect PermissionError/OSError.
        with self.assertRaises((PermissionError, OSError)):
            apply_included_paths(self.repo, included_paths=set(), force=False)
        # With force=True, the file should be removed anyway.
        apply_included_paths(self.repo, included_paths=set(), force=True)
        # Verify the file is gone and the skip-worktree bit is set.
        self.assertFalse(os.path.exists(file_path))
        idx = self.repo.open_index()
        self.assertTrue(idx[b"special_file.txt"].skip_worktree)

    def test_checkout_normalization_applied(self):
        """Test that checkout normalization is applied when materializing files during sparse checkout."""

        # A simple filter that converts content to uppercase on checkout.
        class UppercaseFilter:
            def smudge(self, input_bytes, path=b""):
                return input_bytes.upper()

            def clean(self, input_bytes):
                return input_bytes.lower()

            def cleanup(self):
                pass

            def reuse(self, config, filter_name):
                return False

        # Create a .gitattributes file mapping *.txt to the filter.
        gitattributes_path = os.path.join(self.temp_dir, ".gitattributes")
        with open(gitattributes_path, "w") as f:
            f.write("*.txt filter=uppercase\n")
        # Add and commit .gitattributes.
        self.repo.get_worktree().stage([b".gitattributes"])
        self.repo.get_worktree().commit(
            b"Add gitattributes", committer=b"Test <test@example.com>"
        )
        # Initialize the filter context, then register the filter with it.
        _ = self.repo.get_blob_normalizer()
        uppercase_filter = UppercaseFilter()
        self.repo.filter_context.filter_registry.register_driver(
            "uppercase", uppercase_filter
        )
        # Commit a file with lowercase content.
        self._commit_blob("test.txt", b"hello world")
        # Remove the file from the working tree to force materialization.
        os.remove(os.path.join(self.temp_dir, "test.txt"))
        # Apply sparse checkout; this calls get_blob_normalizer() internally,
        # which uses the cached filter_context with our registered filter.
        apply_included_paths(self.repo, included_paths={"test.txt"}, force=False)
        # Verify the file was materialized with uppercase content
        # (checkout normalization applied).
        with open(os.path.join(self.temp_dir, "test.txt"), "rb") as f:
            content = f.read()
        self.assertEqual(content, b"HELLO WORLD")
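
    # Note (added for orientation): "smudge" rewrites blob content on checkout
    # and "clean" reverses it on checkin, following Git's filter-driver
    # terminology; only the smudge direction is exercised by the
    # materialization path tested above.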

    def test_checkout_normalization_with_lf_to_crlf(self):
        """Test that line-ending normalization is applied during sparse checkout."""
        # Commit a file with LF line endings.
        self._commit_blob("unix_file.txt", b"line1\nline2\nline3\n")
        # Remove the file from the working tree.
        os.remove(os.path.join(self.temp_dir, "unix_file.txt"))

        # A normalizer that converts LF to CRLF on checkout.
        class CRLFNormalizer:
            def checkin_normalize(self, data, path):
                # For checkin, return unchanged.
                return data

            def checkout_normalize(self, blob, path):
                if isinstance(blob, Blob):
                    # Convert LF to CRLF.
                    new_blob = Blob()
                    new_blob.data = blob.data.replace(b"\n", b"\r\n")
                    return new_blob
                return blob

        # Monkey-patch the repo to use our normalizer.
        original_get_blob_normalizer = self.repo.get_blob_normalizer
        self.repo.get_blob_normalizer = lambda: CRLFNormalizer()
        # Apply sparse checkout.
        apply_included_paths(self.repo, included_paths={"unix_file.txt"}, force=False)
        # Verify the file was materialized with CRLF line endings.
        with open(os.path.join(self.temp_dir, "unix_file.txt"), "rb") as f:
            content = f.read()
        self.assertEqual(content, b"line1\r\nline2\r\nline3\r\n")
        # Restore the original method.
        self.repo.get_blob_normalizer = original_get_blob_normalizer

    def test_checkout_normalization_not_applied_without_normalizer(self):
        """Test that when the normalizer returns the original blob, no transformation occurs."""
        # Commit a file with specific content.
        original_content = b"original content\nwith newlines\n"
        self._commit_blob("no_norm.txt", original_content)
        # Remove the file from the working tree.
        os.remove(os.path.join(self.temp_dir, "no_norm.txt"))

        # A normalizer that returns the blob unchanged.
        class NoOpNormalizer:
            def checkin_normalize(self, data, path):
                return data

            def checkout_normalize(self, blob, path):
                # Return the blob unchanged.
                return blob

        # Monkey-patch the repo to use our no-op normalizer.
        original_get_blob_normalizer = self.repo.get_blob_normalizer
        self.repo.get_blob_normalizer = lambda: NoOpNormalizer()
        # Apply sparse checkout.
        apply_included_paths(self.repo, included_paths={"no_norm.txt"}, force=False)
        # Verify the file was materialized with its original content (no normalization).
        with open(os.path.join(self.temp_dir, "no_norm.txt"), "rb") as f:
            content = f.read()
        self.assertEqual(content, original_content)
        # Restore the original method.
        self.repo.get_blob_normalizer = original_get_blob_normalizer