/*
 * Copyright (C) 2011, Google Inc. and others
 *
 * This program and the accompanying materials are made available under the
 * terms of the Eclipse Distribution License v. 1.0 which is available at
 * https://www.eclipse.org/org/documents/edl-v10.php.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

package org.eclipse.jgit.internal.storage.dfs;

import static java.util.stream.Collectors.joining;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReference;

import org.eclipse.jgit.internal.storage.pack.PackExt;
import org.eclipse.jgit.lib.AnyObjectId;
import org.eclipse.jgit.lib.ObjectDatabase;
import org.eclipse.jgit.lib.ObjectInserter;
import org.eclipse.jgit.lib.ObjectReader;

/**
 * Manages objects stored in
 * {@link org.eclipse.jgit.internal.storage.dfs.DfsPackFile} on a storage
 * system.
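 * <p>
 * A minimal usage sketch; {@code InMemoryRepository} is the in-memory DFS
 * implementation shipped with JGit, and the repository name is illustrative:
 *
 * <pre>
 * {@code
 * DfsRepository repo = new InMemoryRepository(
 * 		new DfsRepositoryDescription("example"));
 * DfsObjDatabase odb = repo.getObjectDatabase();
 * DfsPackFile[] packs = odb.getPacks(); // scans the pack list on first call
 * }
 * </pre>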
 */
public abstract class DfsObjDatabase extends ObjectDatabase {
	private static final PackList NO_PACKS = new PackList(
			new DfsPackFile[0],
			new DfsReftable[0]) {
		@Override
		boolean dirty() {
			return true;
		}

		@Override
		void clearDirty() {
			// Always dirty.
		}

		@Override
		public void markDirty() {
			// Always dirty.
		}
	};

	/**
	 * Sources for a pack file.
	 * <p>
	 * <strong>Note:</strong> When sorting packs by source, do not use the default
	 * comparator based on {@link Enum#compareTo}. Prefer {@link
	 * #DEFAULT_COMPARATOR} or your own {@link ComparatorBuilder}.
	 */
	public enum PackSource {
		/** The pack is created by ObjectInserter due to local activity. */
		INSERT,

		/**
		 * The pack is created by PackParser due to a network event.
		 * <p>
		 * A received pack can come from either a push into the repository or a
		 * fetch into it; the direction doesn't matter. A received pack was
		 * built by the remote Git implementation and may not match the storage
		 * layout preferred by this version. Received packs are likely to be
		 * either compacted or garbage collected in the future.
		 */
		RECEIVE,

		/**
		 * The pack was created by compacting multiple packs together.
		 * <p>
		 * Such packs aren't nearly as efficient as a fully garbage collected
		 * repository, but may save disk space by reducing redundant copies of
		 * base objects.
		 *
		 * @see DfsPackCompactor
		 */
		COMPACT,

		/**
		 * Pack was created by Git garbage collection by this implementation.
		 * <p>
		 * This source is only used by the {@link DfsGarbageCollector} when it
		 * builds a pack file by traversing the object graph and copying all
		 * reachable objects into a new pack stream.
		 *
		 * @see DfsGarbageCollector
		 */
		GC,

		/** Created from non-heads by {@link DfsGarbageCollector}. */
		GC_REST,

		/**
		 * RefTreeGraph pack was created by Git garbage collection.
		 *
		 * @see DfsGarbageCollector
		 */
		GC_TXN,

		/**
		 * Pack was created by Git garbage collection.
		 * <p>
		 * This pack contains only unreachable garbage that was found during the
		 * last GC pass. It is retained in a new pack until it is safe to prune
		 * these objects from the repository.
		 */
		UNREACHABLE_GARBAGE;

		/**
		 * Default comparator for sources.
		 * <p>
		 * Sorts generally newer, smaller types such as {@code INSERT} and {@code
		 * RECEIVE} earlier; older, larger types such as {@code GC} later; and
		 * {@code UNREACHABLE_GARBAGE} at the end.
		 */
		public static final Comparator<PackSource> DEFAULT_COMPARATOR =
				new ComparatorBuilder()
						.add(INSERT, RECEIVE)
						.add(COMPACT)
						.add(GC)
						.add(GC_REST)
						.add(GC_TXN)
						.add(UNREACHABLE_GARBAGE)
						.build();

		/**
		 * Builder for describing {@link PackSource} ordering where some values are
		 * explicitly considered equal to others.
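		 * <p>
		 * A sketch of building a custom ordering; the grouping below is
		 * illustrative, not a recommendation:
		 *
		 * <pre>
		 * {@code
		 * // Treat all GC output as one equivalence class. Every PackSource
		 * // value must be assigned, or build() throws IllegalArgumentException.
		 * Comparator<PackSource> cmp = new ComparatorBuilder()
		 * 		.add(INSERT, RECEIVE)
		 * 		.add(COMPACT)
		 * 		.add(GC, GC_REST, GC_TXN)
		 * 		.add(UNREACHABLE_GARBAGE)
		 * 		.build();
		 * }
		 * </pre>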
		 */
		public static class ComparatorBuilder {
			private final Map<PackSource, Integer> ranks = new HashMap<>();
			private int counter;

			/**
			 * Add a collection of sources that should sort as equal.
			 * <p>
			 * Sources in the input will sort after sources listed in previous calls
			 * to this method.
			 *
			 * @param sources
			 *            sources in this equivalence class.
			 * @return this.
			 */
			public ComparatorBuilder add(PackSource... sources) {
				for (PackSource s : sources) {
					ranks.put(s, Integer.valueOf(counter));
				}
				counter++;
				return this;
			}

			/**
			 * Build the comparator.
			 *
			 * @return new comparator instance.
			 * @throws IllegalArgumentException
			 *             not all {@link PackSource} instances were explicitly assigned
			 *             an equivalence class.
			 */
			public Comparator<PackSource> build() {
				return new PackSourceComparator(ranks);
			}
		}

		private static class PackSourceComparator implements Comparator<PackSource> {
			private final Map<PackSource, Integer> ranks;

			private PackSourceComparator(Map<PackSource, Integer> ranks) {
				if (!ranks.keySet().equals(
							new HashSet<>(Arrays.asList(PackSource.values())))) {
					throw new IllegalArgumentException();
				}
				this.ranks = new HashMap<>(ranks);
			}

			@Override
			public int compare(PackSource a, PackSource b) {
				return ranks.get(a).compareTo(ranks.get(b));
			}

			@Override
			public String toString() {
				return Arrays.stream(PackSource.values())
						.map(s -> s + "=" + ranks.get(s)) //$NON-NLS-1$
						.collect(joining(", ", getClass().getSimpleName() + "{", "}")); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
			}
		}
	}

	private final AtomicReference<PackList> packList;

	private final DfsRepository repository;

	private DfsReaderOptions readerOptions;

	private Comparator<DfsPackDescription> packComparator;

	/**
	 * Initialize an object database for our repository.
	 *
	 * @param repository
	 *            repository owning this object database.
	 * @param options
	 *            how readers should access the object database.
	 */
	protected DfsObjDatabase(DfsRepository repository,
			DfsReaderOptions options) {
		this.repository = repository;
		this.packList = new AtomicReference<>(NO_PACKS);
		this.readerOptions = options;
		this.packComparator = DfsPackDescription.objectLookupComparator();
	}

	/**
	 * Get configured reader options, such as read-ahead.
	 *
	 * @return configured reader options, such as read-ahead.
	 */
	public DfsReaderOptions getReaderOptions() {
		return readerOptions;
	}

	/**
	 * Set the comparator used when searching for objects across packs.
	 * <p>
	 * A good comparator lets the search try the packs most likely to contain
	 * an object first, instead of loading large idx files from storage only to
	 * find that they don't contain the object. See
	 * {@link DfsPackDescription#objectLookupComparator()} for the default
	 * heuristics.
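	 * <p>
	 * A hypothetical sketch of installing a custom ordering (newest packs
	 * first); the heuristic is illustrative, not a recommendation:
	 *
	 * <pre>
	 * {@code
	 * odb.setPackComparator(Comparator
	 * 		.comparingLong(DfsPackDescription::getLastModified)
	 * 		.reversed());
	 * }
	 * </pre>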
	 *
	 * @param packComparator
	 *            comparator.
	 */
	public void setPackComparator(Comparator<DfsPackDescription> packComparator) {
		this.packComparator = packComparator;
	}

	/** {@inheritDoc} */
	@Override
	public DfsReader newReader() {
		return new DfsReader(this);
	}

	/** {@inheritDoc} */
	@Override
	public ObjectInserter newInserter() {
		return new DfsInserter(this);
	}

	/**
	 * Scan and list all available pack files in the repository.
	 *
	 * @return list of available packs. The returned array is shared with the
	 *         implementation and must not be modified by the caller.
	 * @throws java.io.IOException
	 *             the pack list cannot be initialized.
	 */
	public DfsPackFile[] getPacks() throws IOException {
		return getPackList().packs;
	}

	/**
	 * Scan and list all available reftable files in the repository.
	 *
	 * @return list of available reftables. The returned array is shared with
	 *         the implementation and must not be modified by the caller.
	 * @throws java.io.IOException
	 *             the pack list cannot be initialized.
	 */
	public DfsReftable[] getReftables() throws IOException {
		return getPackList().reftables;
	}

	/**
	 * Scan and list all available pack files in the repository.
	 *
	 * @return list of available packs, with some additional metadata. The
	 *         returned array is shared with the implementation and must not be
	 *         modified by the caller.
	 * @throws java.io.IOException
	 *             the pack list cannot be initialized.
	 */
	public PackList getPackList() throws IOException {
		return scanPacks(NO_PACKS);
	}

	/**
	 * Get repository owning this object database.
	 *
	 * @return repository owning this object database.
	 */
	protected DfsRepository getRepository() {
		return repository;
	}

	/**
	 * List currently known pack files in the repository, without scanning.
	 *
	 * @return list of available packs. The returned array is shared with the
	 *         implementation and must not be modified by the caller.
	 */
	public DfsPackFile[] getCurrentPacks() {
		return getCurrentPackList().packs;
	}

	/**
	 * List currently known reftable files in the repository, without scanning.
	 *
	 * @return list of available reftables. The returned array is shared with
	 *         the implementation and must not be modified by the caller.
	 */
	public DfsReftable[] getCurrentReftables() {
		return getCurrentPackList().reftables;
	}

	/**
	 * List currently known pack files in the repository, without scanning.
	 *
	 * @return list of available packs, with some additional metadata. The
	 *         returned array is shared with the implementation and must not be
	 *         modified by the caller.
	 */
	public PackList getCurrentPackList() {
		return packList.get();
	}

	/**
	 * Does the requested object exist in this database?
	 * <p>
	 * This differs from ObjectDatabase's implementation in that we can selectively
	 * ignore unreachable (garbage) objects.
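	 * <p>
	 * For example (illustrative only):
	 *
	 * <pre>
	 * {@code
	 * // With true, an object stored only in UNREACHABLE_GARBAGE packs
	 * // is reported as absent.
	 * boolean present = odb.has(id, true);
	 * }
	 * </pre>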
	 *
	 * @param objectId
	 *            identity of the object to test for existence of.
	 * @param avoidUnreachableObjects
	 *            if true, ignore objects that are unreachable.
	 * @return true if the specified object is stored in this database.
	 * @throws java.io.IOException
	 *             the object store cannot be accessed.
	 */
	public boolean has(AnyObjectId objectId, boolean avoidUnreachableObjects)
			throws IOException {
		try (ObjectReader or = newReader()) {
			or.setAvoidUnreachableObjects(avoidUnreachableObjects);
			return or.has(objectId);
		}
	}

	/**
	 * Generate a new unique name for a pack file.
	 *
	 * @param source
	 *            where the pack stream is created.
	 * @return a unique name for the pack file. Must not collide with any other
	 *         pack file name in the same DFS.
	 * @throws java.io.IOException
	 *             a new unique pack description cannot be generated.
	 */
	protected abstract DfsPackDescription newPack(PackSource source)
			throws IOException;

	/**
	 * Generate a new unique name for a pack file.
	 * <p>
	 * The default implementation of this method is equivalent to
	 * {@code newPack(source).setEstimatedPackSize(estimatedPackSize)}, but
	 * clients can override it to make more efficient use of the given
	 * {@code estimatedPackSize} when creating the new
	 * {@link org.eclipse.jgit.internal.storage.dfs.DfsPackDescription} object.
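	 * <p>
	 * A sketch of such an override; {@code allocateStorage} is a hypothetical
	 * hook of the backing store, not part of this API:
	 *
	 * <pre>
	 * protected DfsPackDescription newPack(PackSource source,
	 * 		long estimatedPackSize) throws IOException {
	 * 	DfsPackDescription pack = newPack(source);
	 * 	pack.setEstimatedPackSize(estimatedPackSize);
	 * 	// hypothetical: pre-allocate storage sized to the estimate
	 * 	allocateStorage(pack, estimatedPackSize);
	 * 	return pack;
	 * }
	 * </pre>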
	 *
	 * @param source
	 *            where the pack stream is created.
	 * @param estimatedPackSize
	 *            the estimated size of the pack.
	 * @return a unique name for the pack file. Must not collide with any other
	 *         pack file name in the same DFS.
	 * @throws java.io.IOException
	 *             a new unique pack description cannot be generated.
	 */
	protected DfsPackDescription newPack(PackSource source,
			long estimatedPackSize) throws IOException {
		DfsPackDescription pack = newPack(source);
		pack.setEstimatedPackSize(estimatedPackSize);
		return pack;
	}

	/**
	 * Commit a pack and index pair that was written to the DFS.
	 * <p>
	 * Committing the pack/index pair makes them visible to readers. The JGit
	 * DFS code always writes the pack, then the index. This allows a simple
	 * commit process to do nothing if readers always look for both files to
	 * exist and the DFS performs atomic creation of the file (e.g. stream to a
	 * temporary file and rename to target on close).
	 * <p>
	 * During pack compaction or GC the new pack file may be replacing other
	 * older files. Implementations should remove those older files (if any) as
	 * part of the commit of the new file.
	 * <p>
	 * This method is a trivial wrapper around
	 * {@link #commitPackImpl(Collection, Collection)} that calls the
	 * implementation and fires events.
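	 * <p>
	 * A sketch of the writer-side sequence this contract assumes (error
	 * handling and rollback omitted for brevity):
	 *
	 * <pre>
	 * {@code
	 * DfsPackDescription desc = newPack(PackSource.INSERT);
	 * try (DfsOutputStream out = writeFile(desc, PackExt.PACK)) {
	 * 	// stream pack data first...
	 * }
	 * try (DfsOutputStream out = writeFile(desc, PackExt.INDEX)) {
	 * 	// ...then the index
	 * }
	 * commitPack(Collections.singletonList(desc), null);
	 * }
	 * </pre>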
	 *
	 * @param desc
	 *            description of the new packs.
	 * @param replaces
	 *            if not null, list of packs to remove.
	 * @throws java.io.IOException
	 *             the packs cannot be committed. On failure a rollback must
	 *             also be attempted by the caller.
	 */
	protected void commitPack(Collection<DfsPackDescription> desc,
			Collection<DfsPackDescription> replaces) throws IOException {
		commitPackImpl(desc, replaces);
		getRepository().fireEvent(new DfsPacksChangedEvent());
	}

	/**
	 * Implementation of pack commit.
	 *
	 * @see #commitPack(Collection, Collection)
	 * @param desc
	 *            description of the new packs.
	 * @param replaces
	 *            if not null, list of packs to remove.
	 * @throws java.io.IOException
	 *             the packs cannot be committed.
	 */
	protected abstract void commitPackImpl(Collection<DfsPackDescription> desc,
			Collection<DfsPackDescription> replaces) throws IOException;

	/**
	 * Try to rollback a pack creation.
	 * <p>
	 * JGit DFS always writes the pack first, then the index. If the pack does
	 * not yet exist, then neither does the index. A safe DFS implementation
	 * would try to remove both files to ensure they are really gone.
	 * <p>
	 * A rollback has no way to report failure, as it only occurs when there is
	 * already a failure in progress. A DFS implementor may wish to log
	 * warnings/error messages when a rollback fails, but should not propagate
	 * new exceptions up the Java call stack.
	 *
	 * @param desc
	 *            pack to delete.
	 */
	protected abstract void rollbackPack(Collection<DfsPackDescription> desc);

	/**
	 * List the available pack files.
	 * <p>
	 * The returned list must support random access and must be mutable by the
	 * caller. It is sorted in place using the natural sorting of the returned
	 * DfsPackDescription objects.
	 *
	 * @return available packs. May be empty if there are no packs.
	 * @throws java.io.IOException
	 *             the packs cannot be listed and the object database is not
	 *             functional to the caller.
	 */
	protected abstract List<DfsPackDescription> listPacks() throws IOException;

	/**
	 * Open a pack, pack index, or other related file for reading.
	 *
	 * @param desc
	 *            description of pack related to the data that will be read.
	 *            This is an instance previously obtained from
	 *            {@link #listPacks()}, but not necessarily from the same
	 *            DfsObjDatabase instance.
	 * @param ext
	 *            file extension that will be read, e.g. "pack" or "idx".
	 * @return channel to read the file.
	 * @throws java.io.FileNotFoundException
	 *             the file does not exist.
	 * @throws java.io.IOException
	 *             the file cannot be opened.
	 */
	protected abstract ReadableChannel openFile(
			DfsPackDescription desc, PackExt ext)
			throws FileNotFoundException, IOException;

	/**
	 * Open a pack, pack index, or other related file for writing.
	 *
	 * @param desc
	 *            description of pack related to the data that will be written.
	 *            This is an instance previously obtained from
	 *            {@link #newPack(PackSource)}.
	 * @param ext
	 *            file extension that will be written, e.g. "pack" or "idx".
	 * @return channel to write the file.
	 * @throws java.io.IOException
	 *             the file cannot be opened.
	 */
	protected abstract DfsOutputStream writeFile(
			DfsPackDescription desc, PackExt ext) throws IOException;

	void addPack(DfsPackFile newPack) throws IOException {
		PackList o, n;
		do {
			o = packList.get();
			if (o == NO_PACKS) {
				// The repository may not have needed any existing objects to
				// complete the current task of creating a pack (e.g. push of a
				// pack with no external deltas). Because we don't scan for
				// newly added packs on missed object lookups, scan now to
				// make sure all older packs are available in the packList.
				o = scanPacks(o);

				// It's possible the scan identified the pack we were asked to
				// add, as the pack was already committed via commitPack().
				// If this is the case, return without changing the list.
				for (DfsPackFile p : o.packs) {
					if (p.key.equals(newPack.key)) {
						return;
					}
				}
			}

			DfsPackFile[] packs = new DfsPackFile[1 + o.packs.length];
			packs[0] = newPack;
			System.arraycopy(o.packs, 0, packs, 1, o.packs.length);
			n = new PackListImpl(packs, o.reftables);
		} while (!packList.compareAndSet(o, n));
	}

	void addReftable(DfsPackDescription add, Set<DfsPackDescription> remove)
			throws IOException {
		PackList o, n;
		do {
			o = packList.get();
			if (o == NO_PACKS) {
				o = scanPacks(o);
				for (DfsReftable t : o.reftables) {
					if (t.getPackDescription().equals(add)) {
						return;
					}
				}
			}

			List<DfsReftable> tables = new ArrayList<>(1 + o.reftables.length);
			for (DfsReftable t : o.reftables) {
				if (!remove.contains(t.getPackDescription())) {
					tables.add(t);
				}
			}
			tables.add(new DfsReftable(add));
			n = new PackListImpl(o.packs, tables.toArray(new DfsReftable[0]));
		} while (!packList.compareAndSet(o, n));
	}

	PackList scanPacks(PackList original) throws IOException {
		PackList o, n;
		synchronized (packList) {
			do {
				o = packList.get();
				if (o != original) {
					// Another thread did the scan for us, while we
					// were blocked on the monitor above.
					//
					return o;
				}
				n = scanPacksImpl(o);
				if (n == o)
					return n;
			} while (!packList.compareAndSet(o, n));
		}
		getRepository().fireEvent(new DfsPacksChangedEvent());
		return n;
	}

	private PackList scanPacksImpl(PackList old) throws IOException {
		DfsBlockCache cache = DfsBlockCache.getInstance();
		Map<DfsPackDescription, DfsPackFile> packs = packMap(old);
		Map<DfsPackDescription, DfsReftable> reftables = reftableMap(old);

		List<DfsPackDescription> scanned = listPacks();
		Collections.sort(scanned, packComparator);

		List<DfsPackFile> newPacks = new ArrayList<>(scanned.size());
		List<DfsReftable> newReftables = new ArrayList<>(scanned.size());
		boolean foundNew = false;
		for (DfsPackDescription dsc : scanned) {
			DfsPackFile oldPack = packs.remove(dsc);
			if (oldPack != null) {
				newPacks.add(oldPack);
			} else if (dsc.hasFileExt(PackExt.PACK)) {
				newPacks.add(new DfsPackFile(cache, dsc));
				foundNew = true;
			}

			DfsReftable oldReftable = reftables.remove(dsc);
			if (oldReftable != null) {
				newReftables.add(oldReftable);
			} else if (dsc.hasFileExt(PackExt.REFTABLE)) {
				newReftables.add(new DfsReftable(cache, dsc));
				foundNew = true;
			}
		}

		if (newPacks.isEmpty() && newReftables.isEmpty())
			return new PackListImpl(NO_PACKS.packs, NO_PACKS.reftables);
		if (!foundNew) {
			old.clearDirty();
			return old;
		}
		Collections.sort(newReftables, reftableComparator());
		return new PackListImpl(
				newPacks.toArray(new DfsPackFile[0]),
				newReftables.toArray(new DfsReftable[0]));
	}

	private static Map<DfsPackDescription, DfsPackFile> packMap(PackList old) {
		Map<DfsPackDescription, DfsPackFile> forReuse = new HashMap<>();
		for (DfsPackFile p : old.packs) {
			if (!p.invalid()) {
				forReuse.put(p.desc, p);
			}
		}
		return forReuse;
	}

	private static Map<DfsPackDescription, DfsReftable> reftableMap(PackList old) {
		Map<DfsPackDescription, DfsReftable> forReuse = new HashMap<>();
		for (DfsReftable p : old.reftables) {
			if (!p.invalid()) {
				forReuse.put(p.desc, p);
			}
		}
		return forReuse;
	}

	/**
	 * Get comparator to sort {@link DfsReftable} by priority.
	 *
	 * @return comparator to sort {@link DfsReftable} by priority.
	 */
	protected Comparator<DfsReftable> reftableComparator() {
		return Comparator.comparing(
				DfsReftable::getPackDescription,
				DfsPackDescription.reftableComparator());
	}

	/**
	 * Clears the cached list of packs, forcing them to be scanned again.
	 */
	protected void clearCache() {
		packList.set(NO_PACKS);
	}

	/** {@inheritDoc} */
	@Override
	public void close() {
		packList.set(NO_PACKS);
	}

	/** Snapshot of packs scanned in a single pass. */
	public abstract static class PackList {
		/** All known packs, sorted. */
		public final DfsPackFile[] packs;

		/** All known reftables, sorted. */
		public final DfsReftable[] reftables;

		private long lastModified = -1;

		PackList(DfsPackFile[] packs, DfsReftable[] reftables) {
			this.packs = packs;
			this.reftables = reftables;
		}

		/** @return last modified time of all packs, in milliseconds. */
		public long getLastModified() {
			if (lastModified < 0) {
				long max = 0;
				for (DfsPackFile pack : packs) {
					max = Math.max(max, pack.getPackDescription().getLastModified());
				}
				lastModified = max;
			}
			return lastModified;
		}

		abstract boolean dirty();
		abstract void clearDirty();

		/**
		 * Mark pack list as dirty.
		 * <p>
		 * Used when the caller knows that new data might have been written to the
		 * repository that could invalidate open readers depending on this pack list,
		 * for example if refs are newly scanned.
		 */
		public abstract void markDirty();
	}

	private static final class PackListImpl extends PackList {
		private volatile boolean dirty;

		PackListImpl(DfsPackFile[] packs, DfsReftable[] reftables) {
			super(packs, reftables);
		}

		@Override
		boolean dirty() {
			return dirty;
		}

		@Override
		void clearDirty() {
			dirty = false;
		}

		@Override
		public void markDirty() {
			dirty = true;
		}
	}
}