View Javadoc
1   /*
2    * Copyright (C) 2008-2010, Google Inc.
3    * Copyright (C) 2008, Marek Zawirski <marek.zawirski@gmail.com> and others
4    *
5    * This program and the accompanying materials are made available under the
6    * terms of the Eclipse Distribution License v. 1.0 which is available at
7    * https://www.eclipse.org/org/documents/edl-v10.php.
8    *
9    * SPDX-License-Identifier: BSD-3-Clause
10   */
11  
12  package org.eclipse.jgit.internal.storage.pack;
13  
14  import static java.util.Objects.requireNonNull;
15  import static org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation.PACK_DELTA;
16  import static org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation.PACK_WHOLE;
17  import static org.eclipse.jgit.lib.Constants.OBJECT_ID_LENGTH;
18  import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
19  import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT;
20  import static org.eclipse.jgit.lib.Constants.OBJ_TAG;
21  import static org.eclipse.jgit.lib.Constants.OBJ_TREE;
22  
23  import java.io.IOException;
24  import java.io.OutputStream;
25  import java.lang.ref.WeakReference;
26  import java.security.MessageDigest;
27  import java.text.MessageFormat;
28  import java.time.Duration;
29  import java.util.ArrayList;
30  import java.util.Arrays;
31  import java.util.Collection;
32  import java.util.Collections;
33  import java.util.HashMap;
34  import java.util.HashSet;
35  import java.util.Iterator;
36  import java.util.List;
37  import java.util.Map;
38  import java.util.NoSuchElementException;
39  import java.util.Set;
40  import java.util.concurrent.ConcurrentHashMap;
41  import java.util.concurrent.ExecutionException;
42  import java.util.concurrent.Executor;
43  import java.util.concurrent.ExecutorService;
44  import java.util.concurrent.Executors;
45  import java.util.concurrent.Future;
46  import java.util.concurrent.TimeUnit;
47  import java.util.zip.CRC32;
48  import java.util.zip.CheckedOutputStream;
49  import java.util.zip.Deflater;
50  import java.util.zip.DeflaterOutputStream;
51  
52  import org.eclipse.jgit.annotations.NonNull;
53  import org.eclipse.jgit.annotations.Nullable;
54  import org.eclipse.jgit.errors.CorruptObjectException;
55  import org.eclipse.jgit.errors.IncorrectObjectTypeException;
56  import org.eclipse.jgit.errors.LargeObjectException;
57  import org.eclipse.jgit.errors.MissingObjectException;
58  import org.eclipse.jgit.errors.SearchForReuseTimeout;
59  import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
60  import org.eclipse.jgit.internal.JGitText;
61  import org.eclipse.jgit.internal.storage.file.PackBitmapIndexBuilder;
62  import org.eclipse.jgit.internal.storage.file.PackBitmapIndexWriterV1;
63  import org.eclipse.jgit.internal.storage.file.PackIndexWriter;
64  import org.eclipse.jgit.lib.AnyObjectId;
65  import org.eclipse.jgit.lib.AsyncObjectSizeQueue;
66  import org.eclipse.jgit.lib.BatchingProgressMonitor;
67  import org.eclipse.jgit.lib.BitmapIndex;
68  import org.eclipse.jgit.lib.BitmapIndex.BitmapBuilder;
69  import org.eclipse.jgit.lib.BitmapObject;
70  import org.eclipse.jgit.lib.Constants;
71  import org.eclipse.jgit.lib.NullProgressMonitor;
72  import org.eclipse.jgit.lib.ObjectId;
73  import org.eclipse.jgit.lib.ObjectIdOwnerMap;
74  import org.eclipse.jgit.lib.ObjectIdSet;
75  import org.eclipse.jgit.lib.ObjectLoader;
76  import org.eclipse.jgit.lib.ObjectReader;
77  import org.eclipse.jgit.lib.ProgressMonitor;
78  import org.eclipse.jgit.lib.Repository;
79  import org.eclipse.jgit.lib.ThreadSafeProgressMonitor;
80  import org.eclipse.jgit.revwalk.AsyncRevObjectQueue;
81  import org.eclipse.jgit.revwalk.BitmapWalker;
82  import org.eclipse.jgit.revwalk.DepthWalk;
83  import org.eclipse.jgit.revwalk.ObjectWalk;
84  import org.eclipse.jgit.revwalk.RevCommit;
85  import org.eclipse.jgit.revwalk.RevFlag;
86  import org.eclipse.jgit.revwalk.RevObject;
87  import org.eclipse.jgit.revwalk.RevSort;
88  import org.eclipse.jgit.revwalk.RevTag;
89  import org.eclipse.jgit.revwalk.RevTree;
90  import org.eclipse.jgit.storage.pack.PackConfig;
91  import org.eclipse.jgit.storage.pack.PackStatistics;
92  import org.eclipse.jgit.transport.FilterSpec;
93  import org.eclipse.jgit.transport.ObjectCountCallback;
94  import org.eclipse.jgit.transport.PacketLineOut;
95  import org.eclipse.jgit.transport.WriteAbortedException;
96  import org.eclipse.jgit.util.BlockList;
97  import org.eclipse.jgit.util.TemporaryBuffer;
98  
99  /**
100  * <p>
101  * PackWriter class is responsible for generating pack files from specified set
102  * of objects from repository. This implementation produce pack files in format
103  * version 2.
104  * </p>
105  * <p>
106  * Source of objects may be specified in two ways:
107  * <ul>
108  * <li>(usually) by providing sets of interesting and uninteresting objects in
109  * repository - all interesting objects and their ancestors except uninteresting
110  * objects and their ancestors will be included in pack, or</li>
111  * <li>by providing iterator of {@link org.eclipse.jgit.revwalk.RevObject}
112  * specifying exact list and order of objects in pack</li>
113  * </ul>
114  * <p>
115  * Typical usage consists of creating an instance, configuring options,
116  * preparing the list of objects by calling {@link #preparePack(Iterator)} or
117  * {@link #preparePack(ProgressMonitor, Set, Set)}, and streaming with
118  * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}. If the
119  * pack is being stored as a file the matching index can be written out after
120  * writing the pack by {@link #writeIndex(OutputStream)}. An optional bitmap
121  * index can be made by calling {@link #prepareBitmapIndex(ProgressMonitor)}
122  * followed by {@link #writeBitmapIndex(OutputStream)}.
123  * </p>
124  * <p>
125  * Class provide set of configurable options and
126  * {@link org.eclipse.jgit.lib.ProgressMonitor} support, as operations may take
127  * a long time for big repositories. Deltas searching algorithm is <b>NOT
128  * IMPLEMENTED</b> yet - this implementation relies only on deltas and objects
129  * reuse.
130  * </p>
131  * <p>
132  * This class is not thread safe. It is intended to be used in one thread as a
133  * single pass to produce one pack. Invoking methods multiple times or out of
134  * order is not supported as internal data structures are destroyed during
135  * certain phases to save memory when packing large repositories.
136  * </p>
137  */
138 public class PackWriter implements AutoCloseable {
	private static final int PACK_VERSION_GENERATED = 2;

	/** Empty set of objects for {@code preparePack()}. */
	public static final Set<ObjectId> NONE = Collections.emptySet();

	// Registry of all live PackWriter instances, keyed by weak reference so
	// that writers which are garbage collected without being released do not
	// accumulate here forever. The Boolean value is unused; the map acts as
	// a concurrent weak-keyed set.
	private static final Map<WeakReference<PackWriter>, Boolean> instances =
			new ConcurrentHashMap<>();

	// Iterable view over the live writers. Cleared (stale) weak references
	// are pruned from the registry as a side effect of iteration.
	private static final Iterable<PackWriter> instancesIterable = () -> new Iterator<PackWriter>() {

		private final Iterator<WeakReference<PackWriter>> it = instances
				.keySet().iterator();

		// Next live writer located by hasNext(), or null if none buffered yet.
		private PackWriter next;

		@Override
		public boolean hasNext() {
			if (next != null) {
				return true;
			}
			while (it.hasNext()) {
				WeakReference<PackWriter> ref = it.next();
				next = ref.get();
				if (next != null) {
					return true;
				}
				// Referent was garbage collected; drop the stale entry.
				it.remove();
			}
			return false;
		}

		@Override
		public PackWriter next() {
			if (hasNext()) {
				PackWriter result = next;
				next = null;
				return result;
			}
			throw new NoSuchElementException();
		}

		@Override
		public void remove() {
			// Removing writers through this view is not supported.
			throw new UnsupportedOperationException();
		}
	};

	/**
	 * Get all allocated, non-released PackWriters instances.
	 *
	 * @return all allocated, non-released PackWriters instances.
	 */
	public static Iterable<PackWriter> getInstances() {
		return instancesIterable;
	}
194 
	// Objects to be written, partitioned by type: indexed by OBJ_COMMIT,
	// OBJ_TREE, OBJ_BLOB and OBJ_TAG; slot 0 is intentionally unused.
	@SuppressWarnings("unchecked")
	BlockList<ObjectToPack>[] objectsLists = new BlockList[OBJ_TAG + 1];
	{
		objectsLists[OBJ_COMMIT] = new BlockList<>();
		objectsLists[OBJ_TREE] = new BlockList<>();
		objectsLists[OBJ_BLOB] = new BlockList<>();
		objectsLists[OBJ_TAG] = new BlockList<>();
	}

	// Lookup of ObjectToPack entries by object id.
	private ObjectIdOwnerMap<ObjectToPack> objectsMap = new ObjectIdOwnerMap<>();

	// edge objects for thin packs
	private List<ObjectToPack> edgeObjects = new BlockList<>();

	// Objects the client is known to have already.
	private BitmapBuilder haveObjects;

	// Packs appended as-is onto the end of the output stream.
	private List<CachedPack> cachedPacks = new ArrayList<>(2);

	// Objects pointed at by annotated tags; hoisted earlier in the pack
	// (see setTagTargets).
	private Set<ObjectId> tagTargets = NONE;

	// Objects that must not be chosen during bitmap commit selection.
	private Set<? extends ObjectId> excludeFromBitmapSelection = NONE;

	// Object sets whose members must be omitted from the output pack.
	private ObjectIdSet[] excludeInPacks;

	// Most recently relevant exclusion set — presumably an MRU cache for
	// the exclusion lookup; TODO confirm against the exclude check site.
	private ObjectIdSet excludeInPackLast;

	// Lazily allocated deflater, reused while writing objects.
	private Deflater myDeflater;

	private final ObjectReader reader;

	/** {@link #reader} recast to the reuse interface, if it supports it. */
	private final ObjectReuseAsIs reuseSupport;

	final PackConfig config;

	// Accumulates statistics about the pack being written.
	private final PackStatistics.Accumulator stats;

	private final MutableState state;

	// Weak handle registered in the static instance registry.
	private final WeakReference<PackWriter> selfRef;

	// Statistics accumulator for the object type currently being processed.
	private PackStatistics.ObjectType.Accumulator typeStats;

	private List<ObjectToPack> sortedByName;

	// Checksum of the written pack — presumably the pack trailer digest;
	// TODO confirm where it is assigned.
	private byte[] packcsum;

	// Whether delta bases are encoded as offsets rather than object ids.
	private boolean deltaBaseAsOffset;

	private boolean reuseDeltas;

	private boolean reuseDeltaCommits;

	private boolean reuseValidate;

	// True when producing a thin pack (delta bases may live outside the pack).
	private boolean thin;

	private boolean useCachedPacks;

	private boolean useBitmaps;

	private boolean ignoreMissingUninteresting = true;

	private boolean pruneCurrentObjectList;

	private boolean shallowPack;

	private boolean canBuildBitmaps;

	private boolean indexDisabled;

	// When true, checkSearchForReuseTimeout() enforces searchForReuseTimeout.
	private boolean checkSearchForReuseTimeout = false;

	// Maximum duration allowed for the search-for-reuse phase.
	private final Duration searchForReuseTimeout;

	// Epoch milliseconds at which the search-for-reuse phase started.
	private long searchForReuseStartTimeEpoc;

	// Maximum history depth for a shallow pack; see setShallowPack.
	private int depth;

	// Objects which used to be shallow on the client but are being extended.
	private Collection<? extends ObjectId> unshallowObjects;

	// Builder for the optional bitmap index; null unless bitmaps are prepared.
	private PackBitmapIndexBuilder writeBitmaps;

	private CRC32 crc32;

	// Optional callback informed of the object count before writing.
	private ObjectCountCallback callback;

	// Filter restricting which objects this writer includes;
	// NO_FILTER includes everything.
	private FilterSpec filterSpec = FilterSpec.NO_FILTER;

	// Optional configuration enabling packfile-URI offload of cached packs.
	private PackfileUriConfig packfileUriConfig;
286 
	/**
	 * Create writer for specified repository.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 * <p>
	 * Delegates to {@link #PackWriter(Repository, ObjectReader)} with a new
	 * reader obtained from the repository.
	 *
	 * @param repo
	 *            repository where objects are stored.
	 */
	public PackWriter(Repository repo) {
		this(repo, repo.newObjectReader());
	}

	/**
	 * Create a writer to load objects from the specified reader.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 * <p>
	 * Delegates to {@link #PackWriter(PackConfig, ObjectReader)} with a
	 * default configuration.
	 *
	 * @param reader
	 *            reader to read from the repository with.
	 */
	public PackWriter(ObjectReader reader) {
		this(new PackConfig(), reader);
	}

	/**
	 * Create writer for specified repository.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 * <p>
	 * Delegates to {@link #PackWriter(PackConfig, ObjectReader)} with a
	 * configuration derived from the repository.
	 *
	 * @param repo
	 *            repository where objects are stored.
	 * @param reader
	 *            reader to read from the repository with.
	 */
	public PackWriter(Repository repo, ObjectReader reader) {
		this(new PackConfig(repo), reader);
	}

	/**
	 * Create writer with a specified configuration.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 *
	 * @param config
	 *            configuration for the pack writer.
	 * @param reader
	 *            reader to read from the repository with.
	 */
	public PackWriter(PackConfig config, ObjectReader reader) {
		this(config, reader, null);
	}
342 
343 	/**
344 	 * Create writer with a specified configuration.
345 	 * <p>
346 	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
347 	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
348 	 *
349 	 * @param config
350 	 *            configuration for the pack writer.
351 	 * @param reader
352 	 *            reader to read from the repository with.
353 	 * @param statsAccumulator
354 	 *            accumulator for statics
355 	 */
356 	public PackWriter(PackConfig config, final ObjectReader reader,
357 			@Nullable PackStatistics.Accumulator statsAccumulator) {
358 		this.config = config;
359 		this.reader = reader;
360 		if (reader instanceof ObjectReuseAsIs)
361 			reuseSupport = ((ObjectReuseAsIs) reader);
362 		else
363 			reuseSupport = null;
364 
365 		deltaBaseAsOffset = config.isDeltaBaseAsOffset();
366 		reuseDeltas = config.isReuseDeltas();
367 		searchForReuseTimeout = config.getSearchForReuseTimeout();
368 		reuseValidate = true; // be paranoid by default
369 		stats = statsAccumulator != null ? statsAccumulator
370 				: new PackStatistics.Accumulator();
371 		state = new MutableState();
372 		selfRef = new WeakReference<>(this);
373 		instances.put(selfRef, Boolean.TRUE);
374 	}
375 
	/**
	 * Set the {@code ObjectCountCallback}.
	 * <p>
	 * It should be set before calling
	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}.
	 *
	 * @param callback
	 *            the callback to set
	 * @return this object for chaining.
	 */
	public PackWriter setObjectCountCallback(ObjectCountCallback callback) {
		this.callback = callback;
		return this;
	}

	/**
	 * Records the set of shallow commits in the client.
	 *
	 * @param clientShallowCommits
	 *            the shallow commits in the client
	 */
	public void setClientShallowCommits(Set<ObjectId> clientShallowCommits) {
		// Defensive copy so later mutation of the caller's set has no effect.
		stats.clientShallowCommits = Collections
				.unmodifiableSet(new HashSet<>(clientShallowCommits));
	}

	/**
	 * Check whether writer can store delta base as an offset (new style
	 * reducing pack size) or should store it as an object id (legacy style,
	 * compatible with old readers).
	 *
	 * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
	 *
	 * @return true if delta base is stored as an offset; false if it is stored
	 *         as an object id.
	 * @see #setDeltaBaseAsOffset(boolean)
	 */
	public boolean isDeltaBaseAsOffset() {
		return deltaBaseAsOffset;
	}
415 
416 	/**
417 	 * Check whether the search for reuse phase is taking too long. This could
418 	 * be the case when the number of objects and pack files is high and the
419 	 * system is under pressure. If that's the case and
420 	 * checkSearchForReuseTimeout is true abort the search.
421 	 *
422 	 * @throws SearchForReuseTimeout
423 	 *             if the search for reuse is taking too long.
424 	 */
425 	public void checkSearchForReuseTimeout() throws SearchForReuseTimeout {
426 		if (checkSearchForReuseTimeout
427 				&& Duration.ofMillis(System.currentTimeMillis()
428 						- searchForReuseStartTimeEpoc)
429 				.compareTo(searchForReuseTimeout) > 0) {
430 			throw new SearchForReuseTimeout(searchForReuseTimeout);
431 		}
432 	}
433 
	/**
	 * Set writer delta base format. Delta base can be written as an offset in a
	 * pack file (new approach reducing file size) or as an object id (legacy
	 * approach, compatible with old readers).
	 *
	 * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
	 *
	 * @param deltaBaseAsOffset
	 *            boolean indicating whether delta base can be stored as an
	 *            offset.
	 * @see #isDeltaBaseAsOffset()
	 */
	public void setDeltaBaseAsOffset(boolean deltaBaseAsOffset) {
		this.deltaBaseAsOffset = deltaBaseAsOffset;
	}

	/**
	 * Set the writer to check for long search for reuse, exceeding the timeout.
	 * Selecting an object representation can be an expensive operation. It is
	 * possible to set a max search for reuse time (see
	 * PackConfig#CONFIG_KEY_SEARCH_FOR_REUSE_TIMEOUT for more details).
	 *
	 * However some operations, i.e.: GC, need to find the best candidate
	 * regardless how much time the operation will need to finish.
	 *
	 * This method enables the search for reuse timeout check, otherwise
	 * disabled.
	 *
	 * @see #checkSearchForReuseTimeout()
	 */
	public void enableSearchForReuseTimeout() {
		this.checkSearchForReuseTimeout = true;
	}
464 
	/**
	 * Check if the writer will reuse commits that are already stored as deltas.
	 *
	 * @return true if the writer would reuse commits stored as deltas, assuming
	 *         delta reuse is already enabled.
	 * @see #setReuseDeltaCommits(boolean)
	 */
	public boolean isReuseDeltaCommits() {
		return reuseDeltaCommits;
	}

	/**
	 * Set the writer to reuse existing delta versions of commits.
	 *
	 * @param reuse
	 *            if true, the writer will reuse any commits stored as deltas.
	 *            By default the writer does not reuse delta commits.
	 */
	public void setReuseDeltaCommits(boolean reuse) {
		reuseDeltaCommits = reuse;
	}

	/**
	 * Check if the writer validates objects before copying them.
	 *
	 * @return true if validation is enabled; false if the reader will handle
	 *         object validation as a side-effect of it consuming the output.
	 * @see #setReuseValidatingObjects(boolean)
	 */
	public boolean isReuseValidatingObjects() {
		return reuseValidate;
	}

	/**
	 * Enable (or disable) object validation during packing.
	 *
	 * @param validate
	 *            if true the pack writer will validate an object before it is
	 *            put into the output. This additional validation work may be
	 *            necessary to avoid propagating corruption from one local pack
	 *            file to another local pack file.
	 */
	public void setReuseValidatingObjects(boolean validate) {
		reuseValidate = validate;
	}
508 
	/**
	 * Whether this writer is producing a thin pack.
	 *
	 * @return true if this writer is producing a thin pack.
	 * @see #setThin(boolean)
	 */
	public boolean isThin() {
		return thin;
	}

	/**
	 * Whether writer may pack objects with delta base object not within set of
	 * objects to pack
	 *
	 * @param packthin
	 *            a boolean indicating whether writer may pack objects with
	 *            delta base object not within set of objects to pack, but
	 *            belonging to party repository (uninteresting/boundary) as
	 *            determined by set; this kind of pack is used only for
	 *            transport; true - to produce thin pack, false - otherwise.
	 */
	public void setThin(boolean packthin) {
		thin = packthin;
	}

	/**
	 * Whether to reuse cached packs.
	 *
	 * @return {@code true} to reuse cached packs. If true index creation isn't
	 *         available.
	 */
	public boolean isUseCachedPacks() {
		return useCachedPacks;
	}

	/**
	 * Whether to use cached packs
	 *
	 * @param useCached
	 *            if set to {@code true} and a cached pack is present, it will
	 *            be appended onto the end of a thin-pack, reducing the amount
	 *            of working set space and CPU used by PackWriter. Enabling this
	 *            feature prevents PackWriter from creating an index for the
	 *            newly created pack, so its only suitable for writing to a
	 *            network client, where the client will make the index.
	 */
	public void setUseCachedPacks(boolean useCached) {
		useCachedPacks = useCached;
	}

	/**
	 * Whether to use bitmaps
	 *
	 * @return {@code true} to use bitmaps for ObjectWalks, if available.
	 * @see #setUseBitmaps(boolean)
	 */
	public boolean isUseBitmaps() {
		return useBitmaps;
	}

	/**
	 * Whether to use bitmaps
	 *
	 * @param useBitmaps
	 *            if set to true, bitmaps will be used when preparing a pack.
	 */
	public void setUseBitmaps(boolean useBitmaps) {
		this.useBitmaps = useBitmaps;
	}
576 
	/**
	 * Whether the index file cannot be created by this PackWriter.
	 *
	 * @return {@code true} if the index file cannot be created by this
	 *         PackWriter.
	 */
	public boolean isIndexDisabled() {
		// Cached packs are appended verbatim to the stream, which prevents
		// this writer from producing an index for the combined pack.
		return indexDisabled || !cachedPacks.isEmpty();
	}

	/**
	 * Whether to disable creation of the index file.
	 *
	 * @param noIndex
	 *            {@code true} to disable creation of the index file.
	 */
	public void setIndexDisabled(boolean noIndex) {
		this.indexDisabled = noIndex;
	}

	/**
	 * Whether to ignore missing uninteresting objects
	 *
	 * @return {@code true} to ignore objects that are uninteresting and also
	 *         not found on local disk; false to throw a
	 *         {@link org.eclipse.jgit.errors.MissingObjectException} out of
	 *         {@link #preparePack(ProgressMonitor, Set, Set)} if an
	 *         uninteresting object is not in the source repository. By default,
	 *         true, permitting gracefully ignoring of uninteresting objects.
	 */
	public boolean isIgnoreMissingUninteresting() {
		return ignoreMissingUninteresting;
	}

	/**
	 * Whether writer should ignore non existing uninteresting objects
	 *
	 * @param ignore
	 *            {@code true} if writer should ignore non existing
	 *            uninteresting objects during construction set of objects to
	 *            pack; false otherwise - non existing uninteresting objects may
	 *            cause {@link org.eclipse.jgit.errors.MissingObjectException}
	 */
	public void setIgnoreMissingUninteresting(boolean ignore) {
		ignoreMissingUninteresting = ignore;
	}
623 
	/**
	 * Set the tag targets that should be hoisted earlier during packing.
	 * <p>
	 * Callers may put objects into this set before invoking any of the
	 * preparePack methods to influence where an annotated tag's target is
	 * stored within the resulting pack. Typically these will be clustered
	 * together, and hoisted earlier in the file even if they are ancient
	 * revisions, allowing readers to find tag targets with better locality.
	 *
	 * @param objects
	 *            objects that annotated tags point at.
	 */
	public void setTagTargets(Set<ObjectId> objects) {
		tagTargets = objects;
	}

	/**
	 * Configure this pack for a shallow clone.
	 *
	 * @param depth
	 *            maximum depth of history to return. 1 means return only the
	 *            "wants".
	 * @param unshallow
	 *            objects which used to be shallow on the client, but are being
	 *            extended as part of this fetch
	 */
	public void setShallowPack(int depth,
			Collection<? extends ObjectId> unshallow) {
		this.shallowPack = true;
		this.depth = depth;
		this.unshallowObjects = unshallow;
	}

	/**
	 * Set the filter spec restricting which objects this writer includes.
	 *
	 * @param filter the filter which indicates what and what not this writer
	 *            should include
	 */
	public void setFilterSpec(@NonNull FilterSpec filter) {
		filterSpec = requireNonNull(filter);
	}

	/**
	 * Set the configuration used when writing packfile URIs.
	 *
	 * @param config configuration related to packfile URIs
	 * @since 5.5
	 */
	public void setPackfileUriConfig(PackfileUriConfig config) {
		packfileUriConfig = config;
	}
672 
673 	/**
674 	 * Returns objects number in a pack file that was created by this writer.
675 	 *
676 	 * @return number of objects in pack.
677 	 * @throws java.io.IOException
678 	 *             a cached pack cannot supply its object count.
679 	 */
680 	public long getObjectCount() throws IOException {
681 		if (stats.totalObjects == 0) {
682 			long objCnt = 0;
683 
684 			objCnt += objectsLists[OBJ_COMMIT].size();
685 			objCnt += objectsLists[OBJ_TREE].size();
686 			objCnt += objectsLists[OBJ_BLOB].size();
687 			objCnt += objectsLists[OBJ_TAG].size();
688 
689 			for (CachedPack pack : cachedPacks)
690 				objCnt += pack.getObjectCount();
691 			return objCnt;
692 		}
693 		return stats.totalObjects;
694 	}
695 
	/**
	 * Count the objects that will be streamed directly, excluding objects in
	 * cached packs that can be offloaded via a packfile URI.
	 *
	 * NOTE(review): dereferences {@code packfileUriConfig} without a null
	 * check — presumably only called after {@link #setPackfileUriConfig} was
	 * invoked; confirm against callers.
	 *
	 * @return number of objects not offloaded via packfile URIs.
	 * @throws IOException
	 *             a cached pack cannot supply its object count or pack info.
	 */
	private long getUnoffloadedObjectCount() throws IOException {
		long objCnt = 0;

		objCnt += objectsLists[OBJ_COMMIT].size();
		objCnt += objectsLists[OBJ_TREE].size();
		objCnt += objectsLists[OBJ_BLOB].size();
		objCnt += objectsLists[OBJ_TAG].size();

		for (CachedPack pack : cachedPacks) {
			// A cached pack with URI info available is offloaded and
			// therefore not counted here.
			CachedPackUriProvider.PackInfo packInfo =
				packfileUriConfig.cachedPackUriProvider.getInfo(
					pack, packfileUriConfig.protocolsSupported);
			if (packInfo == null) {
				objCnt += pack.getObjectCount();
			}
		}

		return objCnt;
	}
715 
716 	/**
717 	 * Returns the object ids in the pack file that was created by this writer.
718 	 * <p>
719 	 * This method can only be invoked after
720 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)} has
721 	 * been invoked and completed successfully.
722 	 *
723 	 * @return set of objects in pack.
724 	 * @throws java.io.IOException
725 	 *             a cached pack cannot supply its object ids.
726 	 */
727 	public ObjectIdOwnerMap<ObjectIdOwnerMap.Entry> getObjectSet()
728 			throws IOException {
729 		if (!cachedPacks.isEmpty())
730 			throw new IOException(
731 					JGitText.get().cachedPacksPreventsListingObjects);
732 
733 		if (writeBitmaps != null) {
734 			return writeBitmaps.getObjectSet();
735 		}
736 
737 		ObjectIdOwnerMap<ObjectIdOwnerMap.Entry> r = new ObjectIdOwnerMap<>();
738 		for (BlockList<ObjectToPack> objList : objectsLists) {
739 			if (objList != null) {
740 				for (ObjectToPack otp : objList)
741 					r.add(new ObjectIdOwnerMap.Entry(otp) {
742 						// A new entry that copies the ObjectId
743 					});
744 			}
745 		}
746 		return r;
747 	}
748 
749 	/**
750 	 * Add a pack index whose contents should be excluded from the result.
751 	 *
752 	 * @param idx
753 	 *            objects in this index will not be in the output pack.
754 	 */
755 	public void excludeObjects(ObjectIdSet idx) {
756 		if (excludeInPacks == null) {
757 			excludeInPacks = new ObjectIdSet[] { idx };
758 			excludeInPackLast = idx;
759 		} else {
760 			int cnt = excludeInPacks.length;
761 			ObjectIdSet[] newList = new ObjectIdSet[cnt + 1];
762 			System.arraycopy(excludeInPacks, 0, newList, 0, cnt);
763 			newList[cnt] = idx;
764 			excludeInPacks = newList;
765 		}
766 	}
767 
768 	/**
769 	 * Prepare the list of objects to be written to the pack stream.
770 	 * <p>
771 	 * Iterator <b>exactly</b> determines which objects are included in a pack
772 	 * and order they appear in pack (except that objects order by type is not
773 	 * needed at input). This order should conform general rules of ordering
774 	 * objects in git - by recency and path (type and delta-base first is
775 	 * internally secured) and responsibility for guaranteeing this order is on
776 	 * a caller side. Iterator must return each id of object to write exactly
777 	 * once.
778 	 * </p>
779 	 *
780 	 * @param objectsSource
781 	 *            iterator of object to store in a pack; order of objects within
782 	 *            each type is important, ordering by type is not needed;
783 	 *            allowed types for objects are
784 	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_COMMIT},
785 	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_TREE},
786 	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_BLOB} and
787 	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_TAG}; objects
788 	 *            returned by iterator may be later reused by caller as object
789 	 *            id and type are internally copied in each iteration.
790 	 * @throws java.io.IOException
791 	 *             when some I/O problem occur during reading objects.
792 	 */
793 	public void preparePack(@NonNull Iterator<RevObject> objectsSource)
794 			throws IOException {
795 		while (objectsSource.hasNext()) {
796 			addObject(objectsSource.next());
797 		}
798 	}
799 
	/**
	 * Prepare the list of objects to be written to the pack stream.
	 *
	 * <p>
	 * PackWriter will concat and write out the specified packs as-is.
	 *
	 * @param c
	 *            cached packs to be written.
	 */
	public void preparePack(Collection<? extends CachedPack> c) {
		// Appended as-is; see also isIndexDisabled() which reflects this.
		cachedPacks.addAll(c);
	}
812 
	/**
	 * Prepare the list of objects to be written to the pack stream.
	 * <p>
	 * Basing on these 2 sets, another set of objects to put in a pack file is
	 * created: this set consists of all objects reachable (ancestors) from
	 * interesting objects, except uninteresting objects and their ancestors.
	 * This method uses class {@link org.eclipse.jgit.revwalk.ObjectWalk}
	 * extensively to find out that appropriate set of output objects and their
	 * optimal order in output pack. Order is consistent with general git
	 * in-pack rules: sort by object type, recency, path and delta-base first.
	 * </p>
	 *
	 * @param countingMonitor
	 *            progress during object enumeration.
	 * @param want
	 *            collection of objects to be marked as interesting (start
	 *            points of graph traversal). Must not be {@code null}.
	 * @param have
	 *            collection of objects to be marked as uninteresting (end
	 *            points of graph traversal). Pass {@link #NONE} if all objects
	 *            reachable from {@code want} are desired, such as when serving
	 *            a clone.
	 * @throws java.io.IOException
	 *             when some I/O problem occur during reading objects.
	 */
	public void preparePack(ProgressMonitor countingMonitor,
			@NonNull Set<? extends ObjectId> want,
			@NonNull Set<? extends ObjectId> have) throws IOException {
		// Delegates with no shallow commits and no bitmap exclusions.
		preparePack(countingMonitor, want, have, NONE, NONE);
	}
843 
	/**
	 * Prepare the list of objects to be written to the pack stream.
	 * <p>
	 * Like {@link #preparePack(ProgressMonitor, Set, Set)} but also allows
	 * specifying commits that should not be walked past ("shallow" commits).
	 * The caller is responsible for filtering out commits that should not be
	 * shallow any more ("unshallow" commits as in {@link #setShallowPack}) from
	 * the shallow set.
	 *
	 * @param countingMonitor
	 *            progress during object enumeration.
	 * @param want
	 *            objects of interest, ancestors of which will be included in
	 *            the pack. Must not be {@code null}.
	 * @param have
	 *            objects whose ancestors (up to and including {@code shallow}
	 *            commits) do not need to be included in the pack because they
	 *            are already available from elsewhere. Must not be
	 *            {@code null}.
	 * @param shallow
	 *            commits indicating the boundary of the history marked with
	 *            {@code have}. Shallow commits have parents but those parents
	 *            are considered not to be already available. Parents of
	 *            {@code shallow} commits and earlier generations will be
	 *            included in the pack if requested by {@code want}. Must not be
	 *            {@code null}.
	 * @throws java.io.IOException
	 *             an I/O problem occurred while reading objects.
	 */
	public void preparePack(ProgressMonitor countingMonitor,
			@NonNull Set<? extends ObjectId> want,
			@NonNull Set<? extends ObjectId> have,
			@NonNull Set<? extends ObjectId> shallow) throws IOException {
		// Delegates with no bitmap-selection exclusions.
		preparePack(countingMonitor, want, have, shallow, NONE);
	}
879 
880 	/**
881 	 * Prepare the list of objects to be written to the pack stream.
882 	 * <p>
883 	 * Like {@link #preparePack(ProgressMonitor, Set, Set)} but also allows
884 	 * specifying commits that should not be walked past ("shallow" commits).
885 	 * The caller is responsible for filtering out commits that should not be
886 	 * shallow any more ("unshallow" commits as in {@link #setShallowPack}) from
887 	 * the shallow set.
888 	 *
889 	 * @param countingMonitor
890 	 *            progress during object enumeration.
891 	 * @param want
892 	 *            objects of interest, ancestors of which will be included in
893 	 *            the pack. Must not be {@code null}.
894 	 * @param have
895 	 *            objects whose ancestors (up to and including {@code shallow}
896 	 *            commits) do not need to be included in the pack because they
897 	 *            are already available from elsewhere. Must not be
898 	 *            {@code null}.
899 	 * @param shallow
900 	 *            commits indicating the boundary of the history marked with
901 	 *            {@code have}. Shallow commits have parents but those parents
902 	 *            are considered not to be already available. Parents of
903 	 *            {@code shallow} commits and earlier generations will be
904 	 *            included in the pack if requested by {@code want}. Must not be
905 	 *            {@code null}.
906 	 * @param noBitmaps
907 	 *            collection of objects to be excluded from bitmap commit
908 	 *            selection.
909 	 * @throws java.io.IOException
910 	 *             an I/O problem occurred while reading objects.
911 	 */
912 	public void preparePack(ProgressMonitor countingMonitor,
913 			@NonNull Set<? extends ObjectId> want,
914 			@NonNull Set<? extends ObjectId> have,
915 			@NonNull Set<? extends ObjectId> shallow,
916 			@NonNull Set<? extends ObjectId> noBitmaps) throws IOException {
917 		try (ObjectWalk ow = getObjectWalk()) {
918 			ow.assumeShallow(shallow);
919 			preparePack(countingMonitor, ow, want, have, noBitmaps);
920 		}
921 	}
922 
923 	private ObjectWalk getObjectWalk() {
924 		return shallowPack ? new DepthWalk.ObjectWalk(reader, depth - 1)
925 				: new ObjectWalk(reader);
926 	}
927 
928 	/**
929 	 * A visitation policy which uses the depth at which the object is seen to
930 	 * decide if re-traversal is necessary. In particular, if the object has
931 	 * already been visited at this depth or shallower, it is not necessary to
932 	 * re-visit at this depth.
933 	 */
934 	private static class DepthAwareVisitationPolicy
935 			implements ObjectWalk.VisitationPolicy {
936 		private final Map<ObjectId, Integer> lowestDepthVisited = new HashMap<>();
937 
938 		private final ObjectWalk walk;
939 
940 		DepthAwareVisitationPolicy(ObjectWalk walk) {
941 			this.walk = requireNonNull(walk);
942 		}
943 
944 		@Override
945 		public boolean shouldVisit(RevObject o) {
946 			Integer lastDepth = lowestDepthVisited.get(o);
947 			if (lastDepth == null) {
948 				return true;
949 			}
950 			return walk.getTreeDepth() < lastDepth.intValue();
951 		}
952 
953 		@Override
954 		public void visited(RevObject o) {
955 			lowestDepthVisited.put(o, Integer.valueOf(walk.getTreeDepth()));
956 		}
957 	}
958 
959 	/**
960 	 * Prepare the list of objects to be written to the pack stream.
961 	 * <p>
962 	 * Basing on these 2 sets, another set of objects to put in a pack file is
963 	 * created: this set consists of all objects reachable (ancestors) from
964 	 * interesting objects, except uninteresting objects and their ancestors.
965 	 * This method uses class {@link org.eclipse.jgit.revwalk.ObjectWalk}
966 	 * extensively to find out that appropriate set of output objects and their
967 	 * optimal order in output pack. Order is consistent with general git
968 	 * in-pack rules: sort by object type, recency, path and delta-base first.
969 	 * </p>
970 	 *
971 	 * @param countingMonitor
972 	 *            progress during object enumeration.
973 	 * @param walk
974 	 *            ObjectWalk to perform enumeration.
975 	 * @param interestingObjects
976 	 *            collection of objects to be marked as interesting (start
977 	 *            points of graph traversal). Must not be {@code null}.
978 	 * @param uninterestingObjects
979 	 *            collection of objects to be marked as uninteresting (end
980 	 *            points of graph traversal). Pass {@link #NONE} if all objects
981 	 *            reachable from {@code want} are desired, such as when serving
982 	 *            a clone.
983 	 * @param noBitmaps
984 	 *            collection of objects to be excluded from bitmap commit
985 	 *            selection.
986 	 * @throws java.io.IOException
987 	 *             when some I/O problem occur during reading objects.
988 	 */
989 	public void preparePack(ProgressMonitor countingMonitor,
990 			@NonNull ObjectWalk walk,
991 			@NonNull Set<? extends ObjectId> interestingObjects,
992 			@NonNull Set<? extends ObjectId> uninterestingObjects,
993 			@NonNull Set<? extends ObjectId> noBitmaps)
994 			throws IOException {
995 		if (countingMonitor == null)
996 			countingMonitor = NullProgressMonitor.INSTANCE;
997 		if (shallowPack && !(walk instanceof DepthWalk.ObjectWalk))
998 			throw new IllegalArgumentException(
999 					JGitText.get().shallowPacksRequireDepthWalk);
1000 		if (filterSpec.getTreeDepthLimit() >= 0) {
1001 			walk.setVisitationPolicy(new DepthAwareVisitationPolicy(walk));
1002 		}
1003 		findObjectsToPack(countingMonitor, walk, interestingObjects,
1004 				uninterestingObjects, noBitmaps);
1005 	}
1006 
1007 	/**
1008 	 * Determine if the pack file will contain the requested object.
1009 	 *
1010 	 * @param id
1011 	 *            the object to test the existence of.
1012 	 * @return true if the object will appear in the output pack file.
1013 	 * @throws java.io.IOException
1014 	 *             a cached pack cannot be examined.
1015 	 */
1016 	public boolean willInclude(AnyObjectId id) throws IOException {
1017 		ObjectToPack obj = objectsMap.get(id);
1018 		return obj != null && !obj.isEdge();
1019 	}
1020 
1021 	/**
1022 	 * Lookup the ObjectToPack object for a given ObjectId.
1023 	 *
1024 	 * @param id
1025 	 *            the object to find in the pack.
1026 	 * @return the object we are packing, or null.
1027 	 */
1028 	public ObjectToPack get(AnyObjectId id) {
1029 		ObjectToPack obj = objectsMap.get(id);
1030 		return obj != null && !obj.isEdge() ? obj : null;
1031 	}
1032 
1033 	/**
1034 	 * Computes SHA-1 of lexicographically sorted objects ids written in this
1035 	 * pack, as used to name a pack file in repository.
1036 	 *
1037 	 * @return ObjectId representing SHA-1 name of a pack that was created.
1038 	 */
1039 	public ObjectId computeName() {
1040 		final byte[] buf = new byte[OBJECT_ID_LENGTH];
1041 		final MessageDigest md = Constants.newMessageDigest();
1042 		for (ObjectToPack otp : sortByName()) {
1043 			otp.copyRawTo(buf, 0);
1044 			md.update(buf, 0, OBJECT_ID_LENGTH);
1045 		}
1046 		return ObjectId.fromRaw(md.digest());
1047 	}
1048 
1049 	/**
1050 	 * Returns the index format version that will be written.
1051 	 * <p>
1052 	 * This method can only be invoked after
1053 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)} has
1054 	 * been invoked and completed successfully.
1055 	 *
1056 	 * @return the index format version.
1057 	 */
1058 	public int getIndexVersion() {
1059 		int indexVersion = config.getIndexVersion();
1060 		if (indexVersion <= 0) {
1061 			for (BlockList<ObjectToPack> objs : objectsLists)
1062 				indexVersion = Math.max(indexVersion,
1063 						PackIndexWriter.oldestPossibleFormat(objs));
1064 		}
1065 		return indexVersion;
1066 	}
1067 
1068 	/**
1069 	 * Create an index file to match the pack file just written.
1070 	 * <p>
1071 	 * Called after
1072 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}.
1073 	 * <p>
1074 	 * Writing an index is only required for local pack storage. Packs sent on
1075 	 * the network do not need to create an index.
1076 	 *
1077 	 * @param indexStream
1078 	 *            output for the index data. Caller is responsible for closing
1079 	 *            this stream.
1080 	 * @throws java.io.IOException
1081 	 *             the index data could not be written to the supplied stream.
1082 	 */
1083 	public void writeIndex(OutputStream indexStream) throws IOException {
1084 		if (isIndexDisabled())
1085 			throw new IOException(JGitText.get().cachedPacksPreventsIndexCreation);
1086 
1087 		long writeStart = System.currentTimeMillis();
1088 		final PackIndexWriter iw = PackIndexWriter.createVersion(
1089 				indexStream, getIndexVersion());
1090 		iw.write(sortByName(), packcsum);
1091 		stats.timeWriting += System.currentTimeMillis() - writeStart;
1092 	}
1093 
1094 	/**
1095 	 * Create a bitmap index file to match the pack file just written.
1096 	 * <p>
1097 	 * Called after {@link #prepareBitmapIndex(ProgressMonitor)}.
1098 	 *
1099 	 * @param bitmapIndexStream
1100 	 *            output for the bitmap index data. Caller is responsible for
1101 	 *            closing this stream.
1102 	 * @throws java.io.IOException
1103 	 *             the index data could not be written to the supplied stream.
1104 	 */
1105 	public void writeBitmapIndex(OutputStream bitmapIndexStream)
1106 			throws IOException {
1107 		if (writeBitmaps == null)
1108 			throw new IOException(JGitText.get().bitmapsMustBePrepared);
1109 
1110 		long writeStart = System.currentTimeMillis();
1111 		final PackBitmapIndexWriterV1 iw = new PackBitmapIndexWriterV1(bitmapIndexStream);
1112 		iw.write(writeBitmaps, packcsum);
1113 		stats.timeWriting += System.currentTimeMillis() - writeStart;
1114 	}
1115 
1116 	private List<ObjectToPack> sortByName() {
1117 		if (sortedByName == null) {
1118 			int cnt = 0;
1119 			cnt += objectsLists[OBJ_COMMIT].size();
1120 			cnt += objectsLists[OBJ_TREE].size();
1121 			cnt += objectsLists[OBJ_BLOB].size();
1122 			cnt += objectsLists[OBJ_TAG].size();
1123 
1124 			sortedByName = new BlockList<>(cnt);
1125 			sortedByName.addAll(objectsLists[OBJ_COMMIT]);
1126 			sortedByName.addAll(objectsLists[OBJ_TREE]);
1127 			sortedByName.addAll(objectsLists[OBJ_BLOB]);
1128 			sortedByName.addAll(objectsLists[OBJ_TAG]);
1129 			Collections.sort(sortedByName);
1130 		}
1131 		return sortedByName;
1132 	}
1133 
	/**
	 * Mark the start of a packing phase and begin a monitor task for it.
	 *
	 * @param phase
	 *            phase being entered; also recorded in {@link #state}.
	 * @param monitor
	 *            monitor to begin a task on.
	 * @param cnt
	 *            total work units expected; cast to int because the monitor
	 *            API takes an int task count.
	 */
	private void beginPhase(PackingPhase phase, ProgressMonitor monitor,
			long cnt) {
		state.phase = phase;
		String task;
		// Translate the phase into its localized task description.
		switch (phase) {
		case COUNTING:
			task = JGitText.get().countingObjects;
			break;
		case GETTING_SIZES:
			task = JGitText.get().searchForSizes;
			break;
		case FINDING_SOURCES:
			task = JGitText.get().searchForReuse;
			break;
		case COMPRESSING:
			task = JGitText.get().compressingObjects;
			break;
		case WRITING:
			task = JGitText.get().writingObjects;
			break;
		case BUILDING_BITMAPS:
			task = JGitText.get().buildingBitmaps;
			break;
		default:
			throw new IllegalArgumentException(
					MessageFormat.format(JGitText.get().illegalPackingPhase, phase));
		}
		monitor.beginTask(task, (int) cnt);
	}
1163 
	/**
	 * Signal completion of the current packing phase on the monitor.
	 *
	 * @param monitor
	 *            monitor whose active task is ended.
	 */
	private void endPhase(ProgressMonitor monitor) {
		monitor.endTask();
	}
1167 
1168 	/**
1169 	 * Write the prepared pack to the supplied stream.
1170 	 * <p>
1171 	 * Called after
1172 	 * {@link #preparePack(ProgressMonitor, ObjectWalk, Set, Set, Set)} or
1173 	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
1174 	 * <p>
1175 	 * Performs delta search if enabled and writes the pack stream.
1176 	 * <p>
1177 	 * All reused objects data checksum (Adler32/CRC32) is computed and
1178 	 * validated against existing checksum.
1179 	 *
1180 	 * @param compressMonitor
1181 	 *            progress monitor to report object compression work.
1182 	 * @param writeMonitor
1183 	 *            progress monitor to report the number of objects written.
1184 	 * @param packStream
1185 	 *            output stream of pack data. The stream should be buffered by
1186 	 *            the caller. The caller is responsible for closing the stream.
1187 	 * @throws java.io.IOException
1188 	 *             an error occurred reading a local object's data to include in
1189 	 *             the pack, or writing compressed object data to the output
1190 	 *             stream.
1191 	 * @throws WriteAbortedException
1192 	 *             the write operation is aborted by
1193 	 *             {@link org.eclipse.jgit.transport.ObjectCountCallback} .
1194 	 */
1195 	public void writePack(ProgressMonitor compressMonitor,
1196 			ProgressMonitor writeMonitor, OutputStream packStream)
1197 			throws IOException {
1198 		if (compressMonitor == null)
1199 			compressMonitor = NullProgressMonitor.INSTANCE;
1200 		if (writeMonitor == null)
1201 			writeMonitor = NullProgressMonitor.INSTANCE;
1202 
1203 		excludeInPacks = null;
1204 		excludeInPackLast = null;
1205 
1206 		boolean needSearchForReuse = reuseSupport != null && (
1207 				   reuseDeltas
1208 				|| config.isReuseObjects()
1209 				|| !cachedPacks.isEmpty());
1210 
1211 		if (compressMonitor instanceof BatchingProgressMonitor) {
1212 			long delay = 1000;
1213 			if (needSearchForReuse && config.isDeltaCompress())
1214 				delay = 500;
1215 			((BatchingProgressMonitor) compressMonitor).setDelayStart(
1216 					delay,
1217 					TimeUnit.MILLISECONDS);
1218 		}
1219 
1220 		if (needSearchForReuse)
1221 			searchForReuse(compressMonitor);
1222 		if (config.isDeltaCompress())
1223 			searchForDeltas(compressMonitor);
1224 
1225 		crc32 = new CRC32();
1226 		final PackOutputStream out = new PackOutputStream(
1227 			writeMonitor,
1228 			isIndexDisabled()
1229 				? packStream
1230 				: new CheckedOutputStream(packStream, crc32),
1231 			this);
1232 
1233 		long objCnt = packfileUriConfig == null ? getObjectCount() :
1234 			getUnoffloadedObjectCount();
1235 		stats.totalObjects = objCnt;
1236 		if (callback != null)
1237 			callback.setObjectCount(objCnt);
1238 		beginPhase(PackingPhase.WRITING, writeMonitor, objCnt);
1239 		long writeStart = System.currentTimeMillis();
1240 		try {
1241 			List<CachedPack> unwrittenCachedPacks;
1242 
1243 			if (packfileUriConfig != null) {
1244 				unwrittenCachedPacks = new ArrayList<>();
1245 				CachedPackUriProvider p = packfileUriConfig.cachedPackUriProvider;
1246 				PacketLineOut o = packfileUriConfig.pckOut;
1247 
1248 				o.writeString("packfile-uris\n"); //$NON-NLS-1$
1249 				for (CachedPack pack : cachedPacks) {
1250 					CachedPackUriProvider.PackInfo packInfo = p.getInfo(
1251 							pack, packfileUriConfig.protocolsSupported);
1252 					if (packInfo != null) {
1253 						o.writeString(packInfo.getHash() + ' ' +
1254 								packInfo.getUri() + '\n');
1255 						stats.offloadedPackfiles += 1;
1256 						stats.offloadedPackfileSize += packInfo.getSize();
1257 					} else {
1258 						unwrittenCachedPacks.add(pack);
1259 					}
1260 				}
1261 				packfileUriConfig.pckOut.writeDelim();
1262 				packfileUriConfig.pckOut.writeString("packfile\n"); //$NON-NLS-1$
1263 			} else {
1264 				unwrittenCachedPacks = cachedPacks;
1265 			}
1266 
1267 			out.writeFileHeader(PACK_VERSION_GENERATED, objCnt);
1268 			out.flush();
1269 
1270 			writeObjects(out);
1271 			if (!edgeObjects.isEmpty() || !cachedPacks.isEmpty()) {
1272 				for (PackStatistics.ObjectType.Accumulator typeStat : stats.objectTypes) {
1273 					if (typeStat == null)
1274 						continue;
1275 					stats.thinPackBytes += typeStat.bytes;
1276 				}
1277 			}
1278 
1279 			stats.reusedPacks = Collections.unmodifiableList(cachedPacks);
1280 			for (CachedPack pack : unwrittenCachedPacks) {
1281 				long deltaCnt = pack.getDeltaCount();
1282 				stats.reusedObjects += pack.getObjectCount();
1283 				stats.reusedDeltas += deltaCnt;
1284 				stats.totalDeltas += deltaCnt;
1285 				reuseSupport.copyPackAsIs(out, pack);
1286 			}
1287 			writeChecksum(out);
1288 			out.flush();
1289 		} finally {
1290 			stats.timeWriting = System.currentTimeMillis() - writeStart;
1291 			stats.depth = depth;
1292 
1293 			for (PackStatistics.ObjectType.Accumulator typeStat : stats.objectTypes) {
1294 				if (typeStat == null)
1295 					continue;
1296 				typeStat.cntDeltas += typeStat.reusedDeltas;
1297 				stats.reusedObjects += typeStat.reusedObjects;
1298 				stats.reusedDeltas += typeStat.reusedDeltas;
1299 				stats.totalDeltas += typeStat.cntDeltas;
1300 			}
1301 		}
1302 
1303 		stats.totalBytes = out.length();
1304 		reader.close();
1305 		endPhase(writeMonitor);
1306 	}
1307 
1308 	/**
1309 	 * Get statistics of what this PackWriter did in order to create the final
1310 	 * pack stream.
1311 	 *
1312 	 * @return description of what this PackWriter did in order to create the
1313 	 *         final pack stream. This should only be invoked after the calls to
1314 	 *         create the pack/index/bitmap have completed.
1315 	 */
1316 	public PackStatistics getStatistics() {
1317 		return new PackStatistics(stats);
1318 	}
1319 
1320 	/**
1321 	 * Get snapshot of the current state of this PackWriter.
1322 	 *
1323 	 * @return snapshot of the current state of this PackWriter.
1324 	 */
1325 	public State getState() {
1326 		return state.snapshot();
1327 	}
1328 
1329 	/**
1330 	 * {@inheritDoc}
1331 	 * <p>
1332 	 * Release all resources used by this writer.
1333 	 */
1334 	@Override
1335 	public void close() {
1336 		reader.close();
1337 		if (myDeflater != null) {
1338 			myDeflater.end();
1339 			myDeflater = null;
1340 		}
1341 		instances.remove(selfRef);
1342 	}
1343 
	/**
	 * Select reusable stored representations for every queued object.
	 *
	 * @param monitor
	 *            progress feedback during the FINDING_SOURCES phase.
	 * @throws IOException
	 *             the reuse provider could not access object storage.
	 */
	private void searchForReuse(ProgressMonitor monitor) throws IOException {
		// Total object count across all four per-type lists.
		long cnt = 0;
		cnt += objectsLists[OBJ_COMMIT].size();
		cnt += objectsLists[OBJ_TREE].size();
		cnt += objectsLists[OBJ_BLOB].size();
		cnt += objectsLists[OBJ_TAG].size();

		long start = System.currentTimeMillis();
		searchForReuseStartTimeEpoc = start;
		beginPhase(PackingPhase.FINDING_SOURCES, monitor, cnt);
		if (cnt <= 4096) {
			// For small object counts, do everything as one list.
			BlockList<ObjectToPack> tmp = new BlockList<>((int) cnt);
			tmp.addAll(objectsLists[OBJ_TAG]);
			tmp.addAll(objectsLists[OBJ_COMMIT]);
			tmp.addAll(objectsLists[OBJ_TREE]);
			tmp.addAll(objectsLists[OBJ_BLOB]);
			searchForReuse(monitor, tmp);
			if (pruneCurrentObjectList) {
				// If the list was pruned, we need to re-prune the main lists.
				pruneEdgesFromObjectList(objectsLists[OBJ_COMMIT]);
				pruneEdgesFromObjectList(objectsLists[OBJ_TREE]);
				pruneEdgesFromObjectList(objectsLists[OBJ_BLOB]);
				pruneEdgesFromObjectList(objectsLists[OBJ_TAG]);
			}
		} else {
			// Larger inputs are processed one type list at a time, avoiding
			// a combined copy of every object.
			searchForReuse(monitor, objectsLists[OBJ_TAG]);
			searchForReuse(monitor, objectsLists[OBJ_COMMIT]);
			searchForReuse(monitor, objectsLists[OBJ_TREE]);
			searchForReuse(monitor, objectsLists[OBJ_BLOB]);
		}
		endPhase(monitor);
		stats.timeSearchingForReuse = System.currentTimeMillis() - start;

		if (config.isReuseDeltas() && config.getCutDeltaChains()) {
			// Reused deltas may chain arbitrarily deep; trim chains that
			// exceed the configured maximum delta depth.
			cutDeltaChains(objectsLists[OBJ_TREE]);
			cutDeltaChains(objectsLists[OBJ_BLOB]);
		}
	}
1383 
1384 	private void searchForReuse(ProgressMonitor monitor, List<ObjectToPack> list)
1385 			throws IOException, MissingObjectException {
1386 		pruneCurrentObjectList = false;
1387 		reuseSupport.selectObjectRepresentation(this, monitor, list);
1388 		if (pruneCurrentObjectList)
1389 			pruneEdgesFromObjectList(list);
1390 	}
1391 
	/**
	 * Bound reused delta chains to the configured maximum depth.
	 * <p>
	 * Walks from each object down its chain of delta bases, recording chain
	 * lengths as it goes, and re-selects a whole (non-delta) representation
	 * for any base found at or beyond the depth limit.
	 *
	 * @param list
	 *            objects whose delta chains should be bounded.
	 * @throws IOException
	 *             re-selecting a whole representation failed.
	 */
	private void cutDeltaChains(BlockList<ObjectToPack> list)
			throws IOException {
		int max = config.getMaxDeltaDepth();
		for (int idx = list.size() - 1; idx >= 0; idx--) {
			int d = 0;
			ObjectToPack b = list.get(idx).getDeltaBase();
			while (b != null) {
				// Stop once this base already carries a longer recorded
				// chain; it was reached through a deeper path already.
				if (d < b.getChainLength())
					break;
				b.setChainLength(++d);
				if (d >= max && b.isDeltaRepresentation()) {
					// Chain too deep: force this base back to a whole object.
					reselectNonDelta(b);
					break;
				}
				b = b.getDeltaBase();
			}
		}
		if (config.isDeltaCompress()) {
			// Clear the scratch chain lengths before delta compression runs.
			for (ObjectToPack otp : list)
				otp.clearChainLength();
		}
	}
1414 
	/**
	 * Find delta compression candidates, size them, sort them, and run the
	 * delta search over the result.
	 *
	 * @param monitor
	 *            progress feedback for the sizing and compressing phases.
	 * @throws MissingObjectException
	 *             a required object is missing and could not be skipped.
	 * @throws IncorrectObjectTypeException
	 *             an object had an unexpected type.
	 * @throws IOException
	 *             object data could not be read.
	 */
	private void searchForDeltas(ProgressMonitor monitor)
			throws MissingObjectException, IncorrectObjectTypeException,
			IOException {
		// Commits and annotated tags tend to have too many differences to
		// really benefit from delta compression. Consequently just don't
		// bother examining those types here.
		//
		ObjectToPack[] list = new ObjectToPack[
				  objectsLists[OBJ_TREE].size()
				+ objectsLists[OBJ_BLOB].size()
				+ edgeObjects.size()];
		int cnt = 0;
		cnt = findObjectsNeedingDelta(list, cnt, OBJ_TREE);
		cnt = findObjectsNeedingDelta(list, cnt, OBJ_BLOB);
		if (cnt == 0)
			return;
		int nonEdgeCnt = cnt;

		// Queue up any edge objects that we might delta against.  We won't
		// be sending these as we assume the other side has them, but we need
		// them in the search phase below.
		//
		for (ObjectToPack eo : edgeObjects) {
			eo.setWeight(0);
			list[cnt++] = eo;
		}

		// Compute the sizes of the objects so we can do a proper sort.
		// We let the reader skip missing objects if it chooses. For
		// some readers this can be a huge win. We detect missing objects
		// by having set the weights above to 0 and allowing the delta
		// search code to discover the missing object and skip over it, or
		// abort with an exception if we actually had to have it.
		//
		final long sizingStart = System.currentTimeMillis();
		beginPhase(PackingPhase.GETTING_SIZES, monitor, cnt);
		AsyncObjectSizeQueue<ObjectToPack> sizeQueue = reader.getObjectSize(
				Arrays.<ObjectToPack> asList(list).subList(0, cnt), false);
		try {
			final long limit = Math.min(
					config.getBigFileThreshold(),
					Integer.MAX_VALUE);
			for (;;) {
				try {
					if (!sizeQueue.next())
						break;
				} catch (MissingObjectException notFound) {
					monitor.update(1);
					if (ignoreMissingUninteresting) {
						// A missing edge object is tolerable: just exclude
						// it from delta consideration and keep going.
						ObjectToPack otp = sizeQueue.getCurrent();
						if (otp != null && otp.isEdge()) {
							otp.setDoNotDelta();
							continue;
						}

						otp = objectsMap.get(notFound.getObjectId());
						if (otp != null && otp.isEdge()) {
							otp.setDoNotDelta();
							continue;
						}
					}
					throw notFound;
				}

				ObjectToPack otp = sizeQueue.getCurrent();
				if (otp == null)
					otp = objectsMap.get(sizeQueue.getObjectId());

				long sz = sizeQueue.getSize();
				if (DeltaIndex.BLKSZ < sz && sz < limit)
					otp.setWeight((int) sz);
				else
					otp.setDoNotDelta(); // too small, or too big
				monitor.update(1);
			}
		} finally {
			sizeQueue.release();
		}
		endPhase(monitor);
		stats.timeSearchingForSizes = System.currentTimeMillis() - sizingStart;

		// Sort the objects by path hash so like files are near each other,
		// and then by size descending so that bigger files are first. This
		// applies "Linus' Law" which states that newer files tend to be the
		// bigger ones, because source files grow and hardly ever shrink.
		//
		Arrays.sort(list, 0, cnt, (ObjectToPack a, ObjectToPack b) -> {
			int cmp = (a.isDoNotDelta() ? 1 : 0) - (b.isDoNotDelta() ? 1 : 0);
			if (cmp != 0) {
				return cmp;
			}

			cmp = a.getType() - b.getType();
			if (cmp != 0) {
				return cmp;
			}

			cmp = (a.getPathHash() >>> 1) - (b.getPathHash() >>> 1);
			if (cmp != 0) {
				return cmp;
			}

			cmp = (a.getPathHash() & 1) - (b.getPathHash() & 1);
			if (cmp != 0) {
				return cmp;
			}

			cmp = (a.isEdge() ? 0 : 1) - (b.isEdge() ? 0 : 1);
			if (cmp != 0) {
				return cmp;
			}

			return b.getWeight() - a.getWeight();
		});

		// Above we stored the objects we cannot delta onto the end.
		// Remove them from the list so we don't waste time on them.
		while (0 < cnt && list[cnt - 1].isDoNotDelta()) {
			if (!list[cnt - 1].isEdge())
				nonEdgeCnt--;
			cnt--;
		}
		if (cnt == 0)
			return;

		final long searchStart = System.currentTimeMillis();
		searchForDeltas(monitor, list, cnt);
		stats.deltaSearchNonEdgeObjects = nonEdgeCnt;
		stats.timeCompressing = System.currentTimeMillis() - searchStart;

		for (int i = 0; i < cnt; i++)
			if (!list[i].isEdge() && list[i].isDeltaRepresentation())
				stats.deltasFound++;
	}
1549 
1550 	private int findObjectsNeedingDelta(ObjectToPack[] list, int cnt, int type) {
1551 		for (ObjectToPack otp : objectsLists[type]) {
1552 			if (otp.isDoNotDelta()) // delta is disabled for this path
1553 				continue;
1554 			if (otp.isDeltaRepresentation()) // already reusing a delta
1555 				continue;
1556 			otp.setWeight(0);
1557 			list[cnt++] = otp;
1558 		}
1559 		return cnt;
1560 	}
1561 
1562 	private void reselectNonDelta(ObjectToPack otp) throws IOException {
1563 		otp.clearDeltaBase();
1564 		otp.clearReuseAsIs();
1565 		boolean old = reuseDeltas;
1566 		reuseDeltas = false;
1567 		reuseSupport.selectObjectRepresentation(this,
1568 				NullProgressMonitor.INSTANCE,
1569 				Collections.singleton(otp));
1570 		reuseDeltas = old;
1571 	}
1572 
1573 	private void searchForDeltas(final ProgressMonitor monitor,
1574 			final ObjectToPack[] list, final int cnt)
1575 			throws MissingObjectException, IncorrectObjectTypeException,
1576 			LargeObjectException, IOException {
1577 		int threads = config.getThreads();
1578 		if (threads == 0)
1579 			threads = Runtime.getRuntime().availableProcessors();
1580 		if (threads <= 1 || cnt <= config.getDeltaSearchWindowSize())
1581 			singleThreadDeltaSearch(monitor, list, cnt);
1582 		else
1583 			parallelDeltaSearch(monitor, list, cnt, threads);
1584 	}
1585 
1586 	private void singleThreadDeltaSearch(ProgressMonitor monitor,
1587 			ObjectToPack[] list, int cnt) throws IOException {
1588 		long totalWeight = 0;
1589 		for (int i = 0; i < cnt; i++) {
1590 			ObjectToPack o = list[i];
1591 			totalWeight += DeltaTask.getAdjustedWeight(o);
1592 		}
1593 
1594 		long bytesPerUnit = 1;
1595 		while (DeltaTask.MAX_METER <= (totalWeight / bytesPerUnit))
1596 			bytesPerUnit <<= 10;
1597 		int cost = (int) (totalWeight / bytesPerUnit);
1598 		if (totalWeight % bytesPerUnit != 0)
1599 			cost++;
1600 
1601 		beginPhase(PackingPhase.COMPRESSING, monitor, cost);
1602 		new DeltaWindow(config, new DeltaCache(config), reader,
1603 				monitor, bytesPerUnit,
1604 				list, 0, cnt).search();
1605 		endPhase(monitor);
1606 	}
1607 
1608 	@SuppressWarnings("Finally")
1609 	private void parallelDeltaSearch(ProgressMonitor monitor,
1610 			ObjectToPack[] list, int cnt, int threads) throws IOException {
1611 		DeltaCache dc = new ThreadSafeDeltaCache(config);
1612 		ThreadSafeProgressMonitor pm = new ThreadSafeProgressMonitor(monitor);
1613 		DeltaTask.Block taskBlock = new DeltaTask.Block(threads, config,
1614 				reader, dc, pm,
1615 				list, 0, cnt);
1616 		taskBlock.partitionTasks();
1617 		beginPhase(PackingPhase.COMPRESSING, monitor, taskBlock.cost());
1618 		pm.startWorkers(taskBlock.tasks.size());
1619 
1620 		Executor executor = config.getExecutor();
1621 		final List<Throwable> errors =
1622 				Collections.synchronizedList(new ArrayList<>(threads));
1623 		if (executor instanceof ExecutorService) {
1624 			// Caller supplied us a service, use it directly.
1625 			runTasks((ExecutorService) executor, pm, taskBlock, errors);
1626 		} else if (executor == null) {
1627 			// Caller didn't give us a way to run the tasks, spawn up a
1628 			// temporary thread pool and make sure it tears down cleanly.
1629 			ExecutorService pool = Executors.newFixedThreadPool(threads);
1630 			Throwable e1 = null;
1631 			try {
1632 				runTasks(pool, pm, taskBlock, errors);
1633 			} catch (Exception e) {
1634 				e1 = e;
1635 			} finally {
1636 				pool.shutdown();
1637 				for (;;) {
1638 					try {
1639 						if (pool.awaitTermination(60, TimeUnit.SECONDS)) {
1640 							break;
1641 						}
1642 					} catch (InterruptedException e) {
1643 						if (e1 != null) {
1644 							e.addSuppressed(e1);
1645 						}
1646 						throw new IOException(JGitText
1647 								.get().packingCancelledDuringObjectsWriting, e);
1648 					}
1649 				}
1650 			}
1651 		} else {
1652 			// The caller gave us an executor, but it might not do
1653 			// asynchronous execution.  Wrap everything and hope it
1654 			// can schedule these for us.
1655 			for (DeltaTask task : taskBlock.tasks) {
1656 				executor.execute(() -> {
1657 					try {
1658 						task.call();
1659 					} catch (Throwable failure) {
1660 						errors.add(failure);
1661 					}
1662 				});
1663 			}
1664 			try {
1665 				pm.waitForCompletion();
1666 			} catch (InterruptedException ie) {
1667 				// We can't abort the other tasks as we have no handle.
1668 				// Cross our fingers and just break out anyway.
1669 				//
1670 				throw new IOException(
1671 						JGitText.get().packingCancelledDuringObjectsWriting,
1672 						ie);
1673 			}
1674 		}
1675 
1676 		// If any task threw an error, try to report it back as
1677 		// though we weren't using a threaded search algorithm.
1678 		//
1679 		if (!errors.isEmpty()) {
1680 			Throwable err = errors.get(0);
1681 			if (err instanceof Error)
1682 				throw (Error) err;
1683 			if (err instanceof RuntimeException)
1684 				throw (RuntimeException) err;
1685 			if (err instanceof IOException)
1686 				throw (IOException) err;
1687 
1688 			throw new IOException(err.getMessage(), err);
1689 		}
1690 		endPhase(monitor);
1691 	}
1692 
1693 	private static void runTasks(ExecutorService pool,
1694 			ThreadSafeProgressMonitor pm,
1695 			DeltaTask.Block tb, List<Throwable> errors) throws IOException {
1696 		List<Future<?>> futures = new ArrayList<>(tb.tasks.size());
1697 		for (DeltaTask task : tb.tasks)
1698 			futures.add(pool.submit(task));
1699 
1700 		try {
1701 			pm.waitForCompletion();
1702 			for (Future<?> f : futures) {
1703 				try {
1704 					f.get();
1705 				} catch (ExecutionException failed) {
1706 					errors.add(failed.getCause());
1707 				}
1708 			}
1709 		} catch (InterruptedException ie) {
1710 			for (Future<?> f : futures)
1711 				f.cancel(true);
1712 			throw new IOException(
1713 					JGitText.get().packingCancelledDuringObjectsWriting, ie);
1714 		}
1715 	}
1716 
	/**
	 * Write every queued object to the pack stream, grouped by type.
	 * <p>
	 * Objects are written type by type: commits, then tags, trees and blobs.
	 *
	 * @param out
	 *            destination pack stream.
	 * @throws IOException
	 *             writing to the pack stream failed.
	 */
	private void writeObjects(PackOutputStream out) throws IOException {
		writeObjects(out, objectsLists[OBJ_COMMIT]);
		writeObjects(out, objectsLists[OBJ_TAG]);
		writeObjects(out, objectsLists[OBJ_TREE]);
		writeObjects(out, objectsLists[OBJ_BLOB]);
	}
1723 
	/**
	 * Write one homogeneous list of objects and update per-type statistics.
	 *
	 * @param out
	 *            destination pack stream.
	 * @param list
	 *            objects to write; all entries share one object type.
	 * @throws IOException
	 *             writing to the pack stream failed.
	 */
	private void writeObjects(PackOutputStream out, List<ObjectToPack> list)
			throws IOException {
		if (list.isEmpty())
			return;

		// All entries share one type; point typeStats at its accumulator
		// so byte counts land in the right bucket.
		typeStats = stats.objectTypes[list.get(0).getType()];
		long beginOffset = out.length();

		if (reuseSupport != null) {
			reuseSupport.writeObjects(out, list);
		} else {
			for (ObjectToPack otp : list)
				out.writeObject(otp);
		}

		typeStats.bytes += out.length() - beginOffset;
		typeStats.cntObjects = list.size();
	}
1742 
1743 	void writeObject(PackOutputStream out, ObjectToPack otp) throws IOException {
1744 		if (!otp.isWritten())
1745 			writeObjectImpl(out, otp);
1746 	}
1747 
	/**
	 * Write one object, retrying with another representation if the chosen
	 * stored representation vanished mid-write (e.g. concurrent repack).
	 * Falls back to deflating a whole or delta form when reuse fails.
	 */
	private void writeObjectImpl(PackOutputStream out, ObjectToPack otp)
			throws IOException {
		if (otp.wantWrite()) {
			// A cycle exists in this delta chain. This should only occur if a
			// selected object representation disappeared during writing
			// (for example due to a concurrent repack) and a different base
			// was chosen, forcing a cycle. Select something other than a
			// delta, and write this object.
			reselectNonDelta(otp);
		}
		otp.markWantWrite();

		while (otp.isReuseAsIs()) {
			// The delta base (if any) must precede this object in the
			// stream. Writing it may recursively come back to this object
			// through a delta chain cycle, hence the isWritten() re-check.
			writeBase(out, otp.getDeltaBase());
			if (otp.isWritten())
				return; // Delta chain cycle caused this to write already.

			crc32.reset();
			otp.setOffset(out.length());
			try {
				reuseSupport.copyObjectAsIs(out, otp, reuseValidate);
				out.endObject();
				otp.setCRC((int) crc32.getValue());
				typeStats.reusedObjects++;
				if (otp.isDeltaRepresentation()) {
					typeStats.reusedDeltas++;
					typeStats.deltaBytes += out.length() - otp.getOffset();
				}
				return;
			} catch (StoredObjectRepresentationNotAvailableException gone) {
				if (otp.getOffset() == out.length()) {
					// Nothing was emitted yet for this object: drop the
					// stale representation, pick a fresh one and retry.
					otp.setOffset(0);
					otp.clearDeltaBase();
					otp.clearReuseAsIs();
					reuseSupport.selectObjectRepresentation(this,
							NullProgressMonitor.INSTANCE,
							Collections.singleton(otp));
					continue;
				}
				// Object writing already started, we cannot recover.
				//
				CorruptObjectException coe;
				coe = new CorruptObjectException(otp, ""); //$NON-NLS-1$
				coe.initCause(gone);
				throw coe;
			}
		}

		// If we reached here, reuse wasn't possible.
		//
		if (otp.isDeltaRepresentation()) {
			writeDeltaObjectDeflate(out, otp);
		} else {
			writeWholeObjectDeflate(out, otp);
		}
		out.endObject();
		otp.setCRC((int) crc32.getValue());
	}
1806 
1807 	private void writeBase(PackOutputStream out, ObjectToPack base)
1808 			throws IOException {
1809 		if (base != null && !base.isWritten() && !base.isEdge())
1810 			writeObjectImpl(out, base);
1811 	}
1812 
1813 	private void writeWholeObjectDeflate(PackOutputStream out,
1814 			final ObjectToPack otp) throws IOException {
1815 		final Deflater deflater = deflater();
1816 		final ObjectLoader ldr = reader.open(otp, otp.getType());
1817 
1818 		crc32.reset();
1819 		otp.setOffset(out.length());
1820 		out.writeHeader(otp, ldr.getSize());
1821 
1822 		deflater.reset();
1823 		DeflaterOutputStream dst = new DeflaterOutputStream(out, deflater);
1824 		ldr.copyTo(dst);
1825 		dst.finish();
1826 	}
1827 
1828 	private void writeDeltaObjectDeflate(PackOutputStream out,
1829 			final ObjectToPack otp) throws IOException {
1830 		writeBase(out, otp.getDeltaBase());
1831 
1832 		crc32.reset();
1833 		otp.setOffset(out.length());
1834 
1835 		DeltaCache.Ref ref = otp.popCachedDelta();
1836 		if (ref != null) {
1837 			byte[] zbuf = ref.get();
1838 			if (zbuf != null) {
1839 				out.writeHeader(otp, otp.getCachedSize());
1840 				out.write(zbuf);
1841 				typeStats.cntDeltas++;
1842 				typeStats.deltaBytes += out.length() - otp.getOffset();
1843 				return;
1844 			}
1845 		}
1846 
1847 		try (TemporaryBuffer.Heap delta = delta(otp)) {
1848 			out.writeHeader(otp, delta.length());
1849 
1850 			Deflater deflater = deflater();
1851 			deflater.reset();
1852 			DeflaterOutputStream dst = new DeflaterOutputStream(out, deflater);
1853 			delta.writeTo(dst, null);
1854 			dst.finish();
1855 		}
1856 		typeStats.cntDeltas++;
1857 		typeStats.deltaBytes += out.length() - otp.getOffset();
1858 	}
1859 
1860 	private TemporaryBuffer.Heap delta(ObjectToPack otp)
1861 			throws IOException {
1862 		DeltaIndex index = new DeltaIndex(buffer(otp.getDeltaBaseId()));
1863 		byte[] res = buffer(otp);
1864 
1865 		// We never would have proposed this pair if the delta would be
1866 		// larger than the unpacked version of the object. So using it
1867 		// as our buffer limit is valid: we will never reach it.
1868 		//
1869 		TemporaryBuffer.Heap delta = new TemporaryBuffer.Heap(res.length);
1870 		index.encode(delta, res);
1871 		return delta;
1872 	}
1873 
1874 	private byte[] buffer(AnyObjectId objId) throws IOException {
1875 		return buffer(config, reader, objId);
1876 	}
1877 
1878 	static byte[] buffer(PackConfig config, ObjectReader or, AnyObjectId objId)
1879 			throws IOException {
1880 		// PackWriter should have already pruned objects that
1881 		// are above the big file threshold, so our chances of
1882 		// the object being below it are very good. We really
1883 		// shouldn't be here, unless the implementation is odd.
1884 
1885 		return or.open(objId).getCachedBytes(config.getBigFileThreshold());
1886 	}
1887 
1888 	private Deflater deflater() {
1889 		if (myDeflater == null)
1890 			myDeflater = new Deflater(config.getCompressionLevel());
1891 		return myDeflater;
1892 	}
1893 
1894 	private void writeChecksum(PackOutputStream out) throws IOException {
1895 		packcsum = out.getDigest();
1896 		out.write(packcsum);
1897 	}
1898 
	/**
	 * Walk the object graph and populate the per-type object lists to pack.
	 * <p>
	 * Uses a bitmap index when one is available and permitted; otherwise
	 * performs a full commit walk followed by a tree/blob walk, marking
	 * "have" objects uninteresting so only the needed closure is packed.
	 *
	 * @param countingMonitor
	 *            progress feedback while objects are counted.
	 * @param walker
	 *            walk configured over the supplied tips.
	 * @param want
	 *            objects the client asked to receive.
	 * @param have
	 *            objects the client is assumed to already possess.
	 * @param noBitmaps
	 *            commits to exclude from later bitmap selection.
	 * @throws IOException
	 *             an object could not be read from the repository.
	 */
	private void findObjectsToPack(@NonNull ProgressMonitor countingMonitor,
			@NonNull ObjectWalk walker, @NonNull Set<? extends ObjectId> want,
			@NonNull Set<? extends ObjectId> have,
			@NonNull Set<? extends ObjectId> noBitmaps) throws IOException {
		final long countingStart = System.currentTimeMillis();
		beginPhase(PackingPhase.COUNTING, countingMonitor, ProgressMonitor.UNKNOWN);

		stats.interestingObjects = Collections.unmodifiableSet(new HashSet<ObjectId>(want));
		stats.uninterestingObjects = Collections.unmodifiableSet(new HashSet<ObjectId>(have));
		excludeFromBitmapSelection = noBitmaps;

		// Bitmaps can only be built for a full (non-thin, non-shallow,
		// unfiltered) pack.
		canBuildBitmaps = config.isBuildBitmaps()
				&& !shallowPack
				&& have.isEmpty()
				&& (excludeInPacks == null || excludeInPacks.length == 0);
		if (!shallowPack && useBitmaps) {
			BitmapIndex bitmapIndex = reader.getBitmapIndex();
			if (bitmapIndex != null) {
				// Fast path: reachability comes from the bitmap index,
				// no graph walk needed.
				BitmapWalker bitmapWalker = new BitmapWalker(
						walker, bitmapIndex, countingMonitor);
				findObjectsToPackUsingBitmaps(bitmapWalker, want, have);
				endPhase(countingMonitor);
				stats.timeCounting = System.currentTimeMillis() - countingStart;
				stats.bitmapIndexMisses = bitmapWalker.getCountOfBitmapIndexMisses();
				return;
			}
		}

		List<ObjectId> all = new ArrayList<>(want.size() + have.size());
		all.addAll(want);
		all.addAll(have);

		final RevFlag include = walker.newFlag("include"); //$NON-NLS-1$
		final RevFlag added = walker.newFlag("added"); //$NON-NLS-1$

		walker.carry(include);

		int haveEst = have.size();
		if (have.isEmpty()) {
			walker.sort(RevSort.COMMIT_TIME_DESC);
		} else {
			walker.sort(RevSort.TOPO);
			if (thin)
				walker.sort(RevSort.BOUNDARY, true);
		}

		List<RevObject> wantObjs = new ArrayList<>(want.size());
		List<RevObject> haveObjs = new ArrayList<>(haveEst);
		List<RevTag> wantTags = new ArrayList<>(want.size());

		// Retrieve the RevWalk's versions of "want" and "have" objects to
		// maintain any state previously set in the RevWalk.
		AsyncRevObjectQueue q = walker.parseAny(all, true);
		try {
			for (;;) {
				try {
					RevObject o = q.next();
					if (o == null)
						break;
					if (have.contains(o))
						haveObjs.add(o);
					if (want.contains(o)) {
						o.add(include);
						wantObjs.add(o);
						if (o instanceof RevTag)
							wantTags.add((RevTag) o);
					}
				} catch (MissingObjectException e) {
					// A missing "have" object may be tolerated; the client
					// merely claimed to possess something we cannot see.
					if (ignoreMissingUninteresting
							&& have.contains(e.getObjectId()))
						continue;
					throw e;
				}
			}
		} finally {
			q.release();
		}

		// Pre-parse the targets of wanted annotated tags so the walk can
		// traverse through them.
		if (!wantTags.isEmpty()) {
			all = new ArrayList<>(wantTags.size());
			for (RevTag tag : wantTags)
				all.add(tag.getObject());
			q = walker.parseAny(all, true);
			try {
				while (q.next() != null) {
					// Just need to pop the queue item to parse the object.
				}
			} finally {
				q.release();
			}
		}

		if (walker instanceof DepthWalk.ObjectWalk) {
			DepthWalk.ObjectWalk depthWalk = (DepthWalk.ObjectWalk) walker;
			for (RevObject obj : wantObjs) {
				depthWalk.markRoot(obj);
			}
			// Mark the tree objects associated with "have" commits as
			// uninteresting to avoid writing redundant blobs. A normal RevWalk
			// lazily propagates the "uninteresting" state from a commit to its
			// tree during the walk, but DepthWalks can terminate early so
			// preemptively propagate that state here.
			for (RevObject obj : haveObjs) {
				if (obj instanceof RevCommit) {
					RevTree t = ((RevCommit) obj).getTree();
					depthWalk.markUninteresting(t);
				}
			}

			if (unshallowObjects != null) {
				for (ObjectId id : unshallowObjects) {
					depthWalk.markUnshallow(walker.parseAny(id));
				}
			}
		} else {
			for (RevObject obj : wantObjs)
				walker.markStart(obj);
		}
		for (RevObject obj : haveObjs)
			walker.markUninteresting(obj);

		// Phase 1: walk commits. Uninteresting boundary commits donate
		// their trees as delta base candidates (capped by the delta
		// search window size).
		final int maxBases = config.getDeltaSearchWindowSize();
		Set<RevTree> baseTrees = new HashSet<>();
		BlockList<RevCommit> commits = new BlockList<>();
		Set<ObjectId> roots = new HashSet<>();
		RevCommit c;
		while ((c = walker.next()) != null) {
			if (exclude(c))
				continue;
			if (c.has(RevFlag.UNINTERESTING)) {
				if (baseTrees.size() <= maxBases)
					baseTrees.add(c.getTree());
				continue;
			}

			commits.add(c);
			if (c.getParentCount() == 0) {
				roots.add(c.copy());
			}
			countingMonitor.update(1);
		}
		stats.rootCommits = Collections.unmodifiableSet(roots);

		if (shallowPack) {
			for (RevCommit cmit : commits) {
				addObject(cmit, 0);
			}
		} else {
			int commitCnt = 0;
			boolean putTagTargets = false;
			for (RevCommit cmit : commits) {
				if (!cmit.has(added)) {
					cmit.add(added);
					addObject(cmit, 0);
					commitCnt++;
				}

				// Adding parents early improves delta compression locality
				// for commits in the output stream.
				for (int i = 0; i < cmit.getParentCount(); i++) {
					RevCommit p = cmit.getParent(i);
					if (!p.has(added) && !p.has(RevFlag.UNINTERESTING)
							&& !exclude(p)) {
						p.add(added);
						addObject(p, 0);
						commitCnt++;
					}
				}

				// After enough commits, pull in tag targets that are part
				// of the walk so they cluster near the front of the pack.
				if (!putTagTargets && 4096 < commitCnt) {
					for (ObjectId id : tagTargets) {
						RevObject obj = walker.lookupOrNull(id);
						if (obj instanceof RevCommit
								&& obj.has(include)
								&& !obj.has(RevFlag.UNINTERESTING)
								&& !obj.has(added)) {
							obj.add(added);
							addObject(obj, 0);
						}
					}
					putTagTargets = true;
				}
			}
		}
		commits = null;

		// Phase 2: walk trees and blobs. For thin packs also record edge
		// base candidates from the boundary trees collected above.
		if (thin && !baseTrees.isEmpty()) {
			BaseSearch bases = new BaseSearch(countingMonitor, baseTrees, //
					objectsMap, edgeObjects, reader);
			RevObject o;
			while ((o = walker.nextObject()) != null) {
				if (o.has(RevFlag.UNINTERESTING))
					continue;
				if (exclude(o))
					continue;

				int pathHash = walker.getPathHashCode();
				byte[] pathBuf = walker.getPathBuffer();
				int pathLen = walker.getPathLength();
				bases.addBase(o.getType(), pathBuf, pathLen, pathHash);
				if (!depthSkip(o, walker)) {
					filterAndAddObject(o, o.getType(), pathHash, want);
				}
				countingMonitor.update(1);
			}
		} else {
			RevObject o;
			while ((o = walker.nextObject()) != null) {
				if (o.has(RevFlag.UNINTERESTING))
					continue;
				if (exclude(o))
					continue;
				if (!depthSkip(o, walker)) {
					filterAndAddObject(o, o.getType(), walker.getPathHashCode(),
									   want);
				}
				countingMonitor.update(1);
			}
		}

		for (CachedPack pack : cachedPacks)
			countingMonitor.update((int) pack.getObjectCount());
		endPhase(countingMonitor);
		stats.timeCounting = System.currentTimeMillis() - countingStart;
		// -1 signals that the bitmap fast path was not taken.
		stats.bitmapIndexMisses = -1;
	}
2123 
2124 	private void findObjectsToPackUsingBitmaps(
2125 			BitmapWalker bitmapWalker, Set<? extends ObjectId> want,
2126 			Set<? extends ObjectId> have)
2127 			throws MissingObjectException, IncorrectObjectTypeException,
2128 			IOException {
2129 		BitmapBuilder haveBitmap = bitmapWalker.findObjects(have, null, true);
2130 		BitmapBuilder wantBitmap = bitmapWalker.findObjects(want, haveBitmap,
2131 				false);
2132 		BitmapBuilder needBitmap = wantBitmap.andNot(haveBitmap);
2133 
2134 		if (useCachedPacks && reuseSupport != null && !reuseValidate
2135 				&& (excludeInPacks == null || excludeInPacks.length == 0))
2136 			cachedPacks.addAll(
2137 					reuseSupport.getCachedPacksAndUpdate(needBitmap));
2138 
2139 		for (BitmapObject obj : needBitmap) {
2140 			ObjectId objectId = obj.getObjectId();
2141 			if (exclude(objectId)) {
2142 				needBitmap.remove(objectId);
2143 				continue;
2144 			}
2145 			filterAndAddObject(objectId, obj.getType(), 0, want);
2146 		}
2147 
2148 		if (thin)
2149 			haveObjects = haveBitmap;
2150 	}
2151 
2152 	private static void pruneEdgesFromObjectList(List<ObjectToPack> list) {
2153 		final int size = list.size();
2154 		int src = 0;
2155 		int dst = 0;
2156 
2157 		for (; src < size; src++) {
2158 			ObjectToPack obj = list.get(src);
2159 			if (obj.isEdge())
2160 				continue;
2161 			if (dst != src)
2162 				list.set(dst, obj);
2163 			dst++;
2164 		}
2165 
2166 		while (dst < list.size())
2167 			list.remove(list.size() - 1);
2168 	}
2169 
2170 	/**
2171 	 * Include one object to the output file.
2172 	 * <p>
2173 	 * Objects are written in the order they are added. If the same object is
2174 	 * added twice, it may be written twice, creating a larger than necessary
2175 	 * file.
2176 	 *
2177 	 * @param object
2178 	 *            the object to add.
2179 	 * @throws org.eclipse.jgit.errors.IncorrectObjectTypeException
2180 	 *             the object is an unsupported type.
2181 	 */
2182 	public void addObject(RevObject object)
2183 			throws IncorrectObjectTypeException {
2184 		if (!exclude(object))
2185 			addObject(object, 0);
2186 	}
2187 
2188 	private void addObject(RevObject object, int pathHashCode) {
2189 		addObject(object, object.getType(), pathHashCode);
2190 	}
2191 
2192 	private void addObject(
2193 			final AnyObjectId src, final int type, final int pathHashCode) {
2194 		final ObjectToPack otp;
2195 		if (reuseSupport != null)
2196 			otp = reuseSupport.newObjectToPack(src, type);
2197 		else
2198 			otp = new ObjectToPack(src, type);
2199 		otp.setPathHash(pathHashCode);
2200 		objectsLists[type].add(otp);
2201 		objectsMap.add(otp);
2202 	}
2203 
2204 	/**
2205 	 * Determines if the object should be omitted from the pack as a result of
2206 	 * its depth (probably because of the tree:<depth> filter).
2207 	 * <p>
2208 	 * Causes {@code walker} to skip traversing the current tree, which ought to
2209 	 * have just started traversal, assuming this method is called as soon as a
2210 	 * new depth is reached.
2211 	 * <p>
2212 	 * This method increments the {@code treesTraversed} statistic.
2213 	 *
2214 	 * @param obj
2215 	 *            the object to check whether it should be omitted.
2216 	 * @param walker
2217 	 *            the walker being used for traveresal.
2218 	 * @return whether the given object should be skipped.
2219 	 */
2220 	private boolean depthSkip(@NonNull RevObject obj, ObjectWalk walker) {
2221 		long treeDepth = walker.getTreeDepth();
2222 
2223 		// Check if this object needs to be rejected because it is a tree or
2224 		// blob that is too deep from the root tree.
2225 
2226 		// A blob is considered one level deeper than the tree that contains it.
2227 		if (obj.getType() == OBJ_BLOB) {
2228 			treeDepth++;
2229 		} else {
2230 			stats.treesTraversed++;
2231 		}
2232 
2233 		if (filterSpec.getTreeDepthLimit() < 0 ||
2234 			treeDepth <= filterSpec.getTreeDepthLimit()) {
2235 			return false;
2236 		}
2237 
2238 		walker.skipTree();
2239 		return true;
2240 	}
2241 
2242 	// Adds the given object as an object to be packed, first performing
2243 	// filtering on blobs at or exceeding a given size.
2244 	private void filterAndAddObject(@NonNull AnyObjectId src, int type,
2245 			int pathHashCode, @NonNull Set<? extends AnyObjectId> want)
2246 			throws IOException {
2247 
2248 		// Check if this object needs to be rejected, doing the cheaper
2249 		// checks first.
2250 		boolean reject =
2251 			(!filterSpec.allowsType(type) && !want.contains(src)) ||
2252 			(filterSpec.getBlobLimit() >= 0 &&
2253 				type == OBJ_BLOB &&
2254 				!want.contains(src) &&
2255 				reader.getObjectSize(src, OBJ_BLOB) > filterSpec.getBlobLimit());
2256 		if (!reject) {
2257 			addObject(src, type, pathHashCode);
2258 		}
2259 	}
2260 
2261 	private boolean exclude(AnyObjectId objectId) {
2262 		if (excludeInPacks == null)
2263 			return false;
2264 		if (excludeInPackLast.contains(objectId))
2265 			return true;
2266 		for (ObjectIdSet idx : excludeInPacks) {
2267 			if (idx.contains(objectId)) {
2268 				excludeInPackLast = idx;
2269 				return true;
2270 			}
2271 		}
2272 		return false;
2273 	}
2274 
	/**
	 * Select an object representation for this writer.
	 * <p>
	 * An {@link org.eclipse.jgit.lib.ObjectReader} implementation should invoke
	 * this method once for each representation available for an object, to
	 * allow the writer to find the most suitable one for the output.
	 *
	 * @param otp
	 *            the object being packed.
	 * @param next
	 *            the next available representation from the repository.
	 */
	public void select(ObjectToPack otp, StoredObjectRepresentation next) {
		int nFmt = next.getFormat();

		if (!cachedPacks.isEmpty()) {
			if (otp.isEdge())
				return;
			if (nFmt == PACK_WHOLE || nFmt == PACK_DELTA) {
				// Prefer serving the object out of an appended cached
				// pack: mark the entry as an edge so it is pruned from
				// the object lists later.
				for (CachedPack pack : cachedPacks) {
					if (pack.hasObject(otp, next)) {
						otp.setEdge();
						otp.clearDeltaBase();
						otp.clearReuseAsIs();
						pruneCurrentObjectList = true;
						return;
					}
				}
			}
		}

		if (nFmt == PACK_DELTA && reuseDeltas && reuseDeltaFor(otp)) {
			ObjectId baseId = next.getDeltaBase();
			ObjectToPack ptr = objectsMap.get(baseId);
			if (ptr != null && !ptr.isEdge()) {
				// Base is also in this pack: reuse the stored delta.
				otp.setDeltaBase(ptr);
				otp.setReuseAsIs();
			} else if (thin && have(ptr, baseId)) {
				// Thin pack: base is on the remote side, reference it
				// by id without including it in the output.
				otp.setDeltaBase(baseId);
				otp.setReuseAsIs();
			} else {
				otp.clearDeltaBase();
				otp.clearReuseAsIs();
			}
		} else if (nFmt == PACK_WHOLE && config.isReuseObjects()) {
			int nWeight = next.getWeight();
			if (otp.isReuseAsIs() && !otp.isDeltaRepresentation()) {
				// We've chosen another PACK_WHOLE format for this object,
				// choose the one that has the smaller compressed size.
				//
				if (otp.getWeight() <= nWeight)
					return;
			}
			otp.clearDeltaBase();
			otp.setReuseAsIs();
			otp.setWeight(nWeight);
		} else {
			otp.clearDeltaBase();
			otp.clearReuseAsIs();
		}

		otp.setDeltaAttempted(reuseDeltas && next.wasDeltaAttempted());
		otp.select(next);
	}
2339 
2340 	private final boolean have(ObjectToPack ptr, AnyObjectId objectId) {
2341 		return (ptr != null && ptr.isEdge())
2342 				|| (haveObjects != null && haveObjects.contains(objectId));
2343 	}
2344 
	/**
	 * Prepares the bitmaps to be written to the bitmap index file.
	 * <p>
	 * Bitmaps can be used to speed up fetches and clones by storing the entire
	 * object graph at selected commits. Writing a bitmap index is an optional
	 * feature that not all pack users may require.
	 * <p>
	 * Called after {@link #writeIndex(OutputStream)}.
	 * <p>
	 * To reduce memory internal state is cleared during this method, rendering
	 * the PackWriter instance useless for anything further than a call to write
	 * out the new bitmaps with {@link #writeBitmapIndex(OutputStream)}.
	 *
	 * @param pm
	 *            progress monitor to report bitmap building work.
	 * @return whether a bitmap index may be written.
	 * @throws java.io.IOException
	 *             when some I/O problem occur during reading objects.
	 */
	public boolean prepareBitmapIndex(ProgressMonitor pm) throws IOException {
		// Bitmaps are only valid for a full pack with an indexable count.
		if (!canBuildBitmaps || getObjectCount() > Integer.MAX_VALUE
				|| !cachedPacks.isEmpty())
			return false;

		if (pm == null)
			pm = NullProgressMonitor.INSTANCE;

		int numCommits = objectsLists[OBJ_COMMIT].size();
		List<ObjectToPack> byName = sortByName();
		// Free the object lists and map now; only the name-sorted copy is
		// needed from here on, and it is owned by the index builder.
		sortedByName = null;
		objectsLists = null;
		objectsMap = null;
		writeBitmaps = new PackBitmapIndexBuilder(byName);
		byName = null;

		PackWriterBitmapPreparer bitmapPreparer = new PackWriterBitmapPreparer(
				reader, writeBitmaps, pm, stats.interestingObjects, config);

		Collection<BitmapCommit> selectedCommits = bitmapPreparer
				.selectCommits(numCommits, excludeFromBitmapSelection);

		beginPhase(PackingPhase.BUILDING_BITMAPS, pm, selectedCommits.size());

		BitmapWalker walker = bitmapPreparer.newBitmapWalker();
		AnyObjectId last = null;
		for (BitmapCommit cmit : selectedCommits) {
			// A commit that cannot reuse the walker starts a fresh one.
			if (!cmit.isReuseWalker()) {
				walker = bitmapPreparer.newBitmapWalker();
			}
			BitmapBuilder bitmap = walker.findObjects(
					Collections.singleton(cmit), null, false);

			// Sanity check: a reused walker implies the previous commit
			// must be reachable from (contained in) the new bitmap.
			if (last != null && cmit.isReuseWalker() && !bitmap.contains(last))
				throw new IllegalStateException(MessageFormat.format(
						JGitText.get().bitmapMissingObject, cmit.name(),
						last.name()));
			last = BitmapCommit.copyFrom(cmit).build();
			writeBitmaps.processBitmapForWrite(cmit, bitmap.build(),
					cmit.getFlags());

			// The bitmap walker should stop when the walk hits the previous
			// commit, which saves time.
			walker.setPrevCommit(last);
			walker.setPrevBitmap(bitmap);

			pm.update(1);
		}

		endPhase(pm);
		return true;
	}
2416 
2417 	private boolean reuseDeltaFor(ObjectToPack otp) {
2418 		int type = otp.getType();
2419 		if ((type & 2) != 0) // OBJ_TREE(2) or OBJ_BLOB(3)
2420 			return true;
2421 		if (type == OBJ_COMMIT)
2422 			return reuseDeltaCommits;
2423 		if (type == OBJ_TAG)
2424 			return false;
2425 		return true;
2426 	}
2427 
2428 	private class MutableState {
2429 		/** Estimated size of a single ObjectToPack instance. */
2430 		// Assume 64-bit pointers, since this is just an estimate.
2431 		private static final long OBJECT_TO_PACK_SIZE =
2432 				(2 * 8)               // Object header
2433 				+ (2 * 8) + (2 * 8)   // ObjectToPack fields
2434 				+ (8 + 8)             // PackedObjectInfo fields
2435 				+ 8                   // ObjectIdOwnerMap fields
2436 				+ 40                  // AnyObjectId fields
2437 				+ 8;                  // Reference in BlockList
2438 
2439 		private final long totalDeltaSearchBytes;
2440 
2441 		private volatile PackingPhase phase;
2442 
2443 		MutableState() {
2444 			phase = PackingPhase.COUNTING;
2445 			if (config.isDeltaCompress()) {
2446 				int threads = config.getThreads();
2447 				if (threads <= 0)
2448 					threads = Runtime.getRuntime().availableProcessors();
2449 				totalDeltaSearchBytes = (threads * config.getDeltaSearchMemoryLimit())
2450 						+ config.getBigFileThreshold();
2451 			} else
2452 				totalDeltaSearchBytes = 0;
2453 		}
2454 
2455 		State snapshot() {
2456 			long objCnt = 0;
2457 			BlockList<ObjectToPack>[] lists = objectsLists;
2458 			if (lists != null) {
2459 				objCnt += lists[OBJ_COMMIT].size();
2460 				objCnt += lists[OBJ_TREE].size();
2461 				objCnt += lists[OBJ_BLOB].size();
2462 				objCnt += lists[OBJ_TAG].size();
2463 				// Exclude CachedPacks.
2464 			}
2465 
2466 			long bytesUsed = OBJECT_TO_PACK_SIZE * objCnt;
2467 			PackingPhase curr = phase;
2468 			if (curr == PackingPhase.COMPRESSING)
2469 				bytesUsed += totalDeltaSearchBytes;
2470 			return new State(curr, bytesUsed);
2471 		}
2472 	}
2473 
	/**
	 * Possible states that a PackWriter can be in.
	 * <p>
	 * Exposed through {@link State#getPhase()} so callers can monitor a
	 * writer's progress.
	 */
	public enum PackingPhase {
		/** Counting objects phase. */
		COUNTING,

		/** Getting sizes phase. */
		GETTING_SIZES,

		/** Finding sources phase. */
		FINDING_SOURCES,

		/** Compressing objects phase. */
		COMPRESSING,

		/** Writing objects phase. */
		WRITING,

		/** Building bitmaps phase. */
		BUILDING_BITMAPS;
	}
2494 
2495 	/** Summary of the current state of a PackWriter. */
2496 	public class State {
2497 		private final PackingPhase phase;
2498 
2499 		private final long bytesUsed;
2500 
2501 		State(PackingPhase phase, long bytesUsed) {
2502 			this.phase = phase;
2503 			this.bytesUsed = bytesUsed;
2504 		}
2505 
2506 		/** @return the PackConfig used to build the writer. */
2507 		public PackConfig getConfig() {
2508 			return config;
2509 		}
2510 
2511 		/** @return the current phase of the writer. */
2512 		public PackingPhase getPhase() {
2513 			return phase;
2514 		}
2515 
2516 		/** @return an estimate of the total memory used by the writer. */
2517 		public long estimateBytesUsed() {
2518 			return bytesUsed;
2519 		}
2520 
2521 		@SuppressWarnings("nls")
2522 		@Override
2523 		public String toString() {
2524 			return "PackWriter.State[" + phase + ", memory=" + bytesUsed + "]";
2525 		}
2526 	}
2527 
2528 	/**
2529 	 * Configuration related to the packfile URI feature.
2530 	 *
2531 	 * @since 5.5
2532 	 */
2533 	public static class PackfileUriConfig {
2534 		@NonNull
2535 		private final PacketLineOut pckOut;
2536 
2537 		@NonNull
2538 		private final Collection<String> protocolsSupported;
2539 
2540 		@NonNull
2541 		private final CachedPackUriProvider cachedPackUriProvider;
2542 
2543 		/**
2544 		 * @param pckOut where to write "packfile-uri" lines to (should
2545 		 *     output to the same stream as the one passed to
2546 		 *     PackWriter#writePack)
2547 		 * @param protocolsSupported list of protocols supported (e.g. "https")
2548 		 * @param cachedPackUriProvider provider of URIs corresponding
2549 		 *     to cached packs
2550 		 * @since 5.5
2551 		 */
2552 		public PackfileUriConfig(@NonNull PacketLineOut pckOut,
2553 				@NonNull Collection<String> protocolsSupported,
2554 				@NonNull CachedPackUriProvider cachedPackUriProvider) {
2555 			this.pckOut = pckOut;
2556 			this.protocolsSupported = protocolsSupported;
2557 			this.cachedPackUriProvider = cachedPackUriProvider;
2558 		}
2559 	}
2560 }