View Javadoc
1   /*
2    * Copyright (C) 2008-2010, Google Inc.
3    * Copyright (C) 2008, Marek Zawirski <marek.zawirski@gmail.com> and others
4    *
5    * This program and the accompanying materials are made available under the
6    * terms of the Eclipse Distribution License v. 1.0 which is available at
7    * https://www.eclipse.org/org/documents/edl-v10.php.
8    *
9    * SPDX-License-Identifier: BSD-3-Clause
10   */
11  
12  package org.eclipse.jgit.internal.storage.pack;
13  
14  import static java.util.Objects.requireNonNull;
15  import static org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation.PACK_DELTA;
16  import static org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation.PACK_WHOLE;
17  import static org.eclipse.jgit.lib.Constants.OBJECT_ID_LENGTH;
18  import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
19  import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT;
20  import static org.eclipse.jgit.lib.Constants.OBJ_TAG;
21  import static org.eclipse.jgit.lib.Constants.OBJ_TREE;
22  
23  import java.io.IOException;
24  import java.io.OutputStream;
25  import java.lang.ref.WeakReference;
26  import java.security.MessageDigest;
27  import java.text.MessageFormat;
28  import java.util.ArrayList;
29  import java.util.Arrays;
30  import java.util.Collection;
31  import java.util.Collections;
32  import java.util.HashMap;
33  import java.util.HashSet;
34  import java.util.Iterator;
35  import java.util.List;
36  import java.util.Map;
37  import java.util.NoSuchElementException;
38  import java.util.Set;
39  import java.util.concurrent.ConcurrentHashMap;
40  import java.util.concurrent.ExecutionException;
41  import java.util.concurrent.Executor;
42  import java.util.concurrent.ExecutorService;
43  import java.util.concurrent.Executors;
44  import java.util.concurrent.Future;
45  import java.util.concurrent.TimeUnit;
46  import java.util.zip.CRC32;
47  import java.util.zip.CheckedOutputStream;
48  import java.util.zip.Deflater;
49  import java.util.zip.DeflaterOutputStream;
50  
51  import org.eclipse.jgit.annotations.NonNull;
52  import org.eclipse.jgit.annotations.Nullable;
53  import org.eclipse.jgit.errors.CorruptObjectException;
54  import org.eclipse.jgit.errors.IncorrectObjectTypeException;
55  import org.eclipse.jgit.errors.LargeObjectException;
56  import org.eclipse.jgit.errors.MissingObjectException;
57  import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
58  import org.eclipse.jgit.internal.JGitText;
59  import org.eclipse.jgit.internal.storage.file.PackBitmapIndexBuilder;
60  import org.eclipse.jgit.internal.storage.file.PackBitmapIndexWriterV1;
61  import org.eclipse.jgit.internal.storage.file.PackIndexWriter;
62  import org.eclipse.jgit.lib.AnyObjectId;
63  import org.eclipse.jgit.lib.AsyncObjectSizeQueue;
64  import org.eclipse.jgit.lib.BatchingProgressMonitor;
65  import org.eclipse.jgit.lib.BitmapIndex;
66  import org.eclipse.jgit.lib.BitmapIndex.BitmapBuilder;
67  import org.eclipse.jgit.lib.BitmapObject;
68  import org.eclipse.jgit.lib.Constants;
69  import org.eclipse.jgit.lib.NullProgressMonitor;
70  import org.eclipse.jgit.lib.ObjectId;
71  import org.eclipse.jgit.lib.ObjectIdOwnerMap;
72  import org.eclipse.jgit.lib.ObjectIdSet;
73  import org.eclipse.jgit.lib.ObjectLoader;
74  import org.eclipse.jgit.lib.ObjectReader;
75  import org.eclipse.jgit.lib.ProgressMonitor;
76  import org.eclipse.jgit.lib.Repository;
77  import org.eclipse.jgit.lib.ThreadSafeProgressMonitor;
78  import org.eclipse.jgit.revwalk.AsyncRevObjectQueue;
79  import org.eclipse.jgit.revwalk.BitmapWalker;
80  import org.eclipse.jgit.revwalk.DepthWalk;
81  import org.eclipse.jgit.revwalk.ObjectWalk;
82  import org.eclipse.jgit.revwalk.RevCommit;
83  import org.eclipse.jgit.revwalk.RevFlag;
84  import org.eclipse.jgit.revwalk.RevObject;
85  import org.eclipse.jgit.revwalk.RevSort;
86  import org.eclipse.jgit.revwalk.RevTag;
87  import org.eclipse.jgit.revwalk.RevTree;
88  import org.eclipse.jgit.storage.pack.PackConfig;
89  import org.eclipse.jgit.storage.pack.PackStatistics;
90  import org.eclipse.jgit.transport.FilterSpec;
91  import org.eclipse.jgit.transport.ObjectCountCallback;
92  import org.eclipse.jgit.transport.PacketLineOut;
93  import org.eclipse.jgit.transport.WriteAbortedException;
94  import org.eclipse.jgit.util.BlockList;
95  import org.eclipse.jgit.util.TemporaryBuffer;
96  
97  /**
98   * <p>
99   * PackWriter class is responsible for generating pack files from a
100  * specified set of objects from a repository. This implementation produces
101  * pack files in format version 2.
102  * </p>
103  * <p>
104  * Source of objects may be specified in two ways:
105  * <ul>
106  * <li>(usually) by providing sets of interesting and uninteresting objects in
107  * repository - all interesting objects and their ancestors except uninteresting
108  * objects and their ancestors will be included in pack, or</li>
109  * <li>by providing iterator of {@link org.eclipse.jgit.revwalk.RevObject}
110  * specifying exact list and order of objects in pack</li>
111  * </ul>
112  * <p>
113  * Typical usage consists of creating an instance, configuring options,
114  * preparing the list of objects by calling {@link #preparePack(Iterator)} or
115  * {@link #preparePack(ProgressMonitor, Set, Set)}, and streaming with
116  * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}. If the
117  * pack is being stored as a file the matching index can be written out after
118  * writing the pack by {@link #writeIndex(OutputStream)}. An optional bitmap
119  * index can be made by calling {@link #prepareBitmapIndex(ProgressMonitor)}
120  * followed by {@link #writeBitmapIndex(OutputStream)}.
121  * </p>
122  * <p>
123  * The class provides a set of configurable options and
124  * {@link org.eclipse.jgit.lib.ProgressMonitor} support, as operations may take
125  * a long time for big repositories. A delta searching algorithm is <b>NOT
126  * IMPLEMENTED</b> yet - this implementation relies only on delta and object
127  * reuse.
128  * </p>
129  * <p>
130  * This class is not thread safe. It is intended to be used in one thread as a
131  * single pass to produce one pack. Invoking methods multiple times or out of
132  * order is not supported as internal data structures are destroyed during
133  * certain phases to save memory when packing large repositories.
134  * </p>
135  */
136 public class PackWriter implements AutoCloseable {
	/** Pack stream format version written into the generated pack header. */
	private static final int PACK_VERSION_GENERATED = 2;

	/** Empty set of objects for {@code preparePack()}. */
	public static final Set<ObjectId> NONE = Collections.emptySet();

	// Registry of all live writers. Keys are weak references so a writer
	// that is never released can still be garbage collected; the Boolean
	// value is an unused placeholder (always TRUE).
	private static final Map<WeakReference<PackWriter>, Boolean> instances =
			new ConcurrentHashMap<>();
144 
	// Iterable view over the writers registered in {@link #instances}.
	// Each iterator lazily dereferences the weak references, returning only
	// writers that are still strongly reachable and pruning stale entries
	// from the registry as it goes.
	private static final Iterable<PackWriter> instancesIterable = () -> new Iterator<PackWriter>() {

		private final Iterator<WeakReference<PackWriter>> it = instances
				.keySet().iterator();

		// Next live writer, cached by hasNext() for consumption by next().
		private PackWriter next;

		@Override
		public boolean hasNext() {
			if (next != null) {
				return true;
			}
			while (it.hasNext()) {
				WeakReference<PackWriter> ref = it.next();
				next = ref.get();
				if (next != null) {
					return true;
				}
				// Writer was garbage collected; drop its stale entry.
				it.remove();
			}
			return false;
		}

		@Override
		public PackWriter next() {
			if (hasNext()) {
				PackWriter result = next;
				next = null;
				return result;
			}
			throw new NoSuchElementException();
		}

		@Override
		public void remove() {
			throw new UnsupportedOperationException();
		}
	};
183 
	/**
	 * Get all allocated, non-released PackWriter instances.
	 *
	 * @return an iterable over all allocated, non-released PackWriter
	 *         instances; iteration skips writers already garbage collected.
	 */
	public static Iterable<PackWriter> getInstances() {
		return instancesIterable;
	}
192 
	// Objects queued for packing, partitioned by git object type so each
	// type can be emitted as a contiguous run. Indexed by the OBJ_*
	// constants; slots for other type codes stay null.
	@SuppressWarnings("unchecked")
	BlockList<ObjectToPack>[] objectsLists = new BlockList[OBJ_TAG + 1];
	{
		objectsLists[OBJ_COMMIT] = new BlockList<>();
		objectsLists[OBJ_TREE] = new BlockList<>();
		objectsLists[OBJ_BLOB] = new BlockList<>();
		objectsLists[OBJ_TAG] = new BlockList<>();
	}
201 
	// Lookup map over the same objects held in objectsLists.
	private ObjectIdOwnerMap<ObjectToPack> objectsMap = new ObjectIdOwnerMap<>();

	// edge objects for thin packs
	private List<ObjectToPack> edgeObjects = new BlockList<>();

	// Objects the client is known to have already.
	private BitmapBuilder haveObjects;

	// Cached packs appended verbatim to the output; see preparePack(Collection).
	private List<CachedPack> cachedPacks = new ArrayList<>(2);

	// Targets of annotated tags, hoisted earlier in the pack for locality.
	private Set<ObjectId> tagTargets = NONE;

	// Objects excluded from bitmap commit selection.
	private Set<? extends ObjectId> excludeFromBitmapSelection = NONE;

	// Sets of objects that must not appear in the output pack; see
	// excludeObjects(ObjectIdSet).
	private ObjectIdSet[] excludeInPacks;

	// Last exclusion set that matched; presumably a lookup cache — confirm.
	private ObjectIdSet excludeInPackLast;

	// Lazily created deflater reused across objects.
	private Deflater myDeflater;

	private final ObjectReader reader;

	/** {@link #reader} recast to the reuse interface, if it supports it. */
	private final ObjectReuseAsIs reuseSupport;

	final PackConfig config;

	// Statistics accumulator; may be shared with the caller (see constructor).
	private final PackStatistics.Accumulator stats;

	private final MutableState state;

	// Weak self-reference used as this writer's key in the instances registry.
	private final WeakReference<PackWriter> selfRef;

	private PackStatistics.ObjectType.Accumulator typeStats;

	private List<ObjectToPack> sortedByName;

	// Trailing checksum of the written pack stream.
	private byte[] packcsum;

	private boolean deltaBaseAsOffset;

	private boolean reuseDeltas;

	private boolean reuseDeltaCommits;

	private boolean reuseValidate;

	private boolean thin;

	private boolean useCachedPacks;

	private boolean useBitmaps;

	private boolean ignoreMissingUninteresting = true;

	private boolean pruneCurrentObjectList;

	// True once setShallowPack() has configured depth-limited traversal.
	private boolean shallowPack;

	private boolean canBuildBitmaps;

	private boolean indexDisabled;

	// Maximum history depth for shallow packs; see setShallowPack(int, Collection).
	private int depth;

	private Collection<? extends ObjectId> unshallowObjects;

	private PackBitmapIndexBuilder writeBitmaps;

	private CRC32 crc32;

	// Optional callback notified of the object count; see setObjectCountCallback.
	private ObjectCountCallback callback;

	private FilterSpec filterSpec = FilterSpec.NO_FILTER;

	// Optional packfile-URI configuration; null when the feature is unused.
	private PackfileUriConfig packfileUriConfig;
	/**
	 * Create writer for specified repository.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 *
	 * @param repo
	 *            repository where objects are stored.
	 */
	public PackWriter(Repository repo) {
		// NOTE(review): the reader created here is presumably released by
		// close() — confirm against the rest of the class.
		this(repo, repo.newObjectReader());
	}
291 
	/**
	 * Create a writer to load objects from the specified reader.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 *
	 * @param reader
	 *            reader to read from the repository with.
	 */
	public PackWriter(ObjectReader reader) {
		// Uses a default PackConfig, i.e. no repository-specific settings.
		this(new PackConfig(), reader);
	}
304 
	/**
	 * Create writer for specified repository.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 *
	 * @param repo
	 *            repository where objects are stored.
	 * @param reader
	 *            reader to read from the repository with.
	 */
	public PackWriter(Repository repo, ObjectReader reader) {
		// Pack configuration is derived from the repository's settings.
		this(new PackConfig(repo), reader);
	}
319 
	/**
	 * Create writer with a specified configuration.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 *
	 * @param config
	 *            configuration for the pack writer.
	 * @param reader
	 *            reader to read from the repository with.
	 */
	public PackWriter(PackConfig config, ObjectReader reader) {
		// null accumulator: statistics collect into a writer-private one.
		this(config, reader, null);
	}
334 
335 	/**
336 	 * Create writer with a specified configuration.
337 	 * <p>
338 	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
339 	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
340 	 *
341 	 * @param config
342 	 *            configuration for the pack writer.
343 	 * @param reader
344 	 *            reader to read from the repository with.
345 	 * @param statsAccumulator
346 	 *            accumulator for statics
347 	 */
348 	public PackWriter(PackConfig config, final ObjectReader reader,
349 			@Nullable PackStatistics.Accumulator statsAccumulator) {
350 		this.config = config;
351 		this.reader = reader;
352 		if (reader instanceof ObjectReuseAsIs)
353 			reuseSupport = ((ObjectReuseAsIs) reader);
354 		else
355 			reuseSupport = null;
356 
357 		deltaBaseAsOffset = config.isDeltaBaseAsOffset();
358 		reuseDeltas = config.isReuseDeltas();
359 		reuseValidate = true; // be paranoid by default
360 		stats = statsAccumulator != null ? statsAccumulator
361 				: new PackStatistics.Accumulator();
362 		state = new MutableState();
363 		selfRef = new WeakReference<>(this);
364 		instances.put(selfRef, Boolean.TRUE);
365 	}
366 
	/**
	 * Set the {@code ObjectCountCallback}.
	 * <p>
	 * It should be set before calling
	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}.
	 *
	 * @param callback
	 *            the callback to set
	 * @return this object for chaining.
	 */
	public PackWriter setObjectCountCallback(ObjectCountCallback callback) {
		// Stored as-is; no validation is performed here.
		this.callback = callback;
		return this;
	}
381 
	/**
	 * Records the set of shallow commits in the client.
	 *
	 * @param clientShallowCommits
	 *            the shallow commits in the client
	 */
	public void setClientShallowCommits(Set<ObjectId> clientShallowCommits) {
		// Defensive copy: the caller's set may change after this call.
		stats.clientShallowCommits = Collections
				.unmodifiableSet(new HashSet<>(clientShallowCommits));
	}
392 
	/**
	 * Check whether writer can store delta base as an offset (new style
	 * reducing pack size) or should store it as an object id (legacy style,
	 * compatible with old readers).
	 *
	 * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
	 * (the initial value comes from {@code PackConfig#isDeltaBaseAsOffset()}).
	 *
	 * @return true if delta base is stored as an offset; false if it is stored
	 *         as an object id.
	 */
	public boolean isDeltaBaseAsOffset() {
		return deltaBaseAsOffset;
	}
406 
	/**
	 * Set writer delta base format. Delta base can be written as an offset in a
	 * pack file (new approach reducing file size) or as an object id (legacy
	 * approach, compatible with old readers).
	 *
	 * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
	 *
	 * @param deltaBaseAsOffset
	 *            boolean indicating whether delta base can be stored as an
	 *            offset; {@code false} stores it as an object id.
	 */
	public void setDeltaBaseAsOffset(boolean deltaBaseAsOffset) {
		this.deltaBaseAsOffset = deltaBaseAsOffset;
	}
421 
	/**
	 * Check if the writer will reuse commits that are already stored as deltas.
	 *
	 * @return true if the writer would reuse commits stored as deltas, assuming
	 *         delta reuse is already enabled.
	 */
	public boolean isReuseDeltaCommits() {
		return reuseDeltaCommits;
	}
431 
	/**
	 * Set the writer to reuse existing delta versions of commits.
	 *
	 * @param reuse
	 *            if true, the writer will reuse any commits stored as deltas.
	 *            By default the writer does not reuse delta commits.
	 */
	public void setReuseDeltaCommits(boolean reuse) {
		reuseDeltaCommits = reuse;
	}
442 
	/**
	 * Check if the writer validates objects before copying them.
	 *
	 * @return true if validation is enabled (the default, set in the
	 *         constructor); false if the reader will handle object validation
	 *         as a side-effect of it consuming the output.
	 */
	public boolean isReuseValidatingObjects() {
		return reuseValidate;
	}
452 
	/**
	 * Enable (or disable) object validation during packing.
	 *
	 * @param validate
	 *            if true the pack writer will validate an object before it is
	 *            put into the output. This additional validation work may be
	 *            necessary to avoid propagating corruption from one local pack
	 *            file to another local pack file.
	 */
	public void setReuseValidatingObjects(boolean validate) {
		reuseValidate = validate;
	}
465 
	/**
	 * Whether this writer is producing a thin pack.
	 *
	 * @return true if this writer is producing a thin pack; see
	 *         {@link #setThin(boolean)}.
	 */
	public boolean isThin() {
		return thin;
	}
474 
	/**
	 * Whether writer may pack objects with delta base object not within set of
	 * objects to pack.
	 *
	 * @param packthin
	 *            a boolean indicating whether writer may pack objects with
	 *            delta base object not within set of objects to pack, but
	 *            belonging to party repository (uninteresting/boundary) as
	 *            determined by set; this kind of pack is used only for
	 *            transport; true - to produce thin pack, false - otherwise.
	 */
	public void setThin(boolean packthin) {
		thin = packthin;
	}
489 
	/**
	 * Whether to reuse cached packs.
	 *
	 * @return {@code true} to reuse cached packs. If true, index creation
	 *         isn't available.
	 */
	public boolean isUseCachedPacks() {
		return useCachedPacks;
	}
499 
	/**
	 * Whether to use cached packs.
	 *
	 * @param useCached
	 *            if set to {@code true} and a cached pack is present, it will
	 *            be appended onto the end of a thin-pack, reducing the amount
	 *            of working set space and CPU used by PackWriter. Enabling this
	 *            feature prevents PackWriter from creating an index for the
	 *            newly created pack, so it is only suitable for writing to a
	 *            network client, where the client will make the index.
	 */
	public void setUseCachedPacks(boolean useCached) {
		useCachedPacks = useCached;
	}
514 
	/**
	 * Whether to use bitmaps.
	 *
	 * @return {@code true} to use bitmaps for ObjectWalks, if available.
	 */
	public boolean isUseBitmaps() {
		return useBitmaps;
	}
523 
	/**
	 * Whether to use bitmaps.
	 *
	 * @param useBitmaps
	 *            if set to true, bitmaps will be used when preparing a pack.
	 */
	public void setUseBitmaps(boolean useBitmaps) {
		this.useBitmaps = useBitmaps;
	}
533 
534 	/**
535 	 * Whether the index file cannot be created by this PackWriter.
536 	 *
537 	 * @return {@code true} if the index file cannot be created by this
538 	 *         PackWriter.
539 	 */
540 	public boolean isIndexDisabled() {
541 		return indexDisabled || !cachedPacks.isEmpty();
542 	}
543 
	/**
	 * Whether to disable creation of the index file.
	 *
	 * @param noIndex
	 *            {@code true} to disable creation of the index file.
	 */
	public void setIndexDisabled(boolean noIndex) {
		this.indexDisabled = noIndex;
	}
553 
	/**
	 * Whether to ignore missing uninteresting objects.
	 *
	 * @return {@code true} to ignore objects that are uninteresting and also
	 *         not found on local disk; false to throw a
	 *         {@link org.eclipse.jgit.errors.MissingObjectException} out of
	 *         {@link #preparePack(ProgressMonitor, Set, Set)} if an
	 *         uninteresting object is not in the source repository. By default,
	 *         true, permitting gracefully ignoring of uninteresting objects.
	 */
	public boolean isIgnoreMissingUninteresting() {
		return ignoreMissingUninteresting;
	}
567 
	/**
	 * Whether writer should ignore non existing uninteresting objects.
	 *
	 * @param ignore
	 *            {@code true} if writer should ignore non existing
	 *            uninteresting objects during construction set of objects to
	 *            pack; false otherwise - non existing uninteresting objects may
	 *            cause {@link org.eclipse.jgit.errors.MissingObjectException}
	 */
	public void setIgnoreMissingUninteresting(boolean ignore) {
		ignoreMissingUninteresting = ignore;
	}
580 
	/**
	 * Set the tag targets that should be hoisted earlier during packing.
	 * <p>
	 * Callers may put objects into this set before invoking any of the
	 * preparePack methods to influence where an annotated tag's target is
	 * stored within the resulting pack. Typically these will be clustered
	 * together, and hoisted earlier in the file even if they are ancient
	 * revisions, allowing readers to find tag targets with better locality.
	 *
	 * @param objects
	 *            objects that annotated tags point at.
	 */
	public void setTagTargets(Set<ObjectId> objects) {
		// Stored by reference; presumably callers do not mutate the set
		// after handing it over — confirm.
		tagTargets = objects;
	}
596 
	/**
	 * Configure this pack for a shallow clone.
	 *
	 * @param depth
	 *            maximum depth of history to return. 1 means return only the
	 *            "wants".
	 * @param unshallow
	 *            objects which used to be shallow on the client, but are being
	 *            extended as part of this fetch
	 */
	public void setShallowPack(int depth,
			Collection<? extends ObjectId> unshallow) {
		// Enables the DepthWalk-based traversal in getObjectWalk().
		this.shallowPack = true;
		this.depth = depth;
		this.unshallowObjects = unshallow;
	}
613 
	/**
	 * Set the filter spec limiting which objects this writer should include.
	 *
	 * @param filter the filter which indicates what and what not this writer
	 *            should include
	 */
	public void setFilterSpec(@NonNull FilterSpec filter) {
		filterSpec = requireNonNull(filter);
	}
621 
	/**
	 * Set the configuration for packfile URIs.
	 *
	 * @param config configuration related to packfile URIs
	 * @since 5.5
	 */
	public void setPackfileUriConfig(PackfileUriConfig config) {
		packfileUriConfig = config;
	}
629 
630 	/**
631 	 * Returns objects number in a pack file that was created by this writer.
632 	 *
633 	 * @return number of objects in pack.
634 	 * @throws java.io.IOException
635 	 *             a cached pack cannot supply its object count.
636 	 */
637 	public long getObjectCount() throws IOException {
638 		if (stats.totalObjects == 0) {
639 			long objCnt = 0;
640 
641 			objCnt += objectsLists[OBJ_COMMIT].size();
642 			objCnt += objectsLists[OBJ_TREE].size();
643 			objCnt += objectsLists[OBJ_BLOB].size();
644 			objCnt += objectsLists[OBJ_TAG].size();
645 
646 			for (CachedPack pack : cachedPacks)
647 				objCnt += pack.getObjectCount();
648 			return objCnt;
649 		}
650 		return stats.totalObjects;
651 	}
652 
653 	private long getUnoffloadedObjectCount() throws IOException {
654 		long objCnt = 0;
655 
656 		objCnt += objectsLists[OBJ_COMMIT].size();
657 		objCnt += objectsLists[OBJ_TREE].size();
658 		objCnt += objectsLists[OBJ_BLOB].size();
659 		objCnt += objectsLists[OBJ_TAG].size();
660 
661 		for (CachedPack pack : cachedPacks) {
662 			CachedPackUriProvider.PackInfo packInfo =
663 				packfileUriConfig.cachedPackUriProvider.getInfo(
664 					pack, packfileUriConfig.protocolsSupported);
665 			if (packInfo == null) {
666 				objCnt += pack.getObjectCount();
667 			}
668 		}
669 
670 		return objCnt;
671 	}
672 
673 	/**
674 	 * Returns the object ids in the pack file that was created by this writer.
675 	 * <p>
676 	 * This method can only be invoked after
677 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)} has
678 	 * been invoked and completed successfully.
679 	 *
680 	 * @return set of objects in pack.
681 	 * @throws java.io.IOException
682 	 *             a cached pack cannot supply its object ids.
683 	 */
684 	public ObjectIdOwnerMap<ObjectIdOwnerMap.Entry> getObjectSet()
685 			throws IOException {
686 		if (!cachedPacks.isEmpty())
687 			throw new IOException(
688 					JGitText.get().cachedPacksPreventsListingObjects);
689 
690 		if (writeBitmaps != null) {
691 			return writeBitmaps.getObjectSet();
692 		}
693 
694 		ObjectIdOwnerMap<ObjectIdOwnerMap.Entry> r = new ObjectIdOwnerMap<>();
695 		for (BlockList<ObjectToPack> objList : objectsLists) {
696 			if (objList != null) {
697 				for (ObjectToPack otp : objList)
698 					r.add(new ObjectIdOwnerMap.Entry(otp) {
699 						// A new entry that copies the ObjectId
700 					});
701 			}
702 		}
703 		return r;
704 	}
705 
706 	/**
707 	 * Add a pack index whose contents should be excluded from the result.
708 	 *
709 	 * @param idx
710 	 *            objects in this index will not be in the output pack.
711 	 */
712 	public void excludeObjects(ObjectIdSet idx) {
713 		if (excludeInPacks == null) {
714 			excludeInPacks = new ObjectIdSet[] { idx };
715 			excludeInPackLast = idx;
716 		} else {
717 			int cnt = excludeInPacks.length;
718 			ObjectIdSet[] newList = new ObjectIdSet[cnt + 1];
719 			System.arraycopy(excludeInPacks, 0, newList, 0, cnt);
720 			newList[cnt] = idx;
721 			excludeInPacks = newList;
722 		}
723 	}
724 
725 	/**
726 	 * Prepare the list of objects to be written to the pack stream.
727 	 * <p>
728 	 * Iterator <b>exactly</b> determines which objects are included in a pack
729 	 * and order they appear in pack (except that objects order by type is not
730 	 * needed at input). This order should conform general rules of ordering
731 	 * objects in git - by recency and path (type and delta-base first is
732 	 * internally secured) and responsibility for guaranteeing this order is on
733 	 * a caller side. Iterator must return each id of object to write exactly
734 	 * once.
735 	 * </p>
736 	 *
737 	 * @param objectsSource
738 	 *            iterator of object to store in a pack; order of objects within
739 	 *            each type is important, ordering by type is not needed;
740 	 *            allowed types for objects are
741 	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_COMMIT},
742 	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_TREE},
743 	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_BLOB} and
744 	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_TAG}; objects
745 	 *            returned by iterator may be later reused by caller as object
746 	 *            id and type are internally copied in each iteration.
747 	 * @throws java.io.IOException
748 	 *             when some I/O problem occur during reading objects.
749 	 */
750 	public void preparePack(@NonNull Iterator<RevObject> objectsSource)
751 			throws IOException {
752 		while (objectsSource.hasNext()) {
753 			addObject(objectsSource.next());
754 		}
755 	}
756 
	/**
	 * Prepare the list of objects to be written to the pack stream.
	 *
	 * <p>
	 * PackWriter will concat and write out the specified packs as-is.
	 *
	 * @param c
	 *            cached packs to be written.
	 */
	public void preparePack(Collection<? extends CachedPack> c) {
		// Note: adding cached packs disables index creation; see
		// isIndexDisabled().
		cachedPacks.addAll(c);
	}
769 
	/**
	 * Prepare the list of objects to be written to the pack stream.
	 * <p>
	 * Basing on these 2 sets, another set of objects to put in a pack file is
	 * created: this set consists of all objects reachable (ancestors) from
	 * interesting objects, except uninteresting objects and their ancestors.
	 * This method uses class {@link org.eclipse.jgit.revwalk.ObjectWalk}
	 * extensively to find out that appropriate set of output objects and their
	 * optimal order in output pack. Order is consistent with general git
	 * in-pack rules: sort by object type, recency, path and delta-base first.
	 * </p>
	 *
	 * @param countingMonitor
	 *            progress during object enumeration.
	 * @param want
	 *            collection of objects to be marked as interesting (start
	 *            points of graph traversal). Must not be {@code null}.
	 * @param have
	 *            collection of objects to be marked as uninteresting (end
	 *            points of graph traversal). Pass {@link #NONE} if all objects
	 *            reachable from {@code want} are desired, such as when serving
	 *            a clone.
	 * @throws java.io.IOException
	 *             when some I/O problem occur during reading objects.
	 */
	public void preparePack(ProgressMonitor countingMonitor,
			@NonNull Set<? extends ObjectId> want,
			@NonNull Set<? extends ObjectId> have) throws IOException {
		// Delegate with empty shallow and bitmap-exclusion sets.
		preparePack(countingMonitor, want, have, NONE, NONE);
	}
800 
	/**
	 * Prepare the list of objects to be written to the pack stream.
	 * <p>
	 * Like {@link #preparePack(ProgressMonitor, Set, Set)} but also allows
	 * specifying commits that should not be walked past ("shallow" commits).
	 * The caller is responsible for filtering out commits that should not be
	 * shallow any more ("unshallow" commits as in {@link #setShallowPack}) from
	 * the shallow set.
	 *
	 * @param countingMonitor
	 *            progress during object enumeration.
	 * @param want
	 *            objects of interest, ancestors of which will be included in
	 *            the pack. Must not be {@code null}.
	 * @param have
	 *            objects whose ancestors (up to and including {@code shallow}
	 *            commits) do not need to be included in the pack because they
	 *            are already available from elsewhere. Must not be
	 *            {@code null}.
	 * @param shallow
	 *            commits indicating the boundary of the history marked with
	 *            {@code have}. Shallow commits have parents but those parents
	 *            are considered not to be already available. Parents of
	 *            {@code shallow} commits and earlier generations will be
	 *            included in the pack if requested by {@code want}. Must not be
	 *            {@code null}.
	 * @throws java.io.IOException
	 *             an I/O problem occurred while reading objects.
	 */
	public void preparePack(ProgressMonitor countingMonitor,
			@NonNull Set<? extends ObjectId> want,
			@NonNull Set<? extends ObjectId> have,
			@NonNull Set<? extends ObjectId> shallow) throws IOException {
		// Delegate with an empty bitmap-exclusion set.
		preparePack(countingMonitor, want, have, shallow, NONE);
	}
836 
	/**
	 * Prepare the list of objects to be written to the pack stream.
	 * <p>
	 * Like {@link #preparePack(ProgressMonitor, Set, Set)} but also allows
	 * specifying commits that should not be walked past ("shallow" commits).
	 * The caller is responsible for filtering out commits that should not be
	 * shallow any more ("unshallow" commits as in {@link #setShallowPack}) from
	 * the shallow set.
	 *
	 * @param countingMonitor
	 *            progress during object enumeration.
	 * @param want
	 *            objects of interest, ancestors of which will be included in
	 *            the pack. Must not be {@code null}.
	 * @param have
	 *            objects whose ancestors (up to and including {@code shallow}
	 *            commits) do not need to be included in the pack because they
	 *            are already available from elsewhere. Must not be
	 *            {@code null}.
	 * @param shallow
	 *            commits indicating the boundary of the history marked with
	 *            {@code have}. Shallow commits have parents but those parents
	 *            are considered not to be already available. Parents of
	 *            {@code shallow} commits and earlier generations will be
	 *            included in the pack if requested by {@code want}. Must not be
	 *            {@code null}.
	 * @param noBitmaps
	 *            collection of objects to be excluded from bitmap commit
	 *            selection.
	 * @throws java.io.IOException
	 *             an I/O problem occurred while reading objects.
	 */
	public void preparePack(ProgressMonitor countingMonitor,
			@NonNull Set<? extends ObjectId> want,
			@NonNull Set<? extends ObjectId> have,
			@NonNull Set<? extends ObjectId> shallow,
			@NonNull Set<? extends ObjectId> noBitmaps) throws IOException {
		// The walk is closed when preparation completes, even on error.
		try (ObjectWalk ow = getObjectWalk()) {
			ow.assumeShallow(shallow);
			preparePack(countingMonitor, ow, want, have, noBitmaps);
		}
	}
879 
880 	private ObjectWalk getObjectWalk() {
881 		return shallowPack ? new DepthWalk.ObjectWalk(reader, depth - 1)
882 				: new ObjectWalk(reader);
883 	}
884 
885 	/**
886 	 * A visitation policy which uses the depth at which the object is seen to
887 	 * decide if re-traversal is necessary. In particular, if the object has
888 	 * already been visited at this depth or shallower, it is not necessary to
889 	 * re-visit at this depth.
890 	 */
891 	private static class DepthAwareVisitationPolicy
892 			implements ObjectWalk.VisitationPolicy {
893 		private final Map<ObjectId, Integer> lowestDepthVisited = new HashMap<>();
894 
895 		private final ObjectWalk walk;
896 
897 		DepthAwareVisitationPolicy(ObjectWalk walk) {
898 			this.walk = requireNonNull(walk);
899 		}
900 
901 		@Override
902 		public boolean shouldVisit(RevObject o) {
903 			Integer lastDepth = lowestDepthVisited.get(o);
904 			if (lastDepth == null) {
905 				return true;
906 			}
907 			return walk.getTreeDepth() < lastDepth.intValue();
908 		}
909 
910 		@Override
911 		public void visited(RevObject o) {
912 			lowestDepthVisited.put(o, Integer.valueOf(walk.getTreeDepth()));
913 		}
914 	}
915 
916 	/**
917 	 * Prepare the list of objects to be written to the pack stream.
918 	 * <p>
919 	 * Basing on these 2 sets, another set of objects to put in a pack file is
920 	 * created: this set consists of all objects reachable (ancestors) from
921 	 * interesting objects, except uninteresting objects and their ancestors.
922 	 * This method uses class {@link org.eclipse.jgit.revwalk.ObjectWalk}
923 	 * extensively to find out that appropriate set of output objects and their
924 	 * optimal order in output pack. Order is consistent with general git
925 	 * in-pack rules: sort by object type, recency, path and delta-base first.
926 	 * </p>
927 	 *
928 	 * @param countingMonitor
929 	 *            progress during object enumeration.
930 	 * @param walk
931 	 *            ObjectWalk to perform enumeration.
932 	 * @param interestingObjects
933 	 *            collection of objects to be marked as interesting (start
934 	 *            points of graph traversal). Must not be {@code null}.
935 	 * @param uninterestingObjects
936 	 *            collection of objects to be marked as uninteresting (end
937 	 *            points of graph traversal). Pass {@link #NONE} if all objects
938 	 *            reachable from {@code want} are desired, such as when serving
939 	 *            a clone.
940 	 * @param noBitmaps
941 	 *            collection of objects to be excluded from bitmap commit
942 	 *            selection.
943 	 * @throws java.io.IOException
944 	 *             when some I/O problem occur during reading objects.
945 	 */
946 	public void preparePack(ProgressMonitor countingMonitor,
947 			@NonNull ObjectWalk walk,
948 			@NonNull Set<? extends ObjectId> interestingObjects,
949 			@NonNull Set<? extends ObjectId> uninterestingObjects,
950 			@NonNull Set<? extends ObjectId> noBitmaps)
951 			throws IOException {
952 		if (countingMonitor == null)
953 			countingMonitor = NullProgressMonitor.INSTANCE;
954 		if (shallowPack && !(walk instanceof DepthWalk.ObjectWalk))
955 			throw new IllegalArgumentException(
956 					JGitText.get().shallowPacksRequireDepthWalk);
957 		if (filterSpec.getTreeDepthLimit() >= 0) {
958 			walk.setVisitationPolicy(new DepthAwareVisitationPolicy(walk));
959 		}
960 		findObjectsToPack(countingMonitor, walk, interestingObjects,
961 				uninterestingObjects, noBitmaps);
962 	}
963 
964 	/**
965 	 * Determine if the pack file will contain the requested object.
966 	 *
967 	 * @param id
968 	 *            the object to test the existence of.
969 	 * @return true if the object will appear in the output pack file.
970 	 * @throws java.io.IOException
971 	 *             a cached pack cannot be examined.
972 	 */
973 	public boolean willInclude(AnyObjectId id) throws IOException {
974 		ObjectToPack obj = objectsMap.get(id);
975 		return obj != null && !obj.isEdge();
976 	}
977 
978 	/**
979 	 * Lookup the ObjectToPack object for a given ObjectId.
980 	 *
981 	 * @param id
982 	 *            the object to find in the pack.
983 	 * @return the object we are packing, or null.
984 	 */
985 	public ObjectToPack get(AnyObjectId id) {
986 		ObjectToPack obj = objectsMap.get(id);
987 		return obj != null && !obj.isEdge() ? obj : null;
988 	}
989 
990 	/**
991 	 * Computes SHA-1 of lexicographically sorted objects ids written in this
992 	 * pack, as used to name a pack file in repository.
993 	 *
994 	 * @return ObjectId representing SHA-1 name of a pack that was created.
995 	 */
996 	public ObjectId computeName() {
997 		final byte[] buf = new byte[OBJECT_ID_LENGTH];
998 		final MessageDigest md = Constants.newMessageDigest();
999 		for (ObjectToPack otp : sortByName()) {
1000 			otp.copyRawTo(buf, 0);
1001 			md.update(buf, 0, OBJECT_ID_LENGTH);
1002 		}
1003 		return ObjectId.fromRaw(md.digest());
1004 	}
1005 
1006 	/**
1007 	 * Returns the index format version that will be written.
1008 	 * <p>
1009 	 * This method can only be invoked after
1010 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)} has
1011 	 * been invoked and completed successfully.
1012 	 *
1013 	 * @return the index format version.
1014 	 */
1015 	public int getIndexVersion() {
1016 		int indexVersion = config.getIndexVersion();
1017 		if (indexVersion <= 0) {
1018 			for (BlockList<ObjectToPack> objs : objectsLists)
1019 				indexVersion = Math.max(indexVersion,
1020 						PackIndexWriter.oldestPossibleFormat(objs));
1021 		}
1022 		return indexVersion;
1023 	}
1024 
1025 	/**
1026 	 * Create an index file to match the pack file just written.
1027 	 * <p>
1028 	 * Called after
1029 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}.
1030 	 * <p>
1031 	 * Writing an index is only required for local pack storage. Packs sent on
1032 	 * the network do not need to create an index.
1033 	 *
1034 	 * @param indexStream
1035 	 *            output for the index data. Caller is responsible for closing
1036 	 *            this stream.
1037 	 * @throws java.io.IOException
1038 	 *             the index data could not be written to the supplied stream.
1039 	 */
1040 	public void writeIndex(OutputStream indexStream) throws IOException {
1041 		if (isIndexDisabled())
1042 			throw new IOException(JGitText.get().cachedPacksPreventsIndexCreation);
1043 
1044 		long writeStart = System.currentTimeMillis();
1045 		final PackIndexWriter iw = PackIndexWriter.createVersion(
1046 				indexStream, getIndexVersion());
1047 		iw.write(sortByName(), packcsum);
1048 		stats.timeWriting += System.currentTimeMillis() - writeStart;
1049 	}
1050 
1051 	/**
1052 	 * Create a bitmap index file to match the pack file just written.
1053 	 * <p>
1054 	 * Called after {@link #prepareBitmapIndex(ProgressMonitor)}.
1055 	 *
1056 	 * @param bitmapIndexStream
1057 	 *            output for the bitmap index data. Caller is responsible for
1058 	 *            closing this stream.
1059 	 * @throws java.io.IOException
1060 	 *             the index data could not be written to the supplied stream.
1061 	 */
1062 	public void writeBitmapIndex(OutputStream bitmapIndexStream)
1063 			throws IOException {
1064 		if (writeBitmaps == null)
1065 			throw new IOException(JGitText.get().bitmapsMustBePrepared);
1066 
1067 		long writeStart = System.currentTimeMillis();
1068 		final PackBitmapIndexWriterV1ile/PackBitmapIndexWriterV1.html#PackBitmapIndexWriterV1">PackBitmapIndexWriterV1 iw = new PackBitmapIndexWriterV1(bitmapIndexStream);
1069 		iw.write(writeBitmaps, packcsum);
1070 		stats.timeWriting += System.currentTimeMillis() - writeStart;
1071 	}
1072 
1073 	private List<ObjectToPack> sortByName() {
1074 		if (sortedByName == null) {
1075 			int cnt = 0;
1076 			cnt += objectsLists[OBJ_COMMIT].size();
1077 			cnt += objectsLists[OBJ_TREE].size();
1078 			cnt += objectsLists[OBJ_BLOB].size();
1079 			cnt += objectsLists[OBJ_TAG].size();
1080 
1081 			sortedByName = new BlockList<>(cnt);
1082 			sortedByName.addAll(objectsLists[OBJ_COMMIT]);
1083 			sortedByName.addAll(objectsLists[OBJ_TREE]);
1084 			sortedByName.addAll(objectsLists[OBJ_BLOB]);
1085 			sortedByName.addAll(objectsLists[OBJ_TAG]);
1086 			Collections.sort(sortedByName);
1087 		}
1088 		return sortedByName;
1089 	}
1090 
1091 	private void beginPhase(PackingPhase phase, ProgressMonitor monitor,
1092 			long cnt) {
1093 		state.phase = phase;
1094 		String task;
1095 		switch (phase) {
1096 		case COUNTING:
1097 			task = JGitText.get().countingObjects;
1098 			break;
1099 		case GETTING_SIZES:
1100 			task = JGitText.get().searchForSizes;
1101 			break;
1102 		case FINDING_SOURCES:
1103 			task = JGitText.get().searchForReuse;
1104 			break;
1105 		case COMPRESSING:
1106 			task = JGitText.get().compressingObjects;
1107 			break;
1108 		case WRITING:
1109 			task = JGitText.get().writingObjects;
1110 			break;
1111 		case BUILDING_BITMAPS:
1112 			task = JGitText.get().buildingBitmaps;
1113 			break;
1114 		default:
1115 			throw new IllegalArgumentException(
1116 					MessageFormat.format(JGitText.get().illegalPackingPhase, phase));
1117 		}
1118 		monitor.beginTask(task, (int) cnt);
1119 	}
1120 
	/** Finish the progress task started by {@code beginPhase}. */
	private void endPhase(ProgressMonitor monitor) {
		monitor.endTask();
	}
1124 
1125 	/**
1126 	 * Write the prepared pack to the supplied stream.
1127 	 * <p>
1128 	 * Called after
1129 	 * {@link #preparePack(ProgressMonitor, ObjectWalk, Set, Set, Set)} or
1130 	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
1131 	 * <p>
1132 	 * Performs delta search if enabled and writes the pack stream.
1133 	 * <p>
1134 	 * All reused objects data checksum (Adler32/CRC32) is computed and
1135 	 * validated against existing checksum.
1136 	 *
1137 	 * @param compressMonitor
1138 	 *            progress monitor to report object compression work.
1139 	 * @param writeMonitor
1140 	 *            progress monitor to report the number of objects written.
1141 	 * @param packStream
1142 	 *            output stream of pack data. The stream should be buffered by
1143 	 *            the caller. The caller is responsible for closing the stream.
1144 	 * @throws java.io.IOException
1145 	 *             an error occurred reading a local object's data to include in
1146 	 *             the pack, or writing compressed object data to the output
1147 	 *             stream.
1148 	 * @throws WriteAbortedException
1149 	 *             the write operation is aborted by
1150 	 *             {@link org.eclipse.jgit.transport.ObjectCountCallback} .
1151 	 */
1152 	public void writePack(ProgressMonitor compressMonitor,
1153 			ProgressMonitor writeMonitor, OutputStream packStream)
1154 			throws IOException {
1155 		if (compressMonitor == null)
1156 			compressMonitor = NullProgressMonitor.INSTANCE;
1157 		if (writeMonitor == null)
1158 			writeMonitor = NullProgressMonitor.INSTANCE;
1159 
1160 		excludeInPacks = null;
1161 		excludeInPackLast = null;
1162 
1163 		boolean needSearchForReuse = reuseSupport != null && (
1164 				   reuseDeltas
1165 				|| config.isReuseObjects()
1166 				|| !cachedPacks.isEmpty());
1167 
1168 		if (compressMonitor instanceof BatchingProgressMonitor) {
1169 			long delay = 1000;
1170 			if (needSearchForReuse && config.isDeltaCompress())
1171 				delay = 500;
1172 			((BatchingProgressMonitor) compressMonitor).setDelayStart(
1173 					delay,
1174 					TimeUnit.MILLISECONDS);
1175 		}
1176 
1177 		if (needSearchForReuse)
1178 			searchForReuse(compressMonitor);
1179 		if (config.isDeltaCompress())
1180 			searchForDeltas(compressMonitor);
1181 
1182 		crc32 = new CRC32();
1183 		final PackOutputStreamrage/pack/PackOutputStream.html#PackOutputStream">PackOutputStream out = new PackOutputStream(
1184 			writeMonitor,
1185 			isIndexDisabled()
1186 				? packStream
1187 				: new CheckedOutputStream(packStream, crc32),
1188 			this);
1189 
1190 		long objCnt = packfileUriConfig == null ? getObjectCount() :
1191 			getUnoffloadedObjectCount();
1192 		stats.totalObjects = objCnt;
1193 		if (callback != null)
1194 			callback.setObjectCount(objCnt);
1195 		beginPhase(PackingPhase.WRITING, writeMonitor, objCnt);
1196 		long writeStart = System.currentTimeMillis();
1197 		try {
1198 			List<CachedPack> unwrittenCachedPacks;
1199 
1200 			if (packfileUriConfig != null) {
1201 				unwrittenCachedPacks = new ArrayList<>();
1202 				CachedPackUriProvider p = packfileUriConfig.cachedPackUriProvider;
1203 				PacketLineOut o = packfileUriConfig.pckOut;
1204 
1205 				o.writeString("packfile-uris\n"); //$NON-NLS-1$
1206 				for (CachedPack pack : cachedPacks) {
1207 					CachedPackUriProvider.PackInfo packInfo = p.getInfo(
1208 							pack, packfileUriConfig.protocolsSupported);
1209 					if (packInfo != null) {
1210 						o.writeString(packInfo.getHash() + ' ' +
1211 								packInfo.getUri() + '\n');
1212 						stats.offloadedPackfiles += 1;
1213 						stats.offloadedPackfileSize += packInfo.getSize();
1214 					} else {
1215 						unwrittenCachedPacks.add(pack);
1216 					}
1217 				}
1218 				packfileUriConfig.pckOut.writeDelim();
1219 				packfileUriConfig.pckOut.writeString("packfile\n"); //$NON-NLS-1$
1220 			} else {
1221 				unwrittenCachedPacks = cachedPacks;
1222 			}
1223 
1224 			out.writeFileHeader(PACK_VERSION_GENERATED, objCnt);
1225 			out.flush();
1226 
1227 			writeObjects(out);
1228 			if (!edgeObjects.isEmpty() || !cachedPacks.isEmpty()) {
1229 				for (PackStatistics.ObjectType.Accumulator typeStat : stats.objectTypes) {
1230 					if (typeStat == null)
1231 						continue;
1232 					stats.thinPackBytes += typeStat.bytes;
1233 				}
1234 			}
1235 
1236 			stats.reusedPacks = Collections.unmodifiableList(cachedPacks);
1237 			for (CachedPack pack : unwrittenCachedPacks) {
1238 				long deltaCnt = pack.getDeltaCount();
1239 				stats.reusedObjects += pack.getObjectCount();
1240 				stats.reusedDeltas += deltaCnt;
1241 				stats.totalDeltas += deltaCnt;
1242 				reuseSupport.copyPackAsIs(out, pack);
1243 			}
1244 			writeChecksum(out);
1245 			out.flush();
1246 		} finally {
1247 			stats.timeWriting = System.currentTimeMillis() - writeStart;
1248 			stats.depth = depth;
1249 
1250 			for (PackStatistics.ObjectType.Accumulator typeStat : stats.objectTypes) {
1251 				if (typeStat == null)
1252 					continue;
1253 				typeStat.cntDeltas += typeStat.reusedDeltas;
1254 				stats.reusedObjects += typeStat.reusedObjects;
1255 				stats.reusedDeltas += typeStat.reusedDeltas;
1256 				stats.totalDeltas += typeStat.cntDeltas;
1257 			}
1258 		}
1259 
1260 		stats.totalBytes = out.length();
1261 		reader.close();
1262 		endPhase(writeMonitor);
1263 	}
1264 
1265 	/**
1266 	 * Get statistics of what this PackWriter did in order to create the final
1267 	 * pack stream.
1268 	 *
1269 	 * @return description of what this PackWriter did in order to create the
1270 	 *         final pack stream. This should only be invoked after the calls to
1271 	 *         create the pack/index/bitmap have completed.
1272 	 */
1273 	public PackStatistics getStatistics() {
1274 		return new PackStatistics(stats);
1275 	}
1276 
1277 	/**
1278 	 * Get snapshot of the current state of this PackWriter.
1279 	 *
1280 	 * @return snapshot of the current state of this PackWriter.
1281 	 */
1282 	public State getState() {
1283 		return state.snapshot();
1284 	}
1285 
1286 	/**
1287 	 * {@inheritDoc}
1288 	 * <p>
1289 	 * Release all resources used by this writer.
1290 	 */
1291 	@Override
1292 	public void close() {
1293 		reader.close();
1294 		if (myDeflater != null) {
1295 			myDeflater.end();
1296 			myDeflater = null;
1297 		}
1298 		instances.remove(selfRef);
1299 	}
1300 
1301 	private void searchForReuse(ProgressMonitor monitor) throws IOException {
1302 		long cnt = 0;
1303 		cnt += objectsLists[OBJ_COMMIT].size();
1304 		cnt += objectsLists[OBJ_TREE].size();
1305 		cnt += objectsLists[OBJ_BLOB].size();
1306 		cnt += objectsLists[OBJ_TAG].size();
1307 
1308 		long start = System.currentTimeMillis();
1309 		beginPhase(PackingPhase.FINDING_SOURCES, monitor, cnt);
1310 		if (cnt <= 4096) {
1311 			// For small object counts, do everything as one list.
1312 			BlockList<ObjectToPack> tmp = new BlockList<>((int) cnt);
1313 			tmp.addAll(objectsLists[OBJ_TAG]);
1314 			tmp.addAll(objectsLists[OBJ_COMMIT]);
1315 			tmp.addAll(objectsLists[OBJ_TREE]);
1316 			tmp.addAll(objectsLists[OBJ_BLOB]);
1317 			searchForReuse(monitor, tmp);
1318 			if (pruneCurrentObjectList) {
1319 				// If the list was pruned, we need to re-prune the main lists.
1320 				pruneEdgesFromObjectList(objectsLists[OBJ_COMMIT]);
1321 				pruneEdgesFromObjectList(objectsLists[OBJ_TREE]);
1322 				pruneEdgesFromObjectList(objectsLists[OBJ_BLOB]);
1323 				pruneEdgesFromObjectList(objectsLists[OBJ_TAG]);
1324 			}
1325 		} else {
1326 			searchForReuse(monitor, objectsLists[OBJ_TAG]);
1327 			searchForReuse(monitor, objectsLists[OBJ_COMMIT]);
1328 			searchForReuse(monitor, objectsLists[OBJ_TREE]);
1329 			searchForReuse(monitor, objectsLists[OBJ_BLOB]);
1330 		}
1331 		endPhase(monitor);
1332 		stats.timeSearchingForReuse = System.currentTimeMillis() - start;
1333 
1334 		if (config.isReuseDeltas() && config.getCutDeltaChains()) {
1335 			cutDeltaChains(objectsLists[OBJ_TREE]);
1336 			cutDeltaChains(objectsLists[OBJ_BLOB]);
1337 		}
1338 	}
1339 
1340 	private void searchForReuse(ProgressMonitor monitor, List<ObjectToPack> list)
1341 			throws IOException, MissingObjectException {
1342 		pruneCurrentObjectList = false;
1343 		reuseSupport.selectObjectRepresentation(this, monitor, list);
1344 		if (pruneCurrentObjectList)
1345 			pruneEdgesFromObjectList(list);
1346 	}
1347 
	/**
	 * Break overly long reused delta chains so no chain exceeds the
	 * configured maximum depth.
	 *
	 * @param list
	 *            objects whose delta chains should be measured and cut.
	 * @throws IOException
	 *             reselecting a non-delta representation failed.
	 */
	private void cutDeltaChains(BlockList<ObjectToPack> list)
			throws IOException {
		int max = config.getMaxDeltaDepth();
		// Walk from the end of the list; follow each object's base chain,
		// recording chain length as we go.
		for (int idx = list.size() - 1; idx >= 0; idx--) {
			int d = 0;
			ObjectToPack b = list.get(idx).getDeltaBase();
			while (b != null) {
				// Stop once we reach a base whose chain length is already
				// known to be at least this deep.
				if (d < b.getChainLength())
					break;
				b.setChainLength(++d);
				// Chain too deep: force this base back to a whole object.
				if (d >= max && b.isDeltaRepresentation()) {
					reselectNonDelta(b);
					break;
				}
				b = b.getDeltaBase();
			}
		}
		if (config.isDeltaCompress()) {
			// Chain lengths are scratch state; clear before delta search.
			for (ObjectToPack otp : list)
				otp.clearChainLength();
		}
	}
1370 
1371 	private void searchForDeltas(ProgressMonitor monitor)
1372 			throws MissingObjectException, IncorrectObjectTypeException,
1373 			IOException {
1374 		// Commits and annotated tags tend to have too many differences to
1375 		// really benefit from delta compression. Consequently just don't
1376 		// bother examining those types here.
1377 		//
1378 		ObjectToPack[] list = new ObjectToPack[
1379 				  objectsLists[OBJ_TREE].size()
1380 				+ objectsLists[OBJ_BLOB].size()
1381 				+ edgeObjects.size()];
1382 		int cnt = 0;
1383 		cnt = findObjectsNeedingDelta(list, cnt, OBJ_TREE);
1384 		cnt = findObjectsNeedingDelta(list, cnt, OBJ_BLOB);
1385 		if (cnt == 0)
1386 			return;
1387 		int nonEdgeCnt = cnt;
1388 
1389 		// Queue up any edge objects that we might delta against.  We won't
1390 		// be sending these as we assume the other side has them, but we need
1391 		// them in the search phase below.
1392 		//
1393 		for (ObjectToPack eo : edgeObjects) {
1394 			eo.setWeight(0);
1395 			list[cnt++] = eo;
1396 		}
1397 
1398 		// Compute the sizes of the objects so we can do a proper sort.
1399 		// We let the reader skip missing objects if it chooses. For
1400 		// some readers this can be a huge win. We detect missing objects
1401 		// by having set the weights above to 0 and allowing the delta
1402 		// search code to discover the missing object and skip over it, or
1403 		// abort with an exception if we actually had to have it.
1404 		//
1405 		final long sizingStart = System.currentTimeMillis();
1406 		beginPhase(PackingPhase.GETTING_SIZES, monitor, cnt);
1407 		AsyncObjectSizeQueue<ObjectToPack> sizeQueue = reader.getObjectSize(
1408 				Arrays.<ObjectToPack> asList(list).subList(0, cnt), false);
1409 		try {
1410 			final long limit = Math.min(
1411 					config.getBigFileThreshold(),
1412 					Integer.MAX_VALUE);
1413 			for (;;) {
1414 				try {
1415 					if (!sizeQueue.next())
1416 						break;
1417 				} catch (MissingObjectException notFound) {
1418 					monitor.update(1);
1419 					if (ignoreMissingUninteresting) {
1420 						ObjectToPack otp = sizeQueue.getCurrent();
1421 						if (otp != null && otp.isEdge()) {
1422 							otp.setDoNotDelta();
1423 							continue;
1424 						}
1425 
1426 						otp = objectsMap.get(notFound.getObjectId());
1427 						if (otp != null && otp.isEdge()) {
1428 							otp.setDoNotDelta();
1429 							continue;
1430 						}
1431 					}
1432 					throw notFound;
1433 				}
1434 
1435 				ObjectToPack otp = sizeQueue.getCurrent();
1436 				if (otp == null)
1437 					otp = objectsMap.get(sizeQueue.getObjectId());
1438 
1439 				long sz = sizeQueue.getSize();
1440 				if (DeltaIndex.BLKSZ < sz && sz < limit)
1441 					otp.setWeight((int) sz);
1442 				else
1443 					otp.setDoNotDelta(); // too small, or too big
1444 				monitor.update(1);
1445 			}
1446 		} finally {
1447 			sizeQueue.release();
1448 		}
1449 		endPhase(monitor);
1450 		stats.timeSearchingForSizes = System.currentTimeMillis() - sizingStart;
1451 
1452 		// Sort the objects by path hash so like files are near each other,
1453 		// and then by size descending so that bigger files are first. This
1454 		// applies "Linus' Law" which states that newer files tend to be the
1455 		// bigger ones, because source files grow and hardly ever shrink.
1456 		//
1457 		Arrays.sort(list, 0, cnt, (ObjectToPack"../../../../../../org/eclipse/jgit/internal/storage/pack/ObjectToPack.html#ObjectToPack">ObjectToPack a, ObjectToPack b) -> {
1458 			int cmp = (a.isDoNotDelta() ? 1 : 0) - (b.isDoNotDelta() ? 1 : 0);
1459 			if (cmp != 0) {
1460 				return cmp;
1461 			}
1462 
1463 			cmp = a.getType() - b.getType();
1464 			if (cmp != 0) {
1465 				return cmp;
1466 			}
1467 
1468 			cmp = (a.getPathHash() >>> 1) - (b.getPathHash() >>> 1);
1469 			if (cmp != 0) {
1470 				return cmp;
1471 			}
1472 
1473 			cmp = (a.getPathHash() & 1) - (b.getPathHash() & 1);
1474 			if (cmp != 0) {
1475 				return cmp;
1476 			}
1477 
1478 			cmp = (a.isEdge() ? 0 : 1) - (b.isEdge() ? 0 : 1);
1479 			if (cmp != 0) {
1480 				return cmp;
1481 			}
1482 
1483 			return b.getWeight() - a.getWeight();
1484 		});
1485 
1486 		// Above we stored the objects we cannot delta onto the end.
1487 		// Remove them from the list so we don't waste time on them.
1488 		while (0 < cnt && list[cnt - 1].isDoNotDelta()) {
1489 			if (!list[cnt - 1].isEdge())
1490 				nonEdgeCnt--;
1491 			cnt--;
1492 		}
1493 		if (cnt == 0)
1494 			return;
1495 
1496 		final long searchStart = System.currentTimeMillis();
1497 		searchForDeltas(monitor, list, cnt);
1498 		stats.deltaSearchNonEdgeObjects = nonEdgeCnt;
1499 		stats.timeCompressing = System.currentTimeMillis() - searchStart;
1500 
1501 		for (int i = 0; i < cnt; i++)
1502 			if (!list[i].isEdge() && list[i].isDeltaRepresentation())
1503 				stats.deltasFound++;
1504 	}
1505 
1506 	private int findObjectsNeedingDelta(ObjectToPack[] list, int cnt, int type) {
1507 		for (ObjectToPack otp : objectsLists[type]) {
1508 			if (otp.isDoNotDelta()) // delta is disabled for this path
1509 				continue;
1510 			if (otp.isDeltaRepresentation()) // already reusing a delta
1511 				continue;
1512 			otp.setWeight(0);
1513 			list[cnt++] = otp;
1514 		}
1515 		return cnt;
1516 	}
1517 
1518 	private void reselectNonDelta(ObjectToPack otp) throws IOException {
1519 		otp.clearDeltaBase();
1520 		otp.clearReuseAsIs();
1521 		boolean old = reuseDeltas;
1522 		reuseDeltas = false;
1523 		reuseSupport.selectObjectRepresentation(this,
1524 				NullProgressMonitor.INSTANCE,
1525 				Collections.singleton(otp));
1526 		reuseDeltas = old;
1527 	}
1528 
1529 	private void searchForDeltas(final ProgressMonitor monitor,
1530 			final ObjectToPack[] list, final int cnt)
1531 			throws MissingObjectException, IncorrectObjectTypeException,
1532 			LargeObjectException, IOException {
1533 		int threads = config.getThreads();
1534 		if (threads == 0)
1535 			threads = Runtime.getRuntime().availableProcessors();
1536 		if (threads <= 1 || cnt <= config.getDeltaSearchWindowSize())
1537 			singleThreadDeltaSearch(monitor, list, cnt);
1538 		else
1539 			parallelDeltaSearch(monitor, list, cnt, threads);
1540 	}
1541 
1542 	private void singleThreadDeltaSearch(ProgressMonitor monitor,
1543 			ObjectToPack[] list, int cnt) throws IOException {
1544 		long totalWeight = 0;
1545 		for (int i = 0; i < cnt; i++) {
1546 			ObjectToPack o = list[i];
1547 			totalWeight += DeltaTask.getAdjustedWeight(o);
1548 		}
1549 
1550 		long bytesPerUnit = 1;
1551 		while (DeltaTask.MAX_METER <= (totalWeight / bytesPerUnit))
1552 			bytesPerUnit <<= 10;
1553 		int cost = (int) (totalWeight / bytesPerUnit);
1554 		if (totalWeight % bytesPerUnit != 0)
1555 			cost++;
1556 
1557 		beginPhase(PackingPhase.COMPRESSING, monitor, cost);
1558 		new DeltaWindow(config, new DeltaCache(config), reader,
1559 				monitor, bytesPerUnit,
1560 				list, 0, cnt).search();
1561 		endPhase(monitor);
1562 	}
1563 
	/**
	 * Run the delta search across {@code threads} workers.
	 * <p>
	 * Three execution strategies are used depending on what the
	 * configuration supplies: a caller-provided {@link ExecutorService},
	 * a temporary fixed pool created (and torn down) here, or a plain
	 * {@link Executor} that may or may not run tasks asynchronously.
	 * Worker failures are collected into a list and rethrown afterwards
	 * as if the search had been single-threaded.
	 */
	@SuppressWarnings("Finally")
	private void parallelDeltaSearch(ProgressMonitor monitor,
			ObjectToPack[] list, int cnt, int threads) throws IOException {
		DeltaCache dc = new ThreadSafeDeltaCache(config);
		ThreadSafeProgressMonitor pm = new ThreadSafeProgressMonitor(monitor);
		DeltaTask.Block taskBlock = new DeltaTask.Block(threads, config,
				reader, dc, pm,
				list, 0, cnt);
		taskBlock.partitionTasks();
		beginPhase(PackingPhase.COMPRESSING, monitor, taskBlock.cost());
		pm.startWorkers(taskBlock.tasks.size());

		Executor executor = config.getExecutor();
		final List<Throwable> errors =
				Collections.synchronizedList(new ArrayList<>(threads));
		if (executor instanceof ExecutorService) {
			// Caller supplied us a service, use it directly.
			runTasks((ExecutorService) executor, pm, taskBlock, errors);
		} else if (executor == null) {
			// Caller didn't give us a way to run the tasks, spawn up a
			// temporary thread pool and make sure it tears down cleanly.
			ExecutorService pool = Executors.newFixedThreadPool(threads);
			Throwable e1 = null;
			try {
				runTasks(pool, pm, taskBlock, errors);
			} catch (Exception e) {
				// Remember the failure; shutdown must still run first.
				e1 = e;
			} finally {
				pool.shutdown();
				// Wait indefinitely (in 60s slices) for the pool to drain.
				for (;;) {
					try {
						if (pool.awaitTermination(60, TimeUnit.SECONDS)) {
							break;
						}
					} catch (InterruptedException e) {
						if (e1 != null) {
							e.addSuppressed(e1);
						}
						throw new IOException(JGitText
								.get().packingCancelledDuringObjectsWriting, e);
					}
				}
			}
		} else {
			// The caller gave us an executor, but it might not do
			// asynchronous execution.  Wrap everything and hope it
			// can schedule these for us.
			for (DeltaTask task : taskBlock.tasks) {
				executor.execute(() -> {
					try {
						task.call();
					} catch (Throwable failure) {
						errors.add(failure);
					}
				});
			}
			try {
				pm.waitForCompletion();
			} catch (InterruptedException ie) {
				// We can't abort the other tasks as we have no handle.
				// Cross our fingers and just break out anyway.
				//
				throw new IOException(
						JGitText.get().packingCancelledDuringObjectsWriting,
						ie);
			}
		}

		// If any task threw an error, try to report it back as
		// though we weren't using a threaded search algorithm.
		//
		if (!errors.isEmpty()) {
			Throwable err = errors.get(0);
			if (err instanceof Error)
				throw (Error) err;
			if (err instanceof RuntimeException)
				throw (RuntimeException) err;
			if (err instanceof IOException)
				throw (IOException) err;

			throw new IOException(err.getMessage(), err);
		}
		endPhase(monitor);
	}
1648 
1649 	private static void runTasks(ExecutorService pool,
1650 			ThreadSafeProgressMonitor pm,
1651 			DeltaTask.Block tb, List<Throwable> errors) throws IOException {
1652 		List<Future<?>> futures = new ArrayList<>(tb.tasks.size());
1653 		for (DeltaTask task : tb.tasks)
1654 			futures.add(pool.submit(task));
1655 
1656 		try {
1657 			pm.waitForCompletion();
1658 			for (Future<?> f : futures) {
1659 				try {
1660 					f.get();
1661 				} catch (ExecutionException failed) {
1662 					errors.add(failed.getCause());
1663 				}
1664 			}
1665 		} catch (InterruptedException ie) {
1666 			for (Future<?> f : futures)
1667 				f.cancel(true);
1668 			throw new IOException(
1669 					JGitText.get().packingCancelledDuringObjectsWriting, ie);
1670 		}
1671 	}
1672 
1673 	private void writeObjects(PackOutputStream out) throws IOException {
1674 		writeObjects(out, objectsLists[OBJ_COMMIT]);
1675 		writeObjects(out, objectsLists[OBJ_TAG]);
1676 		writeObjects(out, objectsLists[OBJ_TREE]);
1677 		writeObjects(out, objectsLists[OBJ_BLOB]);
1678 	}
1679 
1680 	private void writeObjects(PackOutputStream out, List<ObjectToPack> list)
1681 			throws IOException {
1682 		if (list.isEmpty())
1683 			return;
1684 
1685 		typeStats = stats.objectTypes[list.get(0).getType()];
1686 		long beginOffset = out.length();
1687 
1688 		if (reuseSupport != null) {
1689 			reuseSupport.writeObjects(out, list);
1690 		} else {
1691 			for (ObjectToPack otp : list)
1692 				out.writeObject(otp);
1693 		}
1694 
1695 		typeStats.bytes += out.length() - beginOffset;
1696 		typeStats.cntObjects = list.size();
1697 	}
1698 
1699 	void writeObject(PackOutputStream out, ObjectToPack otp) throws IOException {
1700 		if (!otp.isWritten())
1701 			writeObjectImpl(out, otp);
1702 	}
1703 
	/**
	 * Write a single object, first writing its delta base (recursively)
	 * when one is needed, handling delta-chain cycles and stored
	 * representations that vanish mid-write.
	 *
	 * @param out
	 *            pack stream receiving the object.
	 * @param otp
	 *            object to emit.
	 * @throws IOException
	 *             writing failed, or a partially written object could not
	 *             be recovered.
	 */
	private void writeObjectImpl(PackOutputStream out, ObjectToPack otp)
			throws IOException {
		if (otp.wantWrite()) {
			// A cycle exists in this delta chain. This should only occur if a
			// selected object representation disappeared during writing
			// (for example due to a concurrent repack) and a different base
			// was chosen, forcing a cycle. Select something other than a
			// delta, and write this object.
			reselectNonDelta(otp);
		}
		otp.markWantWrite();

		// Loop: reuse may be retried with a different representation after
		// a StoredObjectRepresentationNotAvailableException.
		while (otp.isReuseAsIs()) {
			writeBase(out, otp.getDeltaBase());
			if (otp.isWritten())
				return; // Delta chain cycle caused this to write already.

			crc32.reset();
			otp.setOffset(out.length());
			try {
				reuseSupport.copyObjectAsIs(out, otp, reuseValidate);
				out.endObject();
				otp.setCRC((int) crc32.getValue());
				typeStats.reusedObjects++;
				if (otp.isDeltaRepresentation()) {
					typeStats.reusedDeltas++;
					typeStats.deltaBytes += out.length() - otp.getOffset();
				}
				return;
			} catch (StoredObjectRepresentationNotAvailableException gone) {
				// Safe to retry only if nothing was emitted yet for this
				// object; otherwise the stream is already corrupt.
				if (otp.getOffset() == out.length()) {
					otp.setOffset(0);
					otp.clearDeltaBase();
					otp.clearReuseAsIs();
					reuseSupport.selectObjectRepresentation(this,
							NullProgressMonitor.INSTANCE,
							Collections.singleton(otp));
					continue;
				}
				// Object writing already started, we cannot recover.
				//
				CorruptObjectException coe;
				coe = new CorruptObjectException(otp, ""); //$NON-NLS-1$
				coe.initCause(gone);
				throw coe;
			}
		}

		// If we reached here, reuse wasn't possible.
		//
		if (otp.isDeltaRepresentation()) {
			writeDeltaObjectDeflate(out, otp);
		} else {
			writeWholeObjectDeflate(out, otp);
		}
		out.endObject();
		otp.setCRC((int) crc32.getValue());
	}
1762 
1763 	private void writeBase(PackOutputStream out, ObjectToPack base)
1764 			throws IOException {
1765 		if (base != null && !base.isWritten() && !base.isEdge())
1766 			writeObjectImpl(out, base);
1767 	}
1768 
1769 	private void writeWholeObjectDeflate(PackOutputStream out,
1770 			final ObjectToPack otp) throws IOException {
1771 		final Deflater deflater = deflater();
1772 		final ObjectLoader ldr = reader.open(otp, otp.getType());
1773 
1774 		crc32.reset();
1775 		otp.setOffset(out.length());
1776 		out.writeHeader(otp, ldr.getSize());
1777 
1778 		deflater.reset();
1779 		DeflaterOutputStream dst = new DeflaterOutputStream(out, deflater);
1780 		ldr.copyTo(dst);
1781 		dst.finish();
1782 	}
1783 
	/**
	 * Write {@code otp} as a delta against its selected base.
	 * <p>
	 * Prefers a delta cached during the compression phase (already
	 * deflated); otherwise recomputes the delta and deflates it now.
	 *
	 * @param out
	 *            destination pack stream.
	 * @param otp
	 *            object whose delta representation is to be written.
	 * @throws IOException
	 *             the delta could not be produced or written.
	 */
	private void writeDeltaObjectDeflate(PackOutputStream out,
			final ObjectToPack otp) throws IOException {
		// The base must appear before the delta in the output pack.
		writeBase(out, otp.getDeltaBase());

		crc32.reset();
		otp.setOffset(out.length());

		// Cached deltas are stored pre-deflated; the weak reference may
		// have been cleared by the garbage collector since compression.
		DeltaCache.Ref ref = otp.popCachedDelta();
		if (ref != null) {
			byte[] zbuf = ref.get();
			if (zbuf != null) {
				out.writeHeader(otp, otp.getCachedSize());
				out.write(zbuf);
				typeStats.cntDeltas++;
				typeStats.deltaBytes += out.length() - otp.getOffset();
				return;
			}
		}

		// Cache miss: recompute the delta, then deflate it into the pack.
		try (TemporaryBuffer.Heap delta = delta(otp)) {
			out.writeHeader(otp, delta.length());

			Deflater deflater = deflater();
			deflater.reset();
			DeflaterOutputStream dst = new DeflaterOutputStream(out, deflater);
			delta.writeTo(dst, null);
			dst.finish();
		}
		typeStats.cntDeltas++;
		typeStats.deltaBytes += out.length() - otp.getOffset();
	}
1815 
1816 	private TemporaryBuffer.Heap delta(ObjectToPack otp)
1817 			throws IOException {
1818 		DeltaIndex index = new DeltaIndex(buffer(otp.getDeltaBaseId()));
1819 		byte[] res = buffer(otp);
1820 
1821 		// We never would have proposed this pair if the delta would be
1822 		// larger than the unpacked version of the object. So using it
1823 		// as our buffer limit is valid: we will never reach it.
1824 		//
1825 		TemporaryBuffer.Heap delta = new TemporaryBuffer.Heap(res.length);
1826 		index.encode(delta, res);
1827 		return delta;
1828 	}
1829 
	// Convenience overload using this writer's own configuration and reader.
	private byte[] buffer(AnyObjectId objId) throws IOException {
		return buffer(config, reader, objId);
	}
1833 
1834 	static byte[] buffer(PackConfig config, ObjectReader or, AnyObjectId objId)
1835 			throws IOException {
1836 		// PackWriter should have already pruned objects that
1837 		// are above the big file threshold, so our chances of
1838 		// the object being below it are very good. We really
1839 		// shouldn't be here, unless the implementation is odd.
1840 
1841 		return or.open(objId).getCachedBytes(config.getBigFileThreshold());
1842 	}
1843 
1844 	private Deflater deflater() {
1845 		if (myDeflater == null)
1846 			myDeflater = new Deflater(config.getCompressionLevel());
1847 		return myDeflater;
1848 	}
1849 
1850 	private void writeChecksum(PackOutputStream out) throws IOException {
1851 		packcsum = out.getDigest();
1852 		out.write(packcsum);
1853 	}
1854 
	/**
	 * Walk the object graph and enqueue every object that must be packed.
	 * <p>
	 * Uses the pack bitmap index as a fast path when available; otherwise
	 * performs a full revision walk starting from {@code want}, with
	 * {@code have} marked uninteresting.
	 *
	 * @param countingMonitor
	 *            progress feedback for the counting phase.
	 * @param walker
	 *            walk used to traverse commits, trees and blobs.
	 * @param want
	 *            objects the client needs.
	 * @param have
	 *            objects the client already has.
	 * @param noBitmaps
	 *            commits to exclude from later bitmap selection.
	 * @throws IOException
	 *             an object could not be read from the repository.
	 */
	private void findObjectsToPack(@NonNull ProgressMonitor countingMonitor,
			@NonNull ObjectWalk walker, @NonNull Set<? extends ObjectId> want,
			@NonNull Set<? extends ObjectId> have,
			@NonNull Set<? extends ObjectId> noBitmaps) throws IOException {
		final long countingStart = System.currentTimeMillis();
		beginPhase(PackingPhase.COUNTING, countingMonitor, ProgressMonitor.UNKNOWN);

		stats.interestingObjects = Collections.unmodifiableSet(new HashSet<ObjectId>(want));
		stats.uninterestingObjects = Collections.unmodifiableSet(new HashSet<ObjectId>(have));
		excludeFromBitmapSelection = noBitmaps;

		// Bitmaps can only be built for a complete, non-shallow pack with
		// no excluded objects.
		canBuildBitmaps = config.isBuildBitmaps()
				&& !shallowPack
				&& have.isEmpty()
				&& (excludeInPacks == null || excludeInPacks.length == 0);
		if (!shallowPack && useBitmaps) {
			BitmapIndex bitmapIndex = reader.getBitmapIndex();
			if (bitmapIndex != null) {
				// Fast path: derive the object set from bitmaps, no walk.
				BitmapWalker bitmapWalker = new BitmapWalker(
						walker, bitmapIndex, countingMonitor);
				findObjectsToPackUsingBitmaps(bitmapWalker, want, have);
				endPhase(countingMonitor);
				stats.timeCounting = System.currentTimeMillis() - countingStart;
				stats.bitmapIndexMisses = bitmapWalker.getCountOfBitmapIndexMisses();
				return;
			}
		}

		List<ObjectId> all = new ArrayList<>(want.size() + have.size());
		all.addAll(want);
		all.addAll(have);

		final RevFlag include = walker.newFlag("include"); //$NON-NLS-1$
		final RevFlag added = walker.newFlag("added"); //$NON-NLS-1$

		walker.carry(include);

		int haveEst = have.size();
		if (have.isEmpty()) {
			walker.sort(RevSort.COMMIT_TIME_DESC);
		} else {
			walker.sort(RevSort.TOPO);
			if (thin)
				walker.sort(RevSort.BOUNDARY, true);
		}

		List<RevObject> wantObjs = new ArrayList<>(want.size());
		List<RevObject> haveObjs = new ArrayList<>(haveEst);
		List<RevTag> wantTags = new ArrayList<>(want.size());

		// Retrieve the RevWalk's versions of "want" and "have" objects to
		// maintain any state previously set in the RevWalk.
		AsyncRevObjectQueue q = walker.parseAny(all, true);
		try {
			for (;;) {
				try {
					RevObject o = q.next();
					if (o == null)
						break;
					if (have.contains(o))
						haveObjs.add(o);
					if (want.contains(o)) {
						o.add(include);
						wantObjs.add(o);
						if (o instanceof RevTag)
							wantTags.add((RevTag) o);
					}
				} catch (MissingObjectException e) {
					// A "have" may reference an object this repository
					// never held; tolerate it when configured to do so.
					if (ignoreMissingUninteresting
							&& have.contains(e.getObjectId()))
						continue;
					throw e;
				}
			}
		} finally {
			q.release();
		}

		// Parse the targets of wanted annotated tags so the tagged objects
		// are available to the walk below.
		if (!wantTags.isEmpty()) {
			all = new ArrayList<>(wantTags.size());
			for (RevTag tag : wantTags)
				all.add(tag.getObject());
			q = walker.parseAny(all, true);
			try {
				while (q.next() != null) {
					// Just need to pop the queue item to parse the object.
				}
			} finally {
				q.release();
			}
		}

		if (walker instanceof DepthWalk.ObjectWalk) {
			DepthWalk.ObjectWalk depthWalk = (DepthWalk.ObjectWalk) walker;
			for (RevObject obj : wantObjs) {
				depthWalk.markRoot(obj);
			}
			// Mark the tree objects associated with "have" commits as
			// uninteresting to avoid writing redundant blobs. A normal RevWalk
			// lazily propagates the "uninteresting" state from a commit to its
			// tree during the walk, but DepthWalks can terminate early so
			// preemptively propagate that state here.
			for (RevObject obj : haveObjs) {
				if (obj instanceof RevCommit) {
					RevTree t = ((RevCommit) obj).getTree();
					depthWalk.markUninteresting(t);
				}
			}

			if (unshallowObjects != null) {
				for (ObjectId id : unshallowObjects) {
					depthWalk.markUnshallow(walker.parseAny(id));
				}
			}
		} else {
			for (RevObject obj : wantObjs)
				walker.markStart(obj);
		}
		for (RevObject obj : haveObjs)
			walker.markUninteresting(obj);

		// Phase 1: walk commits, collecting the interesting ones and
		// remembering boundary trees for the thin-pack base search below.
		final int maxBases = config.getDeltaSearchWindowSize();
		Set<RevTree> baseTrees = new HashSet<>();
		BlockList<RevCommit> commits = new BlockList<>();
		Set<ObjectId> roots = new HashSet<>();
		RevCommit c;
		while ((c = walker.next()) != null) {
			if (exclude(c))
				continue;
			if (c.has(RevFlag.UNINTERESTING)) {
				if (baseTrees.size() <= maxBases)
					baseTrees.add(c.getTree());
				continue;
			}

			commits.add(c);
			if (c.getParentCount() == 0) {
				roots.add(c.copy());
			}
			countingMonitor.update(1);
		}
		stats.rootCommits = Collections.unmodifiableSet(roots);

		if (shallowPack) {
			for (RevCommit cmit : commits) {
				addObject(cmit, 0);
			}
		} else {
			int commitCnt = 0;
			boolean putTagTargets = false;
			for (RevCommit cmit : commits) {
				if (!cmit.has(added)) {
					cmit.add(added);
					addObject(cmit, 0);
					commitCnt++;
				}

				for (int i = 0; i < cmit.getParentCount(); i++) {
					RevCommit p = cmit.getParent(i);
					if (!p.has(added) && !p.has(RevFlag.UNINTERESTING)
							&& !exclude(p)) {
						p.add(added);
						addObject(p, 0);
						commitCnt++;
					}
				}

				// Once enough commits are queued, also pull in included
				// commits referenced by tag targets.
				if (!putTagTargets && 4096 < commitCnt) {
					for (ObjectId id : tagTargets) {
						RevObject obj = walker.lookupOrNull(id);
						if (obj instanceof RevCommit
								&& obj.has(include)
								&& !obj.has(RevFlag.UNINTERESTING)
								&& !obj.has(added)) {
							obj.add(added);
							addObject(obj, 0);
						}
					}
					putTagTargets = true;
				}
			}
		}
		commits = null;

		// Phase 2: walk trees and blobs reachable from the commit set.
		if (thin && !baseTrees.isEmpty()) {
			BaseSearch bases = new BaseSearch(countingMonitor, baseTrees, //
					objectsMap, edgeObjects, reader);
			RevObject o;
			while ((o = walker.nextObject()) != null) {
				if (o.has(RevFlag.UNINTERESTING))
					continue;
				if (exclude(o))
					continue;

				int pathHash = walker.getPathHashCode();
				byte[] pathBuf = walker.getPathBuffer();
				int pathLen = walker.getPathLength();
				bases.addBase(o.getType(), pathBuf, pathLen, pathHash);
				if (!depthSkip(o, walker)) {
					filterAndAddObject(o, o.getType(), pathHash, want);
				}
				countingMonitor.update(1);
			}
		} else {
			RevObject o;
			while ((o = walker.nextObject()) != null) {
				if (o.has(RevFlag.UNINTERESTING))
					continue;
				if (exclude(o))
					continue;
				if (!depthSkip(o, walker)) {
					filterAndAddObject(o, o.getType(), walker.getPathHashCode(),
									   want);
				}
				countingMonitor.update(1);
			}
		}

		for (CachedPack pack : cachedPacks)
			countingMonitor.update((int) pack.getObjectCount());
		endPhase(countingMonitor);
		stats.timeCounting = System.currentTimeMillis() - countingStart;
		stats.bitmapIndexMisses = -1;
	}
2079 
	/**
	 * Compute the objects to pack via the bitmap index instead of a walk.
	 * <p>
	 * The result is {@code want} minus {@code have}, as resolved by the
	 * bitmap walker, filtered through {@link #exclude(AnyObjectId)} and
	 * {@code filterAndAddObject}.
	 *
	 * @param bitmapWalker
	 *            walker backed by the pack bitmap index.
	 * @param want
	 *            objects the client needs.
	 * @param have
	 *            objects the client already has.
	 * @throws MissingObjectException
	 *             a wanted object does not exist in the repository.
	 * @throws IncorrectObjectTypeException
	 *             an object was not of the expected type.
	 * @throws IOException
	 *             reading from the repository failed.
	 */
	private void findObjectsToPackUsingBitmaps(
			BitmapWalker bitmapWalker, Set<? extends ObjectId> want,
			Set<? extends ObjectId> have)
			throws MissingObjectException, IncorrectObjectTypeException,
			IOException {
		BitmapBuilder haveBitmap = bitmapWalker.findObjects(have, null, true);
		BitmapBuilder wantBitmap = bitmapWalker.findObjects(want, haveBitmap,
				false);
		// Objects to send = wanted minus what the client already holds.
		BitmapBuilder needBitmap = wantBitmap.andNot(haveBitmap);

		if (useCachedPacks && reuseSupport != null && !reuseValidate
				&& (excludeInPacks == null || excludeInPacks.length == 0))
			cachedPacks.addAll(
					reuseSupport.getCachedPacksAndUpdate(needBitmap));

		for (BitmapObject obj : needBitmap) {
			ObjectId objectId = obj.getObjectId();
			if (exclude(objectId)) {
				needBitmap.remove(objectId);
				continue;
			}
			filterAndAddObject(objectId, obj.getType(), 0, want);
		}

		// For thin packs, record the client's objects as valid delta bases.
		if (thin)
			haveObjects = haveBitmap;
	}
2107 
2108 	private static void pruneEdgesFromObjectList(List<ObjectToPack> list) {
2109 		final int size = list.size();
2110 		int src = 0;
2111 		int dst = 0;
2112 
2113 		for (; src < size; src++) {
2114 			ObjectToPack obj = list.get(src);
2115 			if (obj.isEdge())
2116 				continue;
2117 			if (dst != src)
2118 				list.set(dst, obj);
2119 			dst++;
2120 		}
2121 
2122 		while (dst < list.size())
2123 			list.remove(list.size() - 1);
2124 	}
2125 
2126 	/**
2127 	 * Include one object to the output file.
2128 	 * <p>
2129 	 * Objects are written in the order they are added. If the same object is
2130 	 * added twice, it may be written twice, creating a larger than necessary
2131 	 * file.
2132 	 *
2133 	 * @param object
2134 	 *            the object to add.
2135 	 * @throws org.eclipse.jgit.errors.IncorrectObjectTypeException
2136 	 *             the object is an unsupported type.
2137 	 */
2138 	public void addObject(RevObject object)
2139 			throws IncorrectObjectTypeException {
2140 		if (!exclude(object))
2141 			addObject(object, 0);
2142 	}
2143 
	// Delegates to the AnyObjectId variant using the object's parsed type.
	private void addObject(RevObject object, int pathHashCode) {
		addObject(object, object.getType(), pathHashCode);
	}
2147 
2148 	private void addObject(
2149 			final AnyObjectId src, final int type, final int pathHashCode) {
2150 		final ObjectToPack otp;
2151 		if (reuseSupport != null)
2152 			otp = reuseSupport.newObjectToPack(src, type);
2153 		else
2154 			otp = new ObjectToPack(src, type);
2155 		otp.setPathHash(pathHashCode);
2156 		objectsLists[type].add(otp);
2157 		objectsMap.add(otp);
2158 	}
2159 
2160 	/**
2161 	 * Determines if the object should be omitted from the pack as a result of
2162 	 * its depth (probably because of the tree:<depth> filter).
2163 	 * <p>
2164 	 * Causes {@code walker} to skip traversing the current tree, which ought to
2165 	 * have just started traversal, assuming this method is called as soon as a
2166 	 * new depth is reached.
2167 	 * <p>
2168 	 * This method increments the {@code treesTraversed} statistic.
2169 	 *
2170 	 * @param obj
2171 	 *            the object to check whether it should be omitted.
2172 	 * @param walker
2173 	 *            the walker being used for traveresal.
2174 	 * @return whether the given object should be skipped.
2175 	 */
2176 	private boolean depthSkip(@NonNull RevObject obj, ObjectWalk walker) {
2177 		long treeDepth = walker.getTreeDepth();
2178 
2179 		// Check if this object needs to be rejected because it is a tree or
2180 		// blob that is too deep from the root tree.
2181 
2182 		// A blob is considered one level deeper than the tree that contains it.
2183 		if (obj.getType() == OBJ_BLOB) {
2184 			treeDepth++;
2185 		} else {
2186 			stats.treesTraversed++;
2187 		}
2188 
2189 		if (filterSpec.getTreeDepthLimit() < 0 ||
2190 			treeDepth <= filterSpec.getTreeDepthLimit()) {
2191 			return false;
2192 		}
2193 
2194 		walker.skipTree();
2195 		return true;
2196 	}
2197 
2198 	// Adds the given object as an object to be packed, first performing
2199 	// filtering on blobs at or exceeding a given size.
2200 	private void filterAndAddObject(@NonNull AnyObjectId src, int type,
2201 			int pathHashCode, @NonNull Set<? extends AnyObjectId> want)
2202 			throws IOException {
2203 
2204 		// Check if this object needs to be rejected, doing the cheaper
2205 		// checks first.
2206 		boolean reject =
2207 			(!filterSpec.allowsType(type) && !want.contains(src)) ||
2208 			(filterSpec.getBlobLimit() >= 0 &&
2209 				type == OBJ_BLOB &&
2210 				!want.contains(src) &&
2211 				reader.getObjectSize(src, OBJ_BLOB) > filterSpec.getBlobLimit());
2212 		if (!reject) {
2213 			addObject(src, type, pathHashCode);
2214 		}
2215 	}
2216 
2217 	private boolean exclude(AnyObjectId objectId) {
2218 		if (excludeInPacks == null)
2219 			return false;
2220 		if (excludeInPackLast.contains(objectId))
2221 			return true;
2222 		for (ObjectIdSet idx : excludeInPacks) {
2223 			if (idx.contains(objectId)) {
2224 				excludeInPackLast = idx;
2225 				return true;
2226 			}
2227 		}
2228 		return false;
2229 	}
2230 
2231 	/**
2232 	 * Select an object representation for this writer.
2233 	 * <p>
2234 	 * An {@link org.eclipse.jgit.lib.ObjectReader} implementation should invoke
2235 	 * this method once for each representation available for an object, to
2236 	 * allow the writer to find the most suitable one for the output.
2237 	 *
2238 	 * @param otp
2239 	 *            the object being packed.
2240 	 * @param next
2241 	 *            the next available representation from the repository.
2242 	 */
2243 	public void select(ObjectToPack otp, StoredObjectRepresentation next) {
2244 		int nFmt = next.getFormat();
2245 
2246 		if (!cachedPacks.isEmpty()) {
2247 			if (otp.isEdge())
2248 				return;
2249 			if (nFmt == PACK_WHOLE || nFmt == PACK_DELTA) {
2250 				for (CachedPack pack : cachedPacks) {
2251 					if (pack.hasObject(otp, next)) {
2252 						otp.setEdge();
2253 						otp.clearDeltaBase();
2254 						otp.clearReuseAsIs();
2255 						pruneCurrentObjectList = true;
2256 						return;
2257 					}
2258 				}
2259 			}
2260 		}
2261 
2262 		if (nFmt == PACK_DELTA && reuseDeltas && reuseDeltaFor(otp)) {
2263 			ObjectId baseId = next.getDeltaBase();
2264 			ObjectToPack ptr = objectsMap.get(baseId);
2265 			if (ptr != null && !ptr.isEdge()) {
2266 				otp.setDeltaBase(ptr);
2267 				otp.setReuseAsIs();
2268 			} else if (thin && have(ptr, baseId)) {
2269 				otp.setDeltaBase(baseId);
2270 				otp.setReuseAsIs();
2271 			} else {
2272 				otp.clearDeltaBase();
2273 				otp.clearReuseAsIs();
2274 			}
2275 		} else if (nFmt == PACK_WHOLE && config.isReuseObjects()) {
2276 			int nWeight = next.getWeight();
2277 			if (otp.isReuseAsIs() && !otp.isDeltaRepresentation()) {
2278 				// We've chosen another PACK_WHOLE format for this object,
2279 				// choose the one that has the smaller compressed size.
2280 				//
2281 				if (otp.getWeight() <= nWeight)
2282 					return;
2283 			}
2284 			otp.clearDeltaBase();
2285 			otp.setReuseAsIs();
2286 			otp.setWeight(nWeight);
2287 		} else {
2288 			otp.clearDeltaBase();
2289 			otp.clearReuseAsIs();
2290 		}
2291 
2292 		otp.setDeltaAttempted(reuseDeltas && next.wasDeltaAttempted());
2293 		otp.select(next);
2294 	}
2295 
2296 	private final boolean have(ObjectToPack ptr, AnyObjectId objectId) {
2297 		return (ptr != null && ptr.isEdge())
2298 				|| (haveObjects != null && haveObjects.contains(objectId));
2299 	}
2300 
2301 	/**
2302 	 * Prepares the bitmaps to be written to the bitmap index file.
2303 	 * <p>
2304 	 * Bitmaps can be used to speed up fetches and clones by storing the entire
2305 	 * object graph at selected commits. Writing a bitmap index is an optional
2306 	 * feature that not all pack users may require.
2307 	 * <p>
2308 	 * Called after {@link #writeIndex(OutputStream)}.
2309 	 * <p>
2310 	 * To reduce memory internal state is cleared during this method, rendering
2311 	 * the PackWriter instance useless for anything further than a call to write
2312 	 * out the new bitmaps with {@link #writeBitmapIndex(OutputStream)}.
2313 	 *
2314 	 * @param pm
2315 	 *            progress monitor to report bitmap building work.
2316 	 * @return whether a bitmap index may be written.
2317 	 * @throws java.io.IOException
2318 	 *             when some I/O problem occur during reading objects.
2319 	 */
2320 	public boolean prepareBitmapIndex(ProgressMonitor pm) throws IOException {
2321 		if (!canBuildBitmaps || getObjectCount() > Integer.MAX_VALUE
2322 				|| !cachedPacks.isEmpty())
2323 			return false;
2324 
2325 		if (pm == null)
2326 			pm = NullProgressMonitor.INSTANCE;
2327 
2328 		int numCommits = objectsLists[OBJ_COMMIT].size();
2329 		List<ObjectToPack> byName = sortByName();
2330 		sortedByName = null;
2331 		objectsLists = null;
2332 		objectsMap = null;
2333 		writeBitmaps = new PackBitmapIndexBuilder(byName);
2334 		byName = null;
2335 
2336 		PackWriterBitmapPreparer bitmapPreparer = new PackWriterBitmapPreparer(
2337 				reader, writeBitmaps, pm, stats.interestingObjects, config);
2338 
2339 		Collection<BitmapCommit> selectedCommits = bitmapPreparer
2340 				.selectCommits(numCommits, excludeFromBitmapSelection);
2341 
2342 		beginPhase(PackingPhase.BUILDING_BITMAPS, pm, selectedCommits.size());
2343 
2344 		BitmapWalker walker = bitmapPreparer.newBitmapWalker();
2345 		AnyObjectId last = null;
2346 		for (BitmapCommit cmit : selectedCommits) {
2347 			if (!cmit.isReuseWalker()) {
2348 				walker = bitmapPreparer.newBitmapWalker();
2349 			}
2350 			BitmapBuilder bitmap = walker.findObjects(
2351 					Collections.singleton(cmit), null, false);
2352 
2353 			if (last != null && cmit.isReuseWalker() && !bitmap.contains(last))
2354 				throw new IllegalStateException(MessageFormat.format(
2355 						JGitText.get().bitmapMissingObject, cmit.name(),
2356 						last.name()));
2357 			last = BitmapCommit.copyFrom(cmit).build();
2358 			writeBitmaps.processBitmapForWrite(cmit, bitmap.build(),
2359 					cmit.getFlags());
2360 
2361 			// The bitmap walker should stop when the walk hits the previous
2362 			// commit, which saves time.
2363 			walker.setPrevCommit(last);
2364 			walker.setPrevBitmap(bitmap);
2365 
2366 			pm.update(1);
2367 		}
2368 
2369 		endPhase(pm);
2370 		return true;
2371 	}
2372 
2373 	private boolean reuseDeltaFor(ObjectToPack otp) {
2374 		int type = otp.getType();
2375 		if ((type & 2) != 0) // OBJ_TREE(2) or OBJ_BLOB(3)
2376 			return true;
2377 		if (type == OBJ_COMMIT)
2378 			return reuseDeltaCommits;
2379 		if (type == OBJ_TAG)
2380 			return false;
2381 		return true;
2382 	}
2383 
2384 	private class MutableState {
2385 		/** Estimated size of a single ObjectToPack instance. */
2386 		// Assume 64-bit pointers, since this is just an estimate.
2387 		private static final long OBJECT_TO_PACK_SIZE =
2388 				(2 * 8)               // Object header
2389 				+ (2 * 8) + (2 * 8)   // ObjectToPack fields
2390 				+ (8 + 8)             // PackedObjectInfo fields
2391 				+ 8                   // ObjectIdOwnerMap fields
2392 				+ 40                  // AnyObjectId fields
2393 				+ 8;                  // Reference in BlockList
2394 
2395 		private final long totalDeltaSearchBytes;
2396 
2397 		private volatile PackingPhase phase;
2398 
2399 		MutableState() {
2400 			phase = PackingPhase.COUNTING;
2401 			if (config.isDeltaCompress()) {
2402 				int threads = config.getThreads();
2403 				if (threads <= 0)
2404 					threads = Runtime.getRuntime().availableProcessors();
2405 				totalDeltaSearchBytes = (threads * config.getDeltaSearchMemoryLimit())
2406 						+ config.getBigFileThreshold();
2407 			} else
2408 				totalDeltaSearchBytes = 0;
2409 		}
2410 
2411 		State snapshot() {
2412 			long objCnt = 0;
2413 			BlockList<ObjectToPack>[] lists = objectsLists;
2414 			if (lists != null) {
2415 				objCnt += lists[OBJ_COMMIT].size();
2416 				objCnt += lists[OBJ_TREE].size();
2417 				objCnt += lists[OBJ_BLOB].size();
2418 				objCnt += lists[OBJ_TAG].size();
2419 				// Exclude CachedPacks.
2420 			}
2421 
2422 			long bytesUsed = OBJECT_TO_PACK_SIZE * objCnt;
2423 			PackingPhase curr = phase;
2424 			if (curr == PackingPhase.COMPRESSING)
2425 				bytesUsed += totalDeltaSearchBytes;
2426 			return new State(curr, bytesUsed);
2427 		}
2428 	}
2429 
	/**
	 * Possible states that a PackWriter can be in.
	 * <p>
	 * Exposed to monitoring callers through {@link State#getPhase()}.
	 */
	public enum PackingPhase {
		/** Counting objects phase. */
		COUNTING,

		/** Getting sizes phase. */
		GETTING_SIZES,

		/** Finding sources phase. */
		FINDING_SOURCES,

		/** Compressing objects phase. */
		COMPRESSING,

		/** Writing objects phase. */
		WRITING,

		/** Building bitmaps phase. */
		BUILDING_BITMAPS;
	}
2450 
2451 	/** Summary of the current state of a PackWriter. */
2452 	public class State {
2453 		private final PackingPhase phase;
2454 
2455 		private final long bytesUsed;
2456 
2457 		State(PackingPhase phase, long bytesUsed) {
2458 			this.phase = phase;
2459 			this.bytesUsed = bytesUsed;
2460 		}
2461 
2462 		/** @return the PackConfig used to build the writer. */
2463 		public PackConfig getConfig() {
2464 			return config;
2465 		}
2466 
2467 		/** @return the current phase of the writer. */
2468 		public PackingPhase getPhase() {
2469 			return phase;
2470 		}
2471 
2472 		/** @return an estimate of the total memory used by the writer. */
2473 		public long estimateBytesUsed() {
2474 			return bytesUsed;
2475 		}
2476 
2477 		@SuppressWarnings("nls")
2478 		@Override
2479 		public String toString() {
2480 			return "PackWriter.State[" + phase + ", memory=" + bytesUsed + "]";
2481 		}
2482 	}
2483 
2484 	/**
2485 	 * Configuration related to the packfile URI feature.
2486 	 *
2487 	 * @since 5.5
2488 	 */
2489 	public static class PackfileUriConfig {
2490 		@NonNull
2491 		private final PacketLineOut pckOut;
2492 
2493 		@NonNull
2494 		private final Collection<String> protocolsSupported;
2495 
2496 		@NonNull
2497 		private final CachedPackUriProvider cachedPackUriProvider;
2498 
2499 		/**
2500 		 * @param pckOut where to write "packfile-uri" lines to (should
2501 		 *     output to the same stream as the one passed to
2502 		 *     PackWriter#writePack)
2503 		 * @param protocolsSupported list of protocols supported (e.g. "https")
2504 		 * @param cachedPackUriProvider provider of URIs corresponding
2505 		 *     to cached packs
2506 		 * @since 5.5
2507 		 */
2508 		public PackfileUriConfig(@NonNull PacketLineOut pckOut,
2509 				@NonNull Collection<String> protocolsSupported,
2510 				@NonNull CachedPackUriProvider cachedPackUriProvider) {
2511 			this.pckOut = pckOut;
2512 			this.protocolsSupported = protocolsSupported;
2513 			this.cachedPackUriProvider = cachedPackUriProvider;
2514 		}
2515 	}
2516 }