View Javadoc
1   /*
2    * Copyright (C) 2008-2010, Google Inc.
3    * Copyright (C) 2008, Marek Zawirski <marek.zawirski@gmail.com>
4    * and other copyright owners as documented in the project's IP log.
5    *
6    * This program and the accompanying materials are made available
7    * under the terms of the Eclipse Distribution License v1.0 which
8    * accompanies this distribution, is reproduced below, and is
9    * available at http://www.eclipse.org/org/documents/edl-v10.php
10   *
11   * All rights reserved.
12   *
13   * Redistribution and use in source and binary forms, with or
14   * without modification, are permitted provided that the following
15   * conditions are met:
16   *
17   * - Redistributions of source code must retain the above copyright
18   *   notice, this list of conditions and the following disclaimer.
19   *
20   * - Redistributions in binary form must reproduce the above
21   *   copyright notice, this list of conditions and the following
22   *   disclaimer in the documentation and/or other materials provided
23   *   with the distribution.
24   *
25   * - Neither the name of the Eclipse Foundation, Inc. nor the
26   *   names of its contributors may be used to endorse or promote
27   *   products derived from this software without specific prior
28   *   written permission.
29   *
30   * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
31   * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
32   * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
33   * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34   * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
35   * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36   * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
37   * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
38   * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
39   * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
40   * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41   * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
42   * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43   */
44  
45  package org.eclipse.jgit.internal.storage.pack;
46  
47  import static java.util.Objects.requireNonNull;
48  import static org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation.PACK_DELTA;
49  import static org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation.PACK_WHOLE;
50  import static org.eclipse.jgit.lib.Constants.OBJECT_ID_LENGTH;
51  import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
52  import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT;
53  import static org.eclipse.jgit.lib.Constants.OBJ_TAG;
54  import static org.eclipse.jgit.lib.Constants.OBJ_TREE;
55  
56  import java.io.IOException;
57  import java.io.OutputStream;
58  import java.lang.ref.WeakReference;
59  import java.security.MessageDigest;
60  import java.text.MessageFormat;
61  import java.util.ArrayList;
62  import java.util.Arrays;
63  import java.util.Collection;
64  import java.util.Collections;
65  import java.util.HashMap;
66  import java.util.HashSet;
67  import java.util.Iterator;
68  import java.util.List;
69  import java.util.Map;
70  import java.util.NoSuchElementException;
71  import java.util.Set;
72  import java.util.concurrent.ConcurrentHashMap;
73  import java.util.concurrent.ExecutionException;
74  import java.util.concurrent.Executor;
75  import java.util.concurrent.ExecutorService;
76  import java.util.concurrent.Executors;
77  import java.util.concurrent.Future;
78  import java.util.concurrent.TimeUnit;
79  import java.util.zip.CRC32;
80  import java.util.zip.CheckedOutputStream;
81  import java.util.zip.Deflater;
82  import java.util.zip.DeflaterOutputStream;
83  
84  import org.eclipse.jgit.annotations.NonNull;
85  import org.eclipse.jgit.annotations.Nullable;
86  import org.eclipse.jgit.errors.CorruptObjectException;
87  import org.eclipse.jgit.errors.IncorrectObjectTypeException;
88  import org.eclipse.jgit.errors.LargeObjectException;
89  import org.eclipse.jgit.errors.MissingObjectException;
90  import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
91  import org.eclipse.jgit.internal.JGitText;
92  import org.eclipse.jgit.internal.storage.file.PackBitmapIndexBuilder;
93  import org.eclipse.jgit.internal.storage.file.PackBitmapIndexWriterV1;
94  import org.eclipse.jgit.internal.storage.file.PackIndexWriter;
95  import org.eclipse.jgit.lib.AnyObjectId;
96  import org.eclipse.jgit.lib.AsyncObjectSizeQueue;
97  import org.eclipse.jgit.lib.BatchingProgressMonitor;
98  import org.eclipse.jgit.lib.BitmapIndex;
99  import org.eclipse.jgit.lib.BitmapIndex.BitmapBuilder;
100 import org.eclipse.jgit.lib.BitmapObject;
101 import org.eclipse.jgit.lib.Constants;
102 import org.eclipse.jgit.lib.NullProgressMonitor;
103 import org.eclipse.jgit.lib.ObjectId;
104 import org.eclipse.jgit.lib.ObjectIdOwnerMap;
105 import org.eclipse.jgit.lib.ObjectIdSet;
106 import org.eclipse.jgit.lib.ObjectLoader;
107 import org.eclipse.jgit.lib.ObjectReader;
108 import org.eclipse.jgit.lib.ProgressMonitor;
109 import org.eclipse.jgit.lib.Repository;
110 import org.eclipse.jgit.lib.ThreadSafeProgressMonitor;
111 import org.eclipse.jgit.revwalk.AsyncRevObjectQueue;
112 import org.eclipse.jgit.revwalk.BitmapWalker;
113 import org.eclipse.jgit.revwalk.DepthWalk;
114 import org.eclipse.jgit.revwalk.ObjectWalk;
115 import org.eclipse.jgit.revwalk.RevCommit;
116 import org.eclipse.jgit.revwalk.RevFlag;
117 import org.eclipse.jgit.revwalk.RevObject;
118 import org.eclipse.jgit.revwalk.RevSort;
119 import org.eclipse.jgit.revwalk.RevTag;
120 import org.eclipse.jgit.revwalk.RevTree;
121 import org.eclipse.jgit.storage.pack.PackConfig;
122 import org.eclipse.jgit.storage.pack.PackStatistics;
123 import org.eclipse.jgit.transport.FilterSpec;
124 import org.eclipse.jgit.transport.ObjectCountCallback;
125 import org.eclipse.jgit.transport.PacketLineOut;
126 import org.eclipse.jgit.transport.WriteAbortedException;
127 import org.eclipse.jgit.util.BlockList;
128 import org.eclipse.jgit.util.TemporaryBuffer;
129 
130 /**
131  * <p>
 * PackWriter class is responsible for generating pack files from a specified
 * set of objects in a repository. This implementation produces pack files in
 * format version 2.
135  * </p>
136  * <p>
137  * Source of objects may be specified in two ways:
138  * <ul>
139  * <li>(usually) by providing sets of interesting and uninteresting objects in
140  * repository - all interesting objects and their ancestors except uninteresting
141  * objects and their ancestors will be included in pack, or</li>
142  * <li>by providing iterator of {@link org.eclipse.jgit.revwalk.RevObject}
143  * specifying exact list and order of objects in pack</li>
144  * </ul>
145  * <p>
146  * Typical usage consists of creating an instance, configuring options,
147  * preparing the list of objects by calling {@link #preparePack(Iterator)} or
148  * {@link #preparePack(ProgressMonitor, Set, Set)}, and streaming with
149  * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}. If the
150  * pack is being stored as a file the matching index can be written out after
151  * writing the pack by {@link #writeIndex(OutputStream)}. An optional bitmap
152  * index can be made by calling {@link #prepareBitmapIndex(ProgressMonitor)}
153  * followed by {@link #writeBitmapIndex(OutputStream)}.
154  * </p>
155  * <p>
 * The class provides a set of configurable options and
 * {@link org.eclipse.jgit.lib.ProgressMonitor} support, as operations may take
 * a long time for big repositories. A delta search algorithm is <b>NOT
 * IMPLEMENTED</b> yet - this implementation relies only on reuse of existing
 * deltas and objects.
161  * </p>
162  * <p>
163  * This class is not thread safe. It is intended to be used in one thread as a
164  * single pass to produce one pack. Invoking methods multiple times or out of
165  * order is not supported as internal data structures are destroyed during
166  * certain phases to save memory when packing large repositories.
167  * </p>
168  */
169 public class PackWriter implements AutoCloseable {
	/** Pack stream format version produced by this writer (pack v2). */
	private static final int PACK_VERSION_GENERATED = 2;

	/** Empty set of objects for {@code preparePack()}. */
	public static final Set<ObjectId> NONE = Collections.emptySet();

	// Registry of live writers. Keys are weak references so a writer that
	// is garbage collected without being released does not accumulate here;
	// the Boolean value is a placeholder, only the key set is used.
	private static final Map<WeakReference<PackWriter>, Boolean> instances =
			new ConcurrentHashMap<>();
177 
178 	private static final Iterable<PackWriter> instancesIterable = new Iterable<PackWriter>() {
179 		@Override
180 		public Iterator<PackWriter> iterator() {
181 			return new Iterator<PackWriter>() {
182 				private final Iterator<WeakReference<PackWriter>> it =
183 						instances.keySet().iterator();
184 				private PackWriter next;
185 
186 				@Override
187 				public boolean hasNext() {
188 					if (next != null)
189 						return true;
190 					while (it.hasNext()) {
191 						WeakReference<PackWriter> ref = it.next();
192 						next = ref.get();
193 						if (next != null)
194 							return true;
195 						it.remove();
196 					}
197 					return false;
198 				}
199 
200 				@Override
201 				public PackWriter next() {
202 					if (hasNext()) {
203 						PackWriter result = next;
204 						next = null;
205 						return result;
206 					}
207 					throw new NoSuchElementException();
208 				}
209 
210 				@Override
211 				public void remove() {
212 					throw new UnsupportedOperationException();
213 				}
214 			};
215 		}
216 	};
217 
	/**
	 * Get all allocated, non-released {@link PackWriter} instances.
	 *
	 * @return a live iterable over all allocated, non-released
	 *         {@link PackWriter} instances; stale entries are pruned lazily
	 *         during iteration.
	 */
	public static Iterable<PackWriter> getInstances() {
		return instancesIterable;
	}
226 
	// Objects queued for packing, indexed by object type code
	// (OBJ_COMMIT..OBJ_TAG). Slot 0 is unused: 0 is not a valid type code.
	@SuppressWarnings("unchecked")
	BlockList<ObjectToPack> objectsLists[] = new BlockList[OBJ_TAG + 1];
	{
		objectsLists[OBJ_COMMIT] = new BlockList<>();
		objectsLists[OBJ_TREE] = new BlockList<>();
		objectsLists[OBJ_BLOB] = new BlockList<>();
		objectsLists[OBJ_TAG] = new BlockList<>();
	}
235 
	// Id -> ObjectToPack lookup for every object queued in objectsLists.
	private ObjectIdOwnerMap<ObjectToPack> objectsMap = new ObjectIdOwnerMap<>();

	// edge objects for thin packs
	private List<ObjectToPack> edgeObjects = new BlockList<>();

	// Objects the client is known to have already.
	private BitmapBuilder haveObjects;

	// Cached packs reused for this request; see isUseCachedPacks().
	private List<CachedPack> cachedPacks = new ArrayList<>(2);

	// Targets of annotated tags, hoisted earlier; see setTagTargets().
	private Set<ObjectId> tagTargets = NONE;

	// Objects that must not be chosen for bitmap commit selection.
	private Set<? extends ObjectId> excludeFromBitmapSelection = NONE;

	// Object sets whose contents are omitted from the output; null if none.
	// Grown one element at a time by excludeObjects().
	private ObjectIdSet[] excludeInPacks;

	// Most recently used exclusion set (single-entry MRU cache).
	private ObjectIdSet excludeInPackLast;

	// Deflater owned by this writer; presumably created lazily and reused
	// across objects — TODO confirm against deflater() elsewhere in file.
	private Deflater myDeflater;

	private final ObjectReader reader;

	/** {@link #reader} recast to the reuse interface, if it supports it. */
	private final ObjectReuseAsIs reuseSupport;

	final PackConfig config;

	// Statistics accumulated while preparing and writing the pack; may be
	// shared with the caller (see the constructor's statsAccumulator).
	private final PackStatistics.Accumulator stats;

	// Mutable progress/phase state; declared elsewhere in this file.
	private final MutableState state;

	// Weak key under which this writer is registered in `instances`.
	private final WeakReference<PackWriter> selfRef;

	// Per-object-type statistics accumulator for the current write phase.
	private PackStatistics.ObjectType.Accumulator typeStats;

	// Objects sorted by name; built on demand (e.g. for index writing).
	private List<ObjectToPack> sortedByName;

	// Pack checksum recorded after the pack stream is written.
	private byte packcsum[];

	// True to encode delta bases as offsets instead of object ids.
	private boolean deltaBaseAsOffset;

	// True to reuse deltas already stored in existing packs.
	private boolean reuseDeltas;

	// True to also reuse commits stored as deltas; off by default.
	private boolean reuseDeltaCommits;

	// True to validate reused objects while copying them.
	private boolean reuseValidate;

	// True to produce a thin pack (delta bases may be outside the pack).
	private boolean thin;

	// True to append matching cached packs instead of re-enumerating them.
	private boolean useCachedPacks;

	// True to use bitmap indexes, when available, for object enumeration.
	private boolean useBitmaps;

	// True to silently skip uninteresting objects missing locally.
	private boolean ignoreMissingUninteresting = true;

	// Set when the current object list may need pruning — confirm usage
	// elsewhere in this file.
	private boolean pruneCurrentObjectList;

	// True when producing a pack for a shallow clone/fetch.
	private boolean shallowPack;

	// True when the enumeration path permits building a bitmap index.
	private boolean canBuildBitmaps;

	// True to suppress creation of the pack index; see setIndexDisabled().
	private boolean indexDisabled;

	// Maximum history depth for a shallow pack; see setShallowPack().
	private int depth;

	// Commits formerly shallow on the client, now being extended.
	private Collection<? extends ObjectId> unshallowObjects;

	// Builder for the optional bitmap index; null unless prepared.
	private PackBitmapIndexBuilder writeBitmaps;

	// CRC-32 accumulator used while writing objects.
	private CRC32 crc32;

	// Optional callback notified of the object count; see setObjectCountCallback().
	private ObjectCountCallback callback;

	// Object filter requested by the client; NO_FILTER keeps everything.
	private FilterSpec filterSpec = FilterSpec.NO_FILTER;

	// Configuration for offloading cached packs via packfile URIs; null
	// unless setPackfileUriConfig() was called.
	private PackfileUriConfig packfileUriConfig;
312 
	/**
	 * Create writer for specified repository.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 *
	 * @param repo
	 *            repository where objects are stored.
	 */
	public PackWriter(Repository repo) {
		// Delegates with a fresh reader obtained from the repository.
		this(repo, repo.newObjectReader());
	}
325 
	/**
	 * Create a writer to load objects from the specified reader.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 *
	 * @param reader
	 *            reader to read from the repository with.
	 */
	public PackWriter(ObjectReader reader) {
		// Uses a default PackConfig (no repository settings applied).
		this(new PackConfig(), reader);
	}
338 
	/**
	 * Create writer for specified repository.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 *
	 * @param repo
	 *            repository where objects are stored.
	 * @param reader
	 *            reader to read from the repository with.
	 */
	public PackWriter(Repository repo, ObjectReader reader) {
		// Pack configuration is derived from the repository's settings.
		this(new PackConfig(repo), reader);
	}
353 
	/**
	 * Create writer with a specified configuration.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 *
	 * @param config
	 *            configuration for the pack writer.
	 * @param reader
	 *            reader to read from the repository with.
	 */
	public PackWriter(PackConfig config, ObjectReader reader) {
		// No external statistics accumulator; a private one is created.
		this(config, reader, null);
	}
368 
369 	/**
370 	 * Create writer with a specified configuration.
371 	 * <p>
372 	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
373 	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
374 	 *
375 	 * @param config
376 	 *            configuration for the pack writer.
377 	 * @param reader
378 	 *            reader to read from the repository with.
379 	 * @param statsAccumulator
380 	 *            accumulator for statics
381 	 */
382 	public PackWriter(PackConfig config, final ObjectReader reader,
383 			@Nullable PackStatistics.Accumulator statsAccumulator) {
384 		this.config = config;
385 		this.reader = reader;
386 		if (reader instanceof ObjectReuseAsIs)
387 			reuseSupport = ((ObjectReuseAsIs) reader);
388 		else
389 			reuseSupport = null;
390 
391 		deltaBaseAsOffset = config.isDeltaBaseAsOffset();
392 		reuseDeltas = config.isReuseDeltas();
393 		reuseValidate = true; // be paranoid by default
394 		stats = statsAccumulator != null ? statsAccumulator
395 				: new PackStatistics.Accumulator();
396 		state = new MutableState();
397 		selfRef = new WeakReference<>(this);
398 		instances.put(selfRef, Boolean.TRUE);
399 	}
400 
	/**
	 * Set the {@code ObjectCountCallback}.
	 * <p>
	 * It should be set before calling
	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}.
	 * Replaces any previously configured callback.
	 *
	 * @param callback
	 *            the callback to set
	 * @return this object for chaining.
	 */
	public PackWriter setObjectCountCallback(ObjectCountCallback callback) {
		this.callback = callback;
		return this;
	}
415 
	/**
	 * Records the set of shallow commits in the client.
	 *
	 * @param clientShallowCommits
	 *            the shallow commits in the client
	 */
	public void setClientShallowCommits(Set<ObjectId> clientShallowCommits) {
		// Defensive copy: later mutation of the caller's set must not
		// affect the recorded statistics.
		stats.clientShallowCommits = Collections
				.unmodifiableSet(new HashSet<>(clientShallowCommits));
	}
426 
	/**
	 * Check whether the writer stores a delta base as an offset (new style,
	 * reducing pack size) or as an object id (legacy style, compatible with
	 * old readers).
	 *
	 * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
	 *
	 * @return true if delta base is stored as an offset; false if it is stored
	 *         as an object id.
	 */
	public boolean isDeltaBaseAsOffset() {
		return deltaBaseAsOffset;
	}
440 
	/**
	 * Set writer delta base format. Delta base can be written as an offset in
	 * a pack file (new approach reducing file size) or as an object id (legacy
	 * approach, compatible with old readers).
	 *
	 * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
	 *
	 * @param deltaBaseAsOffset
	 *            boolean indicating whether delta base can be stored as an
	 *            offset.
	 */
	public void setDeltaBaseAsOffset(boolean deltaBaseAsOffset) {
		this.deltaBaseAsOffset = deltaBaseAsOffset;
	}
455 
	/**
	 * Check if the writer will reuse commits that are already stored as
	 * deltas.
	 *
	 * @return true if the writer would reuse commits stored as deltas,
	 *         assuming delta reuse is already enabled.
	 */
	public boolean isReuseDeltaCommits() {
		return reuseDeltaCommits;
	}
465 
	/**
	 * Set the writer to reuse existing delta versions of commits.
	 *
	 * @param reuse
	 *            if true, the writer will reuse any commits stored as deltas.
	 *            By default the writer does not reuse delta commits.
	 */
	public void setReuseDeltaCommits(boolean reuse) {
		reuseDeltaCommits = reuse;
	}
476 
	/**
	 * Check if the writer validates objects before copying them.
	 *
	 * @return true if validation is enabled; false if the reader will handle
	 *         object validation as a side-effect of it consuming the output.
	 */
	public boolean isReuseValidatingObjects() {
		return reuseValidate;
	}
486 
	/**
	 * Enable (or disable) object validation during packing.
	 *
	 * @param validate
	 *            if true the pack writer will validate an object before it is
	 *            put into the output. This additional validation work may be
	 *            necessary to avoid propagating corruption from one local
	 *            pack file to another local pack file.
	 */
	public void setReuseValidatingObjects(boolean validate) {
		reuseValidate = validate;
	}
499 
	/**
	 * Whether this writer is producing a thin pack.
	 *
	 * @return true if this writer is producing a thin pack.
	 */
	public boolean isThin() {
		return thin;
	}
508 
	/**
	 * Whether writer may pack objects with delta base object not within set
	 * of objects to pack.
	 *
	 * @param packthin
	 *            a boolean indicating whether writer may pack objects with
	 *            delta base object not within set of objects to pack, but
	 *            belonging to the other party's repository
	 *            (uninteresting/boundary) as determined by set; this kind of
	 *            pack is used only for transport; true - to produce thin
	 *            pack, false - otherwise.
	 */
	public void setThin(boolean packthin) {
		thin = packthin;
	}
523 
	/**
	 * Whether to reuse cached packs.
	 *
	 * @return {@code true} to reuse cached packs. If true index creation
	 *         isn't available.
	 */
	public boolean isUseCachedPacks() {
		return useCachedPacks;
	}
533 
	/**
	 * Whether to use cached packs.
	 *
	 * @param useCached
	 *            if set to {@code true} and a cached pack is present, it will
	 *            be appended onto the end of a thin-pack, reducing the amount
	 *            of working set space and CPU used by PackWriter. Enabling
	 *            this feature prevents PackWriter from creating an index for
	 *            the newly created pack, so it is only suitable for writing
	 *            to a network client, where the client will make the index.
	 */
	public void setUseCachedPacks(boolean useCached) {
		useCachedPacks = useCached;
	}
548 
	/**
	 * Whether to use bitmaps.
	 *
	 * @return {@code true} to use bitmaps for ObjectWalks, if available.
	 */
	public boolean isUseBitmaps() {
		return useBitmaps;
	}
557 
	/**
	 * Whether to use bitmaps.
	 *
	 * @param useBitmaps
	 *            if set to true, bitmaps will be used when preparing a pack.
	 */
	public void setUseBitmaps(boolean useBitmaps) {
		this.useBitmaps = useBitmaps;
	}
567 
568 	/**
569 	 * Whether the index file cannot be created by this PackWriter.
570 	 *
571 	 * @return {@code true} if the index file cannot be created by this
572 	 *         PackWriter.
573 	 */
574 	public boolean isIndexDisabled() {
575 		return indexDisabled || !cachedPacks.isEmpty();
576 	}
577 
	/**
	 * Whether to disable creation of the index file.
	 *
	 * @param noIndex
	 *            {@code true} to disable creation of the index file.
	 */
	public void setIndexDisabled(boolean noIndex) {
		this.indexDisabled = noIndex;
	}
587 
	/**
	 * Whether to ignore missing uninteresting objects.
	 *
	 * @return {@code true} to ignore objects that are uninteresting and also
	 *         not found on local disk; false to throw a
	 *         {@link org.eclipse.jgit.errors.MissingObjectException} out of
	 *         {@link #preparePack(ProgressMonitor, Set, Set)} if an
	 *         uninteresting object is not in the source repository. By
	 *         default, true, permitting gracefully ignoring of uninteresting
	 *         objects.
	 */
	public boolean isIgnoreMissingUninteresting() {
		return ignoreMissingUninteresting;
	}
601 
	/**
	 * Whether writer should ignore non existing uninteresting objects.
	 *
	 * @param ignore
	 *            {@code true} if writer should ignore non existing
	 *            uninteresting objects during construction of the set of
	 *            objects to pack; false otherwise - non existing
	 *            uninteresting objects may cause
	 *            {@link org.eclipse.jgit.errors.MissingObjectException}
	 */
	public void setIgnoreMissingUninteresting(boolean ignore) {
		ignoreMissingUninteresting = ignore;
	}
614 
	/**
	 * Set the tag targets that should be hoisted earlier during packing.
	 * <p>
	 * Callers may put objects into this set before invoking any of the
	 * preparePack methods to influence where an annotated tag's target is
	 * stored within the resulting pack. Typically these will be clustered
	 * together, and hoisted earlier in the file even if they are ancient
	 * revisions, allowing readers to find tag targets with better locality.
	 *
	 * @param objects
	 *            objects that annotated tags point at.
	 */
	public void setTagTargets(Set<ObjectId> objects) {
		// The caller's set is stored directly; it is not copied.
		tagTargets = objects;
	}
630 
	/**
	 * Configure this pack for a shallow clone.
	 *
	 * @param depth
	 *            maximum depth of history to return. 1 means return only the
	 *            "wants".
	 * @param unshallow
	 *            objects which used to be shallow on the client, but are
	 *            being extended as part of this fetch
	 */
	public void setShallowPack(int depth,
			Collection<? extends ObjectId> unshallow) {
		// Record the limits used by the depth-restricted enumeration.
		this.shallowPack = true;
		this.depth = depth;
		this.unshallowObjects = unshallow;
	}
647 
	/**
	 * Set the filter that decides which objects this writer includes.
	 *
	 * @param filter the filter which indicates what and what not this writer
	 *            should include
	 */
	public void setFilterSpec(@NonNull FilterSpec filter) {
		filterSpec = requireNonNull(filter);
	}
655 
	/**
	 * Set the configuration for offloading cached packs as packfile URIs.
	 *
	 * @param config configuration related to packfile URIs
	 * @since 5.5
	 */
	public void setPackfileUriConfig(PackfileUriConfig config) {
		packfileUriConfig = config;
	}
663 
664 	/**
665 	 * Returns objects number in a pack file that was created by this writer.
666 	 *
667 	 * @return number of objects in pack.
668 	 * @throws java.io.IOException
669 	 *             a cached pack cannot supply its object count.
670 	 */
671 	public long getObjectCount() throws IOException {
672 		if (stats.totalObjects == 0) {
673 			long objCnt = 0;
674 
675 			objCnt += objectsLists[OBJ_COMMIT].size();
676 			objCnt += objectsLists[OBJ_TREE].size();
677 			objCnt += objectsLists[OBJ_BLOB].size();
678 			objCnt += objectsLists[OBJ_TAG].size();
679 
680 			for (CachedPack pack : cachedPacks)
681 				objCnt += pack.getObjectCount();
682 			return objCnt;
683 		}
684 		return stats.totalObjects;
685 	}
686 
	/**
	 * Count the objects that will be streamed inline rather than offloaded
	 * as packfile URIs.
	 * <p>
	 * Cached packs for which the URI provider can supply pack info are
	 * excluded from the count. NOTE(review): dereferences
	 * {@link #packfileUriConfig} without a null check — assumes it was set
	 * via {@link #setPackfileUriConfig} before this is called; confirm
	 * callers guarantee that.
	 *
	 * @return number of objects written directly into the pack stream.
	 * @throws IOException
	 *             the URI provider could not supply information about a
	 *             cached pack.
	 */
	private long getUnoffloadedObjectCount() throws IOException {
		long objCnt = 0;

		objCnt += objectsLists[OBJ_COMMIT].size();
		objCnt += objectsLists[OBJ_TREE].size();
		objCnt += objectsLists[OBJ_BLOB].size();
		objCnt += objectsLists[OBJ_TAG].size();

		for (CachedPack pack : cachedPacks) {
			CachedPackUriProvider.PackInfo packInfo =
				packfileUriConfig.cachedPackUriProvider.getInfo(
					pack, packfileUriConfig.protocolsSupported);
			if (packInfo == null) {
				// No URI available: the pack's objects go inline.
				objCnt += pack.getObjectCount();
			}
		}

		return objCnt;
	}
706 
707 	/**
708 	 * Returns the object ids in the pack file that was created by this writer.
709 	 * <p>
710 	 * This method can only be invoked after
711 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)} has
712 	 * been invoked and completed successfully.
713 	 *
714 	 * @return set of objects in pack.
715 	 * @throws java.io.IOException
716 	 *             a cached pack cannot supply its object ids.
717 	 */
718 	public ObjectIdOwnerMap<ObjectIdOwnerMap.Entry> getObjectSet()
719 			throws IOException {
720 		if (!cachedPacks.isEmpty())
721 			throw new IOException(
722 					JGitText.get().cachedPacksPreventsListingObjects);
723 
724 		if (writeBitmaps != null) {
725 			return writeBitmaps.getObjectSet();
726 		}
727 
728 		ObjectIdOwnerMap<ObjectIdOwnerMap.Entry> r = new ObjectIdOwnerMap<>();
729 		for (BlockList<ObjectToPack> objList : objectsLists) {
730 			if (objList != null) {
731 				for (ObjectToPack otp : objList)
732 					r.add(new ObjectIdOwnerMap.Entry(otp) {
733 						// A new entry that copies the ObjectId
734 					});
735 			}
736 		}
737 		return r;
738 	}
739 
740 	/**
741 	 * Add a pack index whose contents should be excluded from the result.
742 	 *
743 	 * @param idx
744 	 *            objects in this index will not be in the output pack.
745 	 */
746 	public void excludeObjects(ObjectIdSet idx) {
747 		if (excludeInPacks == null) {
748 			excludeInPacks = new ObjectIdSet[] { idx };
749 			excludeInPackLast = idx;
750 		} else {
751 			int cnt = excludeInPacks.length;
752 			ObjectIdSet[] newList = new ObjectIdSet[cnt + 1];
753 			System.arraycopy(excludeInPacks, 0, newList, 0, cnt);
754 			newList[cnt] = idx;
755 			excludeInPacks = newList;
756 		}
757 	}
758 
	/**
	 * Prepare the list of objects to be written to the pack stream.
	 * <p>
	 * Iterator <b>exactly</b> determines which objects are included in a pack
	 * and order they appear in pack (except that objects order by type is not
	 * needed at input). This order should conform general rules of ordering
	 * objects in git - by recency and path (type and delta-base first is
	 * internally secured) and responsibility for guaranteeing this order is
	 * on a caller side. Iterator must return each id of object to write
	 * exactly once.
	 * </p>
	 *
	 * @param objectsSource
	 *            iterator of object to store in a pack; order of objects
	 *            within each type is important, ordering by type is not
	 *            needed; allowed types for objects are
	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_COMMIT},
	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_TREE},
	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_BLOB} and
	 *            {@link org.eclipse.jgit.lib.Constants#OBJ_TAG}; objects
	 *            returned by iterator may be later reused by caller as
	 *            object id and type are internally copied in each iteration.
	 * @throws java.io.IOException
	 *             when some I/O problem occur during reading objects.
	 */
	public void preparePack(@NonNull Iterator<RevObject> objectsSource)
			throws IOException {
		while (objectsSource.hasNext()) {
			// addObject copies the id and type, so the caller may reuse
			// the RevObject instances afterwards.
			addObject(objectsSource.next());
		}
	}
790 
	/**
	 * Prepare the list of objects to be written to the pack stream.
	 * <p>
	 * Basing on these 2 sets, another set of objects to put in a pack file is
	 * created: this set consists of all objects reachable (ancestors) from
	 * interesting objects, except uninteresting objects and their ancestors.
	 * This method uses class {@link org.eclipse.jgit.revwalk.ObjectWalk}
	 * extensively to find out that appropriate set of output objects and
	 * their optimal order in output pack. Order is consistent with general
	 * git in-pack rules: sort by object type, recency, path and delta-base
	 * first.
	 * </p>
	 *
	 * @param countingMonitor
	 *            progress during object enumeration.
	 * @param want
	 *            collection of objects to be marked as interesting (start
	 *            points of graph traversal). Must not be {@code null}.
	 * @param have
	 *            collection of objects to be marked as uninteresting (end
	 *            points of graph traversal). Pass {@link #NONE} if all
	 *            objects reachable from {@code want} are desired, such as
	 *            when serving a clone.
	 * @throws java.io.IOException
	 *             when some I/O problem occur during reading objects.
	 */
	public void preparePack(ProgressMonitor countingMonitor,
			@NonNull Set<? extends ObjectId> want,
			@NonNull Set<? extends ObjectId> have) throws IOException {
		// No shallow boundary and no bitmap-selection exclusions.
		preparePack(countingMonitor, want, have, NONE, NONE);
	}
821 
	/**
	 * Prepare the list of objects to be written to the pack stream.
	 * <p>
	 * Like {@link #preparePack(ProgressMonitor, Set, Set)} but also allows
	 * specifying commits that should not be walked past ("shallow" commits).
	 * The caller is responsible for filtering out commits that should not be
	 * shallow any more ("unshallow" commits as in {@link #setShallowPack})
	 * from the shallow set.
	 *
	 * @param countingMonitor
	 *            progress during object enumeration.
	 * @param want
	 *            objects of interest, ancestors of which will be included in
	 *            the pack. Must not be {@code null}.
	 * @param have
	 *            objects whose ancestors (up to and including {@code shallow}
	 *            commits) do not need to be included in the pack because
	 *            they are already available from elsewhere. Must not be
	 *            {@code null}.
	 * @param shallow
	 *            commits indicating the boundary of the history marked with
	 *            {@code have}. Shallow commits have parents but those parents
	 *            are considered not to be already available. Parents of
	 *            {@code shallow} commits and earlier generations will be
	 *            included in the pack if requested by {@code want}. Must not
	 *            be {@code null}.
	 * @throws java.io.IOException
	 *             an I/O problem occurred while reading objects.
	 */
	public void preparePack(ProgressMonitor countingMonitor,
			@NonNull Set<? extends ObjectId> want,
			@NonNull Set<? extends ObjectId> have,
			@NonNull Set<? extends ObjectId> shallow) throws IOException {
		// No objects are excluded from bitmap commit selection.
		preparePack(countingMonitor, want, have, shallow, NONE);
	}
857 
858 	/**
859 	 * Prepare the list of objects to be written to the pack stream.
860 	 * <p>
861 	 * Like {@link #preparePack(ProgressMonitor, Set, Set)} but also allows
862 	 * specifying commits that should not be walked past ("shallow" commits).
863 	 * The caller is responsible for filtering out commits that should not be
864 	 * shallow any more ("unshallow" commits as in {@link #setShallowPack}) from
865 	 * the shallow set.
866 	 *
867 	 * @param countingMonitor
868 	 *            progress during object enumeration.
869 	 * @param want
870 	 *            objects of interest, ancestors of which will be included in
871 	 *            the pack. Must not be {@code null}.
872 	 * @param have
873 	 *            objects whose ancestors (up to and including {@code shallow}
874 	 *            commits) do not need to be included in the pack because they
875 	 *            are already available from elsewhere. Must not be
876 	 *            {@code null}.
877 	 * @param shallow
878 	 *            commits indicating the boundary of the history marked with
879 	 *            {@code have}. Shallow commits have parents but those parents
880 	 *            are considered not to be already available. Parents of
881 	 *            {@code shallow} commits and earlier generations will be
882 	 *            included in the pack if requested by {@code want}. Must not be
883 	 *            {@code null}.
884 	 * @param noBitmaps
885 	 *            collection of objects to be excluded from bitmap commit
886 	 *            selection.
887 	 * @throws java.io.IOException
888 	 *             an I/O problem occurred while reading objects.
889 	 */
890 	public void preparePack(ProgressMonitor countingMonitor,
891 			@NonNull Set<? extends ObjectId> want,
892 			@NonNull Set<? extends ObjectId> have,
893 			@NonNull Set<? extends ObjectId> shallow,
894 			@NonNull Set<? extends ObjectId> noBitmaps) throws IOException {
895 		try (ObjectWalk ow = getObjectWalk()) {
896 			ow.assumeShallow(shallow);
897 			preparePack(countingMonitor, ow, want, have, noBitmaps);
898 		}
899 	}
900 
901 	private ObjectWalk getObjectWalk() {
902 		return shallowPack ? new DepthWalk.ObjectWalk(reader, depth - 1)
903 				: new ObjectWalk(reader);
904 	}
905 
906 	/**
907 	 * A visitation policy which uses the depth at which the object is seen to
908 	 * decide if re-traversal is necessary. In particular, if the object has
909 	 * already been visited at this depth or shallower, it is not necessary to
910 	 * re-visit at this depth.
911 	 */
912 	private static class DepthAwareVisitationPolicy
913 			implements ObjectWalk.VisitationPolicy {
914 		private final Map<ObjectId, Integer> lowestDepthVisited = new HashMap<>();
915 
916 		private final ObjectWalk walk;
917 
918 		DepthAwareVisitationPolicy(ObjectWalk walk) {
919 			this.walk = requireNonNull(walk);
920 		}
921 
922 		@Override
923 		public boolean shouldVisit(RevObject o) {
924 			Integer lastDepth = lowestDepthVisited.get(o);
925 			if (lastDepth == null) {
926 				return true;
927 			}
928 			return walk.getTreeDepth() < lastDepth.intValue();
929 		}
930 
931 		@Override
932 		public void visited(RevObject o) {
933 			lowestDepthVisited.put(o, Integer.valueOf(walk.getTreeDepth()));
934 		}
935 	}
936 
937 	/**
938 	 * Prepare the list of objects to be written to the pack stream.
939 	 * <p>
940 	 * Basing on these 2 sets, another set of objects to put in a pack file is
941 	 * created: this set consists of all objects reachable (ancestors) from
942 	 * interesting objects, except uninteresting objects and their ancestors.
943 	 * This method uses class {@link org.eclipse.jgit.revwalk.ObjectWalk}
944 	 * extensively to find out that appropriate set of output objects and their
945 	 * optimal order in output pack. Order is consistent with general git
946 	 * in-pack rules: sort by object type, recency, path and delta-base first.
947 	 * </p>
948 	 *
949 	 * @param countingMonitor
950 	 *            progress during object enumeration.
951 	 * @param walk
952 	 *            ObjectWalk to perform enumeration.
953 	 * @param interestingObjects
954 	 *            collection of objects to be marked as interesting (start
955 	 *            points of graph traversal). Must not be {@code null}.
956 	 * @param uninterestingObjects
957 	 *            collection of objects to be marked as uninteresting (end
958 	 *            points of graph traversal). Pass {@link #NONE} if all objects
959 	 *            reachable from {@code want} are desired, such as when serving
960 	 *            a clone.
961 	 * @param noBitmaps
962 	 *            collection of objects to be excluded from bitmap commit
963 	 *            selection.
964 	 * @throws java.io.IOException
965 	 *             when some I/O problem occur during reading objects.
966 	 */
967 	public void preparePack(ProgressMonitor countingMonitor,
968 			@NonNull ObjectWalk walk,
969 			@NonNull Set<? extends ObjectId> interestingObjects,
970 			@NonNull Set<? extends ObjectId> uninterestingObjects,
971 			@NonNull Set<? extends ObjectId> noBitmaps)
972 			throws IOException {
973 		if (countingMonitor == null)
974 			countingMonitor = NullProgressMonitor.INSTANCE;
975 		if (shallowPack && !(walk instanceof DepthWalk.ObjectWalk))
976 			throw new IllegalArgumentException(
977 					JGitText.get().shallowPacksRequireDepthWalk);
978 		if (filterSpec.getTreeDepthLimit() >= 0) {
979 			walk.setVisitationPolicy(new DepthAwareVisitationPolicy(walk));
980 		}
981 		findObjectsToPack(countingMonitor, walk, interestingObjects,
982 				uninterestingObjects, noBitmaps);
983 	}
984 
985 	/**
986 	 * Determine if the pack file will contain the requested object.
987 	 *
988 	 * @param id
989 	 *            the object to test the existence of.
990 	 * @return true if the object will appear in the output pack file.
991 	 * @throws java.io.IOException
992 	 *             a cached pack cannot be examined.
993 	 */
994 	public boolean willInclude(AnyObjectId id) throws IOException {
995 		ObjectToPack obj = objectsMap.get(id);
996 		return obj != null && !obj.isEdge();
997 	}
998 
999 	/**
1000 	 * Lookup the ObjectToPack object for a given ObjectId.
1001 	 *
1002 	 * @param id
1003 	 *            the object to find in the pack.
1004 	 * @return the object we are packing, or null.
1005 	 */
1006 	public ObjectToPack get(AnyObjectId id) {
1007 		ObjectToPack obj = objectsMap.get(id);
1008 		return obj != null && !obj.isEdge() ? obj : null;
1009 	}
1010 
1011 	/**
1012 	 * Computes SHA-1 of lexicographically sorted objects ids written in this
1013 	 * pack, as used to name a pack file in repository.
1014 	 *
1015 	 * @return ObjectId representing SHA-1 name of a pack that was created.
1016 	 */
1017 	public ObjectId computeName() {
1018 		final byte[] buf = new byte[OBJECT_ID_LENGTH];
1019 		final MessageDigest md = Constants.newMessageDigest();
1020 		for (ObjectToPack otp : sortByName()) {
1021 			otp.copyRawTo(buf, 0);
1022 			md.update(buf, 0, OBJECT_ID_LENGTH);
1023 		}
1024 		return ObjectId.fromRaw(md.digest());
1025 	}
1026 
1027 	/**
1028 	 * Returns the index format version that will be written.
1029 	 * <p>
1030 	 * This method can only be invoked after
1031 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)} has
1032 	 * been invoked and completed successfully.
1033 	 *
1034 	 * @return the index format version.
1035 	 */
1036 	public int getIndexVersion() {
1037 		int indexVersion = config.getIndexVersion();
1038 		if (indexVersion <= 0) {
1039 			for (BlockList<ObjectToPack> objs : objectsLists)
1040 				indexVersion = Math.max(indexVersion,
1041 						PackIndexWriter.oldestPossibleFormat(objs));
1042 		}
1043 		return indexVersion;
1044 	}
1045 
1046 	/**
1047 	 * Create an index file to match the pack file just written.
1048 	 * <p>
1049 	 * Called after
1050 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}.
1051 	 * <p>
1052 	 * Writing an index is only required for local pack storage. Packs sent on
1053 	 * the network do not need to create an index.
1054 	 *
1055 	 * @param indexStream
1056 	 *            output for the index data. Caller is responsible for closing
1057 	 *            this stream.
1058 	 * @throws java.io.IOException
1059 	 *             the index data could not be written to the supplied stream.
1060 	 */
1061 	public void writeIndex(OutputStream indexStream) throws IOException {
1062 		if (isIndexDisabled())
1063 			throw new IOException(JGitText.get().cachedPacksPreventsIndexCreation);
1064 
1065 		long writeStart = System.currentTimeMillis();
1066 		final PackIndexWriter iw = PackIndexWriter.createVersion(
1067 				indexStream, getIndexVersion());
1068 		iw.write(sortByName(), packcsum);
1069 		stats.timeWriting += System.currentTimeMillis() - writeStart;
1070 	}
1071 
1072 	/**
1073 	 * Create a bitmap index file to match the pack file just written.
1074 	 * <p>
1075 	 * Called after {@link #prepareBitmapIndex(ProgressMonitor)}.
1076 	 *
1077 	 * @param bitmapIndexStream
1078 	 *            output for the bitmap index data. Caller is responsible for
1079 	 *            closing this stream.
1080 	 * @throws java.io.IOException
1081 	 *             the index data could not be written to the supplied stream.
1082 	 */
1083 	public void writeBitmapIndex(OutputStream bitmapIndexStream)
1084 			throws IOException {
1085 		if (writeBitmaps == null)
1086 			throw new IOException(JGitText.get().bitmapsMustBePrepared);
1087 
1088 		long writeStart = System.currentTimeMillis();
1089 		final PackBitmapIndexWriterV1ile/PackBitmapIndexWriterV1.html#PackBitmapIndexWriterV1">PackBitmapIndexWriterV1 iw = new PackBitmapIndexWriterV1(bitmapIndexStream);
1090 		iw.write(writeBitmaps, packcsum);
1091 		stats.timeWriting += System.currentTimeMillis() - writeStart;
1092 	}
1093 
1094 	private List<ObjectToPack> sortByName() {
1095 		if (sortedByName == null) {
1096 			int cnt = 0;
1097 			cnt += objectsLists[OBJ_COMMIT].size();
1098 			cnt += objectsLists[OBJ_TREE].size();
1099 			cnt += objectsLists[OBJ_BLOB].size();
1100 			cnt += objectsLists[OBJ_TAG].size();
1101 
1102 			sortedByName = new BlockList<>(cnt);
1103 			sortedByName.addAll(objectsLists[OBJ_COMMIT]);
1104 			sortedByName.addAll(objectsLists[OBJ_TREE]);
1105 			sortedByName.addAll(objectsLists[OBJ_BLOB]);
1106 			sortedByName.addAll(objectsLists[OBJ_TAG]);
1107 			Collections.sort(sortedByName);
1108 		}
1109 		return sortedByName;
1110 	}
1111 
	// Record the new phase in writer state and start a monitor task with the
	// localized task name for that phase. cnt is the expected work total
	// (truncated to int for the monitor API).
	private void beginPhase(PackingPhase phase, ProgressMonitor monitor,
			long cnt) {
		state.phase = phase;
		String task;
		switch (phase) {
		case COUNTING:
			task = JGitText.get().countingObjects;
			break;
		case GETTING_SIZES:
			task = JGitText.get().searchForSizes;
			break;
		case FINDING_SOURCES:
			task = JGitText.get().searchForReuse;
			break;
		case COMPRESSING:
			task = JGitText.get().compressingObjects;
			break;
		case WRITING:
			task = JGitText.get().writingObjects;
			break;
		case BUILDING_BITMAPS:
			task = JGitText.get().buildingBitmaps;
			break;
		default:
			// Defensive: fail loudly if a new phase is added without a label.
			throw new IllegalArgumentException(
					MessageFormat.format(JGitText.get().illegalPackingPhase, phase));
		}
		monitor.beginTask(task, (int) cnt);
	}
1141 
	// Close out the monitor task opened by beginPhase.
	private void endPhase(ProgressMonitor monitor) {
		monitor.endTask();
	}
1145 
1146 	/**
1147 	 * Write the prepared pack to the supplied stream.
1148 	 * <p>
1149 	 * Called after
1150 	 * {@link #preparePack(ProgressMonitor, ObjectWalk, Set, Set, Set)} or
1151 	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
1152 	 * <p>
1153 	 * Performs delta search if enabled and writes the pack stream.
1154 	 * <p>
1155 	 * All reused objects data checksum (Adler32/CRC32) is computed and
1156 	 * validated against existing checksum.
1157 	 *
1158 	 * @param compressMonitor
1159 	 *            progress monitor to report object compression work.
1160 	 * @param writeMonitor
1161 	 *            progress monitor to report the number of objects written.
1162 	 * @param packStream
1163 	 *            output stream of pack data. The stream should be buffered by
1164 	 *            the caller. The caller is responsible for closing the stream.
1165 	 * @throws java.io.IOException
1166 	 *             an error occurred reading a local object's data to include in
1167 	 *             the pack, or writing compressed object data to the output
1168 	 *             stream.
1169 	 * @throws WriteAbortedException
1170 	 *             the write operation is aborted by
1171 	 *             {@link org.eclipse.jgit.transport.ObjectCountCallback} .
1172 	 */
1173 	public void writePack(ProgressMonitor compressMonitor,
1174 			ProgressMonitor writeMonitor, OutputStream packStream)
1175 			throws IOException {
1176 		if (compressMonitor == null)
1177 			compressMonitor = NullProgressMonitor.INSTANCE;
1178 		if (writeMonitor == null)
1179 			writeMonitor = NullProgressMonitor.INSTANCE;
1180 
1181 		excludeInPacks = null;
1182 		excludeInPackLast = null;
1183 
1184 		boolean needSearchForReuse = reuseSupport != null && (
1185 				   reuseDeltas
1186 				|| config.isReuseObjects()
1187 				|| !cachedPacks.isEmpty());
1188 
1189 		if (compressMonitor instanceof BatchingProgressMonitor) {
1190 			long delay = 1000;
1191 			if (needSearchForReuse && config.isDeltaCompress())
1192 				delay = 500;
1193 			((BatchingProgressMonitor) compressMonitor).setDelayStart(
1194 					delay,
1195 					TimeUnit.MILLISECONDS);
1196 		}
1197 
1198 		if (needSearchForReuse)
1199 			searchForReuse(compressMonitor);
1200 		if (config.isDeltaCompress())
1201 			searchForDeltas(compressMonitor);
1202 
1203 		crc32 = new CRC32();
1204 		final PackOutputStreamrage/pack/PackOutputStream.html#PackOutputStream">PackOutputStream out = new PackOutputStream(
1205 			writeMonitor,
1206 			isIndexDisabled()
1207 				? packStream
1208 				: new CheckedOutputStream(packStream, crc32),
1209 			this);
1210 
1211 		long objCnt = packfileUriConfig == null ? getObjectCount() :
1212 			getUnoffloadedObjectCount();
1213 		stats.totalObjects = objCnt;
1214 		if (callback != null)
1215 			callback.setObjectCount(objCnt);
1216 		beginPhase(PackingPhase.WRITING, writeMonitor, objCnt);
1217 		long writeStart = System.currentTimeMillis();
1218 		try {
1219 			List<CachedPack> unwrittenCachedPacks;
1220 
1221 			if (packfileUriConfig != null) {
1222 				unwrittenCachedPacks = new ArrayList<>();
1223 				CachedPackUriProvider p = packfileUriConfig.cachedPackUriProvider;
1224 				PacketLineOut o = packfileUriConfig.pckOut;
1225 
1226 				o.writeString("packfile-uris\n"); //$NON-NLS-1$
1227 				for (CachedPack pack : cachedPacks) {
1228 					CachedPackUriProvider.PackInfo packInfo = p.getInfo(
1229 							pack, packfileUriConfig.protocolsSupported);
1230 					if (packInfo != null) {
1231 						o.writeString(packInfo.getHash() + ' ' +
1232 								packInfo.getUri() + '\n');
1233 					} else {
1234 						unwrittenCachedPacks.add(pack);
1235 					}
1236 				}
1237 				packfileUriConfig.pckOut.writeDelim();
1238 				packfileUriConfig.pckOut.writeString("packfile\n"); //$NON-NLS-1$
1239 			} else {
1240 				unwrittenCachedPacks = cachedPacks;
1241 			}
1242 
1243 			out.writeFileHeader(PACK_VERSION_GENERATED, objCnt);
1244 			out.flush();
1245 
1246 			writeObjects(out);
1247 			if (!edgeObjects.isEmpty() || !cachedPacks.isEmpty()) {
1248 				for (PackStatistics.ObjectType.Accumulator typeStat : stats.objectTypes) {
1249 					if (typeStat == null)
1250 						continue;
1251 					stats.thinPackBytes += typeStat.bytes;
1252 				}
1253 			}
1254 
1255 			stats.reusedPacks = Collections.unmodifiableList(cachedPacks);
1256 			for (CachedPack pack : unwrittenCachedPacks) {
1257 				long deltaCnt = pack.getDeltaCount();
1258 				stats.reusedObjects += pack.getObjectCount();
1259 				stats.reusedDeltas += deltaCnt;
1260 				stats.totalDeltas += deltaCnt;
1261 				reuseSupport.copyPackAsIs(out, pack);
1262 			}
1263 			writeChecksum(out);
1264 			out.flush();
1265 		} finally {
1266 			stats.timeWriting = System.currentTimeMillis() - writeStart;
1267 			stats.depth = depth;
1268 
1269 			for (PackStatistics.ObjectType.Accumulator typeStat : stats.objectTypes) {
1270 				if (typeStat == null)
1271 					continue;
1272 				typeStat.cntDeltas += typeStat.reusedDeltas;
1273 				stats.reusedObjects += typeStat.reusedObjects;
1274 				stats.reusedDeltas += typeStat.reusedDeltas;
1275 				stats.totalDeltas += typeStat.cntDeltas;
1276 			}
1277 		}
1278 
1279 		stats.totalBytes = out.length();
1280 		reader.close();
1281 		endPhase(writeMonitor);
1282 	}
1283 
	/**
	 * Get statistics of what this PackWriter did in order to create the final
	 * pack stream.
	 *
	 * @return description of what this PackWriter did in order to create the
	 *         final pack stream. This should only be invoked after the calls to
	 *         create the pack/index/bitmap have completed. A defensive copy is
	 *         returned so later writer activity cannot mutate it.
	 */
	public PackStatistics getStatistics() {
		return new PackStatistics(stats);
	}
1295 
	/**
	 * Get snapshot of the current state of this PackWriter.
	 *
	 * @return snapshot of the current state of this PackWriter; an immutable
	 *         copy taken at the time of the call.
	 */
	public State getState() {
		return state.snapshot();
	}
1304 
1305 	/**
1306 	 * {@inheritDoc}
1307 	 * <p>
1308 	 * Release all resources used by this writer.
1309 	 */
1310 	@Override
1311 	public void close() {
1312 		reader.close();
1313 		if (myDeflater != null) {
1314 			myDeflater.end();
1315 			myDeflater = null;
1316 		}
1317 		instances.remove(selfRef);
1318 	}
1319 
1320 	private void searchForReuse(ProgressMonitor monitor) throws IOException {
1321 		long cnt = 0;
1322 		cnt += objectsLists[OBJ_COMMIT].size();
1323 		cnt += objectsLists[OBJ_TREE].size();
1324 		cnt += objectsLists[OBJ_BLOB].size();
1325 		cnt += objectsLists[OBJ_TAG].size();
1326 
1327 		long start = System.currentTimeMillis();
1328 		beginPhase(PackingPhase.FINDING_SOURCES, monitor, cnt);
1329 		if (cnt <= 4096) {
1330 			// For small object counts, do everything as one list.
1331 			BlockList<ObjectToPack> tmp = new BlockList<>((int) cnt);
1332 			tmp.addAll(objectsLists[OBJ_TAG]);
1333 			tmp.addAll(objectsLists[OBJ_COMMIT]);
1334 			tmp.addAll(objectsLists[OBJ_TREE]);
1335 			tmp.addAll(objectsLists[OBJ_BLOB]);
1336 			searchForReuse(monitor, tmp);
1337 			if (pruneCurrentObjectList) {
1338 				// If the list was pruned, we need to re-prune the main lists.
1339 				pruneEdgesFromObjectList(objectsLists[OBJ_COMMIT]);
1340 				pruneEdgesFromObjectList(objectsLists[OBJ_TREE]);
1341 				pruneEdgesFromObjectList(objectsLists[OBJ_BLOB]);
1342 				pruneEdgesFromObjectList(objectsLists[OBJ_TAG]);
1343 			}
1344 		} else {
1345 			searchForReuse(monitor, objectsLists[OBJ_TAG]);
1346 			searchForReuse(monitor, objectsLists[OBJ_COMMIT]);
1347 			searchForReuse(monitor, objectsLists[OBJ_TREE]);
1348 			searchForReuse(monitor, objectsLists[OBJ_BLOB]);
1349 		}
1350 		endPhase(monitor);
1351 		stats.timeSearchingForReuse = System.currentTimeMillis() - start;
1352 
1353 		if (config.isReuseDeltas() && config.getCutDeltaChains()) {
1354 			cutDeltaChains(objectsLists[OBJ_TREE]);
1355 			cutDeltaChains(objectsLists[OBJ_BLOB]);
1356 		}
1357 	}
1358 
	// Ask the reuse provider to pick representations for one object list,
	// then drop edge objects if the provider flagged the list for pruning.
	private void searchForReuse(ProgressMonitor monitor, List<ObjectToPack> list)
			throws IOException, MissingObjectException {
		pruneCurrentObjectList = false;
		reuseSupport.selectObjectRepresentation(this, monitor, list);
		if (pruneCurrentObjectList)
			pruneEdgesFromObjectList(list);
	}
1366 
	// Walk each object's delta-base chain and force any base whose chain
	// depth reaches the configured maximum to be stored whole, so reused
	// deltas never exceed maxDeltaDepth. Iterates from the list tail so
	// chain lengths propagate from leaves toward bases; the inner loop
	// stops early once it meets an already-measured (longer) chain.
	private void cutDeltaChains(BlockList<ObjectToPack> list)
			throws IOException {
		int max = config.getMaxDeltaDepth();
		for (int idx = list.size() - 1; idx >= 0; idx--) {
			int d = 0;
			ObjectToPack b = list.get(idx).getDeltaBase();
			while (b != null) {
				if (d < b.getChainLength())
					break;
				b.setChainLength(++d);
				if (d >= max && b.isDeltaRepresentation()) {
					// Chain too deep: re-store this base as a whole object.
					reselectNonDelta(b);
					break;
				}
				b = b.getDeltaBase();
			}
		}
		if (config.isDeltaCompress()) {
			// Chain lengths were scratch state; clear before delta search.
			for (ObjectToPack otp : list)
				otp.clearChainLength();
		}
	}
1389 
1390 	private void searchForDeltas(ProgressMonitor monitor)
1391 			throws MissingObjectException, IncorrectObjectTypeException,
1392 			IOException {
1393 		// Commits and annotated tags tend to have too many differences to
1394 		// really benefit from delta compression. Consequently just don't
1395 		// bother examining those types here.
1396 		//
1397 		ObjectToPack[] list = new ObjectToPack[
1398 				  objectsLists[OBJ_TREE].size()
1399 				+ objectsLists[OBJ_BLOB].size()
1400 				+ edgeObjects.size()];
1401 		int cnt = 0;
1402 		cnt = findObjectsNeedingDelta(list, cnt, OBJ_TREE);
1403 		cnt = findObjectsNeedingDelta(list, cnt, OBJ_BLOB);
1404 		if (cnt == 0)
1405 			return;
1406 		int nonEdgeCnt = cnt;
1407 
1408 		// Queue up any edge objects that we might delta against.  We won't
1409 		// be sending these as we assume the other side has them, but we need
1410 		// them in the search phase below.
1411 		//
1412 		for (ObjectToPack eo : edgeObjects) {
1413 			eo.setWeight(0);
1414 			list[cnt++] = eo;
1415 		}
1416 
1417 		// Compute the sizes of the objects so we can do a proper sort.
1418 		// We let the reader skip missing objects if it chooses. For
1419 		// some readers this can be a huge win. We detect missing objects
1420 		// by having set the weights above to 0 and allowing the delta
1421 		// search code to discover the missing object and skip over it, or
1422 		// abort with an exception if we actually had to have it.
1423 		//
1424 		final long sizingStart = System.currentTimeMillis();
1425 		beginPhase(PackingPhase.GETTING_SIZES, monitor, cnt);
1426 		AsyncObjectSizeQueue<ObjectToPack> sizeQueue = reader.getObjectSize(
1427 				Arrays.<ObjectToPack> asList(list).subList(0, cnt), false);
1428 		try {
1429 			final long limit = Math.min(
1430 					config.getBigFileThreshold(),
1431 					Integer.MAX_VALUE);
1432 			for (;;) {
1433 				try {
1434 					if (!sizeQueue.next())
1435 						break;
1436 				} catch (MissingObjectException notFound) {
1437 					monitor.update(1);
1438 					if (ignoreMissingUninteresting) {
1439 						ObjectToPack otp = sizeQueue.getCurrent();
1440 						if (otp != null && otp.isEdge()) {
1441 							otp.setDoNotDelta();
1442 							continue;
1443 						}
1444 
1445 						otp = objectsMap.get(notFound.getObjectId());
1446 						if (otp != null && otp.isEdge()) {
1447 							otp.setDoNotDelta();
1448 							continue;
1449 						}
1450 					}
1451 					throw notFound;
1452 				}
1453 
1454 				ObjectToPack otp = sizeQueue.getCurrent();
1455 				if (otp == null)
1456 					otp = objectsMap.get(sizeQueue.getObjectId());
1457 
1458 				long sz = sizeQueue.getSize();
1459 				if (DeltaIndex.BLKSZ < sz && sz < limit)
1460 					otp.setWeight((int) sz);
1461 				else
1462 					otp.setDoNotDelta(); // too small, or too big
1463 				monitor.update(1);
1464 			}
1465 		} finally {
1466 			sizeQueue.release();
1467 		}
1468 		endPhase(monitor);
1469 		stats.timeSearchingForSizes = System.currentTimeMillis() - sizingStart;
1470 
1471 		// Sort the objects by path hash so like files are near each other,
1472 		// and then by size descending so that bigger files are first. This
1473 		// applies "Linus' Law" which states that newer files tend to be the
1474 		// bigger ones, because source files grow and hardly ever shrink.
1475 		//
1476 		Arrays.sort(list, 0, cnt, (ObjectToPack"../../../../../../org/eclipse/jgit/internal/storage/pack/ObjectToPack.html#ObjectToPack">ObjectToPack a, ObjectToPack b) -> {
1477 			int cmp = (a.isDoNotDelta() ? 1 : 0) - (b.isDoNotDelta() ? 1 : 0);
1478 			if (cmp != 0) {
1479 				return cmp;
1480 			}
1481 
1482 			cmp = a.getType() - b.getType();
1483 			if (cmp != 0) {
1484 				return cmp;
1485 			}
1486 
1487 			cmp = (a.getPathHash() >>> 1) - (b.getPathHash() >>> 1);
1488 			if (cmp != 0) {
1489 				return cmp;
1490 			}
1491 
1492 			cmp = (a.getPathHash() & 1) - (b.getPathHash() & 1);
1493 			if (cmp != 0) {
1494 				return cmp;
1495 			}
1496 
1497 			cmp = (a.isEdge() ? 0 : 1) - (b.isEdge() ? 0 : 1);
1498 			if (cmp != 0) {
1499 				return cmp;
1500 			}
1501 
1502 			return b.getWeight() - a.getWeight();
1503 		});
1504 
1505 		// Above we stored the objects we cannot delta onto the end.
1506 		// Remove them from the list so we don't waste time on them.
1507 		while (0 < cnt && list[cnt - 1].isDoNotDelta()) {
1508 			if (!list[cnt - 1].isEdge())
1509 				nonEdgeCnt--;
1510 			cnt--;
1511 		}
1512 		if (cnt == 0)
1513 			return;
1514 
1515 		final long searchStart = System.currentTimeMillis();
1516 		searchForDeltas(monitor, list, cnt);
1517 		stats.deltaSearchNonEdgeObjects = nonEdgeCnt;
1518 		stats.timeCompressing = System.currentTimeMillis() - searchStart;
1519 
1520 		for (int i = 0; i < cnt; i++)
1521 			if (!list[i].isEdge() && list[i].isDeltaRepresentation())
1522 				stats.deltasFound++;
1523 	}
1524 
1525 	private int findObjectsNeedingDelta(ObjectToPack[] list, int cnt, int type) {
1526 		for (ObjectToPack otp : objectsLists[type]) {
1527 			if (otp.isDoNotDelta()) // delta is disabled for this path
1528 				continue;
1529 			if (otp.isDeltaRepresentation()) // already reusing a delta
1530 				continue;
1531 			otp.setWeight(0);
1532 			list[cnt++] = otp;
1533 		}
1534 		return cnt;
1535 	}
1536 
1537 	private void reselectNonDelta(ObjectToPack otp) throws IOException {
1538 		otp.clearDeltaBase();
1539 		otp.clearReuseAsIs();
1540 		boolean old = reuseDeltas;
1541 		reuseDeltas = false;
1542 		reuseSupport.selectObjectRepresentation(this,
1543 				NullProgressMonitor.INSTANCE,
1544 				Collections.singleton(otp));
1545 		reuseDeltas = old;
1546 	}
1547 
1548 	private void searchForDeltas(final ProgressMonitor monitor,
1549 			final ObjectToPack[] list, final int cnt)
1550 			throws MissingObjectException, IncorrectObjectTypeException,
1551 			LargeObjectException, IOException {
1552 		int threads = config.getThreads();
1553 		if (threads == 0)
1554 			threads = Runtime.getRuntime().availableProcessors();
1555 		if (threads <= 1 || cnt <= config.getDeltaSearchWindowSize())
1556 			singleThreadDeltaSearch(monitor, list, cnt);
1557 		else
1558 			parallelDeltaSearch(monitor, list, cnt, threads);
1559 	}
1560 
1561 	private void singleThreadDeltaSearch(ProgressMonitor monitor,
1562 			ObjectToPack[] list, int cnt) throws IOException {
1563 		long totalWeight = 0;
1564 		for (int i = 0; i < cnt; i++) {
1565 			ObjectToPack o = list[i];
1566 			totalWeight += DeltaTask.getAdjustedWeight(o);
1567 		}
1568 
1569 		long bytesPerUnit = 1;
1570 		while (DeltaTask.MAX_METER <= (totalWeight / bytesPerUnit))
1571 			bytesPerUnit <<= 10;
1572 		int cost = (int) (totalWeight / bytesPerUnit);
1573 		if (totalWeight % bytesPerUnit != 0)
1574 			cost++;
1575 
1576 		beginPhase(PackingPhase.COMPRESSING, monitor, cost);
1577 		new DeltaWindow(config, new DeltaCache(config), reader,
1578 				monitor, bytesPerUnit,
1579 				list, 0, cnt).search();
1580 		endPhase(monitor);
1581 	}
1582 
1583 	private void parallelDeltaSearch(ProgressMonitor monitor,
1584 			ObjectToPack[] list, int cnt, int threads) throws IOException {
1585 		DeltaCache dc = new ThreadSafeDeltaCache(config);
1586 		ThreadSafeProgressMonitor pm = new ThreadSafeProgressMonitor(monitor);
1587 		DeltaTask.Block taskBlock = new DeltaTask.Block(threads, config,
1588 				reader, dc, pm,
1589 				list, 0, cnt);
1590 		taskBlock.partitionTasks();
1591 		beginPhase(PackingPhase.COMPRESSING, monitor, taskBlock.cost());
1592 		pm.startWorkers(taskBlock.tasks.size());
1593 
1594 		Executor executor = config.getExecutor();
1595 		final List<Throwable> errors =
1596 				Collections.synchronizedList(new ArrayList<>(threads));
1597 		if (executor instanceof ExecutorService) {
1598 			// Caller supplied us a service, use it directly.
1599 			runTasks((ExecutorService) executor, pm, taskBlock, errors);
1600 		} else if (executor == null) {
1601 			// Caller didn't give us a way to run the tasks, spawn up a
1602 			// temporary thread pool and make sure it tears down cleanly.
1603 			ExecutorService pool = Executors.newFixedThreadPool(threads);
1604 			try {
1605 				runTasks(pool, pm, taskBlock, errors);
1606 			} finally {
1607 				pool.shutdown();
1608 				for (;;) {
1609 					try {
1610 						if (pool.awaitTermination(60, TimeUnit.SECONDS))
1611 							break;
1612 					} catch (InterruptedException e) {
1613 						throw new IOException(
1614 								JGitText.get().packingCancelledDuringObjectsWriting);
1615 					}
1616 				}
1617 			}
1618 		} else {
1619 			// The caller gave us an executor, but it might not do
1620 			// asynchronous execution.  Wrap everything and hope it
1621 			// can schedule these for us.
1622 			for (DeltaTask task : taskBlock.tasks) {
1623 				executor.execute(() -> {
1624 					try {
1625 						task.call();
1626 					} catch (Throwable failure) {
1627 						errors.add(failure);
1628 					}
1629 				});
1630 			}
1631 			try {
1632 				pm.waitForCompletion();
1633 			} catch (InterruptedException ie) {
1634 				// We can't abort the other tasks as we have no handle.
1635 				// Cross our fingers and just break out anyway.
1636 				//
1637 				throw new IOException(
1638 						JGitText.get().packingCancelledDuringObjectsWriting);
1639 			}
1640 		}
1641 
1642 		// If any task threw an error, try to report it back as
1643 		// though we weren't using a threaded search algorithm.
1644 		//
1645 		if (!errors.isEmpty()) {
1646 			Throwable err = errors.get(0);
1647 			if (err instanceof Error)
1648 				throw (Error) err;
1649 			if (err instanceof RuntimeException)
1650 				throw (RuntimeException) err;
1651 			if (err instanceof IOException)
1652 				throw (IOException) err;
1653 
1654 			throw new IOException(err.getMessage(), err);
1655 		}
1656 		endPhase(monitor);
1657 	}
1658 
1659 	private static void runTasks(ExecutorService pool,
1660 			ThreadSafeProgressMonitor pm,
1661 			DeltaTask.Block tb, List<Throwable> errors) throws IOException {
1662 		List<Future<?>> futures = new ArrayList<>(tb.tasks.size());
1663 		for (DeltaTask task : tb.tasks)
1664 			futures.add(pool.submit(task));
1665 
1666 		try {
1667 			pm.waitForCompletion();
1668 			for (Future<?> f : futures) {
1669 				try {
1670 					f.get();
1671 				} catch (ExecutionException failed) {
1672 					errors.add(failed.getCause());
1673 				}
1674 			}
1675 		} catch (InterruptedException ie) {
1676 			for (Future<?> f : futures)
1677 				f.cancel(true);
1678 			throw new IOException(
1679 					JGitText.get().packingCancelledDuringObjectsWriting);
1680 		}
1681 	}
1682 
1683 	private void writeObjects(PackOutputStream out) throws IOException {
1684 		writeObjects(out, objectsLists[OBJ_COMMIT]);
1685 		writeObjects(out, objectsLists[OBJ_TAG]);
1686 		writeObjects(out, objectsLists[OBJ_TREE]);
1687 		writeObjects(out, objectsLists[OBJ_BLOB]);
1688 	}
1689 
1690 	private void writeObjects(PackOutputStream out, List<ObjectToPack> list)
1691 			throws IOException {
1692 		if (list.isEmpty())
1693 			return;
1694 
1695 		typeStats = stats.objectTypes[list.get(0).getType()];
1696 		long beginOffset = out.length();
1697 
1698 		if (reuseSupport != null) {
1699 			reuseSupport.writeObjects(out, list);
1700 		} else {
1701 			for (ObjectToPack otp : list)
1702 				out.writeObject(otp);
1703 		}
1704 
1705 		typeStats.bytes += out.length() - beginOffset;
1706 		typeStats.cntObjects = list.size();
1707 	}
1708 
1709 	void writeObject(PackOutputStream out, ObjectToPack otp) throws IOException {
1710 		if (!otp.isWritten())
1711 			writeObjectImpl(out, otp);
1712 	}
1713 
	/**
	 * Write a single object, preferring as-is reuse and recovering when the
	 * selected stored representation has vanished mid-write.
	 *
	 * @param out
	 *            destination pack stream.
	 * @param otp
	 *            the object to write; not yet written.
	 * @throws IOException
	 *             the object cannot be read or the stream cannot be written.
	 */
	private void writeObjectImpl(PackOutputStream out, ObjectToPack otp)
			throws IOException {
		if (otp.wantWrite()) {
			// A cycle exists in this delta chain. This should only occur if a
			// selected object representation disappeared during writing
			// (for example due to a concurrent repack) and a different base
			// was chosen, forcing a cycle. Select something other than a
			// delta, and write this object.
			reselectNonDelta(otp);
		}
		otp.markWantWrite();

		// Try each available as-is representation until one copies cleanly.
		while (otp.isReuseAsIs()) {
			// The delta base must appear in the pack before its dependents.
			writeBase(out, otp.getDeltaBase());
			if (otp.isWritten())
				return; // Delta chain cycle caused this to write already.

			crc32.reset();
			otp.setOffset(out.length());
			try {
				reuseSupport.copyObjectAsIs(out, otp, reuseValidate);
				out.endObject();
				otp.setCRC((int) crc32.getValue());
				typeStats.reusedObjects++;
				if (otp.isDeltaRepresentation()) {
					typeStats.reusedDeltas++;
					typeStats.deltaBytes += out.length() - otp.getOffset();
				}
				return;
			} catch (StoredObjectRepresentationNotAvailableException gone) {
				if (otp.getOffset() == out.length()) {
					// Nothing was emitted yet: pick another representation
					// and retry the loop with the fresh selection.
					otp.setOffset(0);
					otp.clearDeltaBase();
					otp.clearReuseAsIs();
					reuseSupport.selectObjectRepresentation(this,
							NullProgressMonitor.INSTANCE,
							Collections.singleton(otp));
					continue;
				} else {
					// Object writing already started, we cannot recover.
					//
					CorruptObjectException coe;
					coe = new CorruptObjectException(otp, ""); //$NON-NLS-1$
					coe.initCause(gone);
					throw coe;
				}
			}
		}

		// If we reached here, reuse wasn't possible.
		//
		if (otp.isDeltaRepresentation())
			writeDeltaObjectDeflate(out, otp);
		else
			writeWholeObjectDeflate(out, otp);
		out.endObject();
		otp.setCRC((int) crc32.getValue());
	}
1772 
1773 	private void writeBase(PackOutputStream out, ObjectToPack base)
1774 			throws IOException {
1775 		if (base != null && !base.isWritten() && !base.isEdge())
1776 			writeObjectImpl(out, base);
1777 	}
1778 
1779 	private void writeWholeObjectDeflate(PackOutputStream out,
1780 			final ObjectToPack otp) throws IOException {
1781 		final Deflater deflater = deflater();
1782 		final ObjectLoader ldr = reader.open(otp, otp.getType());
1783 
1784 		crc32.reset();
1785 		otp.setOffset(out.length());
1786 		out.writeHeader(otp, ldr.getSize());
1787 
1788 		deflater.reset();
1789 		DeflaterOutputStream dst = new DeflaterOutputStream(out, deflater);
1790 		ldr.copyTo(dst);
1791 		dst.finish();
1792 	}
1793 
1794 	private void writeDeltaObjectDeflate(PackOutputStream out,
1795 			final ObjectToPack otp) throws IOException {
1796 		writeBase(out, otp.getDeltaBase());
1797 
1798 		crc32.reset();
1799 		otp.setOffset(out.length());
1800 
1801 		DeltaCache.Ref ref = otp.popCachedDelta();
1802 		if (ref != null) {
1803 			byte[] zbuf = ref.get();
1804 			if (zbuf != null) {
1805 				out.writeHeader(otp, otp.getCachedSize());
1806 				out.write(zbuf);
1807 				typeStats.cntDeltas++;
1808 				typeStats.deltaBytes += out.length() - otp.getOffset();
1809 				return;
1810 			}
1811 		}
1812 
1813 		try (TemporaryBuffer.Heap delta = delta(otp)) {
1814 			out.writeHeader(otp, delta.length());
1815 
1816 			Deflater deflater = deflater();
1817 			deflater.reset();
1818 			DeflaterOutputStream dst = new DeflaterOutputStream(out, deflater);
1819 			delta.writeTo(dst, null);
1820 			dst.finish();
1821 		}
1822 		typeStats.cntDeltas++;
1823 		typeStats.deltaBytes += out.length() - otp.getOffset();
1824 	}
1825 
1826 	private TemporaryBuffer.Heap delta(ObjectToPack otp)
1827 			throws IOException {
1828 		DeltaIndex index = new DeltaIndex(buffer(otp.getDeltaBaseId()));
1829 		byte[] res = buffer(otp);
1830 
1831 		// We never would have proposed this pair if the delta would be
1832 		// larger than the unpacked version of the object. So using it
1833 		// as our buffer limit is valid: we will never reach it.
1834 		//
1835 		TemporaryBuffer.Heap delta = new TemporaryBuffer.Heap(res.length);
1836 		index.encode(delta, res);
1837 		return delta;
1838 	}
1839 
	/** Load the full inflated content of {@code objId} via this writer's reader. */
	private byte[] buffer(AnyObjectId objId) throws IOException {
		return buffer(config, reader, objId);
	}
1843 
1844 	static byte[] buffer(PackConfig config, ObjectReader or, AnyObjectId objId)
1845 			throws IOException {
1846 		// PackWriter should have already pruned objects that
1847 		// are above the big file threshold, so our chances of
1848 		// the object being below it are very good. We really
1849 		// shouldn't be here, unless the implementation is odd.
1850 
1851 		return or.open(objId).getCachedBytes(config.getBigFileThreshold());
1852 	}
1853 
1854 	private Deflater deflater() {
1855 		if (myDeflater == null)
1856 			myDeflater = new Deflater(config.getCompressionLevel());
1857 		return myDeflater;
1858 	}
1859 
	// Append the pack trailer: the digest of everything written so far.
	// The digest is also retained in packcsum for the index writer.
	private void writeChecksum(PackOutputStream out) throws IOException {
		packcsum = out.getDigest();
		out.write(packcsum);
	}
1864 
	/**
	 * Enumerate every object that should be included in the pack.
	 * <p>
	 * Uses a bitmap index when one is available and permitted; otherwise
	 * performs a full object walk from {@code want}, excluding everything
	 * reachable from {@code have}. Populates the per-type object lists and
	 * the counting statistics.
	 *
	 * @param countingMonitor
	 *            progress feedback for the counting phase.
	 * @param walker
	 *            the walk used to traverse the object graph.
	 * @param want
	 *            objects the destination asked for.
	 * @param have
	 *            objects the destination already possesses.
	 * @param noBitmaps
	 *            commits that must not be selected for bitmap creation.
	 * @throws IOException
	 *             an object could not be read from the repository.
	 */
	private void findObjectsToPack(@NonNull ProgressMonitor countingMonitor,
			@NonNull ObjectWalk walker, @NonNull Set<? extends ObjectId> want,
			@NonNull Set<? extends ObjectId> have,
			@NonNull Set<? extends ObjectId> noBitmaps) throws IOException {
		final long countingStart = System.currentTimeMillis();
		beginPhase(PackingPhase.COUNTING, countingMonitor, ProgressMonitor.UNKNOWN);

		stats.interestingObjects = Collections.unmodifiableSet(new HashSet<ObjectId>(want));
		stats.uninterestingObjects = Collections.unmodifiableSet(new HashSet<ObjectId>(have));
		excludeFromBitmapSelection = noBitmaps;

		// Bitmaps can only be built for full (non-thin, non-shallow,
		// non-excluding) packs.
		canBuildBitmaps = config.isBuildBitmaps()
				&& !shallowPack
				&& have.isEmpty()
				&& (excludeInPacks == null || excludeInPacks.length == 0);
		if (!shallowPack && useBitmaps) {
			BitmapIndex bitmapIndex = reader.getBitmapIndex();
			if (bitmapIndex != null) {
				// Fast path: compute the object set from existing bitmaps
				// instead of walking the graph.
				BitmapWalker bitmapWalker = new BitmapWalker(
						walker, bitmapIndex, countingMonitor);
				findObjectsToPackUsingBitmaps(bitmapWalker, want, have);
				endPhase(countingMonitor);
				stats.timeCounting = System.currentTimeMillis() - countingStart;
				stats.bitmapIndexMisses = bitmapWalker.getCountOfBitmapIndexMisses();
				return;
			}
		}

		// Slow path: set up a traditional object walk over want + have.
		List<ObjectId> all = new ArrayList<>(want.size() + have.size());
		all.addAll(want);
		all.addAll(have);

		final RevFlag include = walker.newFlag("include"); //$NON-NLS-1$
		final RevFlag added = walker.newFlag("added"); //$NON-NLS-1$

		walker.carry(include);

		int haveEst = have.size();
		if (have.isEmpty()) {
			walker.sort(RevSort.COMMIT_TIME_DESC);
		} else {
			walker.sort(RevSort.TOPO);
			if (thin)
				walker.sort(RevSort.BOUNDARY, true);
		}

		List<RevObject> wantObjs = new ArrayList<>(want.size());
		List<RevObject> haveObjs = new ArrayList<>(haveEst);
		List<RevTag> wantTags = new ArrayList<>(want.size());

		// Retrieve the RevWalk's versions of "want" and "have" objects to
		// maintain any state previously set in the RevWalk.
		AsyncRevObjectQueue q = walker.parseAny(all, true);
		try {
			for (;;) {
				try {
					RevObject o = q.next();
					if (o == null)
						break;
					if (have.contains(o))
						haveObjs.add(o);
					if (want.contains(o)) {
						o.add(include);
						wantObjs.add(o);
						if (o instanceof RevTag)
							wantTags.add((RevTag) o);
					}
				} catch (MissingObjectException e) {
					// A missing "have" may be tolerated; a missing "want"
					// is always fatal.
					if (ignoreMissingUninteresting
							&& have.contains(e.getObjectId()))
						continue;
					throw e;
				}
			}
		} finally {
			q.release();
		}

		// Pre-parse the targets of wanted annotated tags so the walk can
		// start from them without further lookups.
		if (!wantTags.isEmpty()) {
			all = new ArrayList<>(wantTags.size());
			for (RevTag tag : wantTags)
				all.add(tag.getObject());
			q = walker.parseAny(all, true);
			try {
				while (q.next() != null) {
					// Just need to pop the queue item to parse the object.
				}
			} finally {
				q.release();
			}
		}

		if (walker instanceof DepthWalk.ObjectWalk) {
			DepthWalk.ObjectWalk depthWalk = (DepthWalk.ObjectWalk) walker;
			for (RevObject obj : wantObjs) {
				depthWalk.markRoot(obj);
			}
			// Mark the tree objects associated with "have" commits as
			// uninteresting to avoid writing redundant blobs. A normal RevWalk
			// lazily propagates the "uninteresting" state from a commit to its
			// tree during the walk, but DepthWalks can terminate early so
			// preemptively propagate that state here.
			for (RevObject obj : haveObjs) {
				if (obj instanceof RevCommit) {
					RevTree t = ((RevCommit) obj).getTree();
					depthWalk.markUninteresting(t);
				}
			}

			if (unshallowObjects != null) {
				for (ObjectId id : unshallowObjects) {
					depthWalk.markUnshallow(walker.parseAny(id));
				}
			}
		} else {
			for (RevObject obj : wantObjs)
				walker.markStart(obj);
		}
		for (RevObject obj : haveObjs)
			walker.markUninteresting(obj);

		// Walk commits first, collecting interesting commits and (for thin
		// packs) a bounded set of boundary trees to search for bases.
		final int maxBases = config.getDeltaSearchWindowSize();
		Set<RevTree> baseTrees = new HashSet<>();
		BlockList<RevCommit> commits = new BlockList<>();
		Set<ObjectId> roots = new HashSet<>();
		RevCommit c;
		while ((c = walker.next()) != null) {
			if (exclude(c))
				continue;
			if (c.has(RevFlag.UNINTERESTING)) {
				if (baseTrees.size() <= maxBases)
					baseTrees.add(c.getTree());
				continue;
			}

			commits.add(c);
			if (c.getParentCount() == 0) {
				roots.add(c.copy());
			}
			countingMonitor.update(1);
		}
		stats.rootCommits = Collections.unmodifiableSet(roots);

		if (shallowPack) {
			for (RevCommit cmit : commits) {
				addObject(cmit, 0);
			}
		} else {
			int commitCnt = 0;
			boolean putTagTargets = false;
			for (RevCommit cmit : commits) {
				if (!cmit.has(added)) {
					cmit.add(added);
					addObject(cmit, 0);
					commitCnt++;
				}

				for (int i = 0; i < cmit.getParentCount(); i++) {
					RevCommit p = cmit.getParent(i);
					if (!p.has(added) && !p.has(RevFlag.UNINTERESTING)
							&& !exclude(p)) {
						p.add(added);
						addObject(p, 0);
						commitCnt++;
					}
				}

				// After a reasonable number of commits, also add any tag
				// targets that were reached by the walk so far.
				if (!putTagTargets && 4096 < commitCnt) {
					for (ObjectId id : tagTargets) {
						RevObject obj = walker.lookupOrNull(id);
						if (obj instanceof RevCommit
								&& obj.has(include)
								&& !obj.has(RevFlag.UNINTERESTING)
								&& !obj.has(added)) {
							obj.add(added);
							addObject(obj, 0);
						}
					}
					putTagTargets = true;
				}
			}
		}
		commits = null;

		// Now walk the remaining (non-commit) objects, applying depth and
		// blob-size filters before adding them to the pack.
		if (thin && !baseTrees.isEmpty()) {
			BaseSearch bases = new BaseSearch(countingMonitor, baseTrees, //
					objectsMap, edgeObjects, reader);
			RevObject o;
			while ((o = walker.nextObject()) != null) {
				if (o.has(RevFlag.UNINTERESTING))
					continue;
				if (exclude(o))
					continue;

				int pathHash = walker.getPathHashCode();
				byte[] pathBuf = walker.getPathBuffer();
				int pathLen = walker.getPathLength();
				bases.addBase(o.getType(), pathBuf, pathLen, pathHash);
				if (!depthSkip(o, walker)) {
					filterAndAddObject(o, o.getType(), pathHash, want);
				}
				countingMonitor.update(1);
			}
		} else {
			RevObject o;
			while ((o = walker.nextObject()) != null) {
				if (o.has(RevFlag.UNINTERESTING))
					continue;
				if (exclude(o))
					continue;
				if (!depthSkip(o, walker)) {
					filterAndAddObject(o, o.getType(), walker.getPathHashCode(),
									   want);
				}
				countingMonitor.update(1);
			}
		}

		for (CachedPack pack : cachedPacks)
			countingMonitor.update((int) pack.getObjectCount());
		endPhase(countingMonitor);
		stats.timeCounting = System.currentTimeMillis() - countingStart;
		stats.bitmapIndexMisses = -1;
	}
2089 
	/**
	 * Enumerate objects to pack via bitmap set arithmetic instead of a walk.
	 * <p>
	 * Computes {@code need = want \ have} from bitmaps, optionally appends
	 * whole cached packs, and adds the remaining objects individually.
	 *
	 * @param bitmapWalker
	 *            walker used to resolve bitmaps for want/have.
	 * @param want
	 *            objects the destination asked for.
	 * @param have
	 *            objects the destination already possesses.
	 * @throws MissingObjectException
	 *             a wanted object does not exist.
	 * @throws IncorrectObjectTypeException
	 *             an object has an unexpected type.
	 * @throws IOException
	 *             reading from the repository failed.
	 */
	private void findObjectsToPackUsingBitmaps(
			BitmapWalker bitmapWalker, Set<? extends ObjectId> want,
			Set<? extends ObjectId> have)
			throws MissingObjectException, IncorrectObjectTypeException,
			IOException {
		BitmapBuilder haveBitmap = bitmapWalker.findObjects(have, null, true);
		BitmapBuilder wantBitmap = bitmapWalker.findObjects(want, haveBitmap,
				false);
		BitmapBuilder needBitmap = wantBitmap.andNot(haveBitmap);

		// Cached packs can be appended verbatim; remove their objects from
		// the need set so they are not also written individually.
		if (useCachedPacks && reuseSupport != null && !reuseValidate
				&& (excludeInPacks == null || excludeInPacks.length == 0))
			cachedPacks.addAll(
					reuseSupport.getCachedPacksAndUpdate(needBitmap));

		for (BitmapObject obj : needBitmap) {
			ObjectId objectId = obj.getObjectId();
			if (exclude(objectId)) {
				needBitmap.remove(objectId);
				continue;
			}
			// Path hash is unknown when coming from a bitmap; use 0.
			filterAndAddObject(objectId, obj.getType(), 0, want);
		}

		// Thin packs may delta against anything the peer already has.
		if (thin)
			haveObjects = haveBitmap;
	}
2117 
2118 	private static void pruneEdgesFromObjectList(List<ObjectToPack> list) {
2119 		final int size = list.size();
2120 		int src = 0;
2121 		int dst = 0;
2122 
2123 		for (; src < size; src++) {
2124 			ObjectToPack obj = list.get(src);
2125 			if (obj.isEdge())
2126 				continue;
2127 			if (dst != src)
2128 				list.set(dst, obj);
2129 			dst++;
2130 		}
2131 
2132 		while (dst < list.size())
2133 			list.remove(list.size() - 1);
2134 	}
2135 
2136 	/**
2137 	 * Include one object to the output file.
2138 	 * <p>
2139 	 * Objects are written in the order they are added. If the same object is
2140 	 * added twice, it may be written twice, creating a larger than necessary
2141 	 * file.
2142 	 *
2143 	 * @param object
2144 	 *            the object to add.
2145 	 * @throws org.eclipse.jgit.errors.IncorrectObjectTypeException
2146 	 *             the object is an unsupported type.
2147 	 */
2148 	public void addObject(RevObject object)
2149 			throws IncorrectObjectTypeException {
2150 		if (!exclude(object))
2151 			addObject(object, 0);
2152 	}
2153 
	// Convenience overload: derive the pack object type from the object.
	private void addObject(RevObject object, int pathHashCode) {
		addObject(object, object.getType(), pathHashCode);
	}
2157 
2158 	private void addObject(
2159 			final AnyObjectId src, final int type, final int pathHashCode) {
2160 		final ObjectToPack otp;
2161 		if (reuseSupport != null)
2162 			otp = reuseSupport.newObjectToPack(src, type);
2163 		else
2164 			otp = new ObjectToPack(src, type);
2165 		otp.setPathHash(pathHashCode);
2166 		objectsLists[type].add(otp);
2167 		objectsMap.add(otp);
2168 	}
2169 
2170 	/**
2171 	 * Determines if the object should be omitted from the pack as a result of
2172 	 * its depth (probably because of the tree:<depth> filter).
2173 	 * <p>
2174 	 * Causes {@code walker} to skip traversing the current tree, which ought to
2175 	 * have just started traversal, assuming this method is called as soon as a
2176 	 * new depth is reached.
2177 	 * <p>
2178 	 * This method increments the {@code treesTraversed} statistic.
2179 	 *
2180 	 * @param obj
2181 	 *            the object to check whether it should be omitted.
2182 	 * @param walker
2183 	 *            the walker being used for traveresal.
2184 	 * @return whether the given object should be skipped.
2185 	 */
2186 	private boolean depthSkip(@NonNull RevObject obj, ObjectWalk walker) {
2187 		long treeDepth = walker.getTreeDepth();
2188 
2189 		// Check if this object needs to be rejected because it is a tree or
2190 		// blob that is too deep from the root tree.
2191 
2192 		// A blob is considered one level deeper than the tree that contains it.
2193 		if (obj.getType() == OBJ_BLOB) {
2194 			treeDepth++;
2195 		} else {
2196 			stats.treesTraversed++;
2197 		}
2198 
2199 		if (filterSpec.getTreeDepthLimit() < 0 ||
2200 			treeDepth <= filterSpec.getTreeDepthLimit()) {
2201 			return false;
2202 		}
2203 
2204 		walker.skipTree();
2205 		return true;
2206 	}
2207 
2208 	// Adds the given object as an object to be packed, first performing
2209 	// filtering on blobs at or exceeding a given size.
2210 	private void filterAndAddObject(@NonNull AnyObjectId src, int type,
2211 			int pathHashCode, @NonNull Set<? extends AnyObjectId> want)
2212 			throws IOException {
2213 
2214 		// Check if this object needs to be rejected, doing the cheaper
2215 		// checks first.
2216 		boolean reject = filterSpec.getBlobLimit() >= 0 &&
2217 			type == OBJ_BLOB &&
2218 			!want.contains(src) &&
2219 			reader.getObjectSize(src, OBJ_BLOB) > filterSpec.getBlobLimit();
2220 		if (!reject) {
2221 			addObject(src, type, pathHashCode);
2222 		}
2223 	}
2224 
2225 	private boolean exclude(AnyObjectId objectId) {
2226 		if (excludeInPacks == null)
2227 			return false;
2228 		if (excludeInPackLast.contains(objectId))
2229 			return true;
2230 		for (ObjectIdSet idx : excludeInPacks) {
2231 			if (idx.contains(objectId)) {
2232 				excludeInPackLast = idx;
2233 				return true;
2234 			}
2235 		}
2236 		return false;
2237 	}
2238 
2239 	/**
2240 	 * Select an object representation for this writer.
2241 	 * <p>
2242 	 * An {@link org.eclipse.jgit.lib.ObjectReader} implementation should invoke
2243 	 * this method once for each representation available for an object, to
2244 	 * allow the writer to find the most suitable one for the output.
2245 	 *
2246 	 * @param otp
2247 	 *            the object being packed.
2248 	 * @param next
2249 	 *            the next available representation from the repository.
2250 	 */
2251 	public void select(ObjectToPack otp, StoredObjectRepresentation next) {
2252 		int nFmt = next.getFormat();
2253 
2254 		if (!cachedPacks.isEmpty()) {
2255 			if (otp.isEdge())
2256 				return;
2257 			if (nFmt == PACK_WHOLE || nFmt == PACK_DELTA) {
2258 				for (CachedPack pack : cachedPacks) {
2259 					if (pack.hasObject(otp, next)) {
2260 						otp.setEdge();
2261 						otp.clearDeltaBase();
2262 						otp.clearReuseAsIs();
2263 						pruneCurrentObjectList = true;
2264 						return;
2265 					}
2266 				}
2267 			}
2268 		}
2269 
2270 		if (nFmt == PACK_DELTA && reuseDeltas && reuseDeltaFor(otp)) {
2271 			ObjectId baseId = next.getDeltaBase();
2272 			ObjectToPack ptr = objectsMap.get(baseId);
2273 			if (ptr != null && !ptr.isEdge()) {
2274 				otp.setDeltaBase(ptr);
2275 				otp.setReuseAsIs();
2276 			} else if (thin && have(ptr, baseId)) {
2277 				otp.setDeltaBase(baseId);
2278 				otp.setReuseAsIs();
2279 			} else {
2280 				otp.clearDeltaBase();
2281 				otp.clearReuseAsIs();
2282 			}
2283 		} else if (nFmt == PACK_WHOLE && config.isReuseObjects()) {
2284 			int nWeight = next.getWeight();
2285 			if (otp.isReuseAsIs() && !otp.isDeltaRepresentation()) {
2286 				// We've chosen another PACK_WHOLE format for this object,
2287 				// choose the one that has the smaller compressed size.
2288 				//
2289 				if (otp.getWeight() <= nWeight)
2290 					return;
2291 			}
2292 			otp.clearDeltaBase();
2293 			otp.setReuseAsIs();
2294 			otp.setWeight(nWeight);
2295 		} else {
2296 			otp.clearDeltaBase();
2297 			otp.clearReuseAsIs();
2298 		}
2299 
2300 		otp.setDeltaAttempted(reuseDeltas && next.wasDeltaAttempted());
2301 		otp.select(next);
2302 	}
2303 
2304 	private final boolean have(ObjectToPack ptr, AnyObjectId objectId) {
2305 		return (ptr != null && ptr.isEdge())
2306 				|| (haveObjects != null && haveObjects.contains(objectId));
2307 	}
2308 
2309 	/**
2310 	 * Prepares the bitmaps to be written to the bitmap index file.
2311 	 * <p>
2312 	 * Bitmaps can be used to speed up fetches and clones by storing the entire
2313 	 * object graph at selected commits. Writing a bitmap index is an optional
2314 	 * feature that not all pack users may require.
2315 	 * <p>
2316 	 * Called after {@link #writeIndex(OutputStream)}.
2317 	 * <p>
2318 	 * To reduce memory internal state is cleared during this method, rendering
2319 	 * the PackWriter instance useless for anything further than a call to write
2320 	 * out the new bitmaps with {@link #writeBitmapIndex(OutputStream)}.
2321 	 *
2322 	 * @param pm
2323 	 *            progress monitor to report bitmap building work.
2324 	 * @return whether a bitmap index may be written.
2325 	 * @throws java.io.IOException
2326 	 *             when some I/O problem occur during reading objects.
2327 	 */
2328 	public boolean prepareBitmapIndex(ProgressMonitor pm) throws IOException {
2329 		if (!canBuildBitmaps || getObjectCount() > Integer.MAX_VALUE
2330 				|| !cachedPacks.isEmpty())
2331 			return false;
2332 
2333 		if (pm == null)
2334 			pm = NullProgressMonitor.INSTANCE;
2335 
2336 		int numCommits = objectsLists[OBJ_COMMIT].size();
2337 		List<ObjectToPack> byName = sortByName();
2338 		sortedByName = null;
2339 		objectsLists = null;
2340 		objectsMap = null;
2341 		writeBitmaps = new PackBitmapIndexBuilder(byName);
2342 		byName = null;
2343 
2344 		PackWriterBitmapPreparer bitmapPreparer = new PackWriterBitmapPreparer(
2345 				reader, writeBitmaps, pm, stats.interestingObjects, config);
2346 
2347 		Collection<PackWriterBitmapPreparer.BitmapCommit> selectedCommits = bitmapPreparer
2348 				.selectCommits(numCommits, excludeFromBitmapSelection);
2349 
2350 		beginPhase(PackingPhase.BUILDING_BITMAPS, pm, selectedCommits.size());
2351 
2352 		BitmapWalker walker = bitmapPreparer.newBitmapWalker();
2353 		AnyObjectId last = null;
2354 		for (PackWriterBitmapPreparer.BitmapCommit cmit : selectedCommits) {
2355 			if (!cmit.isReuseWalker()) {
2356 				walker = bitmapPreparer.newBitmapWalker();
2357 			}
2358 			BitmapBuilder bitmap = walker.findObjects(
2359 					Collections.singleton(cmit), null, false);
2360 
2361 			if (last != null && cmit.isReuseWalker() && !bitmap.contains(last))
2362 				throw new IllegalStateException(MessageFormat.format(
2363 						JGitText.get().bitmapMissingObject, cmit.name(),
2364 						last.name()));
2365 			last = cmit;
2366 			writeBitmaps.addBitmap(cmit, bitmap.build(), cmit.getFlags());
2367 
2368 			pm.update(1);
2369 		}
2370 
2371 		endPhase(pm);
2372 		return true;
2373 	}
2374 
2375 	private boolean reuseDeltaFor(ObjectToPack otp) {
2376 		int type = otp.getType();
2377 		if ((type & 2) != 0) // OBJ_TREE(2) or OBJ_BLOB(3)
2378 			return true;
2379 		if (type == OBJ_COMMIT)
2380 			return reuseDeltaCommits;
2381 		if (type == OBJ_TAG)
2382 			return false;
2383 		return true;
2384 	}
2385 
2386 	private class MutableState {
2387 		/** Estimated size of a single ObjectToPack instance. */
2388 		// Assume 64-bit pointers, since this is just an estimate.
2389 		private static final long OBJECT_TO_PACK_SIZE =
2390 				(2 * 8)               // Object header
2391 				+ (2 * 8) + (2 * 8)   // ObjectToPack fields
2392 				+ (8 + 8)             // PackedObjectInfo fields
2393 				+ 8                   // ObjectIdOwnerMap fields
2394 				+ 40                  // AnyObjectId fields
2395 				+ 8;                  // Reference in BlockList
2396 
2397 		private final long totalDeltaSearchBytes;
2398 
2399 		private volatile PackingPhase phase;
2400 
2401 		MutableState() {
2402 			phase = PackingPhase.COUNTING;
2403 			if (config.isDeltaCompress()) {
2404 				int threads = config.getThreads();
2405 				if (threads <= 0)
2406 					threads = Runtime.getRuntime().availableProcessors();
2407 				totalDeltaSearchBytes = (threads * config.getDeltaSearchMemoryLimit())
2408 						+ config.getBigFileThreshold();
2409 			} else
2410 				totalDeltaSearchBytes = 0;
2411 		}
2412 
2413 		State snapshot() {
2414 			long objCnt = 0;
2415 			BlockList<ObjectToPack>[] lists = objectsLists;
2416 			if (lists != null) {
2417 				objCnt += lists[OBJ_COMMIT].size();
2418 				objCnt += lists[OBJ_TREE].size();
2419 				objCnt += lists[OBJ_BLOB].size();
2420 				objCnt += lists[OBJ_TAG].size();
2421 				// Exclude CachedPacks.
2422 			}
2423 
2424 			long bytesUsed = OBJECT_TO_PACK_SIZE * objCnt;
2425 			PackingPhase curr = phase;
2426 			if (curr == PackingPhase.COMPRESSING)
2427 				bytesUsed += totalDeltaSearchBytes;
2428 			return new State(curr, bytesUsed);
2429 		}
2430 	}
2431 
2432 	/** Possible states that a PackWriter can be in. */
2433 	public static enum PackingPhase {
2434 		/** Counting objects phase. */
2435 		COUNTING,
2436 
2437 		/** Getting sizes phase. */
2438 		GETTING_SIZES,
2439 
2440 		/** Finding sources phase. */
2441 		FINDING_SOURCES,
2442 
2443 		/** Compressing objects phase. */
2444 		COMPRESSING,
2445 
2446 		/** Writing objects phase. */
2447 		WRITING,
2448 
2449 		/** Building bitmaps phase. */
2450 		BUILDING_BITMAPS;
2451 	}
2452 
2453 	/** Summary of the current state of a PackWriter. */
2454 	public class State {
2455 		private final PackingPhase phase;
2456 
2457 		private final long bytesUsed;
2458 
2459 		State(PackingPhase phase, long bytesUsed) {
2460 			this.phase = phase;
2461 			this.bytesUsed = bytesUsed;
2462 		}
2463 
2464 		/** @return the PackConfig used to build the writer. */
2465 		public PackConfig getConfig() {
2466 			return config;
2467 		}
2468 
2469 		/** @return the current phase of the writer. */
2470 		public PackingPhase getPhase() {
2471 			return phase;
2472 		}
2473 
2474 		/** @return an estimate of the total memory used by the writer. */
2475 		public long estimateBytesUsed() {
2476 			return bytesUsed;
2477 		}
2478 
2479 		@SuppressWarnings("nls")
2480 		@Override
2481 		public String toString() {
2482 			return "PackWriter.State[" + phase + ", memory=" + bytesUsed + "]";
2483 		}
2484 	}
2485 
2486 	/**
2487 	 * Configuration related to the packfile URI feature.
2488 	 *
2489 	 * @since 5.5
2490 	 */
2491 	public static class PackfileUriConfig {
2492 		@NonNull
2493 		private final PacketLineOut pckOut;
2494 
2495 		@NonNull
2496 		private final Collection<String> protocolsSupported;
2497 
2498 		@NonNull
2499 		private final CachedPackUriProvider cachedPackUriProvider;
2500 
2501 		/**
2502 		 * @param pckOut where to write "packfile-uri" lines to (should
2503 		 *     output to the same stream as the one passed to
2504 		 *     PackWriter#writePack)
2505 		 * @param protocolsSupported list of protocols supported (e.g. "https")
2506 		 * @param cachedPackUriProvider provider of URIs corresponding
2507 		 *     to cached packs
2508 		 * @since 5.5
2509 		 */
2510 		public PackfileUriConfig(@NonNull PacketLineOut pckOut,
2511 				@NonNull Collection<String> protocolsSupported,
2512 				@NonNull CachedPackUriProvider cachedPackUriProvider) {
2513 			this.pckOut = pckOut;
2514 			this.protocolsSupported = protocolsSupported;
2515 			this.cachedPackUriProvider = cachedPackUriProvider;
2516 		}
2517 	}
2518 }