View Javadoc
1   /*
2    * Copyright (C) 2008-2010, Google Inc.
3    * Copyright (C) 2008, Marek Zawirski <marek.zawirski@gmail.com>
4    * and other copyright owners as documented in the project's IP log.
5    *
6    * This program and the accompanying materials are made available
7    * under the terms of the Eclipse Distribution License v1.0 which
8    * accompanies this distribution, is reproduced below, and is
9    * available at http://www.eclipse.org/org/documents/edl-v10.php
10   *
11   * All rights reserved.
12   *
13   * Redistribution and use in source and binary forms, with or
14   * without modification, are permitted provided that the following
15   * conditions are met:
16   *
17   * - Redistributions of source code must retain the above copyright
18   *   notice, this list of conditions and the following disclaimer.
19   *
20   * - Redistributions in binary form must reproduce the above
21   *   copyright notice, this list of conditions and the following
22   *   disclaimer in the documentation and/or other materials provided
23   *   with the distribution.
24   *
25   * - Neither the name of the Eclipse Foundation, Inc. nor the
26   *   names of its contributors may be used to endorse or promote
27   *   products derived from this software without specific prior
28   *   written permission.
29   *
30   * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
31   * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
32   * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
33   * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34   * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
35   * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36   * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
37   * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
38   * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
39   * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
40   * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41   * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
42   * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43   */
44  
45  package org.eclipse.jgit.internal.storage.pack;
46  
47  import static org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation.PACK_DELTA;
48  import static org.eclipse.jgit.internal.storage.pack.StoredObjectRepresentation.PACK_WHOLE;
49  import static org.eclipse.jgit.lib.Constants.OBJECT_ID_LENGTH;
50  import static org.eclipse.jgit.lib.Constants.OBJ_BLOB;
51  import static org.eclipse.jgit.lib.Constants.OBJ_COMMIT;
52  import static org.eclipse.jgit.lib.Constants.OBJ_TAG;
53  import static org.eclipse.jgit.lib.Constants.OBJ_TREE;
54  
55  import java.io.IOException;
56  import java.io.OutputStream;
57  import java.lang.ref.WeakReference;
58  import java.security.MessageDigest;
59  import java.text.MessageFormat;
60  import java.util.ArrayList;
61  import java.util.Arrays;
62  import java.util.Collection;
63  import java.util.Collections;
64  import java.util.Comparator;
65  import java.util.HashSet;
66  import java.util.Iterator;
67  import java.util.List;
68  import java.util.Map;
69  import java.util.NoSuchElementException;
70  import java.util.Set;
71  import java.util.concurrent.ConcurrentHashMap;
72  import java.util.concurrent.ExecutionException;
73  import java.util.concurrent.Executor;
74  import java.util.concurrent.ExecutorService;
75  import java.util.concurrent.Executors;
76  import java.util.concurrent.Future;
77  import java.util.concurrent.TimeUnit;
78  import java.util.zip.CRC32;
79  import java.util.zip.CheckedOutputStream;
80  import java.util.zip.Deflater;
81  import java.util.zip.DeflaterOutputStream;
82  
83  import org.eclipse.jgit.errors.CorruptObjectException;
84  import org.eclipse.jgit.errors.IncorrectObjectTypeException;
85  import org.eclipse.jgit.errors.LargeObjectException;
86  import org.eclipse.jgit.errors.MissingObjectException;
87  import org.eclipse.jgit.errors.StoredObjectRepresentationNotAvailableException;
88  import org.eclipse.jgit.internal.JGitText;
89  import org.eclipse.jgit.internal.storage.file.PackBitmapIndexBuilder;
90  import org.eclipse.jgit.internal.storage.file.PackBitmapIndexWriterV1;
91  import org.eclipse.jgit.internal.storage.file.PackIndexWriter;
92  import org.eclipse.jgit.lib.AnyObjectId;
93  import org.eclipse.jgit.lib.AsyncObjectSizeQueue;
94  import org.eclipse.jgit.lib.BatchingProgressMonitor;
95  import org.eclipse.jgit.lib.BitmapIndex;
96  import org.eclipse.jgit.lib.BitmapIndex.BitmapBuilder;
97  import org.eclipse.jgit.lib.BitmapObject;
98  import org.eclipse.jgit.lib.Constants;
99  import org.eclipse.jgit.lib.NullProgressMonitor;
100 import org.eclipse.jgit.lib.ObjectId;
101 import org.eclipse.jgit.lib.ObjectIdOwnerMap;
102 import org.eclipse.jgit.lib.ObjectLoader;
103 import org.eclipse.jgit.lib.ObjectReader;
104 import org.eclipse.jgit.lib.ProgressMonitor;
105 import org.eclipse.jgit.lib.Repository;
106 import org.eclipse.jgit.lib.ThreadSafeProgressMonitor;
107 import org.eclipse.jgit.revwalk.AsyncRevObjectQueue;
108 import org.eclipse.jgit.revwalk.DepthWalk;
109 import org.eclipse.jgit.revwalk.ObjectWalk;
110 import org.eclipse.jgit.revwalk.RevCommit;
111 import org.eclipse.jgit.revwalk.RevFlag;
112 import org.eclipse.jgit.revwalk.RevObject;
113 import org.eclipse.jgit.revwalk.RevSort;
114 import org.eclipse.jgit.revwalk.RevTag;
115 import org.eclipse.jgit.revwalk.RevTree;
116 import org.eclipse.jgit.storage.pack.PackConfig;
117 import org.eclipse.jgit.storage.pack.PackStatistics;
118 import org.eclipse.jgit.transport.ObjectCountCallback;
119 import org.eclipse.jgit.transport.WriteAbortedException;
120 import org.eclipse.jgit.util.BlockList;
121 import org.eclipse.jgit.util.TemporaryBuffer;
122 
123 /**
124  * <p>
 * The PackWriter class is responsible for generating pack files from a
 * specified set of objects in a repository. This implementation produces
 * pack files in format version 2.
128  * </p>
129  * <p>
130  * Source of objects may be specified in two ways:
131  * <ul>
132  * <li>(usually) by providing sets of interesting and uninteresting objects in
133  * repository - all interesting objects and their ancestors except uninteresting
134  * objects and their ancestors will be included in pack, or</li>
135  * <li>by providing iterator of {@link RevObject} specifying exact list and
136  * order of objects in pack</li>
137  * </ul>
138  * <p>
139  * Typical usage consists of creating an instance, configuring options,
140  * preparing the list of objects by calling {@link #preparePack(Iterator)} or
141  * {@link #preparePack(ProgressMonitor, Set, Set)}, and streaming with
142  * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}. If the
143  * pack is being stored as a file the matching index can be written out after
144  * writing the pack by {@link #writeIndex(OutputStream)}. An optional bitmap
145  * index can be made by calling {@link #prepareBitmapIndex(ProgressMonitor)}
146  * followed by {@link #writeBitmapIndex(OutputStream)}.
147  * </p>
148  * <p>
 * The class provides a set of configurable options and {@link ProgressMonitor}
 * support, as operations may take a long time for big repositories. A delta
 * search algorithm is <b>NOT IMPLEMENTED</b> yet - this implementation
 * relies only on reuse of existing deltas and objects.
153  * </p>
154  * <p>
155  * This class is not thread safe. It is intended to be used in one thread as a
156  * single pass to produce one pack. Invoking methods multiple times or out of
157  * order is not supported as internal data structures are destroyed during
158  * certain phases to save memory when packing large repositories.
159  * </p>
160  */
161 public class PackWriter implements AutoCloseable {
162 	private static final int PACK_VERSION_GENERATED = 2;
163 
	/**
	 * A collection of object ids, used to exclude objects from the output
	 * pack (see {@link #excludeObjects(ObjectIdSet)}).
	 */
	public interface ObjectIdSet {
		/**
		 * Returns true if the objectId is contained within the collection.
		 *
		 * @param objectId
		 *            the objectId to find
		 * @return whether the collection contains the objectId.
		 */
		boolean contains(AnyObjectId objectId);
	}
175 
	/**
	 * Registry of every live PackWriter, keyed by weak reference so entries
	 * can be reclaimed once a writer becomes unreachable. The Boolean value
	 * is always TRUE; the map is used only as a concurrent weak set.
	 */
	private static final Map<WeakReference<PackWriter>, Boolean> instances =
			new ConcurrentHashMap<WeakReference<PackWriter>, Boolean>();
178 
179 	private static final Iterable<PackWriter> instancesIterable = new Iterable<PackWriter>() {
180 		public Iterator<PackWriter> iterator() {
181 			return new Iterator<PackWriter>() {
182 				private final Iterator<WeakReference<PackWriter>> it =
183 						instances.keySet().iterator();
184 				private PackWriter next;
185 
186 				public boolean hasNext() {
187 					if (next != null)
188 						return true;
189 					while (it.hasNext()) {
190 						WeakReference<PackWriter> ref = it.next();
191 						next = ref.get();
192 						if (next != null)
193 							return true;
194 						it.remove();
195 					}
196 					return false;
197 				}
198 
199 				public PackWriter next() {
200 					if (hasNext()) {
201 						PackWriter result = next;
202 						next = null;
203 						return result;
204 					}
205 					throw new NoSuchElementException();
206 				}
207 
208 				public void remove() {
209 					throw new UnsupportedOperationException();
210 				}
211 			};
212 		}
213 	};
214 
215 	/** @return all allocated, non-released PackWriters instances. */
216 	public static Iterable<PackWriter> getInstances() {
217 		return instancesIterable;
218 	}
219 
	/**
	 * Objects selected for the pack, bucketed by git object type code.
	 * Only the OBJ_COMMIT, OBJ_TREE, OBJ_BLOB and OBJ_TAG slots are ever
	 * populated here; any other index (notably slot 0) stays null —
	 * NOTE(review): iterating the whole array must tolerate null entries.
	 */
	@SuppressWarnings("unchecked")
	private BlockList<ObjectToPack> objectsLists[] = new BlockList[OBJ_TAG + 1];
	{
		objectsLists[OBJ_COMMIT] = new BlockList<ObjectToPack>();
		objectsLists[OBJ_TREE] = new BlockList<ObjectToPack>();
		objectsLists[OBJ_BLOB] = new BlockList<ObjectToPack>();
		objectsLists[OBJ_TAG] = new BlockList<ObjectToPack>();
	}
228 
	/** Id-keyed lookup of every object slated for the pack (see {@link #get}). */
	private ObjectIdOwnerMap<ObjectToPack> objectsMap = new ObjectIdOwnerMap<ObjectToPack>();

	// edge objects for thin packs
	private List<ObjectToPack> edgeObjects = new BlockList<ObjectToPack>();

	// Objects the client is known to have already.
	private BitmapBuilder haveObjects;

	/** Cached packs appended after generated objects; non-empty disables indexing. */
	private List<CachedPack> cachedPacks = new ArrayList<CachedPack>(2);

	/** Objects annotated tags point at, hoisted earlier (see {@link #setTagTargets}). */
	private Set<ObjectId> tagTargets = Collections.emptySet();

	/** All registered exclusion sets; objects in any of them are kept out of the pack. */
	private ObjectIdSet[] excludeInPacks;

	// Most recently matched exclusion set — presumably a lookup cache used by
	// the exclusion check elsewhere in this class; confirm against that code.
	private ObjectIdSet excludeInPackLast;

	// NOTE(review): looks like a lazily created, reusable deflater — confirm
	// against the allocation/release sites outside this chunk.
	private Deflater myDeflater;

	/** Source of object data for this writer; never null. */
	private final ObjectReader reader;

	/** {@link #reader} recast to the reuse interface, if it supports it. */
	private final ObjectReuseAsIs reuseSupport;

	/** Configuration controlling delta search, reuse and output parameters. */
	private final PackConfig config;

	/** Statistics gathered while counting and writing (timings, object totals). */
	private final PackStatistics.Accumulator stats;

	/** Tracks the current packing phase for progress/monitoring purposes. */
	private final MutableState state;

	/** Weak self-reference registered in {@link #instances}. */
	private final WeakReference<PackWriter> selfRef;

	// Per-object-type statistics accumulator; assigned outside this chunk.
	private PackStatistics.ObjectType.Accumulator typeStats;

	/** Lazily built name-sorted object list (see {@link #sortByName()}). */
	private List<ObjectToPack> sortedByName;

	/** Trailer checksum of the written pack, passed to the index writers. */
	private byte packcsum[];

	/** Write delta bases as offsets rather than object ids (new pack style). */
	private boolean deltaBaseAsOffset;

	/** Reuse existing deltas when available (from {@link PackConfig#isReuseDeltas()}). */
	private boolean reuseDeltas;

	/** Reuse commits already stored as deltas (see {@link #setReuseDeltaCommits}). */
	private boolean reuseDeltaCommits;

	/** Validate objects before copying them into the output (paranoid default). */
	private boolean reuseValidate;

	/** Produce a thin pack whose delta bases may live outside the pack. */
	private boolean thin;

	/** Append matching cached packs onto the output (see {@link #setUseCachedPacks}). */
	private boolean useCachedPacks;

	/** Use bitmap indexes (when available) while preparing the pack. */
	private boolean useBitmaps;

	/** Silently skip uninteresting objects missing from the local repository. */
	private boolean ignoreMissingUninteresting = true;

	// Flag consumed by the object-list pruning logic outside this chunk.
	private boolean pruneCurrentObjectList;

	/** True when producing a shallow pack (see {@link #setShallowPack}). */
	private boolean shallowPack;

	// Whether a bitmap index can be built for this pack; set outside this chunk.
	private boolean canBuildBitmaps;

	/** True when index creation has been explicitly disabled by the caller. */
	private boolean indexDisabled;

	/** Maximum commit-graph depth for shallow packs; meaningful when shallowPack. */
	private int depth;

	/** Client objects being un-shallowed as part of this fetch. */
	private Collection<? extends ObjectId> unshallowObjects;

	/** Bitmap index under construction; null until prepareBitmapIndex runs. */
	private PackBitmapIndexBuilder writeBitmaps;

	// CRC32 instance; presumably reused while writing entries — confirm usage.
	private CRC32 crc32;

	/** Optional callback notified with the object count (see setObjectCountCallback). */
	private ObjectCountCallback callback;
299 
	/**
	 * Create writer for specified repository.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 * <p>
	 * A new reader is obtained from the repository; configuration is taken
	 * from the repository as well.
	 *
	 * @param repo
	 *            repository where objects are stored.
	 */
	public PackWriter(final Repository repo) {
		this(repo, repo.newObjectReader());
	}
312 
	/**
	 * Create a writer to load objects from the specified reader.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
	 * <p>
	 * A default {@link PackConfig} is used since no repository is supplied.
	 *
	 * @param reader
	 *            reader to read from the repository with.
	 */
	public PackWriter(final ObjectReader reader) {
		this(new PackConfig(), reader);
	}
325 
	/**
	 * Create writer for specified repository.
	 * <p>
	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
	 * {@link #preparePack(ProgressMonitor, Set, Set)}. Configuration is
	 * derived from the repository's settings.
	 *
	 * @param repo
	 *            repository where objects are stored.
	 * @param reader
	 *            reader to read from the repository with.
	 */
	public PackWriter(final Repository repo, final ObjectReader reader) {
		this(new PackConfig(repo), reader);
	}
340 
341 	/**
342 	 * Create writer with a specified configuration.
343 	 * <p>
344 	 * Objects for packing are specified in {@link #preparePack(Iterator)} or
345 	 * {@link #preparePack(ProgressMonitor, Set, Set)}.
346 	 *
347 	 * @param config
348 	 *            configuration for the pack writer.
349 	 * @param reader
350 	 *            reader to read from the repository with.
351 	 */
352 	public PackWriter(final PackConfig config, final ObjectReader reader) {
353 		this.config = config;
354 		this.reader = reader;
355 		if (reader instanceof ObjectReuseAsIs)
356 			reuseSupport = ((ObjectReuseAsIs) reader);
357 		else
358 			reuseSupport = null;
359 
360 		deltaBaseAsOffset = config.isDeltaBaseAsOffset();
361 		reuseDeltas = config.isReuseDeltas();
362 		reuseValidate = true; // be paranoid by default
363 		stats = new PackStatistics.Accumulator();
364 		state = new MutableState();
365 		selfRef = new WeakReference<PackWriter>(this);
366 		instances.put(selfRef, Boolean.TRUE);
367 	}
368 
369 	/**
370 	 * Set the {@code ObjectCountCallback}.
371 	 * <p>
372 	 * It should be set before calling
373 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}.
374 	 *
375 	 * @param callback
376 	 *            the callback to set
377 	 *
378 	 * @return this object for chaining.
379 	 * @since 4.1
380 	 */
381 	public PackWriter setObjectCountCallback(ObjectCountCallback callback) {
382 		this.callback = callback;
383 		return this;
384 	}
385 
386 	/**
387 	 * Records the set of shallow commits in the client.
388 	 *
389 	 * @param clientShallowCommits
390 	 *            the shallow commits in the client
391 	 * @since 4.1
392 	 */
393 	public void setClientShallowCommits(Set<ObjectId> clientShallowCommits) {
394 		stats.clientShallowCommits = Collections
395 				.unmodifiableSet(new HashSet<ObjectId>(clientShallowCommits));
396 	}
397 
398 	/**
399 	 * Check whether writer can store delta base as an offset (new style
400 	 * reducing pack size) or should store it as an object id (legacy style,
401 	 * compatible with old readers).
402 	 *
403 	 * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
404 	 *
405 	 * @return true if delta base is stored as an offset; false if it is stored
406 	 *         as an object id.
407 	 */
408 	public boolean isDeltaBaseAsOffset() {
409 		return deltaBaseAsOffset;
410 	}
411 
412 	/**
413 	 * Set writer delta base format. Delta base can be written as an offset in a
414 	 * pack file (new approach reducing file size) or as an object id (legacy
415 	 * approach, compatible with old readers).
416 	 *
417 	 * Default setting: {@value PackConfig#DEFAULT_DELTA_BASE_AS_OFFSET}
418 	 *
419 	 * @param deltaBaseAsOffset
420 	 *            boolean indicating whether delta base can be stored as an
421 	 *            offset.
422 	 */
423 	public void setDeltaBaseAsOffset(boolean deltaBaseAsOffset) {
424 		this.deltaBaseAsOffset = deltaBaseAsOffset;
425 	}
426 
427 	/**
428 	 * Check if the writer will reuse commits that are already stored as deltas.
429 	 *
430 	 * @return true if the writer would reuse commits stored as deltas, assuming
431 	 *         delta reuse is already enabled.
432 	 */
433 	public boolean isReuseDeltaCommits() {
434 		return reuseDeltaCommits;
435 	}
436 
437 	/**
438 	 * Set the writer to reuse existing delta versions of commits.
439 	 *
440 	 * @param reuse
441 	 *            if true, the writer will reuse any commits stored as deltas.
442 	 *            By default the writer does not reuse delta commits.
443 	 */
444 	public void setReuseDeltaCommits(boolean reuse) {
445 		reuseDeltaCommits = reuse;
446 	}
447 
448 	/**
449 	 * Check if the writer validates objects before copying them.
450 	 *
451 	 * @return true if validation is enabled; false if the reader will handle
452 	 *         object validation as a side-effect of it consuming the output.
453 	 */
454 	public boolean isReuseValidatingObjects() {
455 		return reuseValidate;
456 	}
457 
458 	/**
459 	 * Enable (or disable) object validation during packing.
460 	 *
461 	 * @param validate
462 	 *            if true the pack writer will validate an object before it is
463 	 *            put into the output. This additional validation work may be
464 	 *            necessary to avoid propagating corruption from one local pack
465 	 *            file to another local pack file.
466 	 */
467 	public void setReuseValidatingObjects(boolean validate) {
468 		reuseValidate = validate;
469 	}
470 
471 	/** @return true if this writer is producing a thin pack. */
472 	public boolean isThin() {
473 		return thin;
474 	}
475 
476 	/**
477 	 * @param packthin
478 	 *            a boolean indicating whether writer may pack objects with
479 	 *            delta base object not within set of objects to pack, but
480 	 *            belonging to party repository (uninteresting/boundary) as
481 	 *            determined by set; this kind of pack is used only for
482 	 *            transport; true - to produce thin pack, false - otherwise.
483 	 */
484 	public void setThin(final boolean packthin) {
485 		thin = packthin;
486 	}
487 
488 	/** @return true to reuse cached packs. If true index creation isn't available. */
489 	public boolean isUseCachedPacks() {
490 		return useCachedPacks;
491 	}
492 
493 	/**
494 	 * @param useCached
495 	 *            if set to true and a cached pack is present, it will be
496 	 *            appended onto the end of a thin-pack, reducing the amount of
497 	 *            working set space and CPU used by PackWriter. Enabling this
498 	 *            feature prevents PackWriter from creating an index for the
499 	 *            newly created pack, so its only suitable for writing to a
500 	 *            network client, where the client will make the index.
501 	 */
502 	public void setUseCachedPacks(boolean useCached) {
503 		useCachedPacks = useCached;
504 	}
505 
506 	/** @return true to use bitmaps for ObjectWalks, if available. */
507 	public boolean isUseBitmaps() {
508 		return useBitmaps;
509 	}
510 
511 	/**
512 	 * @param useBitmaps
513 	 *            if set to true, bitmaps will be used when preparing a pack.
514 	 */
515 	public void setUseBitmaps(boolean useBitmaps) {
516 		this.useBitmaps = useBitmaps;
517 	}
518 
519 	/** @return true if the index file cannot be created by this PackWriter. */
520 	public boolean isIndexDisabled() {
521 		return indexDisabled || !cachedPacks.isEmpty();
522 	}
523 
524 	/**
525 	 * @param noIndex
526 	 *            true to disable creation of the index file.
527 	 */
528 	public void setIndexDisabled(boolean noIndex) {
529 		this.indexDisabled = noIndex;
530 	}
531 
532 	/**
533 	 * @return true to ignore objects that are uninteresting and also not found
534 	 *         on local disk; false to throw a {@link MissingObjectException}
535 	 *         out of {@link #preparePack(ProgressMonitor, Set, Set)} if an
536 	 *         uninteresting object is not in the source repository. By default,
537 	 *         true, permitting gracefully ignoring of uninteresting objects.
538 	 */
539 	public boolean isIgnoreMissingUninteresting() {
540 		return ignoreMissingUninteresting;
541 	}
542 
543 	/**
544 	 * @param ignore
545 	 *            true if writer should ignore non existing uninteresting
546 	 *            objects during construction set of objects to pack; false
547 	 *            otherwise - non existing uninteresting objects may cause
548 	 *            {@link MissingObjectException}
549 	 */
550 	public void setIgnoreMissingUninteresting(final boolean ignore) {
551 		ignoreMissingUninteresting = ignore;
552 	}
553 
554 	/**
555 	 * Set the tag targets that should be hoisted earlier during packing.
556 	 * <p>
557 	 * Callers may put objects into this set before invoking any of the
558 	 * preparePack methods to influence where an annotated tag's target is
559 	 * stored within the resulting pack. Typically these will be clustered
560 	 * together, and hoisted earlier in the file even if they are ancient
561 	 * revisions, allowing readers to find tag targets with better locality.
562 	 *
563 	 * @param objects
564 	 *            objects that annotated tags point at.
565 	 */
566 	public void setTagTargets(Set<ObjectId> objects) {
567 		tagTargets = objects;
568 	}
569 
570 	/**
571 	 * Configure this pack for a shallow clone.
572 	 *
573 	 * @param depth
574 	 *            maximum depth to traverse the commit graph
575 	 * @param unshallow
576 	 *            objects which used to be shallow on the client, but are being
577 	 *            extended as part of this fetch
578 	 */
579 	public void setShallowPack(int depth,
580 			Collection<? extends ObjectId> unshallow) {
581 		this.shallowPack = true;
582 		this.depth = depth;
583 		this.unshallowObjects = unshallow;
584 	}
585 
586 	/**
587 	 * Returns objects number in a pack file that was created by this writer.
588 	 *
589 	 * @return number of objects in pack.
590 	 * @throws IOException
591 	 *             a cached pack cannot supply its object count.
592 	 */
593 	public long getObjectCount() throws IOException {
594 		if (stats.totalObjects == 0) {
595 			long objCnt = 0;
596 
597 			objCnt += objectsLists[OBJ_COMMIT].size();
598 			objCnt += objectsLists[OBJ_TREE].size();
599 			objCnt += objectsLists[OBJ_BLOB].size();
600 			objCnt += objectsLists[OBJ_TAG].size();
601 
602 			for (CachedPack pack : cachedPacks)
603 				objCnt += pack.getObjectCount();
604 			return objCnt;
605 		}
606 		return stats.totalObjects;
607 	}
608 
	/**
	 * Returns the object ids in the pack file that was created by this writer.
	 * <p>
	 * This method can only be invoked after
	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)} has
	 * been invoked and completed successfully.
	 *
	 * @return set of objects in pack.
	 * @throws IOException
	 *             a cached pack cannot supply its object ids.
	 */
	public ObjectIdOwnerMap<ObjectIdOwnerMap.Entry> getObjectSet()
			throws IOException {
		// Cached packs are appended verbatim; their contents are unknown here.
		if (!cachedPacks.isEmpty())
			throw new IOException(
					JGitText.get().cachedPacksPreventsListingObjects);

		// The bitmap builder already tracks the full object set; reuse it.
		if (writeBitmaps != null) {
			return writeBitmaps.getObjectSet();
		}

		ObjectIdOwnerMap<ObjectIdOwnerMap.Entry> r = new ObjectIdOwnerMap<>();
		// Slot 0 of objectsLists is never populated, hence the null check.
		for (BlockList<ObjectToPack> objList : objectsLists) {
			if (objList != null) {
				for (ObjectToPack otp : objList)
					r.add(new ObjectIdOwnerMap.Entry(otp) {
						// A new entry that copies the ObjectId
					});
			}
		}
		return r;
	}
641 
642 	/**
643 	 * Add a pack index whose contents should be excluded from the result.
644 	 *
645 	 * @param idx
646 	 *            objects in this index will not be in the output pack.
647 	 */
648 	public void excludeObjects(ObjectIdSet idx) {
649 		if (excludeInPacks == null) {
650 			excludeInPacks = new ObjectIdSet[] { idx };
651 			excludeInPackLast = idx;
652 		} else {
653 			int cnt = excludeInPacks.length;
654 			ObjectIdSet[] newList = new ObjectIdSet[cnt + 1];
655 			System.arraycopy(excludeInPacks, 0, newList, 0, cnt);
656 			newList[cnt] = idx;
657 			excludeInPacks = newList;
658 		}
659 	}
660 
661 	/**
662 	 * Prepare the list of objects to be written to the pack stream.
663 	 * <p>
664 	 * Iterator <b>exactly</b> determines which objects are included in a pack
665 	 * and order they appear in pack (except that objects order by type is not
666 	 * needed at input). This order should conform general rules of ordering
667 	 * objects in git - by recency and path (type and delta-base first is
668 	 * internally secured) and responsibility for guaranteeing this order is on
669 	 * a caller side. Iterator must return each id of object to write exactly
670 	 * once.
671 	 * </p>
672 	 *
673 	 * @param objectsSource
674 	 *            iterator of object to store in a pack; order of objects within
675 	 *            each type is important, ordering by type is not needed;
676 	 *            allowed types for objects are {@link Constants#OBJ_COMMIT},
677 	 *            {@link Constants#OBJ_TREE}, {@link Constants#OBJ_BLOB} and
678 	 *            {@link Constants#OBJ_TAG}; objects returned by iterator may be
679 	 *            later reused by caller as object id and type are internally
680 	 *            copied in each iteration.
681 	 * @throws IOException
682 	 *             when some I/O problem occur during reading objects.
683 	 */
684 	public void preparePack(final Iterator<RevObject> objectsSource)
685 			throws IOException {
686 		while (objectsSource.hasNext()) {
687 			addObject(objectsSource.next());
688 		}
689 	}
690 
691 	/**
692 	 * Prepare the list of objects to be written to the pack stream.
693 	 * <p>
694 	 * Basing on these 2 sets, another set of objects to put in a pack file is
695 	 * created: this set consists of all objects reachable (ancestors) from
696 	 * interesting objects, except uninteresting objects and their ancestors.
697 	 * This method uses class {@link ObjectWalk} extensively to find out that
698 	 * appropriate set of output objects and their optimal order in output pack.
699 	 * Order is consistent with general git in-pack rules: sort by object type,
700 	 * recency, path and delta-base first.
701 	 * </p>
702 	 *
703 	 * @param countingMonitor
704 	 *            progress during object enumeration.
705 	 * @param want
706 	 *            collection of objects to be marked as interesting (start
707 	 *            points of graph traversal).
708 	 * @param have
709 	 *            collection of objects to be marked as uninteresting (end
710 	 *            points of graph traversal).
711 	 * @throws IOException
712 	 *             when some I/O problem occur during reading objects.
713 	 */
714 	public void preparePack(ProgressMonitor countingMonitor,
715 			Set<? extends ObjectId> want,
716 			Set<? extends ObjectId> have) throws IOException {
717 		ObjectWalk ow;
718 		if (shallowPack)
719 			ow = new DepthWalk.ObjectWalk(reader, depth);
720 		else
721 			ow = new ObjectWalk(reader);
722 		preparePack(countingMonitor, ow, want, have);
723 	}
724 
725 	/**
726 	 * Prepare the list of objects to be written to the pack stream.
727 	 * <p>
728 	 * Basing on these 2 sets, another set of objects to put in a pack file is
729 	 * created: this set consists of all objects reachable (ancestors) from
730 	 * interesting objects, except uninteresting objects and their ancestors.
731 	 * This method uses class {@link ObjectWalk} extensively to find out that
732 	 * appropriate set of output objects and their optimal order in output pack.
733 	 * Order is consistent with general git in-pack rules: sort by object type,
734 	 * recency, path and delta-base first.
735 	 * </p>
736 	 *
737 	 * @param countingMonitor
738 	 *            progress during object enumeration.
739 	 * @param walk
740 	 *            ObjectWalk to perform enumeration.
741 	 * @param interestingObjects
742 	 *            collection of objects to be marked as interesting (start
743 	 *            points of graph traversal).
744 	 * @param uninterestingObjects
745 	 *            collection of objects to be marked as uninteresting (end
746 	 *            points of graph traversal).
747 	 * @throws IOException
748 	 *             when some I/O problem occur during reading objects.
749 	 */
750 	public void preparePack(ProgressMonitor countingMonitor,
751 			ObjectWalk walk,
752 			final Set<? extends ObjectId> interestingObjects,
753 			final Set<? extends ObjectId> uninterestingObjects)
754 			throws IOException {
755 		if (countingMonitor == null)
756 			countingMonitor = NullProgressMonitor.INSTANCE;
757 		if (shallowPack && !(walk instanceof DepthWalk.ObjectWalk))
758 			walk = new DepthWalk.ObjectWalk(reader, depth);
759 		findObjectsToPack(countingMonitor, walk, interestingObjects,
760 				uninterestingObjects);
761 	}
762 
763 	/**
764 	 * Determine if the pack file will contain the requested object.
765 	 *
766 	 * @param id
767 	 *            the object to test the existence of.
768 	 * @return true if the object will appear in the output pack file.
769 	 * @throws IOException
770 	 *             a cached pack cannot be examined.
771 	 */
772 	public boolean willInclude(final AnyObjectId id) throws IOException {
773 		ObjectToPack obj = objectsMap.get(id);
774 		return obj != null && !obj.isEdge();
775 	}
776 
777 	/**
778 	 * Lookup the ObjectToPack object for a given ObjectId.
779 	 *
780 	 * @param id
781 	 *            the object to find in the pack.
782 	 * @return the object we are packing, or null.
783 	 */
784 	public ObjectToPack get(AnyObjectId id) {
785 		ObjectToPack obj = objectsMap.get(id);
786 		return obj != null && !obj.isEdge() ? obj : null;
787 	}
788 
789 	/**
790 	 * Computes SHA-1 of lexicographically sorted objects ids written in this
791 	 * pack, as used to name a pack file in repository.
792 	 *
793 	 * @return ObjectId representing SHA-1 name of a pack that was created.
794 	 */
795 	public ObjectId computeName() {
796 		final byte[] buf = new byte[OBJECT_ID_LENGTH];
797 		final MessageDigest md = Constants.newMessageDigest();
798 		for (ObjectToPack otp : sortByName()) {
799 			otp.copyRawTo(buf, 0);
800 			md.update(buf, 0, OBJECT_ID_LENGTH);
801 		}
802 		return ObjectId.fromRaw(md.digest());
803 	}
804 
805 	/**
806 	 * Returns the index format version that will be written.
807 	 * <p>
808 	 * This method can only be invoked after
809 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)} has
810 	 * been invoked and completed successfully.
811 	 *
812 	 * @return the index format version.
813 	 */
814 	public int getIndexVersion() {
815 		int indexVersion = config.getIndexVersion();
816 		if (indexVersion <= 0) {
817 			for (BlockList<ObjectToPack> objs : objectsLists)
818 				indexVersion = Math.max(indexVersion,
819 						PackIndexWriter.oldestPossibleFormat(objs));
820 		}
821 		return indexVersion;
822 	}
823 
824 	/**
825 	 * Create an index file to match the pack file just written.
826 	 * <p>
827 	 * Called after
828 	 * {@link #writePack(ProgressMonitor, ProgressMonitor, OutputStream)}.
829 	 * <p>
830 	 * Writing an index is only required for local pack storage. Packs sent on
831 	 * the network do not need to create an index.
832 	 *
833 	 * @param indexStream
834 	 *            output for the index data. Caller is responsible for closing
835 	 *            this stream.
836 	 * @throws IOException
837 	 *             the index data could not be written to the supplied stream.
838 	 */
839 	public void writeIndex(final OutputStream indexStream) throws IOException {
840 		if (isIndexDisabled())
841 			throw new IOException(JGitText.get().cachedPacksPreventsIndexCreation);
842 
843 		long writeStart = System.currentTimeMillis();
844 		final PackIndexWriter iw = PackIndexWriter.createVersion(
845 				indexStream, getIndexVersion());
846 		iw.write(sortByName(), packcsum);
847 		stats.timeWriting += System.currentTimeMillis() - writeStart;
848 	}
849 
850 	/**
851 	 * Create a bitmap index file to match the pack file just written.
852 	 * <p>
853 	 * Called after {@link #prepareBitmapIndex(ProgressMonitor)}.
854 	 *
855 	 * @param bitmapIndexStream
856 	 *            output for the bitmap index data. Caller is responsible for
857 	 *            closing this stream.
858 	 * @throws IOException
859 	 *             the index data could not be written to the supplied stream.
860 	 */
861 	public void writeBitmapIndex(final OutputStream bitmapIndexStream)
862 			throws IOException {
863 		if (writeBitmaps == null)
864 			throw new IOException(JGitText.get().bitmapsMustBePrepared);
865 
866 		long writeStart = System.currentTimeMillis();
867 		final PackBitmapIndexWriterV1 iw = new PackBitmapIndexWriterV1(bitmapIndexStream);
868 		iw.write(writeBitmaps, packcsum);
869 		stats.timeWriting += System.currentTimeMillis() - writeStart;
870 	}
871 
872 	private List<ObjectToPack> sortByName() {
873 		if (sortedByName == null) {
874 			int cnt = 0;
875 			cnt += objectsLists[OBJ_COMMIT].size();
876 			cnt += objectsLists[OBJ_TREE].size();
877 			cnt += objectsLists[OBJ_BLOB].size();
878 			cnt += objectsLists[OBJ_TAG].size();
879 
880 			sortedByName = new BlockList<ObjectToPack>(cnt);
881 			sortedByName.addAll(objectsLists[OBJ_COMMIT]);
882 			sortedByName.addAll(objectsLists[OBJ_TREE]);
883 			sortedByName.addAll(objectsLists[OBJ_BLOB]);
884 			sortedByName.addAll(objectsLists[OBJ_TAG]);
885 			Collections.sort(sortedByName);
886 		}
887 		return sortedByName;
888 	}
889 
890 	private void beginPhase(PackingPhase phase, ProgressMonitor monitor,
891 			long cnt) {
892 		state.phase = phase;
893 		String task;
894 		switch (phase) {
895 		case COUNTING:
896 			task = JGitText.get().countingObjects;
897 			break;
898 		case GETTING_SIZES:
899 			task = JGitText.get().searchForSizes;
900 			break;
901 		case FINDING_SOURCES:
902 			task = JGitText.get().searchForReuse;
903 			break;
904 		case COMPRESSING:
905 			task = JGitText.get().compressingObjects;
906 			break;
907 		case WRITING:
908 			task = JGitText.get().writingObjects;
909 			break;
910 		case BUILDING_BITMAPS:
911 			task = JGitText.get().buildingBitmaps;
912 			break;
913 		default:
914 			throw new IllegalArgumentException(
915 					MessageFormat.format(JGitText.get().illegalPackingPhase, phase));
916 		}
917 		monitor.beginTask(task, (int) cnt);
918 	}
919 
	// Marks the current packing phase complete on the given monitor.
	private void endPhase(ProgressMonitor monitor) {
		monitor.endTask();
	}
923 
	/**
	 * Write the prepared pack to the supplied stream.
	 * <p>
	 * Called after {@link #preparePack(ProgressMonitor, ObjectWalk, Set, Set)}
	 * or {@link #preparePack(ProgressMonitor, Set, Set)}.
	 * <p>
	 * Performs delta search if enabled and writes the pack stream.
	 * <p>
	 * All reused objects data checksum (Adler32/CRC32) is computed and
	 * validated against existing checksum.
	 *
	 * @param compressMonitor
	 *            progress monitor to report object compression work.
	 * @param writeMonitor
	 *            progress monitor to report the number of objects written.
	 * @param packStream
	 *            output stream of pack data. The stream should be buffered by
	 *            the caller. The caller is responsible for closing the stream.
	 * @throws IOException
	 *             an error occurred reading a local object's data to include in
	 *             the pack, or writing compressed object data to the output
	 *             stream.
	 * @throws WriteAbortedException
	 *             the write operation is aborted by {@link ObjectCountCallback}
	 *             .
	 */
	public void writePack(ProgressMonitor compressMonitor,
			ProgressMonitor writeMonitor, OutputStream packStream)
			throws IOException {
		if (compressMonitor == null)
			compressMonitor = NullProgressMonitor.INSTANCE;
		if (writeMonitor == null)
			writeMonitor = NullProgressMonitor.INSTANCE;

		// Exclusion sets were only needed during object selection; release
		// them before the (potentially long) write.
		excludeInPacks = null;
		excludeInPackLast = null;

		boolean needSearchForReuse = reuseSupport != null && (
				   reuseDeltas
				|| config.isReuseObjects()
				|| !cachedPacks.isEmpty());

		if (compressMonitor instanceof BatchingProgressMonitor) {
			// Delay the first progress report; small packs often finish
			// before any output would be useful.
			long delay = 1000;
			if (needSearchForReuse && config.isDeltaCompress())
				delay = 500;
			((BatchingProgressMonitor) compressMonitor).setDelayStart(
					delay,
					TimeUnit.MILLISECONDS);
		}

		if (needSearchForReuse)
			searchForReuse(compressMonitor);
		if (config.isDeltaCompress())
			searchForDeltas(compressMonitor);

		// Per-object CRC32 is only needed when an index will be written;
		// otherwise the checksum wrapper is skipped entirely.
		crc32 = new CRC32();
		final PackOutputStream out = new PackOutputStream(
			writeMonitor,
			isIndexDisabled()
				? packStream
				: new CheckedOutputStream(packStream, crc32),
			this);

		long objCnt = getObjectCount();
		stats.totalObjects = objCnt;
		if (callback != null)
			callback.setObjectCount(objCnt); // may throw WriteAbortedException
		beginPhase(PackingPhase.WRITING, writeMonitor, objCnt);
		long writeStart = System.currentTimeMillis();
		try {
			out.writeFileHeader(PACK_VERSION_GENERATED, objCnt);
			out.flush();

			writeObjects(out);
			if (!edgeObjects.isEmpty() || !cachedPacks.isEmpty()) {
				// Thin pack: record the bytes written so far, before any
				// cached packs are appended below.
				for (PackStatistics.ObjectType.Accumulator typeStat : stats.objectTypes) {
					if (typeStat == null)
						continue;
					stats.thinPackBytes += typeStat.bytes;
				}
			}

			stats.reusedPacks = Collections.unmodifiableList(cachedPacks);
			for (CachedPack pack : cachedPacks) {
				long deltaCnt = pack.getDeltaCount();
				stats.reusedObjects += pack.getObjectCount();
				stats.reusedDeltas += deltaCnt;
				stats.totalDeltas += deltaCnt;
				reuseSupport.copyPackAsIs(out, pack);
			}
			writeChecksum(out);
			out.flush();
		} finally {
			// Record timing and fold per-type counters into the totals even
			// if the write failed part way through.
			stats.timeWriting = System.currentTimeMillis() - writeStart;
			stats.depth = depth;

			for (PackStatistics.ObjectType.Accumulator typeStat : stats.objectTypes) {
				if (typeStat == null)
					continue;
				typeStat.cntDeltas += typeStat.reusedDeltas;
				stats.reusedObjects += typeStat.reusedObjects;
				stats.reusedDeltas += typeStat.reusedDeltas;
				stats.totalDeltas += typeStat.cntDeltas;
			}
		}

		stats.totalBytes = out.length();
		reader.close();
		endPhase(writeMonitor);
	}
1035 
	/**
	 * Returns a description of what this PackWriter did in order to create
	 * the final pack stream.
	 *
	 * @return description of what this PackWriter did in order to create the
	 *         final pack stream. This should only be invoked after the calls to
	 *         create the pack/index/bitmap have completed.
	 */
	public PackStatistics getStatistics() {
		return new PackStatistics(stats);
	}
1044 
	/**
	 * Returns a snapshot of the current state of this PackWriter.
	 *
	 * @return snapshot of the current state of this PackWriter.
	 */
	public State getState() {
		return state.snapshot();
	}
1049 
1050 	/**
1051 	 * Release all resources used by this writer.
1052 	 *
1053 	 * @since 4.0
1054 	 */
1055 	@Override
1056 	public void close() {
1057 		reader.close();
1058 		if (myDeflater != null) {
1059 			myDeflater.end();
1060 			myDeflater = null;
1061 		}
1062 		instances.remove(selfRef);
1063 	}
1064 
1065 	private void searchForReuse(ProgressMonitor monitor) throws IOException {
1066 		long cnt = 0;
1067 		cnt += objectsLists[OBJ_COMMIT].size();
1068 		cnt += objectsLists[OBJ_TREE].size();
1069 		cnt += objectsLists[OBJ_BLOB].size();
1070 		cnt += objectsLists[OBJ_TAG].size();
1071 
1072 		long start = System.currentTimeMillis();
1073 		beginPhase(PackingPhase.FINDING_SOURCES, monitor, cnt);
1074 		if (cnt <= 4096) {
1075 			// For small object counts, do everything as one list.
1076 			BlockList<ObjectToPack> tmp = new BlockList<ObjectToPack>((int) cnt);
1077 			tmp.addAll(objectsLists[OBJ_TAG]);
1078 			tmp.addAll(objectsLists[OBJ_COMMIT]);
1079 			tmp.addAll(objectsLists[OBJ_TREE]);
1080 			tmp.addAll(objectsLists[OBJ_BLOB]);
1081 			searchForReuse(monitor, tmp);
1082 			if (pruneCurrentObjectList) {
1083 				// If the list was pruned, we need to re-prune the main lists.
1084 				pruneEdgesFromObjectList(objectsLists[OBJ_COMMIT]);
1085 				pruneEdgesFromObjectList(objectsLists[OBJ_TREE]);
1086 				pruneEdgesFromObjectList(objectsLists[OBJ_BLOB]);
1087 				pruneEdgesFromObjectList(objectsLists[OBJ_TAG]);
1088 			}
1089 		} else {
1090 			searchForReuse(monitor, objectsLists[OBJ_TAG]);
1091 			searchForReuse(monitor, objectsLists[OBJ_COMMIT]);
1092 			searchForReuse(monitor, objectsLists[OBJ_TREE]);
1093 			searchForReuse(monitor, objectsLists[OBJ_BLOB]);
1094 		}
1095 		endPhase(monitor);
1096 		stats.timeSearchingForReuse = System.currentTimeMillis() - start;
1097 
1098 		if (config.isReuseDeltas() && config.getCutDeltaChains()) {
1099 			cutDeltaChains(objectsLists[OBJ_TREE]);
1100 			cutDeltaChains(objectsLists[OBJ_BLOB]);
1101 		}
1102 	}
1103 
	// Selects stored representations for every object in list; if the reuse
	// provider flagged objects for removal, prunes them from the list.
	private void searchForReuse(ProgressMonitor monitor, List<ObjectToPack> list)
			throws IOException, MissingObjectException {
		pruneCurrentObjectList = false;
		reuseSupport.selectObjectRepresentation(this, monitor, list);
		if (pruneCurrentObjectList)
			pruneEdgesFromObjectList(list);
	}
1111 
	// Walks each delta chain from its tip and breaks chains exceeding the
	// configured maximum depth by reselecting a non-delta representation
	// for the offending base.
	private void cutDeltaChains(BlockList<ObjectToPack> list)
			throws IOException {
		int max = config.getMaxDeltaDepth();
		for (int idx = list.size() - 1; idx >= 0; idx--) {
			int d = 0;
			ObjectToPack b = list.get(idx).getDeltaBase();
			while (b != null) {
				if (d < b.getChainLength())
					break; // remainder of this chain was already measured
				b.setChainLength(++d);
				if (d >= max && b.isDeltaRepresentation()) {
					reselectNonDelta(b);
					break;
				}
				b = b.getDeltaBase();
			}
		}
		if (config.isDeltaCompress()) {
			// NOTE(review): chain lengths appear to be cleared so the later
			// delta-compression phase starts from a clean state — confirm.
			for (ObjectToPack otp : list)
				otp.clearChainLength();
		}
	}
1134 
	// Finds delta compression opportunities among trees and blobs: collects
	// candidates, sizes them, sorts by path/size, and runs the delta search.
	private void searchForDeltas(ProgressMonitor monitor)
			throws MissingObjectException, IncorrectObjectTypeException,
			IOException {
		// Commits and annotated tags tend to have too many differences to
		// really benefit from delta compression. Consequently just don't
		// bother examining those types here.
		//
		ObjectToPack[] list = new ObjectToPack[
				  objectsLists[OBJ_TREE].size()
				+ objectsLists[OBJ_BLOB].size()
				+ edgeObjects.size()];
		int cnt = 0;
		cnt = findObjectsNeedingDelta(list, cnt, OBJ_TREE);
		cnt = findObjectsNeedingDelta(list, cnt, OBJ_BLOB);
		if (cnt == 0)
			return;
		int nonEdgeCnt = cnt;

		// Queue up any edge objects that we might delta against.  We won't
		// be sending these as we assume the other side has them, but we need
		// them in the search phase below.
		//
		for (ObjectToPack eo : edgeObjects) {
			eo.setWeight(0);
			list[cnt++] = eo;
		}

		// Compute the sizes of the objects so we can do a proper sort.
		// We let the reader skip missing objects if it chooses. For
		// some readers this can be a huge win. We detect missing objects
		// by having set the weights above to 0 and allowing the delta
		// search code to discover the missing object and skip over it, or
		// abort with an exception if we actually had to have it.
		//
		final long sizingStart = System.currentTimeMillis();
		beginPhase(PackingPhase.GETTING_SIZES, monitor, cnt);
		AsyncObjectSizeQueue<ObjectToPack> sizeQueue = reader.getObjectSize(
				Arrays.<ObjectToPack> asList(list).subList(0, cnt), false);
		try {
			final long limit = Math.min(
					config.getBigFileThreshold(),
					Integer.MAX_VALUE);
			for (;;) {
				try {
					if (!sizeQueue.next())
						break;
				} catch (MissingObjectException notFound) {
					monitor.update(1);
					if (ignoreMissingUninteresting) {
						// A missing edge object is tolerable: just exclude
						// it from the delta search.
						ObjectToPack otp = sizeQueue.getCurrent();
						if (otp != null && otp.isEdge()) {
							otp.setDoNotDelta();
							continue;
						}

						otp = objectsMap.get(notFound.getObjectId());
						if (otp != null && otp.isEdge()) {
							otp.setDoNotDelta();
							continue;
						}
					}
					throw notFound;
				}

				ObjectToPack otp = sizeQueue.getCurrent();
				if (otp == null)
					otp = objectsMap.get(sizeQueue.getObjectId());

				long sz = sizeQueue.getSize();
				if (DeltaIndex.BLKSZ < sz && sz < limit)
					otp.setWeight((int) sz);
				else
					otp.setDoNotDelta(); // too small, or too big
				monitor.update(1);
			}
		} finally {
			sizeQueue.release();
		}
		endPhase(monitor);
		stats.timeSearchingForSizes = System.currentTimeMillis() - sizingStart;

		// Sort the objects by path hash so like files are near each other,
		// and then by size descending so that bigger files are first. This
		// applies "Linus' Law" which states that newer files tend to be the
		// bigger ones, because source files grow and hardly ever shrink.
		//
		Arrays.sort(list, 0, cnt, new Comparator<ObjectToPack>() {
			public int compare(ObjectToPack a, ObjectToPack b) {
				// Objects excluded from deltification sort to the end.
				int cmp = (a.isDoNotDelta() ? 1 : 0)
						- (b.isDoNotDelta() ? 1 : 0);
				if (cmp != 0)
					return cmp;

				cmp = a.getType() - b.getType();
				if (cmp != 0)
					return cmp;

				// The unsigned shift keeps both operands non-negative,
				// so this subtraction cannot overflow.
				cmp = (a.getPathHash() >>> 1) - (b.getPathHash() >>> 1);
				if (cmp != 0)
					return cmp;

				cmp = (a.getPathHash() & 1) - (b.getPathHash() & 1);
				if (cmp != 0)
					return cmp;

				// Edge objects sort ahead of non-edge at equal path hash.
				cmp = (a.isEdge() ? 0 : 1) - (b.isEdge() ? 0 : 1);
				if (cmp != 0)
					return cmp;

				return b.getWeight() - a.getWeight();
			}
		});

		// Above we stored the objects we cannot delta onto the end.
		// Remove them from the list so we don't waste time on them.
		while (0 < cnt && list[cnt - 1].isDoNotDelta()) {
			if (!list[cnt - 1].isEdge())
				nonEdgeCnt--;
			cnt--;
		}
		if (cnt == 0)
			return;

		final long searchStart = System.currentTimeMillis();
		searchForDeltas(monitor, list, cnt);
		stats.deltaSearchNonEdgeObjects = nonEdgeCnt;
		stats.timeCompressing = System.currentTimeMillis() - searchStart;

		for (int i = 0; i < cnt; i++)
			if (!list[i].isEdge() && list[i].isDeltaRepresentation())
				stats.deltasFound++;
	}
1267 
1268 	private int findObjectsNeedingDelta(ObjectToPack[] list, int cnt, int type) {
1269 		for (ObjectToPack otp : objectsLists[type]) {
1270 			if (otp.isDoNotDelta()) // delta is disabled for this path
1271 				continue;
1272 			if (otp.isDeltaRepresentation()) // already reusing a delta
1273 				continue;
1274 			otp.setWeight(0);
1275 			list[cnt++] = otp;
1276 		}
1277 		return cnt;
1278 	}
1279 
1280 	private void reselectNonDelta(ObjectToPack otp) throws IOException {
1281 		otp.clearDeltaBase();
1282 		otp.clearReuseAsIs();
1283 		boolean old = reuseDeltas;
1284 		reuseDeltas = false;
1285 		reuseSupport.selectObjectRepresentation(this,
1286 				NullProgressMonitor.INSTANCE,
1287 				Collections.singleton(otp));
1288 		reuseDeltas = old;
1289 	}
1290 
1291 	private void searchForDeltas(final ProgressMonitor monitor,
1292 			final ObjectToPack[] list, final int cnt)
1293 			throws MissingObjectException, IncorrectObjectTypeException,
1294 			LargeObjectException, IOException {
1295 		int threads = config.getThreads();
1296 		if (threads == 0)
1297 			threads = Runtime.getRuntime().availableProcessors();
1298 		if (threads <= 1 || cnt <= config.getDeltaSearchWindowSize())
1299 			singleThreadDeltaSearch(monitor, list, cnt);
1300 		else
1301 			parallelDeltaSearch(monitor, list, cnt, threads);
1302 	}
1303 
1304 	private void singleThreadDeltaSearch(ProgressMonitor monitor,
1305 			ObjectToPack[] list, int cnt) throws IOException {
1306 		long totalWeight = 0;
1307 		for (int i = 0; i < cnt; i++) {
1308 			ObjectToPack o = list[i];
1309 			if (!o.isEdge() && !o.doNotAttemptDelta())
1310 				totalWeight += o.getWeight();
1311 		}
1312 
1313 		long bytesPerUnit = 1;
1314 		while (DeltaTask.MAX_METER <= (totalWeight / bytesPerUnit))
1315 			bytesPerUnit <<= 10;
1316 		int cost = (int) (totalWeight / bytesPerUnit);
1317 		if (totalWeight % bytesPerUnit != 0)
1318 			cost++;
1319 
1320 		beginPhase(PackingPhase.COMPRESSING, monitor, cost);
1321 		new DeltaWindow(config, new DeltaCache(config), reader,
1322 				monitor, bytesPerUnit,
1323 				list, 0, cnt).search();
1324 		endPhase(monitor);
1325 	}
1326 
1327 	private void parallelDeltaSearch(ProgressMonitor monitor,
1328 			ObjectToPack[] list, int cnt, int threads) throws IOException {
1329 		DeltaCache dc = new ThreadSafeDeltaCache(config);
1330 		ThreadSafeProgressMonitor pm = new ThreadSafeProgressMonitor(monitor);
1331 		DeltaTask.Block taskBlock = new DeltaTask.Block(threads, config,
1332 				reader, dc, pm,
1333 				list, 0, cnt);
1334 		taskBlock.partitionTasks();
1335 		beginPhase(PackingPhase.COMPRESSING, monitor, taskBlock.cost());
1336 		pm.startWorkers(taskBlock.tasks.size());
1337 
1338 		Executor executor = config.getExecutor();
1339 		final List<Throwable> errors =
1340 				Collections.synchronizedList(new ArrayList<Throwable>(threads));
1341 		if (executor instanceof ExecutorService) {
1342 			// Caller supplied us a service, use it directly.
1343 			runTasks((ExecutorService) executor, pm, taskBlock, errors);
1344 		} else if (executor == null) {
1345 			// Caller didn't give us a way to run the tasks, spawn up a
1346 			// temporary thread pool and make sure it tears down cleanly.
1347 			ExecutorService pool = Executors.newFixedThreadPool(threads);
1348 			try {
1349 				runTasks(pool, pm, taskBlock, errors);
1350 			} finally {
1351 				pool.shutdown();
1352 				for (;;) {
1353 					try {
1354 						if (pool.awaitTermination(60, TimeUnit.SECONDS))
1355 							break;
1356 					} catch (InterruptedException e) {
1357 						throw new IOException(
1358 								JGitText.get().packingCancelledDuringObjectsWriting);
1359 					}
1360 				}
1361 			}
1362 		} else {
1363 			// The caller gave us an executor, but it might not do
1364 			// asynchronous execution.  Wrap everything and hope it
1365 			// can schedule these for us.
1366 			for (final DeltaTask task : taskBlock.tasks) {
1367 				executor.execute(new Runnable() {
1368 					public void run() {
1369 						try {
1370 							task.call();
1371 						} catch (Throwable failure) {
1372 							errors.add(failure);
1373 						}
1374 					}
1375 				});
1376 			}
1377 			try {
1378 				pm.waitForCompletion();
1379 			} catch (InterruptedException ie) {
1380 				// We can't abort the other tasks as we have no handle.
1381 				// Cross our fingers and just break out anyway.
1382 				//
1383 				throw new IOException(
1384 						JGitText.get().packingCancelledDuringObjectsWriting);
1385 			}
1386 		}
1387 
1388 		// If any task threw an error, try to report it back as
1389 		// though we weren't using a threaded search algorithm.
1390 		//
1391 		if (!errors.isEmpty()) {
1392 			Throwable err = errors.get(0);
1393 			if (err instanceof Error)
1394 				throw (Error) err;
1395 			if (err instanceof RuntimeException)
1396 				throw (RuntimeException) err;
1397 			if (err instanceof IOException)
1398 				throw (IOException) err;
1399 
1400 			IOException fail = new IOException(err.getMessage());
1401 			fail.initCause(err);
1402 			throw fail;
1403 		}
1404 		endPhase(monitor);
1405 	}
1406 
1407 	private static void runTasks(ExecutorService pool,
1408 			ThreadSafeProgressMonitor pm,
1409 			DeltaTask.Block tb, List<Throwable> errors) throws IOException {
1410 		List<Future<?>> futures = new ArrayList<Future<?>>(tb.tasks.size());
1411 		for (DeltaTask task : tb.tasks)
1412 			futures.add(pool.submit(task));
1413 
1414 		try {
1415 			pm.waitForCompletion();
1416 			for (Future<?> f : futures) {
1417 				try {
1418 					f.get();
1419 				} catch (ExecutionException failed) {
1420 					errors.add(failed.getCause());
1421 				}
1422 			}
1423 		} catch (InterruptedException ie) {
1424 			for (Future<?> f : futures)
1425 				f.cancel(true);
1426 			throw new IOException(
1427 					JGitText.get().packingCancelledDuringObjectsWriting);
1428 		}
1429 	}
1430 
1431 	private void writeObjects(PackOutputStream out) throws IOException {
1432 		writeObjects(out, objectsLists[OBJ_COMMIT]);
1433 		writeObjects(out, objectsLists[OBJ_TAG]);
1434 		writeObjects(out, objectsLists[OBJ_TREE]);
1435 		writeObjects(out, objectsLists[OBJ_BLOB]);
1436 	}
1437 
1438 	private void writeObjects(PackOutputStream out, List<ObjectToPack> list)
1439 			throws IOException {
1440 		if (list.isEmpty())
1441 			return;
1442 
1443 		typeStats = stats.objectTypes[list.get(0).getType()];
1444 		long beginOffset = out.length();
1445 
1446 		if (reuseSupport != null) {
1447 			reuseSupport.writeObjects(out, list);
1448 		} else {
1449 			for (ObjectToPack otp : list)
1450 				out.writeObject(otp);
1451 		}
1452 
1453 		typeStats.bytes += out.length() - beginOffset;
1454 		typeStats.cntObjects = list.size();
1455 	}
1456 
	// Writes otp unless an earlier delta-base traversal already emitted it.
	void writeObject(PackOutputStream out, ObjectToPack otp) throws IOException {
		if (!otp.isWritten())
			writeObjectImpl(out, otp);
	}
1461 
	// Writes otp (and, recursively, any unwritten non-edge delta base),
	// preferring verbatim reuse of a stored representation and falling back
	// to deflating a whole object or freshly computed delta.
	private void writeObjectImpl(PackOutputStream out, ObjectToPack otp)
			throws IOException {
		if (otp.wantWrite()) {
			// A cycle exists in this delta chain. This should only occur if a
			// selected object representation disappeared during writing
			// (for example due to a concurrent repack) and a different base
			// was chosen, forcing a cycle. Select something other than a
			// delta, and write this object.
			reselectNonDelta(otp);
		}
		otp.markWantWrite();

		while (otp.isReuseAsIs()) {
			writeBase(out, otp.getDeltaBase());
			if (otp.isWritten())
				return; // Delta chain cycle caused this to write already.

			// CRC of the object as written is captured for the index.
			crc32.reset();
			otp.setOffset(out.length());
			try {
				reuseSupport.copyObjectAsIs(out, otp, reuseValidate);
				out.endObject();
				otp.setCRC((int) crc32.getValue());
				typeStats.reusedObjects++;
				if (otp.isDeltaRepresentation()) {
					typeStats.reusedDeltas++;
					typeStats.deltaBytes += out.length() - otp.getOffset();
				}
				return;
			} catch (StoredObjectRepresentationNotAvailableException gone) {
				if (otp.getOffset() == out.length()) {
					// Nothing was emitted yet; select another
					// representation and retry the reuse loop.
					otp.setOffset(0);
					otp.clearDeltaBase();
					otp.clearReuseAsIs();
					reuseSupport.selectObjectRepresentation(this,
							NullProgressMonitor.INSTANCE,
							Collections.singleton(otp));
					continue;
				} else {
					// Object writing already started, we cannot recover.
					//
					CorruptObjectException coe;
					coe = new CorruptObjectException(otp, ""); //$NON-NLS-1$
					coe.initCause(gone);
					throw coe;
				}
			}
		}

		// If we reached here, reuse wasn't possible.
		//
		if (otp.isDeltaRepresentation())
			writeDeltaObjectDeflate(out, otp);
		else
			writeWholeObjectDeflate(out, otp);
		out.endObject();
		otp.setCRC((int) crc32.getValue());
	}
1520 
1521 	private void writeBase(PackOutputStream out, ObjectToPack base)
1522 			throws IOException {
1523 		if (base != null && !base.isWritten() && !base.isEdge())
1524 			writeObjectImpl(out, base);
1525 	}
1526 
1527 	private void writeWholeObjectDeflate(PackOutputStream out,
1528 			final ObjectToPack otp) throws IOException {
1529 		final Deflater deflater = deflater();
1530 		final ObjectLoader ldr = reader.open(otp, otp.getType());
1531 
1532 		crc32.reset();
1533 		otp.setOffset(out.length());
1534 		out.writeHeader(otp, ldr.getSize());
1535 
1536 		deflater.reset();
1537 		DeflaterOutputStream dst = new DeflaterOutputStream(out, deflater);
1538 		ldr.copyTo(dst);
1539 		dst.finish();
1540 	}
1541 
1542 	private void writeDeltaObjectDeflate(PackOutputStream out,
1543 			final ObjectToPack otp) throws IOException {
1544 		writeBase(out, otp.getDeltaBase());
1545 
1546 		crc32.reset();
1547 		otp.setOffset(out.length());
1548 
1549 		DeltaCache.Ref ref = otp.popCachedDelta();
1550 		if (ref != null) {
1551 			byte[] zbuf = ref.get();
1552 			if (zbuf != null) {
1553 				out.writeHeader(otp, otp.getCachedSize());
1554 				out.write(zbuf);
1555 				return;
1556 			}
1557 		}
1558 
1559 		TemporaryBuffer.Heap delta = delta(otp);
1560 		out.writeHeader(otp, delta.length());
1561 
1562 		Deflater deflater = deflater();
1563 		deflater.reset();
1564 		DeflaterOutputStream dst = new DeflaterOutputStream(out, deflater);
1565 		delta.writeTo(dst, null);
1566 		dst.finish();
1567 		typeStats.cntDeltas++;
1568 		typeStats.deltaBytes += out.length() - otp.getOffset();
1569 	}
1570 
1571 	private TemporaryBuffer.Heap delta(final ObjectToPack otp)
1572 			throws IOException {
1573 		DeltaIndex index = new DeltaIndex(buffer(otp.getDeltaBaseId()));
1574 		byte[] res = buffer(otp);
1575 
1576 		// We never would have proposed this pair if the delta would be
1577 		// larger than the unpacked version of the object. So using it
1578 		// as our buffer limit is valid: we will never reach it.
1579 		//
1580 		TemporaryBuffer.Heap delta = new TemporaryBuffer.Heap(res.length);
1581 		index.encode(delta, res);
1582 		return delta;
1583 	}
1584 
	// Loads the complete content of an object using this writer's
	// configuration and reader.
	private byte[] buffer(AnyObjectId objId) throws IOException {
		return buffer(config, reader, objId);
	}
1588 
	/**
	 * Loads the complete, inflated content of an object into memory, bounded
	 * by the configured big file threshold.
	 *
	 * @param config
	 *            pack configuration supplying the big file threshold.
	 * @param or
	 *            reader to access the object through.
	 * @param objId
	 *            identity of the object to load.
	 * @return complete content of the object.
	 * @throws IOException
	 *             the object is missing or cannot be read.
	 */
	static byte[] buffer(PackConfig config, ObjectReader or, AnyObjectId objId)
			throws IOException {
		// PackWriter should have already pruned objects that
		// are above the big file threshold, so our chances of
		// the object being below it are very good. We really
		// shouldn't be here, unless the implementation is odd.

		return or.open(objId).getCachedBytes(config.getBigFileThreshold());
	}
1598 
1599 	private Deflater deflater() {
1600 		if (myDeflater == null)
1601 			myDeflater = new Deflater(config.getCompressionLevel());
1602 		return myDeflater;
1603 	}
1604 
	// Captures the pack content digest and appends it as the pack trailer.
	private void writeChecksum(PackOutputStream out) throws IOException {
		packcsum = out.getDigest();
		out.write(packcsum);
	}
1609 
1610 	private void findObjectsToPack(final ProgressMonitor countingMonitor,
1611 			final ObjectWalk walker, final Set<? extends ObjectId> want,
1612 			Set<? extends ObjectId> have)
1613 			throws MissingObjectException, IOException,
1614 			IncorrectObjectTypeException {
1615 		final long countingStart = System.currentTimeMillis();
1616 		beginPhase(PackingPhase.COUNTING, countingMonitor, ProgressMonitor.UNKNOWN);
1617 
1618 		if (have == null)
1619 			have = Collections.emptySet();
1620 
1621 		stats.interestingObjects = Collections.unmodifiableSet(new HashSet<ObjectId>(want));
1622 		stats.uninterestingObjects = Collections.unmodifiableSet(new HashSet<ObjectId>(have));
1623 
1624 		canBuildBitmaps = config.isBuildBitmaps()
1625 				&& !shallowPack
1626 				&& have.isEmpty()
1627 				&& (excludeInPacks == null || excludeInPacks.length == 0);
1628 		if (!shallowPack && useBitmaps) {
1629 			BitmapIndex bitmapIndex = reader.getBitmapIndex();
1630 			if (bitmapIndex != null) {
1631 				PackWriterBitmapWalker bitmapWalker = new PackWriterBitmapWalker(
1632 						walker, bitmapIndex, countingMonitor);
1633 				findObjectsToPackUsingBitmaps(bitmapWalker, want, have);
1634 				endPhase(countingMonitor);
1635 				stats.timeCounting = System.currentTimeMillis() - countingStart;
1636 				stats.bitmapIndexMisses = bitmapWalker.getCountOfBitmapIndexMisses();
1637 				return;
1638 			}
1639 		}
1640 
1641 		List<ObjectId> all = new ArrayList<ObjectId>(want.size() + have.size());
1642 		all.addAll(want);
1643 		all.addAll(have);
1644 
1645 		final RevFlag include = walker.newFlag("include"); //$NON-NLS-1$
1646 		final RevFlag added = walker.newFlag("added"); //$NON-NLS-1$
1647 
1648 		walker.carry(include);
1649 
1650 		int haveEst = have.size();
1651 		if (have.isEmpty()) {
1652 			walker.sort(RevSort.COMMIT_TIME_DESC);
1653 		} else {
1654 			walker.sort(RevSort.TOPO);
1655 			if (thin)
1656 				walker.sort(RevSort.BOUNDARY, true);
1657 		}
1658 
1659 		List<RevObject> wantObjs = new ArrayList<RevObject>(want.size());
1660 		List<RevObject> haveObjs = new ArrayList<RevObject>(haveEst);
1661 		List<RevTag> wantTags = new ArrayList<RevTag>(want.size());
1662 
1663 		AsyncRevObjectQueue q = walker.parseAny(all, true);
1664 		try {
1665 			for (;;) {
1666 				try {
1667 					RevObject o = q.next();
1668 					if (o == null)
1669 						break;
1670 					if (have.contains(o))
1671 						haveObjs.add(o);
1672 					if (want.contains(o)) {
1673 						o.add(include);
1674 						wantObjs.add(o);
1675 						if (o instanceof RevTag)
1676 							wantTags.add((RevTag) o);
1677 					}
1678 				} catch (MissingObjectException e) {
1679 					if (ignoreMissingUninteresting
1680 							&& have.contains(e.getObjectId()))
1681 						continue;
1682 					throw e;
1683 				}
1684 			}
1685 		} finally {
1686 			q.release();
1687 		}
1688 
1689 		if (!wantTags.isEmpty()) {
1690 			all = new ArrayList<ObjectId>(wantTags.size());
1691 			for (RevTag tag : wantTags)
1692 				all.add(tag.getObject());
1693 			q = walker.parseAny(all, true);
1694 			try {
1695 				while (q.next() != null) {
1696 					// Just need to pop the queue item to parse the object.
1697 				}
1698 			} finally {
1699 				q.release();
1700 			}
1701 		}
1702 
1703 		if (walker instanceof DepthWalk.ObjectWalk) {
1704 			DepthWalk.ObjectWalk depthWalk = (DepthWalk.ObjectWalk) walker;
1705 			for (RevObject obj : wantObjs)
1706 				depthWalk.markRoot(obj);
1707 			if (unshallowObjects != null) {
1708 				for (ObjectId id : unshallowObjects)
1709 					depthWalk.markUnshallow(walker.parseAny(id));
1710 			}
1711 		} else {
1712 			for (RevObject obj : wantObjs)
1713 				walker.markStart(obj);
1714 		}
1715 		for (RevObject obj : haveObjs)
1716 			walker.markUninteresting(obj);
1717 
1718 		final int maxBases = config.getDeltaSearchWindowSize();
1719 		Set<RevTree> baseTrees = new HashSet<RevTree>();
1720 		BlockList<RevCommit> commits = new BlockList<RevCommit>();
1721 		Set<ObjectId> roots = new HashSet<>();
1722 		RevCommit c;
1723 		while ((c = walker.next()) != null) {
1724 			if (exclude(c))
1725 				continue;
1726 			if (c.has(RevFlag.UNINTERESTING)) {
1727 				if (baseTrees.size() <= maxBases)
1728 					baseTrees.add(c.getTree());
1729 				continue;
1730 			}
1731 
1732 			commits.add(c);
1733 			if (c.getParentCount() == 0) {
1734 				roots.add(c.copy());
1735 			}
1736 			countingMonitor.update(1);
1737 		}
1738 		stats.rootCommits = Collections.unmodifiableSet(roots);
1739 
1740 		if (shallowPack) {
1741 			for (RevCommit cmit : commits) {
1742 				addObject(cmit, 0);
1743 			}
1744 		} else {
1745 			int commitCnt = 0;
1746 			boolean putTagTargets = false;
1747 			for (RevCommit cmit : commits) {
1748 				if (!cmit.has(added)) {
1749 					cmit.add(added);
1750 					addObject(cmit, 0);
1751 					commitCnt++;
1752 				}
1753 
1754 				for (int i = 0; i < cmit.getParentCount(); i++) {
1755 					RevCommit p = cmit.getParent(i);
1756 					if (!p.has(added) && !p.has(RevFlag.UNINTERESTING)
1757 							&& !exclude(p)) {
1758 						p.add(added);
1759 						addObject(p, 0);
1760 						commitCnt++;
1761 					}
1762 				}
1763 
1764 				if (!putTagTargets && 4096 < commitCnt) {
1765 					for (ObjectId id : tagTargets) {
1766 						RevObject obj = walker.lookupOrNull(id);
1767 						if (obj instanceof RevCommit
1768 								&& obj.has(include)
1769 								&& !obj.has(RevFlag.UNINTERESTING)
1770 								&& !obj.has(added)) {
1771 							obj.add(added);
1772 							addObject(obj, 0);
1773 						}
1774 					}
1775 					putTagTargets = true;
1776 				}
1777 			}
1778 		}
1779 		commits = null;
1780 
1781 		if (thin && !baseTrees.isEmpty()) {
1782 			BaseSearch bases = new BaseSearch(countingMonitor, baseTrees, //
1783 					objectsMap, edgeObjects, reader);
1784 			RevObject o;
1785 			while ((o = walker.nextObject()) != null) {
1786 				if (o.has(RevFlag.UNINTERESTING))
1787 					continue;
1788 				if (exclude(o))
1789 					continue;
1790 
1791 				int pathHash = walker.getPathHashCode();
1792 				byte[] pathBuf = walker.getPathBuffer();
1793 				int pathLen = walker.getPathLength();
1794 				bases.addBase(o.getType(), pathBuf, pathLen, pathHash);
1795 				addObject(o, pathHash);
1796 				countingMonitor.update(1);
1797 			}
1798 		} else {
1799 			RevObject o;
1800 			while ((o = walker.nextObject()) != null) {
1801 				if (o.has(RevFlag.UNINTERESTING))
1802 					continue;
1803 				if (exclude(o))
1804 					continue;
1805 				addObject(o, walker.getPathHashCode());
1806 				countingMonitor.update(1);
1807 			}
1808 		}
1809 
1810 		for (CachedPack pack : cachedPacks)
1811 			countingMonitor.update((int) pack.getObjectCount());
1812 		endPhase(countingMonitor);
1813 		stats.timeCounting = System.currentTimeMillis() - countingStart;
1814 		stats.bitmapIndexMisses = -1;
1815 	}
1816 
1817 	private void findObjectsToPackUsingBitmaps(
1818 			PackWriterBitmapWalker bitmapWalker, Set<? extends ObjectId> want,
1819 			Set<? extends ObjectId> have)
1820 			throws MissingObjectException, IncorrectObjectTypeException,
1821 			IOException {
1822 		BitmapBuilder haveBitmap = bitmapWalker.findObjects(have, null, true);
1823 		bitmapWalker.reset();
1824 		BitmapBuilder wantBitmap = bitmapWalker.findObjects(want, haveBitmap,
1825 				false);
1826 		BitmapBuilder needBitmap = wantBitmap.andNot(haveBitmap);
1827 
1828 		if (useCachedPacks && reuseSupport != null && !reuseValidate
1829 				&& (excludeInPacks == null || excludeInPacks.length == 0))
1830 			cachedPacks.addAll(
1831 					reuseSupport.getCachedPacksAndUpdate(needBitmap));
1832 
1833 		for (BitmapObject obj : needBitmap) {
1834 			ObjectId objectId = obj.getObjectId();
1835 			if (exclude(objectId)) {
1836 				needBitmap.remove(objectId);
1837 				continue;
1838 			}
1839 			addObject(objectId, obj.getType(), 0);
1840 		}
1841 
1842 		if (thin)
1843 			haveObjects = haveBitmap;
1844 	}
1845 
1846 	private static void pruneEdgesFromObjectList(List<ObjectToPack> list) {
1847 		final int size = list.size();
1848 		int src = 0;
1849 		int dst = 0;
1850 
1851 		for (; src < size; src++) {
1852 			ObjectToPack obj = list.get(src);
1853 			if (obj.isEdge())
1854 				continue;
1855 			if (dst != src)
1856 				list.set(dst, obj);
1857 			dst++;
1858 		}
1859 
1860 		while (dst < list.size())
1861 			list.remove(list.size() - 1);
1862 	}
1863 
1864 	/**
1865 	 * Include one object to the output file.
1866 	 * <p>
1867 	 * Objects are written in the order they are added. If the same object is
1868 	 * added twice, it may be written twice, creating a larger than necessary
1869 	 * file.
1870 	 *
1871 	 * @param object
1872 	 *            the object to add.
1873 	 * @throws IncorrectObjectTypeException
1874 	 *             the object is an unsupported type.
1875 	 */
1876 	public void addObject(final RevObject object)
1877 			throws IncorrectObjectTypeException {
1878 		if (!exclude(object))
1879 			addObject(object, 0);
1880 	}
1881 
	// Convenience overload: derives the pack entry's type code from the
	// already-parsed RevObject.
	private void addObject(final RevObject object, final int pathHashCode) {
		addObject(object, object.getType(), pathHashCode);
	}
1885 
1886 	private void addObject(
1887 			final AnyObjectId src, final int type, final int pathHashCode) {
1888 		final ObjectToPack otp;
1889 		if (reuseSupport != null)
1890 			otp = reuseSupport.newObjectToPack(src, type);
1891 		else
1892 			otp = new ObjectToPack(src, type);
1893 		otp.setPathHash(pathHashCode);
1894 		objectsLists[type].add(otp);
1895 		objectsMap.add(otp);
1896 	}
1897 
1898 	private boolean exclude(AnyObjectId objectId) {
1899 		if (excludeInPacks == null)
1900 			return false;
1901 		if (excludeInPackLast.contains(objectId))
1902 			return true;
1903 		for (ObjectIdSet idx : excludeInPacks) {
1904 			if (idx.contains(objectId)) {
1905 				excludeInPackLast = idx;
1906 				return true;
1907 			}
1908 		}
1909 		return false;
1910 	}
1911 
1912 	/**
1913 	 * Select an object representation for this writer.
1914 	 * <p>
1915 	 * An {@link ObjectReader} implementation should invoke this method once for
1916 	 * each representation available for an object, to allow the writer to find
1917 	 * the most suitable one for the output.
1918 	 *
1919 	 * @param otp
1920 	 *            the object being packed.
1921 	 * @param next
1922 	 *            the next available representation from the repository.
1923 	 */
1924 	public void select(ObjectToPack otp, StoredObjectRepresentation next) {
1925 		int nFmt = next.getFormat();
1926 
1927 		if (!cachedPacks.isEmpty()) {
1928 			if (otp.isEdge())
1929 				return;
1930 			if ((nFmt == PACK_WHOLE) | (nFmt == PACK_DELTA)) {
1931 				for (CachedPack pack : cachedPacks) {
1932 					if (pack.hasObject(otp, next)) {
1933 						otp.setEdge();
1934 						otp.clearDeltaBase();
1935 						otp.clearReuseAsIs();
1936 						pruneCurrentObjectList = true;
1937 						return;
1938 					}
1939 				}
1940 			}
1941 		}
1942 
1943 		if (nFmt == PACK_DELTA && reuseDeltas && reuseDeltaFor(otp)) {
1944 			ObjectId baseId = next.getDeltaBase();
1945 			ObjectToPack ptr = objectsMap.get(baseId);
1946 			if (ptr != null && !ptr.isEdge()) {
1947 				otp.setDeltaBase(ptr);
1948 				otp.setReuseAsIs();
1949 			} else if (thin && have(ptr, baseId)) {
1950 				otp.setDeltaBase(baseId);
1951 				otp.setReuseAsIs();
1952 			} else {
1953 				otp.clearDeltaBase();
1954 				otp.clearReuseAsIs();
1955 			}
1956 		} else if (nFmt == PACK_WHOLE && config.isReuseObjects()) {
1957 			int nWeight = next.getWeight();
1958 			if (otp.isReuseAsIs() && !otp.isDeltaRepresentation()) {
1959 				// We've chosen another PACK_WHOLE format for this object,
1960 				// choose the one that has the smaller compressed size.
1961 				//
1962 				if (otp.getWeight() <= nWeight)
1963 					return;
1964 			}
1965 			otp.clearDeltaBase();
1966 			otp.setReuseAsIs();
1967 			otp.setWeight(nWeight);
1968 		} else {
1969 			otp.clearDeltaBase();
1970 			otp.clearReuseAsIs();
1971 		}
1972 
1973 		otp.setDeltaAttempted(reuseDeltas & next.wasDeltaAttempted());
1974 		otp.select(next);
1975 	}
1976 
1977 	private final boolean have(ObjectToPack ptr, AnyObjectId objectId) {
1978 		return (ptr != null && ptr.isEdge())
1979 				|| (haveObjects != null && haveObjects.contains(objectId));
1980 	}
1981 
1982 	/**
1983 	 * Prepares the bitmaps to be written to the bitmap index file.
1984 	 * <p>
1985 	 * Bitmaps can be used to speed up fetches and clones by storing the entire
1986 	 * object graph at selected commits. Writing a bitmap index is an optional
1987 	 * feature that not all pack users may require.
1988 	 * <p>
1989 	 * Called after {@link #writeIndex(OutputStream)}.
1990 	 * <p>
1991 	 * To reduce memory internal state is cleared during this method, rendering
1992 	 * the PackWriter instance useless for anything further than a call to write
1993 	 * out the new bitmaps with {@link #writeBitmapIndex(OutputStream)}.
1994 	 *
1995 	 * @param pm
1996 	 *            progress monitor to report bitmap building work.
1997 	 * @return whether a bitmap index may be written.
1998 	 * @throws IOException
1999 	 *             when some I/O problem occur during reading objects.
2000 	 */
2001 	public boolean prepareBitmapIndex(ProgressMonitor pm) throws IOException {
2002 		if (!canBuildBitmaps || getObjectCount() > Integer.MAX_VALUE
2003 				|| !cachedPacks.isEmpty())
2004 			return false;
2005 
2006 		if (pm == null)
2007 			pm = NullProgressMonitor.INSTANCE;
2008 
2009 		int numCommits = objectsLists[OBJ_COMMIT].size();
2010 		List<ObjectToPack> byName = sortByName();
2011 		sortedByName = null;
2012 		objectsLists = null;
2013 		objectsMap = null;
2014 		writeBitmaps = new PackBitmapIndexBuilder(byName);
2015 		byName = null;
2016 
2017 		PackWriterBitmapPreparer bitmapPreparer = new PackWriterBitmapPreparer(
2018 				reader, writeBitmaps, pm, stats.interestingObjects);
2019 
2020 		Collection<PackWriterBitmapPreparer.BitmapCommit> selectedCommits =
2021 				bitmapPreparer.doCommitSelection(numCommits);
2022 
2023 		beginPhase(PackingPhase.BUILDING_BITMAPS, pm, selectedCommits.size());
2024 
2025 		PackWriterBitmapWalker walker = bitmapPreparer.newBitmapWalker();
2026 		AnyObjectId last = null;
2027 		for (PackWriterBitmapPreparer.BitmapCommit cmit : selectedCommits) {
2028 			if (cmit.isReuseWalker())
2029 				walker.reset();
2030 			else
2031 				walker = bitmapPreparer.newBitmapWalker();
2032 
2033 			BitmapBuilder bitmap = walker.findObjects(
2034 					Collections.singleton(cmit), null, false);
2035 
2036 			if (last != null && cmit.isReuseWalker() && !bitmap.contains(last))
2037 				throw new IllegalStateException(MessageFormat.format(
2038 						JGitText.get().bitmapMissingObject, cmit.name(),
2039 						last.name()));
2040 			last = cmit;
2041 			writeBitmaps.addBitmap(cmit, bitmap.build(), cmit.getFlags());
2042 
2043 			pm.update(1);
2044 		}
2045 
2046 		endPhase(pm);
2047 		return true;
2048 	}
2049 
2050 	private boolean reuseDeltaFor(ObjectToPack otp) {
2051 		int type = otp.getType();
2052 		if ((type & 2) != 0) // OBJ_TREE(2) or OBJ_BLOB(3)
2053 			return true;
2054 		if (type == OBJ_COMMIT)
2055 			return reuseDeltaCommits;
2056 		if (type == OBJ_TAG)
2057 			return false;
2058 		return true;
2059 	}
2060 
2061 	/**
2062 	 * Summary of how PackWriter created the pack.
2063 	 *
2064 	 * @deprecated Use {@link PackStatistics} instead.
2065 	 */
2066 	@Deprecated
2067 	public static class Statistics {
2068 		/** Statistics about a single class of object. */
2069 		public static class ObjectType {
2070 			// All requests are forwarded to this object.
2071 			private PackStatistics.ObjectType objectType;
2072 
2073 			/**
2074 			 * Wraps an
2075 			 * {@link org.eclipse.jgit.storage.pack.PackStatistics.ObjectType}
2076 			 * instance to maintain backwards compatibility with existing API.
2077 			 *
2078 			 * @param type
2079 			 *            the wrapped instance
2080 			 */
2081 			public ObjectType(PackStatistics.ObjectType type) {
2082 				objectType = type;
2083 			}
2084 
2085 			/**
2086 			 * @return total number of objects output. This total includes the
2087 			 *         value of {@link #getDeltas()}.
2088 			 */
2089 			public long getObjects() {
2090 				return objectType.getObjects();
2091 			}
2092 
2093 			/**
2094 			 * @return total number of deltas output. This may be lower than the
2095 			 *         actual number of deltas if a cached pack was reused.
2096 			 */
2097 			public long getDeltas() {
2098 				return objectType.getDeltas();
2099 			}
2100 
2101 			/**
2102 			 * @return number of objects whose existing representation was
2103 			 *         reused in the output. This count includes
2104 			 *         {@link #getReusedDeltas()}.
2105 			 */
2106 			public long getReusedObjects() {
2107 				return objectType.getReusedObjects();
2108 			}
2109 
2110 			/**
2111 			 * @return number of deltas whose existing representation was reused
2112 			 *         in the output, as their base object was also output or
2113 			 *         was assumed present for a thin pack. This may be lower
2114 			 *         than the actual number of reused deltas if a cached pack
2115 			 *         was reused.
2116 			 */
2117 			public long getReusedDeltas() {
2118 				return objectType.getReusedDeltas();
2119 			}
2120 
2121 			/**
2122 			 * @return total number of bytes written. This size includes the
2123 			 *         object headers as well as the compressed data. This size
2124 			 *         also includes all of {@link #getDeltaBytes()}.
2125 			 */
2126 			public long getBytes() {
2127 				return objectType.getBytes();
2128 			}
2129 
2130 			/**
2131 			 * @return number of delta bytes written. This size includes the
2132 			 *         object headers for the delta objects.
2133 			 */
2134 			public long getDeltaBytes() {
2135 				return objectType.getDeltaBytes();
2136 			}
2137 		}
2138 
2139 		// All requests are forwarded to this object.
2140 		private PackStatistics statistics;
2141 
2142 		/**
2143 		 * Wraps a {@link PackStatistics} object to maintain backwards
2144 		 * compatibility with existing API.
2145 		 *
2146 		 * @param stats
2147 		 *            the wrapped PackStatitics object
2148 		 */
2149 		public Statistics(PackStatistics stats) {
2150 			statistics = stats;
2151 		}
2152 
2153 		/**
2154 		 * @return unmodifiable collection of objects to be included in the
2155 		 *         pack. May be null if the pack was hand-crafted in a unit
2156 		 *         test.
2157 		 */
2158 		public Set<ObjectId> getInterestingObjects() {
2159 			return statistics.getInterestingObjects();
2160 		}
2161 
2162 		/**
2163 		 * @return unmodifiable collection of objects that should be excluded
2164 		 *         from the pack, as the peer that will receive the pack already
2165 		 *         has these objects.
2166 		 */
2167 		public Set<ObjectId> getUninterestingObjects() {
2168 			return statistics.getUninterestingObjects();
2169 		}
2170 
2171 		/**
2172 		 * @return unmodifiable collection of the cached packs that were reused
2173 		 *         in the output, if any were selected for reuse.
2174 		 */
2175 		public Collection<CachedPack> getReusedPacks() {
2176 			return statistics.getReusedPacks();
2177 		}
2178 
2179 		/**
2180 		 * @return number of objects in the output pack that went through the
2181 		 *         delta search process in order to find a potential delta base.
2182 		 */
2183 		public int getDeltaSearchNonEdgeObjects() {
2184 			return statistics.getDeltaSearchNonEdgeObjects();
2185 		}
2186 
2187 		/**
2188 		 * @return number of objects in the output pack that went through delta
2189 		 *         base search and found a suitable base. This is a subset of
2190 		 *         {@link #getDeltaSearchNonEdgeObjects()}.
2191 		 */
2192 		public int getDeltasFound() {
2193 			return statistics.getDeltasFound();
2194 		}
2195 
2196 		/**
2197 		 * @return total number of objects output. This total includes the value
2198 		 *         of {@link #getTotalDeltas()}.
2199 		 */
2200 		public long getTotalObjects() {
2201 			return statistics.getTotalObjects();
2202 		}
2203 
2204 		/**
2205 		 * @return the count of objects that needed to be discovered through an
2206 		 *         object walk because they were not found in bitmap indices.
2207 		 *         Returns -1 if no bitmap indices were found.
2208 		 *
2209 		 * @since 4.0
2210 		 */
2211 		public long getBitmapIndexMisses() {
2212 			return statistics.getBitmapIndexMisses();
2213 		}
2214 
2215 		/**
2216 		 * @return total number of deltas output. This may be lower than the
2217 		 *         actual number of deltas if a cached pack was reused.
2218 		 */
2219 		public long getTotalDeltas() {
2220 			return statistics.getTotalDeltas();
2221 		}
2222 
2223 		/**
2224 		 * @return number of objects whose existing representation was reused in
2225 		 *         the output. This count includes {@link #getReusedDeltas()}.
2226 		 */
2227 		public long getReusedObjects() {
2228 			return statistics.getReusedObjects();
2229 		}
2230 
2231 		/**
2232 		 * @return number of deltas whose existing representation was reused in
2233 		 *         the output, as their base object was also output or was
2234 		 *         assumed present for a thin pack. This may be lower than the
2235 		 *         actual number of reused deltas if a cached pack was reused.
2236 		 */
2237 		public long getReusedDeltas() {
2238 			return statistics.getReusedDeltas();
2239 		}
2240 
2241 		/**
2242 		 * @return total number of bytes written. This size includes the pack
2243 		 *         header, trailer, thin pack, and reused cached pack(s).
2244 		 */
2245 		public long getTotalBytes() {
2246 			return statistics.getTotalBytes();
2247 		}
2248 
2249 		/**
2250 		 * @return size of the thin pack in bytes, if a thin pack was generated.
2251 		 *         A thin pack is created when the client already has objects
2252 		 *         and some deltas are created against those objects, or if a
2253 		 *         cached pack is being used and some deltas will reference
2254 		 *         objects in the cached pack. This size does not include the
2255 		 *         pack header or trailer.
2256 		 */
2257 		public long getThinPackBytes() {
2258 			return statistics.getThinPackBytes();
2259 		}
2260 
2261 		/**
2262 		 * @param typeCode
2263 		 *            object type code, e.g. OBJ_COMMIT or OBJ_TREE.
2264 		 * @return information about this type of object in the pack.
2265 		 */
2266 		public ObjectType byObjectType(int typeCode) {
2267 			return new ObjectType(statistics.byObjectType(typeCode));
2268 		}
2269 
2270 		/** @return true if the resulting pack file was a shallow pack. */
2271 		public boolean isShallow() {
2272 			return statistics.isShallow();
2273 		}
2274 
2275 		/** @return depth (in commits) the pack includes if shallow. */
2276 		public int getDepth() {
2277 			return statistics.getDepth();
2278 		}
2279 
2280 		/**
2281 		 * @return time in milliseconds spent enumerating the objects that need
2282 		 *         to be included in the output. This time includes any restarts
2283 		 *         that occur when a cached pack is selected for reuse.
2284 		 */
2285 		public long getTimeCounting() {
2286 			return statistics.getTimeCounting();
2287 		}
2288 
2289 		/**
2290 		 * @return time in milliseconds spent matching existing representations
2291 		 *         against objects that will be transmitted, or that the client
2292 		 *         can be assumed to already have.
2293 		 */
2294 		public long getTimeSearchingForReuse() {
2295 			return statistics.getTimeSearchingForReuse();
2296 		}
2297 
2298 		/**
2299 		 * @return time in milliseconds spent finding the sizes of all objects
2300 		 *         that will enter the delta compression search window. The
2301 		 *         sizes need to be known to better match similar objects
2302 		 *         together and improve delta compression ratios.
2303 		 */
2304 		public long getTimeSearchingForSizes() {
2305 			return statistics.getTimeSearchingForSizes();
2306 		}
2307 
2308 		/**
2309 		 * @return time in milliseconds spent on delta compression. This is
2310 		 *         observed wall-clock time and does not accurately track CPU
2311 		 *         time used when multiple threads were used to perform the
2312 		 *         delta compression.
2313 		 */
2314 		public long getTimeCompressing() {
2315 			return statistics.getTimeCompressing();
2316 		}
2317 
2318 		/**
2319 		 * @return time in milliseconds spent writing the pack output, from
2320 		 *         start of header until end of trailer. The transfer speed can
2321 		 *         be approximated by dividing {@link #getTotalBytes()} by this
2322 		 *         value.
2323 		 */
2324 		public long getTimeWriting() {
2325 			return statistics.getTimeWriting();
2326 		}
2327 
2328 		/** @return total time spent processing this pack. */
2329 		public long getTimeTotal() {
2330 			return statistics.getTimeTotal();
2331 		}
2332 
2333 		/**
2334 		 * @return get the average output speed in terms of bytes-per-second.
2335 		 *         {@code getTotalBytes() / (getTimeWriting() / 1000.0)}.
2336 		 */
2337 		public double getTransferRate() {
2338 			return statistics.getTransferRate();
2339 		}
2340 
2341 		/** @return formatted message string for display to clients. */
2342 		public String getMessage() {
2343 			return statistics.getMessage();
2344 		}
2345 	}
2346 
2347 	private class MutableState {
2348 		/** Estimated size of a single ObjectToPack instance. */
2349 		// Assume 64-bit pointers, since this is just an estimate.
2350 		private static final long OBJECT_TO_PACK_SIZE =
2351 				(2 * 8)               // Object header
2352 				+ (2 * 8) + (2 * 8)   // ObjectToPack fields
2353 				+ (8 + 8)             // PackedObjectInfo fields
2354 				+ 8                   // ObjectIdOwnerMap fields
2355 				+ 40                  // AnyObjectId fields
2356 				+ 8;                  // Reference in BlockList
2357 
2358 		private final long totalDeltaSearchBytes;
2359 
2360 		private volatile PackingPhase phase;
2361 
2362 		MutableState() {
2363 			phase = PackingPhase.COUNTING;
2364 			if (config.isDeltaCompress()) {
2365 				int threads = config.getThreads();
2366 				if (threads <= 0)
2367 					threads = Runtime.getRuntime().availableProcessors();
2368 				totalDeltaSearchBytes = (threads * config.getDeltaSearchMemoryLimit())
2369 						+ config.getBigFileThreshold();
2370 			} else
2371 				totalDeltaSearchBytes = 0;
2372 		}
2373 
2374 		State snapshot() {
2375 			long objCnt = 0;
2376 			BlockList<ObjectToPack>[] lists = objectsLists;
2377 			if (lists != null) {
2378 				objCnt += lists[OBJ_COMMIT].size();
2379 				objCnt += lists[OBJ_TREE].size();
2380 				objCnt += lists[OBJ_BLOB].size();
2381 				objCnt += lists[OBJ_TAG].size();
2382 				// Exclude CachedPacks.
2383 			}
2384 
2385 			long bytesUsed = OBJECT_TO_PACK_SIZE * objCnt;
2386 			PackingPhase curr = phase;
2387 			if (curr == PackingPhase.COMPRESSING)
2388 				bytesUsed += totalDeltaSearchBytes;
2389 			return new State(curr, bytesUsed);
2390 		}
2391 	}
2392 
2393 	/** Possible states that a PackWriter can be in. */
2394 	public static enum PackingPhase {
2395 		/** Counting objects phase. */
2396 		COUNTING,
2397 
2398 		/** Getting sizes phase. */
2399 		GETTING_SIZES,
2400 
2401 		/** Finding sources phase. */
2402 		FINDING_SOURCES,
2403 
2404 		/** Compressing objects phase. */
2405 		COMPRESSING,
2406 
2407 		/** Writing objects phase. */
2408 		WRITING,
2409 
2410 		/** Building bitmaps phase. */
2411 		BUILDING_BITMAPS;
2412 	}
2413 
2414 	/** Summary of the current state of a PackWriter. */
2415 	public class State {
2416 		private final PackingPhase phase;
2417 
2418 		private final long bytesUsed;
2419 
2420 		State(PackingPhase phase, long bytesUsed) {
2421 			this.phase = phase;
2422 			this.bytesUsed = bytesUsed;
2423 		}
2424 
2425 		/** @return the PackConfig used to build the writer. */
2426 		public PackConfig getConfig() {
2427 			return config;
2428 		}
2429 
2430 		/** @return the current phase of the writer. */
2431 		public PackingPhase getPhase() {
2432 			return phase;
2433 		}
2434 
2435 		/** @return an estimate of the total memory used by the writer. */
2436 		public long estimateBytesUsed() {
2437 			return bytesUsed;
2438 		}
2439 
2440 		@SuppressWarnings("nls")
2441 		@Override
2442 		public String toString() {
2443 			return "PackWriter.State[" + phase + ", memory=" + bytesUsed + "]";
2444 		}
2445 	}
2446 }