View Javadoc
1   /*
2    * Copyright (C) 2011, Google Inc.
3    * and other copyright owners as documented in the project's IP log.
4    *
5    * This program and the accompanying materials are made available
6    * under the terms of the Eclipse Distribution License v1.0 which
7    * accompanies this distribution, is reproduced below, and is
8    * available at http://www.eclipse.org/org/documents/edl-v10.php
9    *
10   * All rights reserved.
11   *
12   * Redistribution and use in source and binary forms, with or
13   * without modification, are permitted provided that the following
14   * conditions are met:
15   *
16   * - Redistributions of source code must retain the above copyright
17   *   notice, this list of conditions and the following disclaimer.
18   *
19   * - Redistributions in binary form must reproduce the above
20   *   copyright notice, this list of conditions and the following
21   *   disclaimer in the documentation and/or other materials provided
22   *   with the distribution.
23   *
24   * - Neither the name of the Eclipse Foundation, Inc. nor the
25   *   names of its contributors may be used to endorse or promote
26   *   products derived from this software without specific prior
27   *   written permission.
28   *
29   * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
30   * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
31   * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
32   * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33   * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
34   * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
35   * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
36   * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
37   * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
38   * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39   * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40   * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
41   * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42   */
43  
44  package org.eclipse.jgit.internal.storage.dfs;
45  
46  import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.COMPACT;
47  import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.GC;
48  import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.GC_REST;
49  import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.GC_TXN;
50  import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.INSERT;
51  import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.RECEIVE;
52  import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.UNREACHABLE_GARBAGE;
53  import static org.eclipse.jgit.internal.storage.dfs.DfsPackCompactor.configureReftable;
54  import static org.eclipse.jgit.internal.storage.pack.PackExt.BITMAP_INDEX;
55  import static org.eclipse.jgit.internal.storage.pack.PackExt.INDEX;
56  import static org.eclipse.jgit.internal.storage.pack.PackExt.PACK;
57  import static org.eclipse.jgit.internal.storage.pack.PackExt.REFTABLE;
58  import static org.eclipse.jgit.internal.storage.pack.PackWriter.NONE;
59  
60  import java.io.IOException;
61  import java.util.ArrayList;
62  import java.util.Arrays;
63  import java.util.Calendar;
64  import java.util.Collection;
65  import java.util.EnumSet;
66  import java.util.GregorianCalendar;
67  import java.util.HashSet;
68  import java.util.List;
69  import java.util.Set;
70  import java.util.concurrent.TimeUnit;
71  
72  import org.eclipse.jgit.internal.JGitText;
73  import org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource;
74  import org.eclipse.jgit.internal.storage.file.PackIndex;
75  import org.eclipse.jgit.internal.storage.file.PackReverseIndex;
76  import org.eclipse.jgit.internal.storage.pack.PackExt;
77  import org.eclipse.jgit.internal.storage.pack.PackWriter;
78  import org.eclipse.jgit.internal.storage.reftable.ReftableCompactor;
79  import org.eclipse.jgit.internal.storage.reftable.ReftableConfig;
80  import org.eclipse.jgit.internal.storage.reftable.ReftableWriter;
81  import org.eclipse.jgit.internal.storage.reftree.RefTreeNames;
82  import org.eclipse.jgit.lib.AnyObjectId;
83  import org.eclipse.jgit.lib.Constants;
84  import org.eclipse.jgit.lib.NullProgressMonitor;
85  import org.eclipse.jgit.lib.ObjectId;
86  import org.eclipse.jgit.lib.ObjectIdSet;
87  import org.eclipse.jgit.lib.ProgressMonitor;
88  import org.eclipse.jgit.lib.Ref;
89  import org.eclipse.jgit.lib.RefDatabase;
90  import org.eclipse.jgit.revwalk.RevWalk;
91  import org.eclipse.jgit.storage.pack.PackConfig;
92  import org.eclipse.jgit.storage.pack.PackStatistics;
93  import org.eclipse.jgit.util.SystemReader;
94  import org.eclipse.jgit.util.io.CountingOutputStream;
95  
96  /**
97   * Repack and garbage collect a repository.
98   */
public class DfsGarbageCollector {
	private final DfsRepository repo;
	private final RefDatabase refdb;
	private final DfsObjDatabase objdb;

	// Output of the current run. These are parallel lists: entry i of
	// newPackStats / newPackObj describes the pack at newPackDesc[i].
	// Stats entries may be null (reftable-only packs; see writeReftable()).
	private final List<DfsPackDescription> newPackDesc;
	private final List<PackStatistics> newPackStats;
	private final List<ObjectIdSet> newPackObj;

	// Reader shared by all phases of a run; opened and closed in pack().
	private DfsReader ctx;

	private PackConfig packConfig;
	private ReftableConfig reftableConfig;
	private boolean convertToReftable = true;
	private boolean includeDeletes;
	private long reftableInitialMinUpdateIndex = 1;
	private long reftableInitialMaxUpdateIndex = 1;

	// See packIsCoalesceableGarbage(), below, for how these two variables
	// interact.
	private long coalesceGarbageLimit = 50 << 20; // 50 MiB
	private long garbageTtlMillis = TimeUnit.DAYS.toMillis(1);

	private long startTimeMillis;
	private List<DfsPackFile> packsBefore;
	private List<DfsReftable> reftablesBefore;
	private List<DfsPackFile> expiredGarbagePacks;

	// Snapshot of the reference space taken at the start of pack().
	private Collection<Ref> refsBefore;
	private Set<ObjectId> allHeadsAndTags;
	private Set<ObjectId> allTags;
	private Set<ObjectId> nonHeads;
	private Set<ObjectId> txnHeads;
	private Set<ObjectId> tagTargets;
133 
	/**
	 * Initialize a garbage collector.
	 *
	 * @param repository
	 *            repository objects to be packed will be read from.
	 */
	public DfsGarbageCollector(DfsRepository repository) {
		repo = repository;
		refdb = repo.getRefDatabase();
		objdb = repo.getObjectDatabase();
		// A run writes at most a few packs (GC, GC_REST, GC_TXN,
		// UNREACHABLE_GARBAGE), so presize the result lists for 4.
		newPackDesc = new ArrayList<>(4);
		newPackStats = new ArrayList<>(4);
		newPackObj = new ArrayList<>(4);

		packConfig = new PackConfig(repo);
		// pack() refuses to run with any other index version; see the
		// IllegalStateException thrown there.
		packConfig.setIndexVersion(2);
	}
151 
152 	/**
153 	 * Get configuration used to generate the new pack file.
154 	 *
155 	 * @return configuration used to generate the new pack file.
156 	 */
157 	public PackConfig getPackConfig() {
158 		return packConfig;
159 	}
160 
161 	/**
162 	 * Set the new configuration to use when creating the pack file.
163 	 *
164 	 * @param newConfig
165 	 *            the new configuration to use when creating the pack file.
166 	 * @return {@code this}
167 	 */
168 	public DfsGarbageCollector setPackConfig(PackConfig newConfig) {
169 		packConfig = newConfig;
170 		return this;
171 	}
172 
173 	/**
174 	 * Set configuration to write a reftable.
175 	 *
176 	 * @param cfg
177 	 *            configuration to write a reftable. Reftable writing is
178 	 *            disabled (default) when {@code cfg} is {@code null}.
179 	 * @return {@code this}
180 	 */
181 	public DfsGarbageCollector setReftableConfig(ReftableConfig cfg) {
182 		reftableConfig = cfg;
183 		return this;
184 	}
185 
186 	/**
187 	 * Whether the garbage collector should convert references to reftable.
188 	 *
189 	 * @param convert
190 	 *            if {@code true}, {@link #setReftableConfig(ReftableConfig)}
191 	 *            has been set non-null, and a GC reftable doesn't yet exist,
192 	 *            the garbage collector will make one by scanning the existing
193 	 *            references, and writing a new reftable. Default is
194 	 *            {@code true}.
195 	 * @return {@code this}
196 	 */
197 	public DfsGarbageCollector setConvertToReftable(boolean convert) {
198 		convertToReftable = convert;
199 		return this;
200 	}
201 
202 	/**
203 	 * Whether the garbage collector will include tombstones for deleted
204 	 * references in the reftable.
205 	 *
206 	 * @param include
207 	 *            if {@code true}, the garbage collector will include tombstones
208 	 *            for deleted references in the reftable. Default is
209 	 *            {@code false}.
210 	 * @return {@code this}
211 	 */
212 	public DfsGarbageCollector setIncludeDeletes(boolean include) {
213 		includeDeletes = include;
214 		return this;
215 	}
216 
217 	/**
218 	 * Set minUpdateIndex for the initial reftable created during conversion.
219 	 *
220 	 * @param u
221 	 *            minUpdateIndex for the initial reftable created by scanning
222 	 *            {@link org.eclipse.jgit.internal.storage.dfs.DfsRefDatabase#getRefs(String)}.
223 	 *            Ignored unless caller has also set
224 	 *            {@link #setReftableConfig(ReftableConfig)}. Defaults to
225 	 *            {@code 1}. Must be {@code u >= 0}.
226 	 * @return {@code this}
227 	 */
228 	public DfsGarbageCollector setReftableInitialMinUpdateIndex(long u) {
229 		reftableInitialMinUpdateIndex = Math.max(u, 0);
230 		return this;
231 	}
232 
233 	/**
234 	 * Set maxUpdateIndex for the initial reftable created during conversion.
235 	 *
236 	 * @param u
237 	 *            maxUpdateIndex for the initial reftable created by scanning
238 	 *            {@link org.eclipse.jgit.internal.storage.dfs.DfsRefDatabase#getRefs(String)}.
239 	 *            Ignored unless caller has also set
240 	 *            {@link #setReftableConfig(ReftableConfig)}. Defaults to
241 	 *            {@code 1}. Must be {@code u >= 0}.
242 	 * @return {@code this}
243 	 */
244 	public DfsGarbageCollector setReftableInitialMaxUpdateIndex(long u) {
245 		reftableInitialMaxUpdateIndex = Math.max(0, u);
246 		return this;
247 	}
248 
249 	/**
250 	 * Get coalesce garbage limit
251 	 *
252 	 * @return coalesce garbage limit, packs smaller than this size will be
253 	 *         repacked.
254 	 */
255 	public long getCoalesceGarbageLimit() {
256 		return coalesceGarbageLimit;
257 	}
258 
259 	/**
260 	 * Set the byte size limit for garbage packs to be repacked.
261 	 * <p>
262 	 * Any UNREACHABLE_GARBAGE pack smaller than this limit will be repacked at
263 	 * the end of the run. This allows the garbage collector to coalesce
264 	 * unreachable objects into a single file.
265 	 * <p>
266 	 * If an UNREACHABLE_GARBAGE pack is already larger than this limit it will
267 	 * be left alone by the garbage collector. This avoids unnecessary disk IO
268 	 * reading and copying the objects.
269 	 * <p>
270 	 * If limit is set to 0 the UNREACHABLE_GARBAGE coalesce is disabled.<br>
271 	 * If limit is set to {@link java.lang.Long#MAX_VALUE}, everything is
272 	 * coalesced.
273 	 * <p>
274 	 * Keeping unreachable garbage prevents race conditions with repository
275 	 * changes that may suddenly need an object whose only copy was stored in
276 	 * the UNREACHABLE_GARBAGE pack.
277 	 *
278 	 * @param limit
279 	 *            size in bytes.
280 	 * @return {@code this}
281 	 */
282 	public DfsGarbageCollector setCoalesceGarbageLimit(long limit) {
283 		coalesceGarbageLimit = limit;
284 		return this;
285 	}
286 
287 	/**
288 	 * Get time to live for garbage packs.
289 	 *
290 	 * @return garbage packs older than this limit (in milliseconds) will be
291 	 *         pruned as part of the garbage collection process if the value is
292 	 *         &gt; 0, otherwise garbage packs are retained.
293 	 */
294 	public long getGarbageTtlMillis() {
295 		return garbageTtlMillis;
296 	}
297 
298 	/**
299 	 * Set the time to live for garbage objects.
300 	 * <p>
301 	 * Any UNREACHABLE_GARBAGE older than this limit will be pruned at the end
302 	 * of the run.
303 	 * <p>
304 	 * If timeToLiveMillis is set to 0, UNREACHABLE_GARBAGE purging is disabled.
305 	 *
306 	 * @param ttl
307 	 *            Time to live whatever unit is specified.
308 	 * @param unit
309 	 *            The specified time unit.
310 	 * @return {@code this}
311 	 */
312 	public DfsGarbageCollector setGarbageTtl(long ttl, TimeUnit unit) {
313 		garbageTtlMillis = unit.toMillis(ttl);
314 		return this;
315 	}
316 
	/**
	 * Create a single new pack file containing all of the live objects.
	 * <p>
	 * This method safely decides which packs can be expired after the new pack
	 * is created by validating the references have not been modified in an
	 * incompatible way.
	 *
	 * @param pm
	 *            progress monitor to receive updates on as packing may take a
	 *            while, depending on the size of the repository.
	 * @return true if the repack was successful without race conditions. False
	 *         if a race condition was detected and the repack should be run
	 *         again later.
	 * @throws java.io.IOException
	 *             a new pack cannot be created.
	 */
	public boolean pack(ProgressMonitor pm) throws IOException {
		if (pm == null)
			pm = NullProgressMonitor.INSTANCE;
		// Only v2 pack indexes are supported by this collector.
		if (packConfig.getIndexVersion() != 2)
			throw new IllegalStateException(
					JGitText.get().supportOnlyPackIndexVersion2);

		startTimeMillis = SystemReader.getInstance().getCurrentTime();
		ctx = objdb.newReader();
		try {
			// Snapshot the ref and pack state before writing anything.
			refdb.refresh();
			objdb.clearCache();

			refsBefore = getAllRefs();
			readPacksBefore();
			readReftablesBefore();

			// Partition ref tips into heads, tags, ref-tree (txn) heads and
			// everything else; symbolic and unborn refs are skipped.
			Set<ObjectId> allHeads = new HashSet<>();
			allHeadsAndTags = new HashSet<>();
			allTags = new HashSet<>();
			nonHeads = new HashSet<>();
			txnHeads = new HashSet<>();
			tagTargets = new HashSet<>();
			for (Ref ref : refsBefore) {
				if (ref.isSymbolic() || ref.getObjectId() == null) {
					continue;
				}
				if (isHead(ref)) {
					allHeads.add(ref.getObjectId());
				} else if (isTag(ref)) {
					allTags.add(ref.getObjectId());
				} else if (RefTreeNames.isRefTree(refdb, ref.getName())) {
					txnHeads.add(ref.getObjectId());
				} else {
					nonHeads.add(ref.getObjectId());
				}
				if (ref.getPeeledObjectId() != null) {
					tagTargets.add(ref.getPeeledObjectId());
				}
			}
			// Don't exclude tags that are also branch tips.
			allTags.removeAll(allHeads);
			allHeadsAndTags.addAll(allHeads);
			allHeadsAndTags.addAll(allTags);

			// Hoist all branch tips and tags earlier in the pack file
			tagTargets.addAll(allHeadsAndTags);

			// Combine the GC_REST objects into the GC pack if requested
			if (packConfig.getSinglePack()) {
				allHeadsAndTags.addAll(nonHeads);
				nonHeads.clear();
			}

			// Write all new packs first, then atomically commit them and
			// prune the old ones; roll back the new packs on any failure.
			boolean rollback = true;
			try {
				packHeads(pm);
				packRest(pm);
				packRefTreeGraph(pm);
				packGarbage(pm);
				objdb.commitPack(newPackDesc, toPrune());
				rollback = false;
				return true;
			} finally {
				if (rollback)
					objdb.rollbackPack(newPackDesc);
			}
		} finally {
			ctx.close();
		}
	}
404 
405 	private Collection<Ref> getAllRefs() throws IOException {
406 		Collection<Ref> refs = refdb.getRefs();
407 		List<Ref> addl = refdb.getAdditionalRefs();
408 		if (!addl.isEmpty()) {
409 			List<Ref> all = new ArrayList<>(refs.size() + addl.size());
410 			all.addAll(refs);
411 			// add additional refs which start with refs/
412 			for (Ref r : addl) {
413 				if (r.getName().startsWith(Constants.R_REFS)) {
414 					all.add(r);
415 				}
416 			}
417 			return all;
418 		}
419 		return refs;
420 	}
421 
422 	private void readPacksBefore() throws IOException {
423 		DfsPackFile[] packs = objdb.getPacks();
424 		packsBefore = new ArrayList<>(packs.length);
425 		expiredGarbagePacks = new ArrayList<>(packs.length);
426 
427 		long now = SystemReader.getInstance().getCurrentTime();
428 		for (DfsPackFile p : packs) {
429 			DfsPackDescription d = p.getPackDescription();
430 			if (d.getPackSource() != UNREACHABLE_GARBAGE) {
431 				packsBefore.add(p);
432 			} else if (packIsExpiredGarbage(d, now)) {
433 				expiredGarbagePacks.add(p);
434 			} else if (packIsCoalesceableGarbage(d, now)) {
435 				packsBefore.add(p);
436 			}
437 		}
438 	}
439 
440 	private void readReftablesBefore() throws IOException {
441 		DfsReftable[] tables = objdb.getReftables();
442 		reftablesBefore = new ArrayList<>(Arrays.asList(tables));
443 	}
444 
445 	private boolean packIsExpiredGarbage(DfsPackDescription d, long now) {
446 		// Consider the garbage pack as expired when it's older than
447 		// garbagePackTtl. This check gives concurrent inserter threads
448 		// sufficient time to identify an object is not in the graph and should
449 		// have a new copy written, rather than relying on something from an
450 		// UNREACHABLE_GARBAGE pack.
451 		return d.getPackSource() == UNREACHABLE_GARBAGE
452 				&& garbageTtlMillis > 0
453 				&& now - d.getLastModified() >= garbageTtlMillis;
454 	}
455 
	private boolean packIsCoalesceableGarbage(DfsPackDescription d, long now) {
		// An UNREACHABLE_GARBAGE pack can be coalesced if its size is less than
		// the coalesceGarbageLimit and either garbageTtl is zero or if the pack
		// is created in a close time interval (on a single calendar day when
		// the garbageTtl is more than one day or one third of the garbageTtl).
		//
		// When the garbageTtl is more than 24 hours, garbage packs that are
		// created within a single calendar day are coalesced together. This
		// would make the effective ttl of the garbage pack as garbageTtl+23:59
		// and limit the number of garbage to a maximum number of
		// garbageTtl_in_days + 1 (assuming all of them are less than the size
		// of coalesceGarbageLimit).
		//
		// When the garbageTtl is less than or equal to 24 hours, garbage packs
		// that are created within a one third of garbageTtl are coalesced
		// together. This would make the effective ttl of the garbage packs as
		// garbageTtl + (garbageTtl / 3) and would limit the number of garbage
		// packs to a maximum number of 4 (assuming all of them are less than
		// the size of coalesceGarbageLimit).

		// Not garbage, or too large to be worth copying: leave it alone.
		if (d.getPackSource() != UNREACHABLE_GARBAGE
				|| d.getFileSize(PackExt.PACK) >= coalesceGarbageLimit) {
			return false;
		}

		// TTL disabled: every small garbage pack is coalesceable.
		if (garbageTtlMillis == 0) {
			return true;
		}

		long lastModified = d.getLastModified();
		long dayStartLastModified = dayStartInMillis(lastModified);
		long dayStartToday = dayStartInMillis(now);

		if (dayStartLastModified != dayStartToday) {
			return false; // this pack is not created today.
		}

		if (garbageTtlMillis > TimeUnit.DAYS.toMillis(1)) {
			return true; // ttl is more than one day and pack is created today.
		}

		long timeInterval = garbageTtlMillis / 3;
		if (timeInterval == 0) {
			return false; // ttl is too small, don't try to coalesce.
		}

		// Coalesce only when both times fall into the same third-of-ttl
		// slot of the current day.
		long modifiedTimeSlot = (lastModified - dayStartLastModified) / timeInterval;
		long presentTimeSlot = (now - dayStartToday) / timeInterval;
		return modifiedTimeSlot == presentTimeSlot;
	}
506 
507 	private static long dayStartInMillis(long timeInMillis) {
508 		Calendar cal = new GregorianCalendar(
509 				SystemReader.getInstance().getTimeZone());
510 		cal.setTimeInMillis(timeInMillis);
511 		cal.set(Calendar.HOUR_OF_DAY, 0);
512 		cal.set(Calendar.MINUTE, 0);
513 		cal.set(Calendar.SECOND, 0);
514 		cal.set(Calendar.MILLISECOND, 0);
515 		return cal.getTimeInMillis();
516 	}
517 
518 	/**
519 	 * Get all of the source packs that fed into this compaction.
520 	 *
521 	 * @return all of the source packs that fed into this compaction.
522 	 */
523 	public Set<DfsPackDescription> getSourcePacks() {
524 		return toPrune();
525 	}
526 
527 	/**
528 	 * Get new packs created by this compaction.
529 	 *
530 	 * @return new packs created by this compaction.
531 	 */
532 	public List<DfsPackDescription> getNewPacks() {
533 		return newPackDesc;
534 	}
535 
536 	/**
537 	 * Get statistics corresponding to the {@link #getNewPacks()}.
538 	 * <p>
539 	 * The elements can be null if the stat is not available for the pack file.
540 	 *
541 	 * @return statistics corresponding to the {@link #getNewPacks()}.
542 	 */
543 	public List<PackStatistics> getNewPackStatistics() {
544 		return newPackStats;
545 	}
546 
547 	private Set<DfsPackDescription> toPrune() {
548 		Set<DfsPackDescription> toPrune = new HashSet<>();
549 		for (DfsPackFile pack : packsBefore) {
550 			toPrune.add(pack.getPackDescription());
551 		}
552 		if (reftableConfig != null) {
553 			for (DfsReftable table : reftablesBefore) {
554 				toPrune.add(table.getPackDescription());
555 			}
556 		}
557 		for (DfsPackFile pack : expiredGarbagePacks) {
558 			toPrune.add(pack.getPackDescription());
559 		}
560 		return toPrune;
561 	}
562 
	private void packHeads(ProgressMonitor pm) throws IOException {
		// No live refs: nothing to pack, but a reftable may still need to
		// be written to replace the stack being pruned.
		if (allHeadsAndTags.isEmpty()) {
			writeReftable();
			return;
		}

		try (PackWriter pw = newPackWriter()) {
			pw.setTagTargets(tagTargets);
			pw.preparePack(pm, allHeadsAndTags, NONE, NONE, allTags);
			if (0 < pw.getObjectCount()) {
				// Estimate output size from the packs this GC pack replaces.
				long estSize = estimateGcPackSize(INSERT, RECEIVE, COMPACT, GC);
				writePack(GC, pw, pm, estSize);
			} else {
				writeReftable();
			}
		}
	}
580 
581 	private void packRest(ProgressMonitor pm) throws IOException {
582 		if (nonHeads.isEmpty())
583 			return;
584 
585 		try (PackWriter pw = newPackWriter()) {
586 			for (ObjectIdSet packedObjs : newPackObj)
587 				pw.excludeObjects(packedObjs);
588 			pw.preparePack(pm, nonHeads, allHeadsAndTags);
589 			if (0 < pw.getObjectCount())
590 				writePack(GC_REST, pw, pm,
591 						estimateGcPackSize(INSERT, RECEIVE, COMPACT, GC_REST));
592 		}
593 	}
594 
595 	private void packRefTreeGraph(ProgressMonitor pm) throws IOException {
596 		if (txnHeads.isEmpty())
597 			return;
598 
599 		try (PackWriter pw = newPackWriter()) {
600 			for (ObjectIdSet packedObjs : newPackObj)
601 				pw.excludeObjects(packedObjs);
602 			pw.preparePack(pm, txnHeads, NONE);
603 			if (0 < pw.getObjectCount())
604 				writePack(GC_TXN, pw, pm, 0 /* unknown pack size */);
605 		}
606 	}
607 
	private void packGarbage(ProgressMonitor pm) throws IOException {
		// Copy unreachable objects out of the old packs as-is: reuse the
		// existing deltas and compressed bytes, skip delta search and
		// bitmaps, since an UNREACHABLE_GARBAGE pack may be pruned later.
		PackConfig cfg = new PackConfig(packConfig);
		cfg.setReuseDeltas(true);
		cfg.setReuseObjects(true);
		cfg.setDeltaCompress(false);
		cfg.setBuildBitmaps(false);

		try (PackWriter pw = new PackWriter(cfg, ctx);
				RevWalk pool = new RevWalk(ctx)) {
			pw.setDeltaBaseAsOffset(true);
			pw.setReuseDeltaCommits(true);
			pm.beginTask(JGitText.get().findingGarbage, objectsBefore());
			long estimatedPackSize = 12 + 20; // header and trailer sizes.
			for (DfsPackFile oldPack : packsBefore) {
				PackIndex oldIdx = oldPack.getPackIndex(ctx);
				PackReverseIndex oldRevIdx = oldPack.getReverseIdx(ctx);
				long maxOffset = oldPack.getPackDescription().getFileSize(PACK)
						- 20; // pack size - trailer size.
				for (PackIndex.MutableEntry ent : oldIdx) {
					pm.update(1);
					ObjectId id = ent.toObjectId();
					// Skip objects already queued from another old pack (the
					// pool remembers prior lookupAny calls) or already stored
					// into one of the new packs, i.e. reachable objects.
					if (pool.lookupOrNull(id) != null || anyPackHas(id))
						continue;

					long offset = ent.getOffset();
					int type = oldPack.getObjectType(ctx, offset);
					pw.addObject(pool.lookupAny(id, type));
					// On-disk size: distance to the next object in pack order,
					// via the reverse index.
					long objSize = oldRevIdx.findNextOffset(offset, maxOffset)
							- offset;
					estimatedPackSize += objSize;
				}
			}
			pm.endTask();
			if (0 < pw.getObjectCount())
				writePack(UNREACHABLE_GARBAGE, pw, pm, estimatedPackSize);
		}
	}
645 
646 	private boolean anyPackHas(AnyObjectId id) {
647 		for (ObjectIdSet packedObjs : newPackObj)
648 			if (packedObjs.contains(id))
649 				return true;
650 		return false;
651 	}
652 
653 	private static boolean isHead(Ref ref) {
654 		return ref.getName().startsWith(Constants.R_HEADS);
655 	}
656 
657 	private static boolean isTag(Ref ref) {
658 		return ref.getName().startsWith(Constants.R_TAGS);
659 	}
660 
661 	private int objectsBefore() {
662 		int cnt = 0;
663 		for (DfsPackFile p : packsBefore)
664 			cnt += p.getPackDescription().getObjectCount();
665 		return cnt;
666 	}
667 
668 	private PackWriter newPackWriter() {
669 		PackWriter pw = new PackWriter(packConfig, ctx);
670 		pw.setDeltaBaseAsOffset(true);
671 		pw.setReuseDeltaCommits(false);
672 		return pw;
673 	}
674 
675 	private long estimateGcPackSize(PackSource first, PackSource... rest) {
676 		EnumSet<PackSource> sourceSet = EnumSet.of(first, rest);
677 		// Every pack file contains 12 bytes of header and 20 bytes of trailer.
678 		// Include the final pack file header and trailer size here and ignore
679 		// the same from individual pack files.
680 		long size = 32;
681 		for (DfsPackDescription pack : getSourcePacks()) {
682 			if (sourceSet.contains(pack.getPackSource())) {
683 				size += pack.getFileSize(PACK) - 32;
684 			}
685 		}
686 		return size;
687 	}
688 
	private DfsPackDescription writePack(PackSource source, PackWriter pw,
			ProgressMonitor pm, long estimatedPackSize) throws IOException {
		DfsPackDescription pack = repo.getObjectDatabase().newPack(source,
				estimatedPackSize);

		// The reftable is written alongside the GC pack only.
		if (source == GC && reftableConfig != null) {
			writeReftable(pack);
		}

		// Write pack data, then its index, then (optionally) its bitmap
		// index, recording file extensions and sizes on the description.
		try (DfsOutputStream out = objdb.writeFile(pack, PACK)) {
			pw.writePack(pm, pm, out);
			pack.addFileExt(PACK);
			pack.setBlockSize(PACK, out.blockSize());
		}

		try (DfsOutputStream out = objdb.writeFile(pack, INDEX)) {
			CountingOutputStream cnt = new CountingOutputStream(out);
			pw.writeIndex(cnt);
			pack.addFileExt(INDEX);
			pack.setFileSize(INDEX, cnt.getCount());
			pack.setBlockSize(INDEX, out.blockSize());
			pack.setIndexVersion(pw.getIndexVersion());
		}

		if (pw.prepareBitmapIndex(pm)) {
			try (DfsOutputStream out = objdb.writeFile(pack, BITMAP_INDEX)) {
				CountingOutputStream cnt = new CountingOutputStream(out);
				pw.writeBitmapIndex(cnt);
				pack.addFileExt(BITMAP_INDEX);
				pack.setFileSize(BITMAP_INDEX, cnt.getCount());
				pack.setBlockSize(BITMAP_INDEX, out.blockSize());
			}
		}

		PackStatistics stats = pw.getStatistics();
		pack.setPackStats(stats);
		// Stamp every new pack with the time this GC run started, giving
		// all of them one consistent clock reading.
		pack.setLastModified(startTimeMillis);
		newPackDesc.add(pack);
		newPackStats.add(stats);
		newPackObj.add(pw.getObjectSet());
		return pack;
	}
731 
732 	private void writeReftable() throws IOException {
733 		if (reftableConfig != null) {
734 			DfsPackDescription pack = objdb.newPack(GC);
735 			newPackDesc.add(pack);
736 			newPackStats.add(null);
737 			writeReftable(pack);
738 		}
739 	}
740 
	private void writeReftable(DfsPackDescription pack) throws IOException {
		// First reftable-enabled GC: build the initial table directly from
		// the scanned refs instead of compacting the existing stack.
		if (convertToReftable && !hasGcReftable()) {
			writeReftable(pack, refsBefore);
			return;
		}

		// Otherwise fold the entire existing reftable stack into a single
		// table, optionally keeping deletion tombstones.
		try (ReftableStack stack = ReftableStack.open(ctx, reftablesBefore)) {
			ReftableCompactor compact = new ReftableCompactor();
			compact.addAll(stack.readers());
			compact.setIncludeDeletes(includeDeletes);
			compactReftable(pack, compact);
		}
	}
754 
755 	private boolean hasGcReftable() {
756 		for (DfsReftable table : reftablesBefore) {
757 			if (table.getPackDescription().getPackSource() == GC) {
758 				return true;
759 			}
760 		}
761 		return false;
762 	}
763 
	// Writes a brand-new reftable for pack containing the given refs,
	// stamped with the configured initial min/max update indexes.
	private void writeReftable(DfsPackDescription pack, Collection<Ref> refs)
			throws IOException {
		try (DfsOutputStream out = objdb.writeFile(pack, REFTABLE)) {
			ReftableConfig cfg = configureReftable(reftableConfig, out);
			ReftableWriter writer = new ReftableWriter(cfg)
					.setMinUpdateIndex(reftableInitialMinUpdateIndex)
					.setMaxUpdateIndex(reftableInitialMaxUpdateIndex)
					.begin(out)
					.sortAndWriteRefs(refs)
					.finish();
			pack.addFileExt(REFTABLE);
			pack.setReftableStats(writer.getStats());
		}
	}
778 
	// Runs the prepared compactor, writing its merged output as the
	// REFTABLE file of pack and recording the resulting statistics.
	private void compactReftable(DfsPackDescription pack,
			ReftableCompactor compact) throws IOException {
		try (DfsOutputStream out = objdb.writeFile(pack, REFTABLE)) {
			compact.setConfig(configureReftable(reftableConfig, out));
			compact.compact(out);
			pack.addFileExt(REFTABLE);
			pack.setReftableStats(compact.getStats());
		}
	}
788 }