View Javadoc
1   /*
2    * Copyright (C) 2011, Google Inc. and others
3    *
4    * This program and the accompanying materials are made available under the
5    * terms of the Eclipse Distribution License v. 1.0 which is available at
6    * https://www.eclipse.org/org/documents/edl-v10.php.
7    *
8    * SPDX-License-Identifier: BSD-3-Clause
9    */
10  
11  package org.eclipse.jgit.internal.storage.dfs;
12  
13  import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.COMPACT;
14  import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.GC;
15  import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.GC_REST;
16  import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.INSERT;
17  import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.RECEIVE;
18  import static org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource.UNREACHABLE_GARBAGE;
19  import static org.eclipse.jgit.internal.storage.dfs.DfsPackCompactor.configureReftable;
20  import static org.eclipse.jgit.internal.storage.pack.PackExt.BITMAP_INDEX;
21  import static org.eclipse.jgit.internal.storage.pack.PackExt.INDEX;
22  import static org.eclipse.jgit.internal.storage.pack.PackExt.PACK;
23  import static org.eclipse.jgit.internal.storage.pack.PackExt.REFTABLE;
24  import static org.eclipse.jgit.internal.storage.pack.PackWriter.NONE;
25  
26  import java.io.IOException;
27  import java.util.ArrayList;
28  import java.util.Arrays;
29  import java.util.Calendar;
30  import java.util.Collection;
31  import java.util.EnumSet;
32  import java.util.GregorianCalendar;
33  import java.util.HashSet;
34  import java.util.List;
35  import java.util.Set;
36  import java.util.concurrent.TimeUnit;
37  
38  import org.eclipse.jgit.internal.JGitText;
39  import org.eclipse.jgit.internal.storage.dfs.DfsObjDatabase.PackSource;
40  import org.eclipse.jgit.internal.storage.file.PackIndex;
41  import org.eclipse.jgit.internal.storage.file.PackReverseIndex;
42  import org.eclipse.jgit.internal.storage.pack.PackExt;
43  import org.eclipse.jgit.internal.storage.pack.PackWriter;
44  import org.eclipse.jgit.internal.storage.reftable.ReftableCompactor;
45  import org.eclipse.jgit.internal.storage.reftable.ReftableConfig;
46  import org.eclipse.jgit.internal.storage.reftable.ReftableWriter;
47  import org.eclipse.jgit.lib.AnyObjectId;
48  import org.eclipse.jgit.lib.Constants;
49  import org.eclipse.jgit.lib.NullProgressMonitor;
50  import org.eclipse.jgit.lib.ObjectId;
51  import org.eclipse.jgit.lib.ObjectIdSet;
52  import org.eclipse.jgit.lib.ProgressMonitor;
53  import org.eclipse.jgit.lib.Ref;
54  import org.eclipse.jgit.lib.RefDatabase;
55  import org.eclipse.jgit.revwalk.RevWalk;
56  import org.eclipse.jgit.storage.pack.PackConfig;
57  import org.eclipse.jgit.storage.pack.PackStatistics;
58  import org.eclipse.jgit.util.SystemReader;
59  import org.eclipse.jgit.util.io.CountingOutputStream;
60  
61  /**
62   * Repack and garbage collect a repository.
63   */
64  public class DfsGarbageCollector {
	/** Repository whose objects are read and repacked. */
	private final DfsRepository repo;
	/** Reference database of {@link #repo}. */
	private final RefDatabase refdb;
	/** Object database of {@link #repo}; receives the new packs. */
	private final DfsObjDatabase objdb;

	// Outputs of the current run: one entry per pack written. The stats
	// list may hold null for packs without statistics (e.g. reftable-only).
	private final List<DfsPackDescription> newPackDesc;
	private final List<PackStatistics> newPackStats;
	private final List<ObjectIdSet> newPackObj;

	// Reader opened for the duration of pack(); closed in its finally block.
	private DfsReader ctx;

	private PackConfig packConfig;
	private ReftableConfig reftableConfig;
	private boolean convertToReftable = true;
	private boolean includeDeletes;
	private long reftableInitialMinUpdateIndex = 1;
	private long reftableInitialMaxUpdateIndex = 1;

	// See packIsCoalesceableGarbage(), below, for how these two variables
	// interact. Defaults: 50 MiB coalesce limit, 1 day garbage TTL.
	private long coalesceGarbageLimit = 50 << 20;
	private long garbageTtlMillis = TimeUnit.DAYS.toMillis(1);

	// Snapshot of repository state taken at the start of pack().
	private long startTimeMillis;
	private List<DfsPackFile> packsBefore;
	private List<DfsReftable> reftablesBefore;
	private List<DfsPackFile> expiredGarbagePacks;

	private Collection<Ref> refsBefore;
	// Ref classification computed from refsBefore; see pack().
	private Set<ObjectId> allHeadsAndTags;
	private Set<ObjectId> allTags;
	private Set<ObjectId> nonHeads;
	private Set<ObjectId> tagTargets;
97  
98  	/**
99  	 * Initialize a garbage collector.
100 	 *
101 	 * @param repository
102 	 *            repository objects to be packed will be read from.
103 	 */
104 	public DfsGarbageCollector(DfsRepository repository) {
105 		repo = repository;
106 		refdb = repo.getRefDatabase();
107 		objdb = repo.getObjectDatabase();
108 		newPackDesc = new ArrayList<>(4);
109 		newPackStats = new ArrayList<>(4);
110 		newPackObj = new ArrayList<>(4);
111 
112 		packConfig = new PackConfig(repo);
113 		packConfig.setIndexVersion(2);
114 	}
115 
116 	/**
117 	 * Get configuration used to generate the new pack file.
118 	 *
119 	 * @return configuration used to generate the new pack file.
120 	 */
121 	public PackConfig getPackConfig() {
122 		return packConfig;
123 	}
124 
125 	/**
126 	 * Set the new configuration to use when creating the pack file.
127 	 *
128 	 * @param newConfig
129 	 *            the new configuration to use when creating the pack file.
130 	 * @return {@code this}
131 	 */
132 	public DfsGarbageCollector setPackConfig(PackConfig newConfig) {
133 		packConfig = newConfig;
134 		return this;
135 	}
136 
137 	/**
138 	 * Set configuration to write a reftable.
139 	 *
140 	 * @param cfg
141 	 *            configuration to write a reftable. Reftable writing is
142 	 *            disabled (default) when {@code cfg} is {@code null}.
143 	 * @return {@code this}
144 	 */
145 	public DfsGarbageCollector setReftableConfig(ReftableConfig cfg) {
146 		reftableConfig = cfg;
147 		return this;
148 	}
149 
150 	/**
151 	 * Whether the garbage collector should convert references to reftable.
152 	 *
153 	 * @param convert
154 	 *            if {@code true}, {@link #setReftableConfig(ReftableConfig)}
155 	 *            has been set non-null, and a GC reftable doesn't yet exist,
156 	 *            the garbage collector will make one by scanning the existing
157 	 *            references, and writing a new reftable. Default is
158 	 *            {@code true}.
159 	 * @return {@code this}
160 	 */
161 	public DfsGarbageCollector setConvertToReftable(boolean convert) {
162 		convertToReftable = convert;
163 		return this;
164 	}
165 
166 	/**
167 	 * Whether the garbage collector will include tombstones for deleted
168 	 * references in the reftable.
169 	 *
170 	 * @param include
171 	 *            if {@code true}, the garbage collector will include tombstones
172 	 *            for deleted references in the reftable. Default is
173 	 *            {@code false}.
174 	 * @return {@code this}
175 	 */
176 	public DfsGarbageCollector setIncludeDeletes(boolean include) {
177 		includeDeletes = include;
178 		return this;
179 	}
180 
181 	/**
182 	 * Set minUpdateIndex for the initial reftable created during conversion.
183 	 *
184 	 * @param u
185 	 *            minUpdateIndex for the initial reftable created by scanning
186 	 *            {@link org.eclipse.jgit.internal.storage.dfs.DfsRefDatabase#getRefs(String)}.
187 	 *            Ignored unless caller has also set
188 	 *            {@link #setReftableConfig(ReftableConfig)}. Defaults to
189 	 *            {@code 1}. Must be {@code u >= 0}.
190 	 * @return {@code this}
191 	 */
192 	public DfsGarbageCollector setReftableInitialMinUpdateIndex(long u) {
193 		reftableInitialMinUpdateIndex = Math.max(u, 0);
194 		return this;
195 	}
196 
197 	/**
198 	 * Set maxUpdateIndex for the initial reftable created during conversion.
199 	 *
200 	 * @param u
201 	 *            maxUpdateIndex for the initial reftable created by scanning
202 	 *            {@link org.eclipse.jgit.internal.storage.dfs.DfsRefDatabase#getRefs(String)}.
203 	 *            Ignored unless caller has also set
204 	 *            {@link #setReftableConfig(ReftableConfig)}. Defaults to
205 	 *            {@code 1}. Must be {@code u >= 0}.
206 	 * @return {@code this}
207 	 */
208 	public DfsGarbageCollector setReftableInitialMaxUpdateIndex(long u) {
209 		reftableInitialMaxUpdateIndex = Math.max(0, u);
210 		return this;
211 	}
212 
213 	/**
214 	 * Get coalesce garbage limit
215 	 *
216 	 * @return coalesce garbage limit, packs smaller than this size will be
217 	 *         repacked.
218 	 */
219 	public long getCoalesceGarbageLimit() {
220 		return coalesceGarbageLimit;
221 	}
222 
223 	/**
224 	 * Set the byte size limit for garbage packs to be repacked.
225 	 * <p>
226 	 * Any UNREACHABLE_GARBAGE pack smaller than this limit will be repacked at
227 	 * the end of the run. This allows the garbage collector to coalesce
228 	 * unreachable objects into a single file.
229 	 * <p>
230 	 * If an UNREACHABLE_GARBAGE pack is already larger than this limit it will
231 	 * be left alone by the garbage collector. This avoids unnecessary disk IO
232 	 * reading and copying the objects.
233 	 * <p>
234 	 * If limit is set to 0 the UNREACHABLE_GARBAGE coalesce is disabled.<br>
235 	 * If limit is set to {@link java.lang.Long#MAX_VALUE}, everything is
236 	 * coalesced.
237 	 * <p>
238 	 * Keeping unreachable garbage prevents race conditions with repository
239 	 * changes that may suddenly need an object whose only copy was stored in
240 	 * the UNREACHABLE_GARBAGE pack.
241 	 *
242 	 * @param limit
243 	 *            size in bytes.
244 	 * @return {@code this}
245 	 */
246 	public DfsGarbageCollector setCoalesceGarbageLimit(long limit) {
247 		coalesceGarbageLimit = limit;
248 		return this;
249 	}
250 
251 	/**
252 	 * Get time to live for garbage packs.
253 	 *
254 	 * @return garbage packs older than this limit (in milliseconds) will be
255 	 *         pruned as part of the garbage collection process if the value is
256 	 *         &gt; 0, otherwise garbage packs are retained.
257 	 */
258 	public long getGarbageTtlMillis() {
259 		return garbageTtlMillis;
260 	}
261 
262 	/**
263 	 * Set the time to live for garbage objects.
264 	 * <p>
265 	 * Any UNREACHABLE_GARBAGE older than this limit will be pruned at the end
266 	 * of the run.
267 	 * <p>
268 	 * If timeToLiveMillis is set to 0, UNREACHABLE_GARBAGE purging is disabled.
269 	 *
270 	 * @param ttl
271 	 *            Time to live whatever unit is specified.
272 	 * @param unit
273 	 *            The specified time unit.
274 	 * @return {@code this}
275 	 */
276 	public DfsGarbageCollector setGarbageTtl(long ttl, TimeUnit unit) {
277 		garbageTtlMillis = unit.toMillis(ttl);
278 		return this;
279 	}
280 
281 	/**
282 	 * Create a single new pack file containing all of the live objects.
283 	 * <p>
284 	 * This method safely decides which packs can be expired after the new pack
285 	 * is created by validating the references have not been modified in an
286 	 * incompatible way.
287 	 *
288 	 * @param pm
289 	 *            progress monitor to receive updates on as packing may take a
290 	 *            while, depending on the size of the repository.
291 	 * @return true if the repack was successful without race conditions. False
292 	 *         if a race condition was detected and the repack should be run
293 	 *         again later.
294 	 * @throws java.io.IOException
295 	 *             a new pack cannot be created.
296 	 */
297 	public boolean pack(ProgressMonitor pm) throws IOException {
298 		if (pm == null)
299 			pm = NullProgressMonitor.INSTANCE;
300 		if (packConfig.getIndexVersion() != 2)
301 			throw new IllegalStateException(
302 					JGitText.get().supportOnlyPackIndexVersion2);
303 
304 		startTimeMillis = SystemReader.getInstance().getCurrentTime();
305 		ctx = objdb.newReader();
306 		try {
307 			refdb.refresh();
308 			objdb.clearCache();
309 
310 			refsBefore = getAllRefs();
311 			readPacksBefore();
312 			readReftablesBefore();
313 
314 			Set<ObjectId> allHeads = new HashSet<>();
315 			allHeadsAndTags = new HashSet<>();
316 			allTags = new HashSet<>();
317 			nonHeads = new HashSet<>();
318 			tagTargets = new HashSet<>();
319 			for (Ref ref : refsBefore) {
320 				if (ref.isSymbolic() || ref.getObjectId() == null) {
321 					continue;
322 				}
323 				if (isHead(ref)) {
324 					allHeads.add(ref.getObjectId());
325 				} else if (isTag(ref)) {
326 					allTags.add(ref.getObjectId());
327 				} else {
328 					nonHeads.add(ref.getObjectId());
329 				}
330 				if (ref.getPeeledObjectId() != null) {
331 					tagTargets.add(ref.getPeeledObjectId());
332 				}
333 			}
334 			// Don't exclude tags that are also branch tips.
335 			allTags.removeAll(allHeads);
336 			allHeadsAndTags.addAll(allHeads);
337 			allHeadsAndTags.addAll(allTags);
338 
339 			// Hoist all branch tips and tags earlier in the pack file
340 			tagTargets.addAll(allHeadsAndTags);
341 
342 			// Combine the GC_REST objects into the GC pack if requested
343 			if (packConfig.getSinglePack()) {
344 				allHeadsAndTags.addAll(nonHeads);
345 				nonHeads.clear();
346 			}
347 
348 			boolean rollback = true;
349 			try {
350 				packHeads(pm);
351 				packRest(pm);
352 				packGarbage(pm);
353 				objdb.commitPack(newPackDesc, toPrune());
354 				rollback = false;
355 				return true;
356 			} finally {
357 				if (rollback)
358 					objdb.rollbackPack(newPackDesc);
359 			}
360 		} finally {
361 			ctx.close();
362 		}
363 	}
364 
365 	private Collection<Ref> getAllRefs() throws IOException {
366 		Collection<Ref> refs = refdb.getRefs();
367 		List<Ref> addl = refdb.getAdditionalRefs();
368 		if (!addl.isEmpty()) {
369 			List<Ref> all = new ArrayList<>(refs.size() + addl.size());
370 			all.addAll(refs);
371 			// add additional refs which start with refs/
372 			for (Ref r : addl) {
373 				if (r.getName().startsWith(Constants.R_REFS)) {
374 					all.add(r);
375 				}
376 			}
377 			return all;
378 		}
379 		return refs;
380 	}
381 
382 	private void readPacksBefore() throws IOException {
383 		DfsPackFile[] packs = objdb.getPacks();
384 		packsBefore = new ArrayList<>(packs.length);
385 		expiredGarbagePacks = new ArrayList<>(packs.length);
386 
387 		long now = SystemReader.getInstance().getCurrentTime();
388 		for (DfsPackFile p : packs) {
389 			DfsPackDescription d = p.getPackDescription();
390 			if (d.getPackSource() != UNREACHABLE_GARBAGE) {
391 				packsBefore.add(p);
392 			} else if (packIsExpiredGarbage(d, now)) {
393 				expiredGarbagePacks.add(p);
394 			} else if (packIsCoalesceableGarbage(d, now)) {
395 				packsBefore.add(p);
396 			}
397 		}
398 	}
399 
400 	private void readReftablesBefore() throws IOException {
401 		DfsReftable[] tables = objdb.getReftables();
402 		reftablesBefore = new ArrayList<>(Arrays.asList(tables));
403 	}
404 
405 	private boolean packIsExpiredGarbage(DfsPackDescription d, long now) {
406 		// Consider the garbage pack as expired when it's older than
407 		// garbagePackTtl. This check gives concurrent inserter threads
408 		// sufficient time to identify an object is not in the graph and should
409 		// have a new copy written, rather than relying on something from an
410 		// UNREACHABLE_GARBAGE pack.
411 		return d.getPackSource() == UNREACHABLE_GARBAGE
412 				&& garbageTtlMillis > 0
413 				&& now - d.getLastModified() >= garbageTtlMillis;
414 	}
415 
416 	private boolean packIsCoalesceableGarbage(DfsPackDescription d, long now) {
417 		// An UNREACHABLE_GARBAGE pack can be coalesced if its size is less than
418 		// the coalesceGarbageLimit and either garbageTtl is zero or if the pack
419 		// is created in a close time interval (on a single calendar day when
420 		// the garbageTtl is more than one day or one third of the garbageTtl).
421 		//
422 		// When the garbageTtl is more than 24 hours, garbage packs that are
423 		// created within a single calendar day are coalesced together. This
424 		// would make the effective ttl of the garbage pack as garbageTtl+23:59
425 		// and limit the number of garbage to a maximum number of
426 		// garbageTtl_in_days + 1 (assuming all of them are less than the size
427 		// of coalesceGarbageLimit).
428 		//
429 		// When the garbageTtl is less than or equal to 24 hours, garbage packs
430 		// that are created within a one third of garbageTtl are coalesced
431 		// together. This would make the effective ttl of the garbage packs as
432 		// garbageTtl + (garbageTtl / 3) and would limit the number of garbage
433 		// packs to a maximum number of 4 (assuming all of them are less than
434 		// the size of coalesceGarbageLimit).
435 
436 		if (d.getPackSource() != UNREACHABLE_GARBAGE
437 				|| d.getFileSize(PackExt.PACK) >= coalesceGarbageLimit) {
438 			return false;
439 		}
440 
441 		if (garbageTtlMillis == 0) {
442 			return true;
443 		}
444 
445 		long lastModified = d.getLastModified();
446 		long dayStartLastModified = dayStartInMillis(lastModified);
447 		long dayStartToday = dayStartInMillis(now);
448 
449 		if (dayStartLastModified != dayStartToday) {
450 			return false; // this pack is not created today.
451 		}
452 
453 		if (garbageTtlMillis > TimeUnit.DAYS.toMillis(1)) {
454 			return true; // ttl is more than one day and pack is created today.
455 		}
456 
457 		long timeInterval = garbageTtlMillis / 3;
458 		if (timeInterval == 0) {
459 			return false; // ttl is too small, don't try to coalesce.
460 		}
461 
462 		long modifiedTimeSlot = (lastModified - dayStartLastModified) / timeInterval;
463 		long presentTimeSlot = (now - dayStartToday) / timeInterval;
464 		return modifiedTimeSlot == presentTimeSlot;
465 	}
466 
467 	private static long dayStartInMillis(long timeInMillis) {
468 		Calendar cal = new GregorianCalendar(
469 				SystemReader.getInstance().getTimeZone());
470 		cal.setTimeInMillis(timeInMillis);
471 		cal.set(Calendar.HOUR_OF_DAY, 0);
472 		cal.set(Calendar.MINUTE, 0);
473 		cal.set(Calendar.SECOND, 0);
474 		cal.set(Calendar.MILLISECOND, 0);
475 		return cal.getTimeInMillis();
476 	}
477 
478 	/**
479 	 * Get all of the source packs that fed into this compaction.
480 	 *
481 	 * @return all of the source packs that fed into this compaction.
482 	 */
483 	public Set<DfsPackDescription> getSourcePacks() {
484 		return toPrune();
485 	}
486 
487 	/**
488 	 * Get new packs created by this compaction.
489 	 *
490 	 * @return new packs created by this compaction.
491 	 */
492 	public List<DfsPackDescription> getNewPacks() {
493 		return newPackDesc;
494 	}
495 
496 	/**
497 	 * Get statistics corresponding to the {@link #getNewPacks()}.
498 	 * <p>
499 	 * The elements can be null if the stat is not available for the pack file.
500 	 *
501 	 * @return statistics corresponding to the {@link #getNewPacks()}.
502 	 */
503 	public List<PackStatistics> getNewPackStatistics() {
504 		return newPackStats;
505 	}
506 
507 	private Set<DfsPackDescription> toPrune() {
508 		Set<DfsPackDescription> toPrune = new HashSet<>();
509 		for (DfsPackFile pack : packsBefore) {
510 			toPrune.add(pack.getPackDescription());
511 		}
512 		if (reftableConfig != null) {
513 			for (DfsReftable table : reftablesBefore) {
514 				toPrune.add(table.getPackDescription());
515 			}
516 		}
517 		for (DfsPackFile pack : expiredGarbagePacks) {
518 			toPrune.add(pack.getPackDescription());
519 		}
520 		return toPrune;
521 	}
522 
523 	private void packHeads(ProgressMonitor pm) throws IOException {
524 		if (allHeadsAndTags.isEmpty()) {
525 			writeReftable();
526 			return;
527 		}
528 
529 		try (PackWriter pw = newPackWriter()) {
530 			pw.setTagTargets(tagTargets);
531 			pw.preparePack(pm, allHeadsAndTags, NONE, NONE, allTags);
532 			if (0 < pw.getObjectCount()) {
533 				long estSize = estimateGcPackSize(INSERT, RECEIVE, COMPACT, GC);
534 				writePack(GC, pw, pm, estSize);
535 			} else {
536 				writeReftable();
537 			}
538 		}
539 	}
540 
541 	private void packRest(ProgressMonitor pm) throws IOException {
542 		if (nonHeads.isEmpty())
543 			return;
544 
545 		try (PackWriter pw = newPackWriter()) {
546 			for (ObjectIdSet packedObjs : newPackObj)
547 				pw.excludeObjects(packedObjs);
548 			pw.preparePack(pm, nonHeads, allHeadsAndTags);
549 			if (0 < pw.getObjectCount())
550 				writePack(GC_REST, pw, pm,
551 						estimateGcPackSize(INSERT, RECEIVE, COMPACT, GC_REST));
552 		}
553 	}
554 
	/**
	 * Sweep the old live packs for objects not written by the earlier phases
	 * and copy them into a single UNREACHABLE_GARBAGE pack.
	 * <p>
	 * Objects are copied verbatim (reuse on, delta compression off) since
	 * they may never be read again; no bitmaps are built for garbage. The
	 * pack size is estimated up front from the reverse index so the output
	 * stream can be provisioned.
	 *
	 * @param pm
	 *            progress monitor; receives one update per scanned object.
	 * @throws IOException
	 *             the old packs cannot be read or the new pack written.
	 */
	private void packGarbage(ProgressMonitor pm) throws IOException {
		// Copy objects as-is: no recompression, no deltas, no bitmaps.
		PackConfig cfg = new PackConfig(packConfig);
		cfg.setReuseDeltas(true);
		cfg.setReuseObjects(true);
		cfg.setDeltaCompress(false);
		cfg.setBuildBitmaps(false);

		try (PackWriter pw = new PackWriter(cfg, ctx);
				RevWalk pool = new RevWalk(ctx)) {
			pw.setDeltaBaseAsOffset(true);
			pw.setReuseDeltaCommits(true);
			pm.beginTask(JGitText.get().findingGarbage, objectsBefore());
			long estimatedPackSize = 12 + 20; // header and trailer sizes.
			for (DfsPackFile oldPack : packsBefore) {
				PackIndex oldIdx = oldPack.getPackIndex(ctx);
				PackReverseIndex oldRevIdx = oldPack.getReverseIdx(ctx);
				long maxOffset = oldPack.getPackDescription().getFileSize(PACK)
						- 20; // pack size - trailer size.
				for (PackIndex.MutableEntry ent : oldIdx) {
					pm.update(1);
					ObjectId id = ent.toObjectId();
					// Skip objects already seen by the walk or already
					// written into one of the new packs.
					if (pool.lookupOrNull(id) != null || anyPackHas(id))
						continue;

					long offset = ent.getOffset();
					int type = oldPack.getObjectType(ctx, offset);
					pw.addObject(pool.lookupAny(id, type));
					// On-disk size = distance to the next object's offset
					// in the old pack, per the reverse index.
					long objSize = oldRevIdx.findNextOffset(offset, maxOffset)
							- offset;
					estimatedPackSize += objSize;
				}
			}
			pm.endTask();
			if (0 < pw.getObjectCount())
				writePack(UNREACHABLE_GARBAGE, pw, pm, estimatedPackSize);
		}
	}
592 
593 	private boolean anyPackHas(AnyObjectId id) {
594 		for (ObjectIdSet packedObjs : newPackObj)
595 			if (packedObjs.contains(id))
596 				return true;
597 		return false;
598 	}
599 
600 	private static boolean isHead(Ref ref) {
601 		return ref.getName().startsWith(Constants.R_HEADS);
602 	}
603 
604 	private static boolean isTag(Ref ref) {
605 		return ref.getName().startsWith(Constants.R_TAGS);
606 	}
607 
608 	private int objectsBefore() {
609 		int cnt = 0;
610 		for (DfsPackFile p : packsBefore)
611 			cnt += (int) p.getPackDescription().getObjectCount();
612 		return cnt;
613 	}
614 
615 	private PackWriter newPackWriter() {
616 		PackWriter pw = new PackWriter(packConfig, ctx);
617 		pw.setDeltaBaseAsOffset(true);
618 		pw.setReuseDeltaCommits(false);
619 		return pw;
620 	}
621 
622 	private long estimateGcPackSize(PackSource first, PackSource... rest) {
623 		EnumSet<PackSource> sourceSet = EnumSet.of(first, rest);
624 		// Every pack file contains 12 bytes of header and 20 bytes of trailer.
625 		// Include the final pack file header and trailer size here and ignore
626 		// the same from individual pack files.
627 		long size = 32;
628 		for (DfsPackDescription pack : getSourcePacks()) {
629 			if (sourceSet.contains(pack.getPackSource())) {
630 				size += pack.getFileSize(PACK) - 32;
631 			}
632 		}
633 		return size;
634 	}
635 
	/**
	 * Write one pack plus its companion files (index, optional bitmap index,
	 * and for the GC pack an updated reftable) and record it in the run's
	 * output lists.
	 * <p>
	 * Files are written in a fixed order: reftable (GC only), pack data,
	 * index, bitmap index. Each extension is registered on the description
	 * as its file is completed.
	 *
	 * @param source
	 *            source category for the new pack.
	 * @param pw
	 *            writer holding the prepared object set.
	 * @param pm
	 *            progress monitor.
	 * @param estimatedPackSize
	 *            expected pack size, used to provision the output.
	 * @return description of the pack that was written.
	 * @throws IOException
	 *             a file cannot be written.
	 */
	private DfsPackDescription writePack(PackSource source, PackWriter pw,
			ProgressMonitor pm, long estimatedPackSize) throws IOException {
		DfsPackDescription pack = repo.getObjectDatabase().newPack(source,
				estimatedPackSize);

		// The GC pack carries the repository's compacted reftable.
		if (source == GC && reftableConfig != null) {
			writeReftable(pack);
		}

		try (DfsOutputStream out = objdb.writeFile(pack, PACK)) {
			pw.writePack(pm, pm, out);
			pack.addFileExt(PACK);
			pack.setBlockSize(PACK, out.blockSize());
		}

		try (DfsOutputStream out = objdb.writeFile(pack, INDEX)) {
			// Wrap to learn the index size; DfsOutputStream may buffer.
			CountingOutputStream cnt = new CountingOutputStream(out);
			pw.writeIndex(cnt);
			pack.addFileExt(INDEX);
			pack.setFileSize(INDEX, cnt.getCount());
			pack.setBlockSize(INDEX, out.blockSize());
			pack.setIndexVersion(pw.getIndexVersion());
		}

		// Bitmaps are only produced when the writer prepared them
		// (e.g. disabled for UNREACHABLE_GARBAGE by packGarbage's config).
		if (pw.prepareBitmapIndex(pm)) {
			try (DfsOutputStream out = objdb.writeFile(pack, BITMAP_INDEX)) {
				CountingOutputStream cnt = new CountingOutputStream(out);
				pw.writeBitmapIndex(cnt);
				pack.addFileExt(BITMAP_INDEX);
				pack.setFileSize(BITMAP_INDEX, cnt.getCount());
				pack.setBlockSize(BITMAP_INDEX, out.blockSize());
			}
		}

		// Record the pack in the run outputs; newPackObj feeds the
		// object-exclusion checks of later phases.
		PackStatistics stats = pw.getStatistics();
		pack.setPackStats(stats);
		pack.setLastModified(startTimeMillis);
		newPackDesc.add(pack);
		newPackStats.add(stats);
		newPackObj.add(pw.getObjectSet());
		return pack;
	}
678 
679 	private void writeReftable() throws IOException {
680 		if (reftableConfig != null) {
681 			DfsPackDescription pack = objdb.newPack(GC);
682 			newPackDesc.add(pack);
683 			newPackStats.add(null);
684 			writeReftable(pack);
685 		}
686 	}
687 
	/**
	 * Write the reftable for the given pack: either convert the current
	 * references into a fresh reftable (first GC with conversion enabled),
	 * or compact the existing reftable stack into a single table.
	 *
	 * @param pack
	 *            pack description that will own the reftable file.
	 * @throws IOException
	 *             the reftable cannot be written.
	 */
	private void writeReftable(DfsPackDescription pack) throws IOException {
		// First conversion: no GC reftable exists yet, so build one by
		// scanning the references snapshotted at the start of the run.
		if (convertToReftable && !hasGcReftable()) {
			writeReftable(pack, refsBefore);
			return;
		}

		// Otherwise compact the whole existing stack into one table. The
		// stack must stay open until compact() has read all of it.
		try (DfsReftableStack stack = DfsReftableStack.open(ctx, reftablesBefore);
		     DfsOutputStream out = objdb.writeFile(pack, REFTABLE)) {
			ReftableCompactor compact = new ReftableCompactor(out);
			compact.addAll(stack.readers());
			compact.setIncludeDeletes(includeDeletes);
			compact.setConfig(configureReftable(reftableConfig, out));
			compact.compact();
			pack.addFileExt(REFTABLE);
			pack.setReftableStats(compact.getStats());
		}
	}
705 
706 	private boolean hasGcReftable() {
707 		for (DfsReftable table : reftablesBefore) {
708 			if (table.getPackDescription().getPackSource() == GC) {
709 				return true;
710 			}
711 		}
712 		return false;
713 	}
714 
715 	private void writeReftable(DfsPackDescription pack, Collection<Ref> refs)
716 			throws IOException {
717 		try (DfsOutputStream out = objdb.writeFile(pack, REFTABLE)) {
718 			ReftableConfig cfg = configureReftable(reftableConfig, out);
719 			ReftableWriter writer = new ReftableWriter(cfg, out)
720 					.setMinUpdateIndex(reftableInitialMinUpdateIndex)
721 					.setMaxUpdateIndex(reftableInitialMaxUpdateIndex).begin()
722 					.sortAndWriteRefs(refs).finish();
723 			pack.addFileExt(REFTABLE);
724 			pack.setReftableStats(writer.getStats());
725 		}
726 	}
727 }