// View Javadoc (artifact of the HTML source export; not part of the original file)
1   /*
2    * Copyright (C) 2008-2011, Google Inc.
3    * Copyright (C) 2008, Shawn O. Pearce <spearce@spearce.org>
4    * and other copyright owners as documented in the project's IP log.
5    *
6    * This program and the accompanying materials are made available
7    * under the terms of the Eclipse Distribution License v1.0 which
8    * accompanies this distribution, is reproduced below, and is
9    * available at http://www.eclipse.org/org/documents/edl-v10.php
10   *
11   * All rights reserved.
12   *
13   * Redistribution and use in source and binary forms, with or
14   * without modification, are permitted provided that the following
15   * conditions are met:
16   *
17   * - Redistributions of source code must retain the above copyright
18   *   notice, this list of conditions and the following disclaimer.
19   *
20   * - Redistributions in binary form must reproduce the above
21   *   copyright notice, this list of conditions and the following
22   *   disclaimer in the documentation and/or other materials provided
23   *   with the distribution.
24   *
25   * - Neither the name of the Eclipse Foundation, Inc. nor the
26   *   names of its contributors may be used to endorse or promote
27   *   products derived from this software without specific prior
28   *   written permission.
29   *
30   * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
31   * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
32   * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
33   * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
34   * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
35   * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
36   * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
37   * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
38   * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
39   * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
40   * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
41   * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
42   * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
43   */
44  
45  package org.eclipse.jgit.internal.storage.dfs;
46  
47  import java.io.IOException;
48  import java.util.concurrent.atomic.AtomicLong;
49  import java.util.concurrent.atomic.AtomicReference;
50  import java.util.concurrent.atomic.AtomicReferenceArray;
51  import java.util.concurrent.locks.ReentrantLock;
52  import java.util.function.Consumer;
53  import java.util.stream.LongStream;
54  
55  import org.eclipse.jgit.internal.JGitText;
56  import org.eclipse.jgit.internal.storage.pack.PackExt;
57  
58  /**
59   * Caches slices of a
60   * {@link org.eclipse.jgit.internal.storage.dfs.BlockBasedFile} in memory for
61   * faster read access.
62   * <p>
63   * The DfsBlockCache serves as a Java based "buffer cache", loading segments of
64   * a BlockBasedFile into the JVM heap prior to use. As JGit often wants to do
65   * reads of only tiny slices of a file, the DfsBlockCache tries to smooth out
66   * these tiny reads into larger block-sized IO operations.
67   * <p>
68   * Whenever a cache miss occurs, loading is invoked by exactly one thread for
69   * the given <code>(DfsStreamKey,position)</code> key tuple. This is ensured by
70   * an array of locks, with the tuple hashed to a lock instance.
71   * <p>
72   * Its too expensive during object access to be accurate with a least recently
73   * used (LRU) algorithm. Strictly ordering every read is a lot of overhead that
74   * typically doesn't yield a corresponding benefit to the application. This
75   * cache implements a clock replacement algorithm, giving each block one chance
76   * to have been accessed during a sweep of the cache to save itself from
77   * eviction.
78   * <p>
79   * Entities created by the cache are held under hard references, preventing the
80   * Java VM from clearing anything. Blocks are discarded by the replacement
81   * algorithm when adding a new block would cause the cache to exceed its
82   * configured maximum size.
83   * <p>
84   * The key tuple is passed through to methods as a pair of parameters rather
85   * than as a single Object, thus reducing the transient memory allocations of
86   * callers. It is more efficient to avoid the allocation, as we can't be 100%
87   * sure that a JIT would be able to stack-allocate a key tuple.
88   * <p>
89   * The internal hash table does not expand at runtime, instead it is fixed in
90   * size at cache creation time. The internal lock table used to gate load
91   * invocations is also fixed in size.
92   */
public final class DfsBlockCache {
	/**
	 * Process-wide singleton; replaced wholesale by
	 * {@link #reconfigure(DfsBlockCacheConfig)}. Volatile so readers always
	 * observe the most recently installed instance.
	 */
	private static volatile DfsBlockCache cache;

	static {
		// Install a default-configured cache at class load time so
		// getInstance() never returns null.
		reconfigure(new DfsBlockCacheConfig());
	}
99  
	/**
	 * Modify the configuration of the window cache.
	 * <p>
	 * The new configuration is applied immediately, and the existing cache is
	 * cleared.
	 *
	 * @param cfg
	 *            the new window cache configuration.
	 * @throws java.lang.IllegalArgumentException
	 *             the cache configuration contains one or more invalid
	 *             settings, usually too low of a limit.
	 */
	public static void reconfigure(DfsBlockCacheConfig cfg) {
		// Replacing the singleton discards all previously cached blocks;
		// threads racing this call may briefly keep using the old instance.
		cache = new DfsBlockCache(cfg);
	}

	/**
	 * Get the currently active DfsBlockCache.
	 *
	 * @return the currently active DfsBlockCache.
	 */
	public static DfsBlockCache getInstance() {
		return cache;
	}
124 
	/** Number of entries in {@link #table}. Fixed at construction time. */
	private final int tableSize;

	/** Hash bucket directory; entries are chained below. */
	private final AtomicReferenceArray<HashEntry> table;

	/**
	 * Locks to prevent concurrent loads for same (PackFile,position) block. The
	 * number of locks is {@link DfsBlockCacheConfig#getConcurrencyLevel()} to
	 * cap the overall concurrent block loads.
	 */
	private final ReentrantLock[] loadLocks;

	/**
	 * A separate pool of locks to prevent concurrent loads for same index or
	 * bitmap from PackFile.
	 */
	private final ReentrantLock[] refLocks;

	/** Maximum number of bytes the cache should hold. */
	private final long maxBytes;

	/** Pack files smaller than this size can be copied through the cache. */
	private final long maxStreamThroughCache;

	/**
	 * Suggested block size to read from pack files in.
	 * <p>
	 * If a pack file does not have a native block size, this size will be used.
	 * <p>
	 * If a pack file has a native size, a whole multiple of the native size
	 * will be used until it matches this size.
	 * <p>
	 * The value for blockSize must be a power of 2.
	 */
	private final int blockSize;

	/** As {@link #blockSize} is a power of 2, bits to shift for a / blockSize. */
	private final int blockSizeShift;

	/**
	 * Number of times a block was found in the cache, per pack file extension.
	 * The array is indexed by {@code DfsStreamKey.packExtPos} and swapped via
	 * CAS when it must grow (see {@code getStat}).
	 */
	private final AtomicReference<AtomicLong[]> statHit;

	/**
	 * Number of times a block was not found, and had to be loaded, per pack
	 * file extension.
	 */
	private final AtomicReference<AtomicLong[]> statMiss;

	/**
	 * Number of blocks evicted due to cache being full, per pack file
	 * extension.
	 */
	private final AtomicReference<AtomicLong[]> statEvict;

	/**
	 * Number of bytes currently loaded in the cache, per pack file extension.
	 */
	private final AtomicReference<AtomicLong[]> liveBytes;

	/** Protects the clock and its related data. */
	private final ReentrantLock clockLock;

	/**
	 * A consumer of object reference lock wait time milliseconds. May be used
	 * to build a metric.
	 */
	private final Consumer<Long> refLockWaitTime;

	/** Current position of the clock. Guarded by {@link #clockLock}. */
	private Ref clockHand;
196 
	/**
	 * Build a cache from the given configuration: sizes the hash table, the
	 * two fair lock pools, and the clock ring, and zeroes all statistics.
	 */
	@SuppressWarnings("unchecked")
	private DfsBlockCache(DfsBlockCacheConfig cfg) {
		tableSize = tableSize(cfg);
		if (tableSize < 1) {
			throw new IllegalArgumentException(JGitText.get().tSizeMustBeGreaterOrEqual1);
		}

		table = new AtomicReferenceArray<>(tableSize);
		loadLocks = new ReentrantLock[cfg.getConcurrencyLevel()];
		for (int i = 0; i < loadLocks.length; i++) {
			loadLocks[i] = new ReentrantLock(true /* fair */);
		}
		refLocks = new ReentrantLock[cfg.getConcurrencyLevel()];
		for (int i = 0; i < refLocks.length; i++) {
			refLocks[i] = new ReentrantLock(true /* fair */);
		}

		maxBytes = cfg.getBlockLimit();
		maxStreamThroughCache = (long) (maxBytes * cfg.getStreamRatio());
		blockSize = cfg.getBlockSize();
		// NOTE(review): numberOfTrailingZeros only yields a usable shift if
		// blockSize is a power of 2, as the field doc requires; presumably
		// DfsBlockCacheConfig enforces that — confirm, it is not checked here.
		blockSizeShift = Integer.numberOfTrailingZeros(blockSize);

		clockLock = new ReentrantLock(true /* fair */);
		String none = ""; //$NON-NLS-1$
		// Sentinel Ref pointing at itself: the clock ring is circular and
		// never empty, which keeps the sweep in reserveSpace() loop-safe.
		clockHand = new Ref<>(
				DfsStreamKey.of(new DfsRepositoryDescription(none), none, null),
				-1, 0, null);
		clockHand.next = clockHand;

		statHit = new AtomicReference<>(newCounters());
		statMiss = new AtomicReference<>(newCounters());
		statEvict = new AtomicReference<>(newCounters());
		liveBytes = new AtomicReference<>(newCounters());

		refLockWaitTime = cfg.getRefLockWaitTimeConsumer();
	}
233 
234 	boolean shouldCopyThroughCache(long length) {
235 		return length <= maxStreamThroughCache;
236 	}
237 
	/**
	 * Get total number of bytes in the cache, per pack file extension.
	 *
	 * @return total number of bytes in the cache, per pack file extension;
	 *         a snapshot copy, safe for the caller to mutate.
	 */
	public long[] getCurrentSize() {
		return getStatVals(liveBytes);
	}
246 
247 	/**
248 	 * Get 0..100, defining how full the cache is.
249 	 *
250 	 * @return 0..100, defining how full the cache is.
251 	 */
252 	public long getFillPercentage() {
253 		return LongStream.of(getCurrentSize()).sum() * 100 / maxBytes;
254 	}
255 
	/**
	 * Get number of requests for items in the cache, per pack file extension.
	 *
	 * @return number of requests for items in the cache, per pack file
	 *         extension.
	 */
	public long[] getHitCount() {
		return getStatVals(statHit);
	}

	/**
	 * Get number of requests for items not in the cache, per pack file
	 * extension.
	 *
	 * @return number of requests for items not in the cache, per pack file
	 *         extension.
	 */
	public long[] getMissCount() {
		return getStatVals(statMiss);
	}
276 
277 	/**
278 	 * Get total number of requests (hit + miss), per pack file extension.
279 	 *
280 	 * @return total number of requests (hit + miss), per pack file extension.
281 	 */
282 	public long[] getTotalRequestCount() {
283 		AtomicLong[] hit = statHit.get();
284 		AtomicLong[] miss = statMiss.get();
285 		long[] cnt = new long[Math.max(hit.length, miss.length)];
286 		for (int i = 0; i < hit.length; i++) {
287 			cnt[i] += hit[i].get();
288 		}
289 		for (int i = 0; i < miss.length; i++) {
290 			cnt[i] += miss[i].get();
291 		}
292 		return cnt;
293 	}
294 
295 	/**
296 	 * Get hit ratios
297 	 *
298 	 * @return hit ratios
299 	 */
300 	public long[] getHitRatio() {
301 		AtomicLong[] hit = statHit.get();
302 		AtomicLong[] miss = statMiss.get();
303 		long[] ratio = new long[Math.max(hit.length, miss.length)];
304 		for (int i = 0; i < ratio.length; i++) {
305 			if (i >= hit.length) {
306 				ratio[i] = 0;
307 			} else if (i >= miss.length) {
308 				ratio[i] = 100;
309 			} else {
310 				long hitVal = hit[i].get();
311 				long missVal = miss[i].get();
312 				long total = hitVal + missVal;
313 				ratio[i] = total == 0 ? 0 : hitVal * 100 / total;
314 			}
315 		}
316 		return ratio;
317 	}
318 
	/**
	 * Get number of evictions performed due to cache being full, per pack file
	 * extension.
	 *
	 * @return number of evictions performed due to cache being full, per pack
	 *         file extension.
	 */
	public long[] getEvictions() {
		return getStatVals(statEvict);
	}
329 
330 	/**
331 	 * Quickly check if the cache contains block 0 of the given stream.
332 	 * <p>
333 	 * This can be useful for sophisticated pre-read algorithms to quickly
334 	 * determine if a file is likely already in cache, especially small
335 	 * reftables which may be smaller than a typical DFS block size.
336 	 *
337 	 * @param key
338 	 *            the file to check.
339 	 * @return true if block 0 (the first block) is in the cache.
340 	 */
341 	public boolean hasBlock0(DfsStreamKey key) {
342 		HashEntry e1 = table.get(slot(key, 0));
343 		DfsBlock v = scan(e1, key, 0);
344 		return v != null && v.contains(key, 0);
345 	}
346 
	/**
	 * Combine a stream's hash with a block-aligned offset. Dividing the offset
	 * by blockSize (via the shift) makes adjacent blocks hash to distinct
	 * consecutive values.
	 */
	private int hash(int packHash, long off) {
		return packHash + (int) (off >>> blockSizeShift);
	}

	int getBlockSize() {
		return blockSize;
	}
354 
355 	private static int tableSize(DfsBlockCacheConfig cfg) {
356 		final int wsz = cfg.getBlockSize();
357 		final long limit = cfg.getBlockLimit();
358 		if (wsz <= 0) {
359 			throw new IllegalArgumentException(JGitText.get().invalidWindowSize);
360 		}
361 		if (limit < wsz) {
362 			throw new IllegalArgumentException(JGitText.get().windowSizeMustBeLesserThanLimit);
363 		}
364 		return (int) Math.min(5 * (limit / wsz) / 2, Integer.MAX_VALUE);
365 	}
366 
	/**
	 * Look up a cached object, creating and loading it if it doesn't exist.
	 *
	 * @param file
	 *            the pack that "contains" the cached object.
	 * @param position
	 *            offset within <code>pack</code> of the object.
	 * @param ctx
	 *            current thread's reader.
	 * @param fileChannel
	 *            supplier for channel to read {@code pack}.
	 * @return the object reference.
	 * @throws IOException
	 *             the reference was not in the cache and could not be loaded.
	 */
	DfsBlock getOrLoad(BlockBasedFile file, long position, DfsReader ctx,
			ReadableChannelSupplier fileChannel) throws IOException {
		final long requestedPosition = position;
		position = file.alignToBlock(position);

		DfsStreamKey key = file.key;
		int slot = slot(key, position);
		HashEntry e1 = table.get(slot);
		DfsBlock v = scan(e1, key, position);
		if (v != null && v.contains(key, requestedPosition)) {
			// Lock-free fast path: block is already cached.
			ctx.stats.blockCacheHit++;
			getStat(statHit, key).incrementAndGet();
			return v;
		}

		// Reserve the expected block size before loading so concurrent
		// loads cannot collectively push the cache past maxBytes; the
		// reservation is credited back if the load fails or loses a race.
		reserveSpace(blockSize, key);
		ReentrantLock regionLock = lockFor(key, position);
		regionLock.lock();
		try {
			// Double-check under the region lock: another thread may have
			// loaded this block while we waited.
			HashEntry e2 = table.get(slot);
			if (e2 != e1) {
				v = scan(e2, key, position);
				if (v != null) {
					ctx.stats.blockCacheHit++;
					getStat(statHit, key).incrementAndGet();
					creditSpace(blockSize, key);
					return v;
				}
			}

			getStat(statMiss, key).incrementAndGet();
			boolean credit = true;
			try {
				v = file.readOneBlock(requestedPosition, ctx,
						fileChannel.get());
				credit = false;
			} finally {
				// readOneBlock threw: release the space we reserved.
				if (credit) {
					creditSpace(blockSize, key);
				}
			}
			if (position != v.start) {
				// The file discovered its blockSize and adjusted.
				position = v.start;
				slot = slot(key, position);
				e2 = table.get(slot);
			}

			Ref<DfsBlock> ref = new Ref<>(key, position, v.size(), v);
			ref.hot = true;
			// CAS-insert at the head of the bucket chain, pruning evicted
			// entries via clean(); retry if another thread updated the slot.
			for (;;) {
				HashEntry n = new HashEntry(clean(e2), ref);
				if (table.compareAndSet(slot, e2, n)) {
					break;
				}
				e2 = table.get(slot);
			}
			// Correct the reservation for the actual block size and link
			// the new entry into the clock ring.
			addToClock(ref, blockSize - v.size());
		} finally {
			regionLock.unlock();
		}

		// If the block size changed from the default, it is possible the block
		// that was loaded is the wrong block for the requested position.
		if (v.contains(file.key, requestedPosition)) {
			return v;
		}
		return getOrLoad(file, requestedPosition, ctx, fileChannel);
	}
451 
	/**
	 * Reserve {@code reserve} bytes of cache space on behalf of {@code key},
	 * evicting cold entries with the clock (second chance) replacement
	 * algorithm until total live bytes fit under {@link #maxBytes}.
	 */
	@SuppressWarnings("unchecked")
	private void reserveSpace(int reserve, DfsStreamKey key) {
		clockLock.lock();
		try {
			long live = LongStream.of(getCurrentSize()).sum() + reserve;
			if (maxBytes < live) {
				Ref prev = clockHand;
				Ref hand = clockHand.next;
				do {
					if (hand.hot) {
						// Value was recently touched. Clear
						// hot and give it another chance.
						hand.hot = false;
						prev = hand;
						hand = hand.next;
						continue;
					} else if (prev == hand)
						// The hand caught up with prev: the ring has been
						// swept down to a single entry; stop evicting.
						break;

					// No recent access since last scan, kill
					// value and remove from clock.
					Ref dead = hand;
					hand = hand.next;
					prev.next = hand;
					dead.next = null; // next == null marks the Ref evicted (see clean())
					dead.value = null;
					live -= dead.size;
					getStat(liveBytes, dead.key).addAndGet(-dead.size);
					getStat(statEvict, dead.key).incrementAndGet();
				} while (maxBytes < live);
				clockHand = prev;
			}
			getStat(liveBytes, key).addAndGet(reserve);
		} finally {
			clockLock.unlock();
		}
	}
489 
	/**
	 * Return previously reserved space to the cache, e.g. when a load failed
	 * or another thread won the race to populate the slot.
	 */
	private void creditSpace(int credit, DfsStreamKey key) {
		clockLock.lock();
		try {
			getStat(liveBytes, key).addAndGet(-credit);
		} finally {
			clockLock.unlock();
		}
	}
498 
	/**
	 * Insert {@code ref} into the clock ring just after the current hand and
	 * advance the hand onto it. A non-zero {@code credit} trims the earlier
	 * reservation down to the ref's actual size.
	 */
	@SuppressWarnings("unchecked")
	private void addToClock(Ref ref, int credit) {
		clockLock.lock();
		try {
			if (credit != 0) {
				getStat(liveBytes, ref.key).addAndGet(-credit);
			}
			Ref ptr = clockHand;
			ref.next = ptr.next;
			ptr.next = ref;
			clockHand = ref;
		} finally {
			clockLock.unlock();
		}
	}
514 
	/** Cache a fully loaded block under its stream key and start offset. */
	void put(DfsBlock v) {
		put(v.stream, v.start, v.size(), v);
	}
518 
	/**
	 * Look up a cached object, creating and loading it if it doesn't exist.
	 *
	 * @param key
	 *            the stream key of the pack.
	 * @param loader
	 *            the function to load the reference.
	 * @return the object reference.
	 * @throws IOException
	 *             the reference was not in the cache and could not be loaded.
	 */
	<T> Ref<T> getOrLoadRef(DfsStreamKey key, RefLoader<T> loader)
			throws IOException {
		// Refs (indexes, bitmaps) are always cached at position 0.
		int slot = slot(key, 0);
		HashEntry e1 = table.get(slot);
		Ref<T> ref = scanRef(e1, key, 0);
		if (ref != null) {
			// Lock-free fast path.
			getStat(statHit, key).incrementAndGet();
			return ref;
		}

		ReentrantLock regionLock = lockForRef(key);
		long lockStart = System.currentTimeMillis();
		regionLock.lock();
		try {
			// Double-check under the ref lock: another thread may have
			// loaded this ref while we waited.
			HashEntry e2 = table.get(slot);
			if (e2 != e1) {
				ref = scanRef(e2, key, 0);
				if (ref != null) {
					getStat(statHit, key).incrementAndGet();
					return ref;
				}
			}

			// Report how long we waited for the lock, if a metric consumer
			// was configured.
			if (refLockWaitTime != null) {
				refLockWaitTime.accept(
						Long.valueOf(System.currentTimeMillis() - lockStart));
			}
			getStat(statMiss, key).incrementAndGet();
			ref = loader.load();
			ref.hot = true;
			// Reserve after loading to get the size of the object
			reserveSpace(ref.size, key);
			// CAS-insert at the head of the bucket chain; retry on contention.
			for (;;) {
				HashEntry n = new HashEntry(clean(e2), ref);
				if (table.compareAndSet(slot, e2, n)) {
					break;
				}
				e2 = table.get(slot);
			}
			addToClock(ref, 0);
		} finally {
			regionLock.unlock();
		}
		return ref;
	}
575 
	/**
	 * Cache a pre-built object (e.g. an index) at position 0 of its stream.
	 * Sizes above Integer.MAX_VALUE are clamped for accounting purposes.
	 */
	<T> Ref<T> putRef(DfsStreamKey key, long size, T v) {
		return put(key, 0, (int) Math.min(size, Integer.MAX_VALUE), v);
	}
579 
	/**
	 * Cache {@code v} under {@code (key, pos)}, returning the existing Ref
	 * instead if another thread already cached one for the same tuple.
	 */
	<T> Ref<T> put(DfsStreamKey key, long pos, int size, T v) {
		int slot = slot(key, pos);
		HashEntry e1 = table.get(slot);
		Ref<T> ref = scanRef(e1, key, pos);
		if (ref != null) {
			// Already present; keep the existing entry.
			return ref;
		}

		reserveSpace(size, key);
		ReentrantLock regionLock = lockFor(key, pos);
		regionLock.lock();
		try {
			// Double-check under the region lock; credit the reservation
			// back if we lost the race.
			HashEntry e2 = table.get(slot);
			if (e2 != e1) {
				ref = scanRef(e2, key, pos);
				if (ref != null) {
					creditSpace(size, key);
					return ref;
				}
			}

			ref = new Ref<>(key, pos, size, v);
			ref.hot = true;
			// CAS-insert at the head of the bucket chain; retry on contention.
			for (;;) {
				HashEntry n = new HashEntry(clean(e2), ref);
				if (table.compareAndSet(slot, e2, n)) {
					break;
				}
				e2 = table.get(slot);
			}
			addToClock(ref, 0);
		} finally {
			regionLock.unlock();
		}
		return ref;
	}
616 
	/**
	 * Check if a block is cached without touching hit/miss statistics.
	 */
	boolean contains(DfsStreamKey key, long position) {
		return scan(table.get(slot(key, position)), key, position) != null;
	}
620 
621 	@SuppressWarnings("unchecked")
622 	<T> T get(DfsStreamKey key, long position) {
623 		T val = (T) scan(table.get(slot(key, position)), key, position);
624 		if (val == null) {
625 			getStat(statMiss, key).incrementAndGet();
626 		} else {
627 			getStat(statHit, key).incrementAndGet();
628 		}
629 		return val;
630 	}
631 
	/**
	 * Find the value for (key, position) in a bucket chain, or null if absent.
	 * Touches the Ref's hot bit via {@code Ref.get()}.
	 */
	private <T> T scan(HashEntry n, DfsStreamKey key, long position) {
		Ref<T> r = scanRef(n, key, position);
		return r != null ? r.get() : null;
	}
636 
637 	@SuppressWarnings("unchecked")
638 	private <T> Ref<T> scanRef(HashEntry n, DfsStreamKey key, long position) {
639 		for (; n != null; n = n.next) {
640 			Ref<T> r = n.ref;
641 			if (r.position == position && r.key.equals(key)) {
642 				return r.get() != null ? r : null;
643 			}
644 		}
645 		return null;
646 	}
647 
	/**
	 * Map a (key, position) tuple to a hash bucket. The unsigned shift keeps
	 * the dividend non-negative so the modulo result is a valid index.
	 */
	private int slot(DfsStreamKey key, long position) {
		return (hash(key.hash, position) >>> 1) % tableSize;
	}

	/** Select the load lock guarding this (key, position) block. */
	private ReentrantLock lockFor(DfsStreamKey key, long position) {
		return loadLocks[(hash(key.hash, position) >>> 1) % loadLocks.length];
	}

	/** Select the ref lock guarding index/bitmap loads for this stream. */
	private ReentrantLock lockForRef(DfsStreamKey key) {
		return refLocks[(key.hash >>> 1) % refLocks.length];
	}
659 
660 	private static AtomicLong[] newCounters() {
661 		AtomicLong[] ret = new AtomicLong[PackExt.values().length];
662 		for (int i = 0; i < ret.length; i++) {
663 			ret[i] = new AtomicLong();
664 		}
665 		return ret;
666 	}
667 
	/**
	 * Fetch the counter for {@code key}'s pack extension, growing the counter
	 * array lock-free (copy + CAS) if the extension index is beyond the
	 * current length. Loses only the copy, never a counter, on CAS failure.
	 */
	private static AtomicLong getStat(AtomicReference<AtomicLong[]> stats,
			DfsStreamKey key) {
		int pos = key.packExtPos;
		while (true) {
			AtomicLong[] vals = stats.get();
			if (pos < vals.length) {
				return vals[pos];
			}
			// Grow: copy existing counters and append zeroed ones, then
			// publish via CAS; retry from scratch if another thread won.
			AtomicLong[] expect = vals;
			vals = new AtomicLong[Math.max(pos + 1, PackExt.values().length)];
			System.arraycopy(expect, 0, vals, 0, expect.length);
			for (int i = expect.length; i < vals.length; i++) {
				vals[i] = new AtomicLong();
			}
			if (stats.compareAndSet(expect, vals)) {
				return vals[pos];
			}
		}
	}
687 
688 	private static long[] getStatVals(AtomicReference<AtomicLong[]> stat) {
689 		AtomicLong[] stats = stat.get();
690 		long[] cnt = new long[stats.length];
691 		for (int i = 0; i < stats.length; i++) {
692 			cnt[i] = stats[i].get();
693 		}
694 		return cnt;
695 	}
696 
	/**
	 * Return a version of the chain rooted at {@code top} with evicted
	 * entries removed. A Ref whose {@code next} is null was unlinked from the
	 * clock ring by {@link #reserveSpace(int, DfsStreamKey)} and is dropped.
	 * Reuses existing HashEntry nodes when a suffix needs no change, so an
	 * already-clean chain is returned as-is.
	 */
	private static HashEntry clean(HashEntry top) {
		// Skip evicted entries at the head of the chain.
		while (top != null && top.ref.next == null)
			top = top.next;
		if (top == null) {
			return null;
		}
		// Recursively clean the tail; only allocate when the tail changed.
		HashEntry n = clean(top.next);
		return n == top.next ? top : new HashEntry(n, top.ref);
	}
706 
	/** Immutable node of a hash bucket chain; chains are rebuilt, not mutated. */
	private static final class HashEntry {
		/** Next entry in the hash table's chain list. */
		final HashEntry next;

		/** The referenced object. */
		final Ref ref;

		HashEntry(HashEntry n, Ref r) {
			next = n;
			ref = r;
		}
	}
719 
	/**
	 * A cached object reference: simultaneously the payload of a
	 * {@link HashEntry} and a node in the singly linked clock ring used for
	 * replacement.
	 */
	static final class Ref<T> {
		final DfsStreamKey key;
		final long position;
		final int size;
		// Cached value; nulled by eviction in reserveSpace().
		volatile T value;
		// Next Ref on the clock ring; set to null when evicted, which is
		// how clean() recognizes dead entries. Guarded by clockLock.
		Ref next;
		// Second-chance bit: set on access, cleared by the clock sweep.
		volatile boolean hot;

		Ref(DfsStreamKey key, long position, int size, T v) {
			this.key = key;
			this.position = position;
			this.size = size;
			this.value = v;
		}

		T get() {
			T v = value;
			if (v != null) {
				// Mark recently used so the next sweep spares this entry once.
				hot = true;
			}
			return v;
		}

		boolean has() {
			return value != null;
		}
	}
747 
	/** Loads a Ref on a cache miss in {@link #getOrLoadRef(DfsStreamKey, RefLoader)}. */
	@FunctionalInterface
	interface RefLoader<T> {
		Ref<T> load() throws IOException;
	}

	/**
	 * Supplier for readable channel
	 */
	@FunctionalInterface
	interface ReadableChannelSupplier {
		/**
		 * @return ReadableChannel
		 * @throws IOException
		 *             if the channel could not be opened.
		 */
		ReadableChannel get() throws IOException;
	}
}
764 }