1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45 package org.eclipse.jgit.internal.storage.dfs;
46
47 import java.io.IOException;
48 import java.util.Collection;
49 import java.util.Collections;
50 import java.util.Map;
51 import java.util.concurrent.ConcurrentHashMap;
52 import java.util.concurrent.atomic.AtomicLong;
53 import java.util.concurrent.atomic.AtomicReferenceArray;
54 import java.util.concurrent.locks.ReentrantLock;
55
56 import org.eclipse.jgit.internal.JGitText;
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
/**
 * Caches slices of a pack file in memory for faster read access.
 * <p>
 * The DfsBlockCache serves multiple pack files from a single process-wide
 * table of cached blocks, keyed by pack ({@code DfsPackKey}) and block-aligned
 * offset. When the configured byte limit is exceeded, blocks are evicted by a
 * clock (second-chance) sweep over a circular list of entries.
 * <p>
 * Thread-safety: lookups scan an {@link AtomicReferenceArray} based hash table
 * without locking; loads for the same region of a pack are serialized by one
 * of the {@code loadLocks}; the clock list and the byte accounting
 * ({@code liveBytes}, {@code statEvict}) are guarded by {@code clockLock}.
 */
public final class DfsBlockCache {
	/** Process-wide singleton; replaced wholesale by {@link #reconfigure}. */
	private static volatile DfsBlockCache cache;

	static {
		// Install a default-configured cache at class load time so
		// getInstance() never returns null.
		reconfigure(new DfsBlockCacheConfig());
	}

	/**
	 * Modify the configuration of the process-wide block cache.
	 * <p>
	 * The new cache instance replaces the old one immediately. Blocks held by
	 * the old cache become unreachable (and will be garbage collected); each
	 * pack's cached-size counter is reset to zero so the accounting stays
	 * consistent for readers that still hold the old pack objects.
	 *
	 * @param cfg
	 *            the new configuration to build the replacement cache from.
	 */
	public static void reconfigure(DfsBlockCacheConfig cfg) {
		DfsBlockCache nc = new DfsBlockCache(cfg);
		DfsBlockCache oc = cache;
		cache = nc;

		if (oc != null) {
			// Zero out per-pack accounting for packs known to the old
			// cache; their blocks are no longer reachable through it.
			for (DfsPackFile pack : oc.getPackFiles())
				pack.key.cachedSize.set(0);
		}
	}

	/** @return the currently active block cache instance. */
	public static DfsBlockCache getInstance() {
		return cache;
	}

	/** Number of buckets in {@link #table}. */
	private final int tableSize;

	/** Hash bucket directory; each bucket is an immutable singly-linked chain. */
	private final AtomicReferenceArray<HashEntry> table;

	/** Locks to prevent concurrent loads of the same region of a pack. */
	private final ReentrantLock[] loadLocks;

	/** Maximum number of bytes the cache should hold. */
	private final long maxBytes;

	/** Largest length (in bytes) that may be streamed through the cache. */
	private final long maxStreamThroughCache;

	/** Size in bytes of a single block read in from a pack. */
	private final int blockSize;

	/**
	 * Shift equivalent of dividing by {@link #blockSize}; computed via
	 * {@code Integer.numberOfTrailingZeros}, so blockSize is presumably a
	 * power of two — TODO confirm against DfsBlockCacheConfig.
	 */
	private final int blockSizeShift;

	/** Live pack files, indexed by description. Guarded by itself (synchronized). */
	private final Map<DfsPackDescription, DfsPackFile> packCache;

	/** Unmodifiable view over the values of {@link #packCache}. */
	private final Collection<DfsPackFile> packFiles;

	/** Number of times a block was found in the cache. */
	private final AtomicLong statHit;

	/** Number of times a block was not in the cache and had to be loaded. */
	private final AtomicLong statMiss;

	/** Number of blocks evicted due to the cache being full. Guarded by clockLock. */
	private volatile long statEvict;

	/** Protects the clock list and the byte accounting. */
	private final ReentrantLock clockLock;

	/**
	 * Current position of the clock hand in the circular eviction list.
	 * Guarded by {@link #clockLock}.
	 */
	private Ref clockHand;

	/** Number of bytes currently loaded in the cache. Guarded by clockLock. */
	private volatile long liveBytes;

	private DfsBlockCache(final DfsBlockCacheConfig cfg) {
		tableSize = tableSize(cfg);
		if (tableSize < 1)
			throw new IllegalArgumentException(JGitText.get().tSizeMustBeGreaterOrEqual1);

		table = new AtomicReferenceArray<HashEntry>(tableSize);
		loadLocks = new ReentrantLock[32];
		for (int i = 0; i < loadLocks.length; i++)
			loadLocks[i] = new ReentrantLock(true /* fair */);

		// NOTE(review): eb is computed here but never used afterwards;
		// looks like leftover sizing logic — confirm before removing.
		int eb = (int) (tableSize * .1);
		if (64 < eb)
			eb = 64;
		else if (eb < 4)
			eb = 4;
		if (tableSize < eb)
			eb = tableSize;

		maxBytes = cfg.getBlockLimit();
		maxStreamThroughCache = (long) (maxBytes * cfg.getStreamRatio());
		blockSize = cfg.getBlockSize();
		blockSizeShift = Integer.numberOfTrailingZeros(blockSize);

		clockLock = new ReentrantLock(true /* fair */);
		// Sentinel entry: the clock list is circular and always non-empty.
		clockHand = new Ref<Object>(new DfsPackKey(), -1, 0, null);
		clockHand.next = clockHand;

		packCache = new ConcurrentHashMap<DfsPackDescription, DfsPackFile>(
				16, 0.75f, 1);
		packFiles = Collections.unmodifiableCollection(packCache.values());

		statHit = new AtomicLong();
		statMiss = new AtomicLong();
	}

	/**
	 * @param length
	 *            total number of bytes that would be copied.
	 * @return true if a stream of this length may be copied through the cache.
	 */
	boolean shouldCopyThroughCache(long length) {
		return length <= maxStreamThroughCache;
	}

	/** @return total number of bytes in the cache (racy read, may be stale). */
	public long getCurrentSize() {
		return liveBytes;
	}

	/** @return 0..100, defining how full the cache is. */
	public long getFillPercentage() {
		return getCurrentSize() * 100 / maxBytes;
	}

	/** @return number of requests for items in the cache. */
	public long getHitCount() {
		return statHit.get();
	}

	/** @return number of requests for items not in the cache. */
	public long getMissCount() {
		return statMiss.get();
	}

	/** @return total number of requests (hit + miss). */
	public long getTotalRequestCount() {
		return getHitCount() + getMissCount();
	}

	/** @return 0..100, defining number of cache hits per 100 requests. */
	public long getHitRatio() {
		long hits = statHit.get();
		long miss = statMiss.get();
		long total = hits + miss;
		if (total == 0)
			return 0;
		return hits * 100 / total;
	}

	/** @return number of evictions performed due to cache being full. */
	public long getEvictions() {
		return statEvict;
	}

	/**
	 * Get the pack files stored in this cache.
	 *
	 * @return a read-only live view of the pack files in the cache; the view
	 *         reflects later additions and removals.
	 */
	public Collection<DfsPackFile> getPackFiles() {
		return packFiles;
	}

	/**
	 * Look up a pack file by description, creating and registering a new
	 * instance if it is absent or has been marked invalid.
	 *
	 * @param dsc
	 *            description of the pack file.
	 * @param key
	 *            cache key to associate with a newly created pack; a fresh
	 *            key is allocated when null.
	 * @return the cached (or newly created) pack file.
	 */
	DfsPackFile getOrCreate(DfsPackDescription dsc, DfsPackKey key) {
		// Synchronize on packCache so check-invalidate-create-put is atomic
		// with respect to remove(DfsPackFile).
		synchronized (packCache) {
			DfsPackFile pack = packCache.get(dsc);
			if (pack != null && pack.invalid()) {
				// Drop an invalidated pack so callers get a fresh instance.
				packCache.remove(dsc);
				pack = null;
			}
			if (pack == null) {
				if (key == null)
					key = new DfsPackKey();
				pack = new DfsPackFile(this, dsc, key);
				packCache.put(dsc, pack);
			}
			return pack;
		}
	}

	/** Combine a pack's hash with a block-aligned offset into one hash code. */
	private int hash(int packHash, long off) {
		return packHash + (int) (off >>> blockSizeShift);
	}

	/** @return size in bytes of one cache block. */
	int getBlockSize() {
		return blockSize;
	}

	/**
	 * Compute the number of hash buckets from the configuration.
	 *
	 * @throws IllegalArgumentException
	 *             if the block size is non-positive or larger than the limit.
	 */
	private static int tableSize(final DfsBlockCacheConfig cfg) {
		final int wsz = cfg.getBlockSize();
		final long limit = cfg.getBlockLimit();
		if (wsz <= 0)
			throw new IllegalArgumentException(JGitText.get().invalidWindowSize);
		if (limit < wsz)
			throw new IllegalArgumentException(JGitText.get().windowSizeMustBeLesserThanLimit);
		// 2.5 buckets per expected block, capped at Integer.MAX_VALUE.
		return (int) Math.min(5 * (limit / wsz) / 2, Integer.MAX_VALUE);
	}

	/**
	 * Look up a cached block, loading and caching it if it doesn't exist.
	 *
	 * @param pack
	 *            the pack that "contains" the block.
	 * @param position
	 *            offset within {@code pack} of the requested data; will be
	 *            aligned to a block boundary internally.
	 * @param ctx
	 *            current thread's reader, passed through to the pack load.
	 * @return the cached block containing {@code position}.
	 * @throws IOException
	 *             the block was not in the cache and could not be read from
	 *             the pack.
	 */
	DfsBlock getOrLoad(DfsPackFile pack, long position, DfsReader ctx)
			throws IOException {
		final long requestedPosition = position;
		position = pack.alignToBlock(position);

		DfsPackKey key = pack.key;
		int slot = slot(key, position);
		HashEntry e1 = table.get(slot);
		// Fast path: lock-free scan of the bucket chain.
		DfsBlock v = scan(e1, key, position);
		if (v != null) {
			statHit.incrementAndGet();
			return v;
		}

		// Reserve space up front (possibly evicting), then load under the
		// region lock so only one thread reads a given block from storage.
		reserveSpace(blockSize);
		ReentrantLock regionLock = lockFor(key, position);
		regionLock.lock();
		try {
			// Double-check: another thread may have loaded the block while
			// we waited for the region lock.
			HashEntry e2 = table.get(slot);
			if (e2 != e1) {
				v = scan(e2, key, position);
				if (v != null) {
					statHit.incrementAndGet();
					// Return the space we reserved but did not use.
					creditSpace(blockSize);
					return v;
				}
			}

			statMiss.incrementAndGet();
			boolean credit = true;
			try {
				v = pack.readOneBlock(position, ctx);
				credit = false;
			} finally {
				// If the read threw, give back the reserved space.
				if (credit)
					creditSpace(blockSize);
			}
			if (position != v.start) {
				// The pack may use a different alignment; re-slot the
				// entry under the block's actual start offset.
				position = v.start;
				slot = slot(key, position);
				e2 = table.get(slot);
			}

			key.cachedSize.addAndGet(v.size());
			Ref<DfsBlock> ref = new Ref<DfsBlock>(key, position, v.size(), v);
			ref.hot = true;
			// CAS-retry loop: publish a new chain head, pruning dead entries.
			for (;;) {
				HashEntry n = new HashEntry(clean(e2), ref);
				if (table.compareAndSet(slot, e2, n))
					break;
				e2 = table.get(slot);
			}
			// Adjust accounting for the difference between the reserved
			// blockSize and the block's actual size.
			addToClock(ref, blockSize - v.size());
		} finally {
			regionLock.unlock();
		}

		// If the loaded block does not actually cover the requested offset
		// (alignment differed), retry; the block is cached now, so the
		// recursion terminates quickly.
		if (v.contains(pack.key, requestedPosition))
			return v;
		return getOrLoad(pack, requestedPosition, ctx);
	}

	/**
	 * Reserve {@code reserve} bytes of cache capacity, evicting cold entries
	 * with the clock sweep if the limit would be exceeded.
	 */
	@SuppressWarnings("unchecked")
	private void reserveSpace(int reserve) {
		clockLock.lock();
		try {
			long live = liveBytes + reserve;
			if (maxBytes < live) {
				Ref prev = clockHand;
				Ref hand = clockHand.next;
				do {
					if (hand.hot) {
						// Second chance: clear the hot bit and move on;
						// the entry is evicted only if seen cold twice.
						hand.hot = false;
						prev = hand;
						hand = hand.next;
						continue;
					} else if (prev == hand)
						// Only one entry left in the ring; nothing more
						// can be evicted.
						break;

					// Unlink and kill the cold entry. Clearing dead.next
					// also marks it for pruning by clean().
					Ref dead = hand;
					hand = hand.next;
					prev.next = hand;
					dead.next = null;
					dead.value = null;
					live -= dead.size;
					dead.pack.cachedSize.addAndGet(-dead.size);
					statEvict++;
				} while (maxBytes < live);
				clockHand = prev;
			}
			liveBytes = live;
		} finally {
			clockLock.unlock();
		}
	}

	/** Return previously reserved bytes that were not consumed. */
	private void creditSpace(int credit) {
		// NOTE(review): no try/finally around unlock here, unlike the other
		// clockLock users; the single subtraction cannot throw, but a
		// try/finally would be more consistent — confirm intent.
		clockLock.lock();
		liveBytes -= credit;
		clockLock.unlock();
	}

	/**
	 * Insert {@code ref} into the clock ring just after the hand, optionally
	 * crediting back {@code credit} bytes of over-reservation.
	 */
	private void addToClock(Ref ref, int credit) {
		clockLock.lock();
		try {
			if (credit != 0)
				liveBytes -= credit;
			Ref ptr = clockHand;
			ref.next = ptr.next;
			ptr.next = ref;
			clockHand = ref;
		} finally {
			clockLock.unlock();
		}
	}

	/** Cache a loaded block under its pack key and start offset. */
	void put(DfsBlock v) {
		put(v.pack, v.start, v.size(), v);
	}

	/**
	 * Cache an arbitrary value under (key, pos), unless an entry already
	 * exists, in which case the existing reference is returned.
	 *
	 * @param key
	 *            pack the value belongs to.
	 * @param pos
	 *            offset of the value within the pack.
	 * @param size
	 *            number of bytes the value is charged against the cache.
	 * @param v
	 *            the value to cache.
	 * @return the cache reference now holding the value (new or preexisting).
	 */
	<T> Ref<T> put(DfsPackKey key, long pos, int size, T v) {
		int slot = slot(key, pos);
		HashEntry e1 = table.get(slot);
		Ref<T> ref = scanRef(e1, key, pos);
		if (ref != null)
			return ref;

		reserveSpace(size);
		ReentrantLock regionLock = lockFor(key, pos);
		regionLock.lock();
		try {
			// Double-check under the region lock, same as getOrLoad.
			HashEntry e2 = table.get(slot);
			if (e2 != e1) {
				ref = scanRef(e2, key, pos);
				if (ref != null) {
					creditSpace(size);
					return ref;
				}
			}

			key.cachedSize.addAndGet(size);
			ref = new Ref<T>(key, pos, size, v);
			ref.hot = true;
			// CAS-retry publish of the new chain head.
			for (;;) {
				HashEntry n = new HashEntry(clean(e2), ref);
				if (table.compareAndSet(slot, e2, n))
					break;
				e2 = table.get(slot);
			}
			addToClock(ref, 0);
		} finally {
			regionLock.unlock();
		}
		return ref;
	}

	/** @return true if (key, position) currently has a live cached value. */
	boolean contains(DfsPackKey key, long position) {
		return scan(table.get(slot(key, position)), key, position) != null;
	}

	/**
	 * Fetch a cached value, updating hit/miss statistics.
	 *
	 * @return the cached value, or null if absent or already evicted.
	 */
	@SuppressWarnings("unchecked")
	<T> T get(DfsPackKey key, long position) {
		T val = (T) scan(table.get(slot(key, position)), key, position);
		if (val == null)
			statMiss.incrementAndGet();
		else
			statHit.incrementAndGet();
		return val;
	}

	/** Scan a bucket chain for a live value at (pack, position). */
	private <T> T scan(HashEntry n, DfsPackKey pack, long position) {
		Ref<T> r = scanRef(n, pack, position);
		return r != null ? r.get() : null;
	}

	/**
	 * Scan a bucket chain for the Ref at (pack, position). Returns null if
	 * the entry is absent or its value has been evicted (get() == null).
	 * Keys are compared by identity (==), not equals.
	 */
	@SuppressWarnings("unchecked")
	private <T> Ref<T> scanRef(HashEntry n, DfsPackKey pack, long position) {
		for (; n != null; n = n.next) {
			Ref<T> r = n.ref;
			if (r.pack == pack && r.position == position)
				return r.get() != null ? r : null;
		}
		return null;
	}

	/** Remove a pack from the pack cache (cached blocks are not purged here). */
	void remove(DfsPackFile pack) {
		synchronized (packCache) {
			packCache.remove(pack.getPackDescription());
		}
	}

	/** Map (pack, position) to a bucket index; >>> 1 keeps the value non-negative. */
	private int slot(DfsPackKey pack, long position) {
		return (hash(pack.hash, position) >>> 1) % tableSize;
	}

	/** Pick the region lock striped over the same hash as {@link #slot}. */
	private ReentrantLock lockFor(DfsPackKey pack, long position) {
		return loadLocks[(hash(pack.hash, position) >>> 1) % loadLocks.length];
	}

	/**
	 * Return a chain equal to {@code top} with evicted entries removed.
	 * An entry is dead when its Ref has been unlinked from the clock ring
	 * (ref.next == null, set by reserveSpace). Returns the original chain
	 * unchanged (same object) when nothing needed pruning.
	 */
	private static HashEntry clean(HashEntry top) {
		while (top != null && top.ref.next == null)
			top = top.next;
		if (top == null)
			return null;
		HashEntry n = clean(top.next);
		return n == top.next ? top : new HashEntry(n, top.ref);
	}

	/** Immutable node of a bucket chain in {@link #table}. */
	private static final class HashEntry {
		/** Next entry of the chain. */
		final HashEntry next;

		/** The referenced cached object. */
		final Ref ref;

		HashEntry(HashEntry n, Ref r) {
			next = n;
			ref = r;
		}
	}

	/**
	 * A cached object plus its clock-ring linkage and accounting data.
	 * {@code value} is cleared on eviction; {@code hot} implements the
	 * second-chance bit read by the clock sweep.
	 */
	static final class Ref<T> {
		final DfsPackKey pack;
		final long position;
		final int size;
		volatile T value;
		// Next entry in the circular clock ring; null once evicted.
		Ref next;
		volatile boolean hot;

		Ref(DfsPackKey pack, long position, int size, T v) {
			this.pack = pack;
			this.position = position;
			this.size = size;
			this.value = v;
		}

		/** @return the value, marking the entry hot if still live. */
		T get() {
			T v = value;
			if (v != null)
				hot = true;
			return v;
		}

		/** @return true if the value has not been evicted. */
		boolean has() {
			return value != null;
		}
	}
}