// View Javadoc
1   /*
2    * Copyright (C) 2017, Google Inc.
3    * and other copyright owners as documented in the project's IP log.
4    *
5    * This program and the accompanying materials are made available
6    * under the terms of the Eclipse Distribution License v1.0 which
7    * accompanies this distribution, is reproduced below, and is
8    * available at http://www.eclipse.org/org/documents/edl-v10.php
9    *
10   * All rights reserved.
11   *
12   * Redistribution and use in source and binary forms, with or
13   * without modification, are permitted provided that the following
14   * conditions are met:
15   *
16   * - Redistributions of source code must retain the above copyright
17   *	 notice, this list of conditions and the following disclaimer.
18   *
19   * - Redistributions in binary form must reproduce the above
20   *	 copyright notice, this list of conditions and the following
21   *	 disclaimer in the documentation and/or other materials provided
22   *	 with the distribution.
23   *
24   * - Neither the name of the Eclipse Foundation, Inc. nor the
25   *	 names of its contributors may be used to endorse or promote
26   *	 products derived from this software without specific prior
27   *	 written permission.
28   *
29   * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
30   * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
31   * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
32   * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
33   * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
34   * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
35   * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
36   * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
37   * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
38   * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39   * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
40   * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
41   * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
42   */
43  
44  package org.eclipse.jgit.internal.storage.file;
45  
46  import static java.nio.file.StandardCopyOption.ATOMIC_MOVE;
47  import static org.eclipse.jgit.lib.Constants.OBJECT_ID_LENGTH;
48  import static org.eclipse.jgit.lib.Constants.OBJ_OFS_DELTA;
49  import static org.eclipse.jgit.lib.Constants.OBJ_REF_DELTA;
50  
51  import java.io.BufferedInputStream;
52  import java.io.EOFException;
53  import java.io.File;
54  import java.io.FileOutputStream;
55  import java.io.FilterInputStream;
56  import java.io.IOException;
57  import java.io.InputStream;
58  import java.io.OutputStream;
59  import java.io.RandomAccessFile;
60  import java.nio.channels.Channels;
61  import java.text.MessageFormat;
62  import java.util.Collection;
63  import java.util.Collections;
64  import java.util.HashSet;
65  import java.util.List;
66  import java.util.Set;
67  import java.util.zip.CRC32;
68  import java.util.zip.DataFormatException;
69  import java.util.zip.Deflater;
70  import java.util.zip.DeflaterOutputStream;
71  import java.util.zip.Inflater;
72  import java.util.zip.InflaterInputStream;
73  
74  import org.eclipse.jgit.errors.CorruptObjectException;
75  import org.eclipse.jgit.errors.IncorrectObjectTypeException;
76  import org.eclipse.jgit.errors.LargeObjectException;
77  import org.eclipse.jgit.errors.MissingObjectException;
78  import org.eclipse.jgit.internal.JGitText;
79  import org.eclipse.jgit.lib.AbbreviatedObjectId;
80  import org.eclipse.jgit.lib.AnyObjectId;
81  import org.eclipse.jgit.lib.Constants;
82  import org.eclipse.jgit.lib.InflaterCache;
83  import org.eclipse.jgit.lib.ObjectId;
84  import org.eclipse.jgit.lib.ObjectIdOwnerMap;
85  import org.eclipse.jgit.lib.ObjectInserter;
86  import org.eclipse.jgit.lib.ObjectLoader;
87  import org.eclipse.jgit.lib.ObjectReader;
88  import org.eclipse.jgit.lib.ObjectStream;
89  import org.eclipse.jgit.storage.pack.PackConfig;
90  import org.eclipse.jgit.transport.PackParser;
91  import org.eclipse.jgit.transport.PackedObjectInfo;
92  import org.eclipse.jgit.util.BlockList;
93  import org.eclipse.jgit.util.FileUtils;
94  import org.eclipse.jgit.util.IO;
95  import org.eclipse.jgit.util.NB;
96  import org.eclipse.jgit.util.io.CountingOutputStream;
97  import org.eclipse.jgit.util.sha1.SHA1;
98  
/**
 * Object inserter that inserts one pack per call to {@link #flush()}, and never
 * inserts loose objects.
 */
public class PackInserter extends ObjectInserter {
	/** Always produce version 2 indexes, to get CRC data. */
	private static final int INDEX_VERSION = 2;

	// Object database the finished pack is committed into on flush().
	private final ObjectDirectory db;

	// Objects written into the current temporary pack, in insertion order,
	// plus a map for fast duplicate detection. Both are null until the first
	// insert opens a pack, and are reset to null by clear().
	private List<PackedObjectInfo> objectList;
	private ObjectIdOwnerMap<PackedObjectInfo> objectMap;

	// True while the temporary pack must be deleted on close() because it was
	// never successfully flushed into the object database.
	private boolean rollback;

	// When true (default), objects already present in the repository's packs
	// are not written again. Loose objects are intentionally not consulted.
	private boolean checkExisting = true;

	// zlib compression level applied to object payloads.
	private int compression = Deflater.BEST_COMPRESSION;

	// Temporary pack file and its writer; both null while no pack is open.
	private File tmpPack;
	private PackStream packOut;

	// Lazily acquired inflater, reused for read-backs, released in close().
	private Inflater cachedInflater;

	private PackConfig pconfig;

	PackInserter(ObjectDirectory db) {
		this.db = db;
		this.pconfig = new PackConfig(db.getConfig());
	}
125 
126 	/**
127 	 * Whether to check if objects exist in the repo
128 	 *
129 	 * @param check
130 	 *            if {@code false}, will write out possibly-duplicate objects
131 	 *            without first checking whether they exist in the repo; default
132 	 *            is true.
133 	 */
134 	public void checkExisting(boolean check) {
135 		checkExisting = check;
136 	}
137 
138 	/**
139 	 * Set compression level for zlib deflater.
140 	 *
141 	 * @param compression
142 	 *            compression level for zlib deflater.
143 	 */
144 	public void setCompressionLevel(int compression) {
145 		this.compression = compression;
146 	}
147 
148 	int getBufferSize() {
149 		return buffer().length;
150 	}
151 
152 	/** {@inheritDoc} */
153 	@Override
154 	public ObjectId insert(int type, byte[] data, int off, int len)
155 			throws IOException {
156 		ObjectId id = idFor(type, data, off, len);
157 		if (objectMap != null && objectMap.contains(id)) {
158 			return id;
159 		}
160 		// Ignore loose objects, which are potentially unreachable.
161 		if (checkExisting && db.hasPackedObject(id)) {
162 			return id;
163 		}
164 
165 		long offset = beginObject(type, len);
166 		packOut.compress.write(data, off, len);
167 		packOut.compress.finish();
168 		return endObject(id, offset);
169 	}
170 
171 	/** {@inheritDoc} */
172 	@Override
173 	public ObjectId insert(int type, long len, InputStream in)
174 			throws IOException {
175 		byte[] buf = buffer();
176 		if (len <= buf.length) {
177 			IO.readFully(in, buf, 0, (int) len);
178 			return insert(type, buf, 0, (int) len);
179 		}
180 
181 		long offset = beginObject(type, len);
182 		SHA1 md = digest();
183 		md.update(Constants.encodedTypeString(type));
184 		md.update((byte) ' ');
185 		md.update(Constants.encodeASCII(len));
186 		md.update((byte) 0);
187 
188 		while (0 < len) {
189 			int n = in.read(buf, 0, (int) Math.min(buf.length, len));
190 			if (n <= 0) {
191 				throw new EOFException();
192 			}
193 			md.update(buf, 0, n);
194 			packOut.compress.write(buf, 0, n);
195 			len -= n;
196 		}
197 		packOut.compress.finish();
198 		return endObject(md.toObjectId(), offset);
199 	}
200 
201 	private long beginObject(int type, long len) throws IOException {
202 		if (packOut == null) {
203 			beginPack();
204 		}
205 		long offset = packOut.getOffset();
206 		packOut.beginObject(type, len);
207 		return offset;
208 	}
209 
210 	private ObjectId/../../../../../org/eclipse/jgit/lib/ObjectId.html#ObjectId">ObjectId endObject(ObjectId id, long offset) {
211 		PackedObjectInfo obj = new PackedObjectInfo(id);
212 		obj.setOffset(offset);
213 		obj.setCRC((int) packOut.crc32.getValue());
214 		objectList.add(obj);
215 		objectMap.addIfAbsent(obj);
216 		return id;
217 	}
218 
219 	private static File idxFor(File packFile) {
220 		String p = packFile.getName();
221 		return new File(
222 				packFile.getParentFile(),
223 				p.substring(0, p.lastIndexOf('.')) + ".idx"); //$NON-NLS-1$
224 	}
225 
226 	private void beginPack() throws IOException {
227 		objectList = new BlockList<>();
228 		objectMap = new ObjectIdOwnerMap<>();
229 
230 		rollback = true;
231 		tmpPack = File.createTempFile("insert_", ".pack", db.getDirectory()); //$NON-NLS-1$ //$NON-NLS-2$
232 		packOut = new PackStream(tmpPack);
233 
234 		// Write the header as though it were a single object pack.
235 		packOut.write(packOut.hdrBuf, 0, writePackHeader(packOut.hdrBuf, 1));
236 	}
237 
	/**
	 * Write the 12-byte pack header ("PACK", version, object count) into
	 * {@code buf}.
	 *
	 * @param buf
	 *            destination buffer; must hold at least 12 bytes.
	 * @param objectCount
	 *            number of objects recorded in the header.
	 * @return number of header bytes written (always 12).
	 */
	private static int writePackHeader(byte[] buf, int objectCount) {
		System.arraycopy(Constants.PACK_SIGNATURE, 0, buf, 0, 4);
		NB.encodeInt32(buf, 4, 2); // Always use pack version 2.
		NB.encodeInt32(buf, 8, objectCount);
		return 12;
	}
244 
	/**
	 * {@inheritDoc}
	 * <p>
	 * Not supported by this inserter; objects must be inserted individually.
	 */
	@Override
	public PackParser newPackParser(InputStream in) {
		throw new UnsupportedOperationException();
	}

	/** {@inheritDoc} */
	@Override
	public ObjectReader newReader() {
		return new Reader();
	}
256 
257 	/** {@inheritDoc} */
258 	@Override
259 	public void flush() throws IOException {
260 		if (tmpPack == null) {
261 			return;
262 		}
263 
264 		if (packOut == null) {
265 			throw new IOException();
266 		}
267 
268 		byte[] packHash;
269 		try {
270 			packHash = packOut.finishPack();
271 		} finally {
272 			packOut = null;
273 		}
274 
275 		Collections.sort(objectList);
276 		File tmpIdx = idxFor(tmpPack);
277 		writePackIndex(tmpIdx, packHash, objectList);
278 
279 		File realPack = new File(db.getPackDirectory(),
280 				"pack-" + computeName(objectList).name() + ".pack"); //$NON-NLS-1$ //$NON-NLS-2$
281 		db.closeAllPackHandles(realPack);
282 		tmpPack.setReadOnly();
283 		FileUtils.rename(tmpPack, realPack, ATOMIC_MOVE);
284 
285 		File realIdx = idxFor(realPack);
286 		tmpIdx.setReadOnly();
287 		try {
288 			FileUtils.rename(tmpIdx, realIdx, ATOMIC_MOVE);
289 		} catch (IOException e) {
290 			File newIdx = new File(
291 					realIdx.getParentFile(), realIdx.getName() + ".new"); //$NON-NLS-1$
292 			try {
293 				FileUtils.rename(tmpIdx, newIdx, ATOMIC_MOVE);
294 			} catch (IOException e2) {
295 				newIdx = tmpIdx;
296 				e = e2;
297 			}
298 			throw new IOException(MessageFormat.format(
299 					JGitText.get().panicCantRenameIndexFile, newIdx,
300 					realIdx), e);
301 		}
302 
303 		boolean interrupted = false;
304 		try {
305 			FileSnapshot snapshot = FileSnapshot.save(realPack);
306 			if (pconfig.doWaitPreventRacyPack(snapshot.size())) {
307 				snapshot.waitUntilNotRacy();
308 			}
309 		} catch (InterruptedException e) {
310 			interrupted = true;
311 		}
312 		try {
313 			db.openPack(realPack);
314 			rollback = false;
315 		} finally {
316 			clear();
317 			if (interrupted) {
318 				// Re-set interrupted flag
319 				Thread.currentThread().interrupt();
320 			}
321 		}
322 	}
323 
324 	private static void writePackIndex(File idx, byte[] packHash,
325 			List<PackedObjectInfo> list) throws IOException {
326 		try (OutputStream os = new FileOutputStream(idx)) {
327 			PackIndexWriter w = PackIndexWriter.createVersion(os, INDEX_VERSION);
328 			w.write(list, packHash);
329 		}
330 	}
331 
332 	private ObjectId computeName(List<PackedObjectInfo> list) {
333 		SHA1 md = digest().reset();
334 		byte[] buf = buffer();
335 		for (PackedObjectInfo otp : list) {
336 			otp.copyRawTo(buf, 0);
337 			md.update(buf, 0, OBJECT_ID_LENGTH);
338 		}
339 		return ObjectId.fromRaw(md.digest());
340 	}
341 
	/**
	 * {@inheritDoc}
	 * <p>
	 * Closes the pack stream, deletes the temporary pack and index when the
	 * pack was never flushed, and releases the cached inflater. Each cleanup
	 * step runs even if an earlier one fails.
	 */
	@Override
	public void close() {
		try {
			if (packOut != null) {
				try {
					packOut.close();
				} catch (IOException err) {
					// Ignore a close failure, the pack should be removed.
				}
			}
			if (rollback && tmpPack != null) {
				try {
					FileUtils.delete(tmpPack);
				} catch (IOException e) {
					// Still delete idx.
				}
				try {
					FileUtils.delete(idxFor(tmpPack));
				} catch (IOException e) {
					// Ignore error deleting temp idx.
				}
				rollback = false;
			}
		} finally {
			clear();
			try {
				InflaterCache.release(cachedInflater);
			} finally {
				cachedInflater = null;
			}
		}
	}
375 
	// Drop all per-pack state so the inserter can begin a fresh pack.
	private void clear() {
		objectList = null;
		objectMap = null;
		tmpPack = null;
		packOut = null;
	}
382 
383 	private Inflater inflater() {
384 		if (cachedInflater == null) {
385 			cachedInflater = InflaterCache.get();
386 		} else {
387 			cachedInflater.reset();
388 		}
389 		return cachedInflater;
390 	}
391 
	/**
	 * Stream that writes to a pack file.
	 * <p>
	 * Backed by two views of the same open file descriptor: a random-access file,
	 * and an output stream. Seeking in the file causes subsequent writes to the
	 * output stream to occur wherever the file pointer is pointing, so we need to
	 * take care to always seek to the end of the file before writing a new
	 * object.
	 * <p>
	 * Callers should always use {@link #seek(long)} to seek, rather than reaching
	 * into the file member. As long as this contract is followed, calls to {@link
	 * #write(byte[], int, int)} are guaranteed to write at the end of the file,
	 * even if there have been intermediate seeks.
	 */
	private class PackStream extends OutputStream {
		// Scratch buffer for object headers; also reused by write(int).
		final byte[] hdrBuf;
		// Running CRC of the current object, recorded in the pack index.
		final CRC32 crc32;
		// Deflating view of this stream; object payloads are written here.
		final DeflaterOutputStream compress;

		private final RandomAccessFile file;
		// Counts bytes written so getOffset() can avoid a length() syscall.
		private final CountingOutputStream out;
		private final Deflater deflater;

		// True while the file pointer is known to be at end-of-file; cleared
		// by seek(long) and restored by write(byte[], int, int).
		private boolean atEnd;

		PackStream(File pack) throws IOException {
			file = new RandomAccessFile(pack, "rw"); //$NON-NLS-1$
			// Both views share one file descriptor; see class javadoc.
			out = new CountingOutputStream(new FileOutputStream(file.getFD()));
			deflater = new Deflater(compression);
			compress = new DeflaterOutputStream(this, deflater, 8192);
			hdrBuf = new byte[32];
			crc32 = new CRC32();
			atEnd = true;
		}
426 
		// Current append position, i.e. where the next object will begin.
		long getOffset() {
			// This value is accurate as long as we only ever write to the end of the
			// file, and don't seek back to overwrite any previous segments. Although
			// this is subtle, storing the stream counter this way is still preferable
			// to returning file.length() here, as it avoids a syscall and possible
			// IOException.
			return out.getCount();
		}

		// Move the file pointer for a read-back; marks the stream as no longer
		// positioned at end-of-file so the next write seeks back first.
		void seek(long offset) throws IOException {
			file.seek(offset);
			atEnd = false;
		}
440 
		// Reset per-object state (CRC, deflater) and emit the object's pack
		// header for the given type and inflated length.
		void beginObject(int objectType, long length) throws IOException {
			crc32.reset();
			deflater.reset();
			write(hdrBuf, 0, encodeTypeSize(objectType, length));
		}

		/**
		 * Encode the standard pack object header into {@link #hdrBuf}: first
		 * byte carries the type and the low 4 bits of the length; remaining
		 * length follows in little-endian 7-bit groups, the high bit of each
		 * byte acting as a continuation flag.
		 *
		 * @return number of header bytes written into {@link #hdrBuf}.
		 */
		private int encodeTypeSize(int type, long rawLength) {
			long nextLength = rawLength >>> 4;
			hdrBuf[0] = (byte) ((nextLength > 0 ? 0x80 : 0x00) | (type << 4) | (rawLength & 0x0F));
			rawLength = nextLength;
			int n = 1;
			while (rawLength > 0) {
				nextLength >>>= 7;
				hdrBuf[n++] = (byte) ((nextLength > 0 ? 0x80 : 0x00) | (rawLength & 0x7F));
				rawLength = nextLength;
			}
			return n;
		}
459 
		@Override
		public void write(int b) throws IOException {
			// Route through the array form so CRC and atEnd handling apply.
			hdrBuf[0] = (byte) b;
			write(hdrBuf, 0, 1);
		}

		@Override
		public void write(byte[] data, int off, int len) throws IOException {
			crc32.update(data, off, len);
			if (!atEnd) {
				// A read-back seeked away; appends must go to end-of-file.
				file.seek(file.length());
				atEnd = true;
			}
			out.write(data, off, len);
		}
475 
		/**
		 * Rewrite the pack header with the real object count, then append the
		 * whole-pack SHA-1 trailer and close the stream.
		 *
		 * @return the 20-byte pack hash that was appended.
		 * @throws IOException
		 *             the file could not be rewritten or read back.
		 */
		byte[] finishPack() throws IOException {
			// Overwrite placeholder header with actual object count, then hash. This
			// method intentionally uses direct seek/write calls rather than the
			// wrappers which keep track of atEnd. This leaves atEnd, the file
			// pointer, and out's counter in an inconsistent state; that's ok, since
			// this method closes the file anyway.
			try {
				file.seek(0);
				out.write(hdrBuf, 0, writePackHeader(hdrBuf, objectList.size()));

				// Hash the entire pack contents from the start of the file.
				byte[] buf = buffer();
				SHA1 md = digest().reset();
				file.seek(0);
				while (true) {
					int r = file.read(buf);
					if (r < 0) {
						break;
					}
					md.update(buf, 0, r);
				}
				byte[] packHash = md.digest();
				out.write(packHash, 0, packHash.length);
				return packHash;
			} finally {
				close();
			}
		}
503 
		@Override
		public void close() throws IOException {
			// Free native deflater memory first, then close both views of the
			// descriptor; file.close() still runs if out.close() throws.
			deflater.end();
			try {
				out.close();
			} finally {
				file.close();
			}
		}
513 
		/**
		 * Inflate {@code len} bytes of object data starting at {@code filePos}
		 * in the pack file.
		 *
		 * @return the inflated bytes, or {@code null} if the destination
		 *         buffer could not be allocated (caller switches to streaming).
		 * @throws DataFormatException
		 *             the compressed stream is corrupt or makes no progress.
		 */
		byte[] inflate(long filePos, int len) throws IOException, DataFormatException {
			byte[] dstbuf;
			try {
				dstbuf = new byte[len];
			} catch (OutOfMemoryError noMemory) {
				return null; // Caller will switch to large object streaming.
			}

			byte[] srcbuf = buffer();
			Inflater inf = inflater();
			filePos += setInput(filePos, inf, srcbuf);
			for (int dstoff = 0;;) {
				int n = inf.inflate(dstbuf, dstoff, dstbuf.length - dstoff);
				dstoff += n;
				if (inf.finished()) {
					return dstbuf;
				}
				if (inf.needsInput()) {
					// Compressed data continues; feed the next chunk.
					filePos += setInput(filePos, inf, srcbuf);
				} else if (n == 0) {
					// No output produced and no input wanted: corrupt stream.
					throw new DataFormatException();
				}
			}
		}

		// Read the next chunk of compressed data into the inflater's input,
		// seeking only if the file pointer drifted; returns bytes read.
		private int setInput(long filePos, Inflater inf, byte[] buf)
				throws IOException {
			if (file.getFilePointer() != filePos) {
				seek(filePos);
			}
			int n = file.read(buf);
			if (n < 0) {
				throw new EOFException(JGitText.get().unexpectedEofInPack);
			}
			inf.setInput(buf, 0, n);
			return n;
		}
551 	}
552 
	/**
	 * Reader that resolves objects from both the underlying repository and the
	 * inserter's still-unflushed pack.
	 */
	private class Reader extends ObjectReader {
		// Delegate reader over the underlying object database.
		private final ObjectReader ctx;

		private Reader() {
			ctx = db.newReader();
			setStreamFileThreshold(ctx.getStreamFileThreshold());
		}

		@Override
		public ObjectReader newReader() {
			return db.newReader();
		}

		@Override
		public ObjectInserter getCreatedFromInserter() {
			return PackInserter.this;
		}
570 
571 		@Override
572 		public Collection<ObjectId> resolve(AbbreviatedObjectId id)
573 				throws IOException {
574 			Collection<ObjectId> stored = ctx.resolve(id);
575 			if (objectList == null) {
576 				return stored;
577 			}
578 
579 			Set<ObjectId> r = new HashSet<>(stored.size() + 2);
580 			r.addAll(stored);
581 			for (PackedObjectInfo obj : objectList) {
582 				if (id.prefixCompare(obj) == 0) {
583 					r.add(obj.copy());
584 				}
585 			}
586 			return r;
587 		}
588 
		@Override
		public ObjectLoader open(AnyObjectId objectId, int typeHint)
				throws MissingObjectException, IncorrectObjectTypeException,
				IOException {
			if (objectMap == null) {
				// No pack in progress; delegate entirely.
				return ctx.open(objectId, typeHint);
			}

			PackedObjectInfo obj = objectMap.get(objectId);
			if (obj == null) {
				// Not pending in this inserter; fall back to the database.
				return ctx.open(objectId, typeHint);
			}

			// Read back the object's pack header; 20 bytes covers the longest
			// type-and-size encoding.
			byte[] buf = buffer();
			packOut.seek(obj.getOffset());
			int cnt = packOut.file.read(buf, 0, 20);
			if (cnt <= 0) {
				throw new EOFException(JGitText.get().unexpectedEofInPack);
			}

			int c = buf[0] & 0xff;
			int type = (c >> 4) & 7;
			if (type == OBJ_OFS_DELTA || type == OBJ_REF_DELTA) {
				// This inserter only writes whole objects, so a delta header
				// cannot be read back directly.
				throw new IOException(MessageFormat.format(
						JGitText.get().cannotReadBackDelta, Integer.toString(type)));
			}
			if (typeHint != OBJ_ANY && type != typeHint) {
				throw new IncorrectObjectTypeException(objectId.copy(), typeHint);
			}

			// Decode the variable-length inflated size: 4 bits in the first
			// byte, then 7 bits per continuation byte.
			long sz = c & 0x0f;
			int ptr = 1;
			int shift = 4;
			while ((c & 0x80) != 0) {
				if (ptr >= cnt) {
					throw new EOFException(JGitText.get().unexpectedEofInPack);
				}
				c = buf[ptr++] & 0xff;
				sz += ((long) (c & 0x7f)) << shift;
				shift += 7;
			}

			long zpos = obj.getOffset() + ptr;
			if (sz < getStreamFileThreshold()) {
				byte[] data = inflate(obj, zpos, (int) sz);
				if (data != null) {
					return new ObjectLoader.SmallObject(type, data);
				}
				// Allocation failed; fall through to streaming below.
			}
			return new StreamLoader(type, sz, zpos);
		}
640 
641 		private byte[] inflate(PackedObjectInfo obj, long zpos, int sz)
642 				throws IOException, CorruptObjectException {
643 			try {
644 				return packOut.inflate(zpos, sz);
645 			} catch (DataFormatException dfe) {
646 				throw new CorruptObjectException(
647 						MessageFormat.format(
648 								JGitText.get().objectAtHasBadZlibStream,
649 								Long.valueOf(obj.getOffset()),
650 								tmpPack.getAbsolutePath()),
651 						dfe);
652 			}
653 		}
654 
		@Override
		public Set<ObjectId> getShallowCommits() throws IOException {
			return ctx.getShallowCommits();
		}

		@Override
		public void close() {
			// Close only the delegate; the inserter owns the pack stream.
			ctx.close();
		}
664 
		/**
		 * Loader that streams a large object back out of the temporary pack.
		 */
		private class StreamLoader extends ObjectLoader {
			private final int type;
			private final long size;
			// File position of the compressed data (just past the header).
			private final long pos;

			StreamLoader(int type, long size, long pos) {
				this.type = type;
				this.size = size;
				this.pos = pos;
			}

			@Override
			public ObjectStream openStream()
					throws MissingObjectException, IOException {
				int bufsz = buffer().length;
				packOut.seek(pos);

				InputStream fileStream = new FilterInputStream(
						Channels.newInputStream(packOut.file.getChannel())) {
							// atEnd was already set to false by the previous seek, but it's
							// technically possible for a caller to call insert on the
							// inserter in the middle of reading from this stream. Behavior is
							// undefined in this case, so it would arguably be ok to ignore,
							// but it's not hard to at least make an attempt to not corrupt
							// the data.
							@Override
							public int read() throws IOException {
								packOut.atEnd = false;
								return super.read();
							}

							@Override
							public int read(byte[] b) throws IOException {
								packOut.atEnd = false;
								return super.read(b);
							}

							@Override
							public int read(byte[] b, int off, int len) throws IOException {
								packOut.atEnd = false;
								return super.read(b,off,len);
							}

							@Override
							public void close() {
								// Never close underlying RandomAccessFile, which lasts the
								// lifetime of the enclosing PackStream.
							}
						};
				return new ObjectStream.Filter(
						type, size,
						new BufferedInputStream(
								new InflaterInputStream(fileStream, inflater(), bufsz), bufsz));
			}

			@Override
			public int getType() {
				return type;
			}

			@Override
			public long getSize() {
				return size;
			}

			@Override
			public byte[] getCachedBytes() throws LargeObjectException {
				// Large objects are never materialized whole; stream instead.
				throw new LargeObjectException.ExceedsLimit(
						getStreamFileThreshold(), size);
			}
		}
736 	}
737 }