/*
 * Copyright (C) 2017, Google Inc.
 * and other copyright owners as documented in the project's IP log.
 *
 * This program and the accompanying materials are made available
 * under the terms of the Eclipse Distribution License v1.0 which
 * accompanies this distribution, is reproduced below, and is
 * available at http://www.eclipse.org/org/documents/edl-v10.php
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials provided
 *   with the distribution.
 *
 * - Neither the name of the Eclipse Foundation, Inc. nor the
 *   names of its contributors may be used to endorse or promote
 *   products derived from this software without specific prior
 *   written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
 * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

package org.eclipse.jgit.internal.storage.file;

import static java.nio.file.StandardCopyOption.ATOMIC_MOVE;
import static org.eclipse.jgit.lib.Constants.OBJECT_ID_LENGTH;
import static org.eclipse.jgit.lib.Constants.OBJ_OFS_DELTA;
import static org.eclipse.jgit.lib.Constants.OBJ_REF_DELTA;

import java.io.BufferedInputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.RandomAccessFile;
import java.nio.channels.Channels;
import java.text.MessageFormat;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.zip.CRC32;
import java.util.zip.DataFormatException;
import java.util.zip.Deflater;
import java.util.zip.DeflaterOutputStream;
import java.util.zip.Inflater;
import java.util.zip.InflaterInputStream;

import org.eclipse.jgit.errors.CorruptObjectException;
import org.eclipse.jgit.errors.IncorrectObjectTypeException;
import org.eclipse.jgit.errors.LargeObjectException;
import org.eclipse.jgit.errors.MissingObjectException;
import org.eclipse.jgit.internal.JGitText;
import org.eclipse.jgit.lib.AbbreviatedObjectId;
import org.eclipse.jgit.lib.AnyObjectId;
import org.eclipse.jgit.lib.Constants;
import org.eclipse.jgit.lib.InflaterCache;
import org.eclipse.jgit.lib.ObjectId;
import org.eclipse.jgit.lib.ObjectIdOwnerMap;
import org.eclipse.jgit.lib.ObjectInserter;
import org.eclipse.jgit.lib.ObjectLoader;
import org.eclipse.jgit.lib.ObjectReader;
import org.eclipse.jgit.lib.ObjectStream;
import org.eclipse.jgit.transport.PackParser;
import org.eclipse.jgit.transport.PackedObjectInfo;
import org.eclipse.jgit.util.BlockList;
import org.eclipse.jgit.util.FileUtils;
import org.eclipse.jgit.util.IO;
import org.eclipse.jgit.util.NB;
import org.eclipse.jgit.util.io.CountingOutputStream;
import org.eclipse.jgit.util.sha1.SHA1;

/**
 * Object inserter that inserts one pack per call to {@link #flush()}, and never
 * inserts loose objects.
 */
public class PackInserter extends ObjectInserter {
	/** Always produce version 2 indexes, to get CRC data. */
	private static final int INDEX_VERSION = 2;

	private final ObjectDirectory db;

	private List<PackedObjectInfo> objectList;
	private ObjectIdOwnerMap<PackedObjectInfo> objectMap;
	private boolean rollback;
	private boolean checkExisting = true;

	private int compression = Deflater.BEST_COMPRESSION;
	private File tmpPack;
	private PackStream packOut;
	private Inflater cachedInflater;

	PackInserter(ObjectDirectory db) {
		this.db = db;
	}

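	// Usage sketch (hedged; assumes ObjectDirectory#newPackInserter(), which
	// constructs this package-private class): objects inserted here are
	// buffered into a temporary "insert_*.pack" file and stay invisible to
	// readers until flush() moves it into the pack directory.
	//
	//   try (PackInserter ins = objectDirectory.newPackInserter()) {
	//       ObjectId id = ins.insert(Constants.OBJ_BLOB,
	//               Constants.encode("hello\n"));
	//       ins.flush(); // one new pack, plus its index
	//   }
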
	/**
	 * Set whether to check if objects already exist in the repository.
	 *
	 * @param check
	 *            if {@code false}, write out possibly-duplicate objects
	 *            without first checking whether they exist in the repository;
	 *            default is {@code true}.
	 */
	public void checkExisting(boolean check) {
		checkExisting = check;
	}

	/**
	 * Set the compression level for the zlib deflater.
	 *
	 * @param compression
	 *            compression level for the zlib deflater.
	 */
	public void setCompressionLevel(int compression) {
		this.compression = compression;
	}

	int getBufferSize() {
		return buffer().length;
	}

	/** {@inheritDoc} */
	@Override
	public ObjectId insert(int type, byte[] data, int off, int len)
			throws IOException {
		ObjectId id = idFor(type, data, off, len);
		if (objectMap != null && objectMap.contains(id)) {
			return id;
		}
		// Ignore loose objects, which are potentially unreachable.
		if (checkExisting && db.hasPackedObject(id)) {
			return id;
		}

		long offset = beginObject(type, len);
		packOut.compress.write(data, off, len);
		packOut.compress.finish();
		return endObject(id, offset);
	}

	/** {@inheritDoc} */
	@Override
	public ObjectId insert(int type, long len, InputStream in)
			throws IOException {
		byte[] buf = buffer();
		if (len <= buf.length) {
			IO.readFully(in, buf, 0, (int) len);
			return insert(type, buf, 0, (int) len);
		}

		long offset = beginObject(type, len);
		SHA1 md = digest();
		md.update(Constants.encodedTypeString(type));
		md.update((byte) ' ');
		md.update(Constants.encodeASCII(len));
		md.update((byte) 0);

		while (0 < len) {
			int n = in.read(buf, 0, (int) Math.min(buf.length, len));
			if (n <= 0) {
				throw new EOFException();
			}
			md.update(buf, 0, n);
			packOut.compress.write(buf, 0, n);
			len -= n;
		}
		packOut.compress.finish();
		return endObject(md.toObjectId(), offset);
	}

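	// Note on the digest above: the hash covers the canonical object header
	// followed by the raw content, "<type> <length>\0<data>". For example, a
	// 6-byte blob containing "hello\n" hashes "blob 6\0hello\n", which gives
	// the well-known id ce013625030ba8dba906f756967f9e9ca394464a.
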
	private long beginObject(int type, long len) throws IOException {
		if (packOut == null) {
			beginPack();
		}
		long offset = packOut.getOffset();
		packOut.beginObject(type, len);
		return offset;
	}

	private ObjectId endObject(ObjectId id, long offset) {
		PackedObjectInfo obj = new PackedObjectInfo(id);
		obj.setOffset(offset);
		obj.setCRC((int) packOut.crc32.getValue());
		objectList.add(obj);
		objectMap.addIfAbsent(obj);
		return id;
	}

	private static File idxFor(File packFile) {
		String p = packFile.getName();
		return new File(
				packFile.getParentFile(),
				p.substring(0, p.lastIndexOf('.')) + ".idx"); //$NON-NLS-1$
	}

	private void beginPack() throws IOException {
		objectList = new BlockList<>();
		objectMap = new ObjectIdOwnerMap<>();

		rollback = true;
		tmpPack = File.createTempFile("insert_", ".pack", db.getDirectory()); //$NON-NLS-1$ //$NON-NLS-2$
		packOut = new PackStream(tmpPack);

		// Write the header as though it were a single object pack.
		packOut.write(packOut.hdrBuf, 0, writePackHeader(packOut.hdrBuf, 1));
	}

	private static int writePackHeader(byte[] buf, int objectCount) {
		System.arraycopy(Constants.PACK_SIGNATURE, 0, buf, 0, 4);
		NB.encodeInt32(buf, 4, 2); // Always use pack version 2.
		NB.encodeInt32(buf, 8, objectCount);
		return 12;
	}

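	// Layout of the 12-byte header written by writePackHeader:
	//
	//   bytes 0-3   'P' 'A' 'C' 'K'  (Constants.PACK_SIGNATURE)
	//   bytes 4-7   00 00 00 02      (pack version 2, big-endian)
	//   bytes 8-11  object count     (big-endian; written as 1 here, patched
	//                                 to the real count in finishPack)
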
	/** {@inheritDoc} */
	@Override
	public PackParser newPackParser(InputStream in) {
		throw new UnsupportedOperationException();
	}

	/** {@inheritDoc} */
	@Override
	public ObjectReader newReader() {
		return new Reader();
	}

	/** {@inheritDoc} */
	@Override
	public void flush() throws IOException {
		if (tmpPack == null) {
			return;
		}

		if (packOut == null) {
			// tmpPack is set but the stream is gone; the write state is
			// corrupt and there is nothing meaningful to report.
			throw new IOException();
		}

		byte[] packHash;
		try {
			packHash = packOut.finishPack();
		} finally {
			packOut = null;
		}

		Collections.sort(objectList);
		File tmpIdx = idxFor(tmpPack);
		writePackIndex(tmpIdx, packHash, objectList);

		File realPack = new File(db.getPackDirectory(),
				"pack-" + computeName(objectList).name() + ".pack"); //$NON-NLS-1$ //$NON-NLS-2$
		db.closeAllPackHandles(realPack);
		tmpPack.setReadOnly();
		FileUtils.rename(tmpPack, realPack, ATOMIC_MOVE);

		File realIdx = idxFor(realPack);
		tmpIdx.setReadOnly();
		try {
			FileUtils.rename(tmpIdx, realIdx, ATOMIC_MOVE);
		} catch (IOException e) {
			File newIdx = new File(
					realIdx.getParentFile(), realIdx.getName() + ".new"); //$NON-NLS-1$
			try {
				FileUtils.rename(tmpIdx, newIdx, ATOMIC_MOVE);
			} catch (IOException e2) {
				newIdx = tmpIdx;
				e = e2;
			}
			throw new IOException(MessageFormat.format(
					JGitText.get().panicCantRenameIndexFile, newIdx,
					realIdx), e);
		}

		db.openPack(realPack);
		rollback = false;
		clear();
	}

	private static void writePackIndex(File idx, byte[] packHash,
			List<PackedObjectInfo> list) throws IOException {
		try (OutputStream os = new FileOutputStream(idx)) {
			PackIndexWriter w = PackIndexWriter.createVersion(os, INDEX_VERSION);
			w.write(list, packHash);
		}
	}

	private ObjectId computeName(List<PackedObjectInfo> list) {
		SHA1 md = digest().reset();
		byte[] buf = buffer();
		for (PackedObjectInfo otp : list) {
			otp.copyRawTo(buf, 0);
			md.update(buf, 0, OBJECT_ID_LENGTH);
		}
		return ObjectId.fromRaw(md.digest());
	}

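	// computeName hashes the sorted object ids, so a pack holding the same set
	// of objects always receives the same "pack-<sha-1>.pack" file name,
	// independent of insertion order.
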
	/** {@inheritDoc} */
	@Override
	public void close() {
		try {
			if (packOut != null) {
				try {
					packOut.close();
				} catch (IOException err) {
					// Ignore a close failure; the pack is deleted below.
				}
			}
			if (rollback && tmpPack != null) {
				try {
					FileUtils.delete(tmpPack);
				} catch (IOException e) {
					// Still delete the temporary index.
				}
				try {
					FileUtils.delete(idxFor(tmpPack));
				} catch (IOException e) {
					// Ignore error deleting the temporary index.
				}
				rollback = false;
			}
		} finally {
			clear();
			try {
				InflaterCache.release(cachedInflater);
			} finally {
				cachedInflater = null;
			}
		}
	}

	private void clear() {
		objectList = null;
		objectMap = null;
		tmpPack = null;
		packOut = null;
	}

	private Inflater inflater() {
		if (cachedInflater == null) {
			cachedInflater = InflaterCache.get();
		} else {
			cachedInflater.reset();
		}
		return cachedInflater;
	}

	/**
	 * Stream that writes to a pack file.
	 * <p>
	 * Backed by two views of the same open file descriptor: a random-access
	 * file, and an output stream. Seeking in the file causes subsequent writes
	 * to the output stream to occur wherever the file pointer is pointing, so
	 * we need to take care to always seek to the end of the file before
	 * writing a new object.
	 * <p>
	 * Callers should always use {@link #seek(long)} to seek, rather than
	 * reaching into the file member. As long as this contract is followed,
	 * calls to {@link #write(byte[], int, int)} are guaranteed to write at the
	 * end of the file, even if there have been intermediate seeks.
	 */
	private class PackStream extends OutputStream {
		final byte[] hdrBuf;
		final CRC32 crc32;
		final DeflaterOutputStream compress;

		private final RandomAccessFile file;
		private final CountingOutputStream out;
		private final Deflater deflater;

		private boolean atEnd;

		PackStream(File pack) throws IOException {
			file = new RandomAccessFile(pack, "rw"); //$NON-NLS-1$
			out = new CountingOutputStream(new FileOutputStream(file.getFD()));
			deflater = new Deflater(compression);
			compress = new DeflaterOutputStream(this, deflater, 8192);
			hdrBuf = new byte[32];
			crc32 = new CRC32();
			atEnd = true;
		}

		long getOffset() {
			// This value is accurate as long as we only ever write to the end
			// of the file, and don't seek back to overwrite any previous
			// segments. Although this is subtle, storing the stream counter
			// this way is still preferable to returning file.length() here, as
			// it avoids a syscall and possible IOException.
			return out.getCount();
		}

		void seek(long offset) throws IOException {
			file.seek(offset);
			atEnd = false;
		}

		void beginObject(int objectType, long length) throws IOException {
			crc32.reset();
			deflater.reset();
			write(hdrBuf, 0, encodeTypeSize(objectType, length));
		}

		private int encodeTypeSize(int type, long rawLength) {
			long nextLength = rawLength >>> 4;
			hdrBuf[0] = (byte) ((nextLength > 0 ? 0x80 : 0x00) | (type << 4)
					| (rawLength & 0x0F));
			rawLength = nextLength;
			int n = 1;
			while (rawLength > 0) {
				nextLength >>>= 7;
				hdrBuf[n++] = (byte) ((nextLength > 0 ? 0x80 : 0x00)
						| (rawLength & 0x7F));
				rawLength = nextLength;
			}
			return n;
		}

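		// Worked example for encodeTypeSize: a commit (type 1) of 300 bytes.
		// 300 = 0b1_0010_1100; the low 4 bits (0xC) share byte 0 with the type
		// and a continuation bit:
		//   hdrBuf[0] = (byte) (0x80 | (1 << 4) | 0xC)  // 0x9C
		// The remaining bits (300 >>> 4 = 18) fit into byte 1 with no
		// continuation bit set:
		//   hdrBuf[1] = 0x12
		// so the encoded header is { 0x9C, 0x12 } and n = 2.
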
		@Override
		public void write(int b) throws IOException {
			hdrBuf[0] = (byte) b;
			write(hdrBuf, 0, 1);
		}

		@Override
		public void write(byte[] data, int off, int len) throws IOException {
			crc32.update(data, off, len);
			if (!atEnd) {
				file.seek(file.length());
				atEnd = true;
			}
			out.write(data, off, len);
		}

		byte[] finishPack() throws IOException {
			// Overwrite placeholder header with the actual object count, then
			// hash. This method intentionally uses direct seek/write calls
			// rather than the wrappers which keep track of atEnd. This leaves
			// atEnd, the file pointer, and out's counter in an inconsistent
			// state; that's ok, since this method closes the file anyway.
			try {
				file.seek(0);
				out.write(hdrBuf, 0, writePackHeader(hdrBuf, objectList.size()));

				byte[] buf = buffer();
				SHA1 md = digest().reset();
				file.seek(0);
				while (true) {
					int r = file.read(buf);
					if (r < 0) {
						break;
					}
					md.update(buf, 0, r);
				}
				byte[] packHash = md.digest();
				out.write(packHash, 0, packHash.length);
				return packHash;
			} finally {
				close();
			}
		}

		@Override
		public void close() throws IOException {
			deflater.end();
			try {
				out.close();
			} finally {
				file.close();
			}
		}

		byte[] inflate(long filePos, int len)
				throws IOException, DataFormatException {
			byte[] dstbuf;
			try {
				dstbuf = new byte[len];
			} catch (OutOfMemoryError noMemory) {
				return null; // Caller will switch to large object streaming.
			}

			byte[] srcbuf = buffer();
			Inflater inf = inflater();
			filePos += setInput(filePos, inf, srcbuf);
			for (int dstoff = 0;;) {
				int n = inf.inflate(dstbuf, dstoff, dstbuf.length - dstoff);
				dstoff += n;
				if (inf.finished()) {
					return dstbuf;
				}
				if (inf.needsInput()) {
					filePos += setInput(filePos, inf, srcbuf);
				} else if (n == 0) {
					throw new DataFormatException();
				}
			}
		}

		private int setInput(long filePos, Inflater inf, byte[] buf)
				throws IOException {
			if (file.getFilePointer() != filePos) {
				seek(filePos);
			}
			int n = file.read(buf);
			if (n < 0) {
				throw new EOFException(JGitText.get().unexpectedEofInPack);
			}
			inf.setInput(buf, 0, n);
			return n;
		}
	}

	private class Reader extends ObjectReader {
		private final ObjectReader ctx;

		private Reader() {
			ctx = db.newReader();
			setStreamFileThreshold(ctx.getStreamFileThreshold());
		}

		@Override
		public ObjectReader newReader() {
			return db.newReader();
		}

		@Override
		public ObjectInserter getCreatedFromInserter() {
			return PackInserter.this;
		}

		@Override
		public Collection<ObjectId> resolve(AbbreviatedObjectId id)
				throws IOException {
			Collection<ObjectId> stored = ctx.resolve(id);
			if (objectList == null) {
				return stored;
			}

			Set<ObjectId> r = new HashSet<>(stored.size() + 2);
			r.addAll(stored);
			for (PackedObjectInfo obj : objectList) {
				if (id.prefixCompare(obj) == 0) {
					r.add(obj.copy());
				}
			}
			return r;
		}

		@Override
		public ObjectLoader open(AnyObjectId objectId, int typeHint)
				throws MissingObjectException, IncorrectObjectTypeException,
				IOException {
			if (objectMap == null) {
				return ctx.open(objectId, typeHint);
			}

			PackedObjectInfo obj = objectMap.get(objectId);
			if (obj == null) {
				return ctx.open(objectId, typeHint);
			}

			byte[] buf = buffer();
			packOut.seek(obj.getOffset());
			int cnt = packOut.file.read(buf, 0, 20);
			if (cnt <= 0) {
				throw new EOFException(JGitText.get().unexpectedEofInPack);
			}

			int c = buf[0] & 0xff;
			int type = (c >> 4) & 7;
			if (type == OBJ_OFS_DELTA || type == OBJ_REF_DELTA) {
				throw new IOException(MessageFormat.format(
						JGitText.get().cannotReadBackDelta,
						Integer.toString(type)));
			}
			if (typeHint != OBJ_ANY && type != typeHint) {
				throw new IncorrectObjectTypeException(objectId.copy(), typeHint);
			}

			long sz = c & 0x0f;
			int ptr = 1;
			int shift = 4;
			while ((c & 0x80) != 0) {
				if (ptr >= cnt) {
					throw new EOFException(JGitText.get().unexpectedEofInPack);
				}
				c = buf[ptr++] & 0xff;
				sz += ((long) (c & 0x7f)) << shift;
				shift += 7;
			}
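			// This loop is the inverse of PackStream#encodeTypeSize: e.g.
			// header bytes { 0x9C, 0x12 } decode to type 1 (commit), size 300.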

			long zpos = obj.getOffset() + ptr;
			if (sz < getStreamFileThreshold()) {
				byte[] data = inflate(obj, zpos, (int) sz);
				if (data != null) {
					return new ObjectLoader.SmallObject(type, data);
				}
			}
			return new StreamLoader(type, sz, zpos);
		}

		private byte[] inflate(PackedObjectInfo obj, long zpos, int sz)
				throws IOException, CorruptObjectException {
			try {
				return packOut.inflate(zpos, sz);
			} catch (DataFormatException dfe) {
				throw new CorruptObjectException(
						MessageFormat.format(
								JGitText.get().objectAtHasBadZlibStream,
								Long.valueOf(obj.getOffset()),
								tmpPack.getAbsolutePath()),
						dfe);
			}
		}

		@Override
		public Set<ObjectId> getShallowCommits() throws IOException {
			return ctx.getShallowCommits();
		}

		@Override
		public void close() {
			ctx.close();
		}

		private class StreamLoader extends ObjectLoader {
			private final int type;
			private final long size;
			private final long pos;

			StreamLoader(int type, long size, long pos) {
				this.type = type;
				this.size = size;
				this.pos = pos;
			}

			@Override
			public ObjectStream openStream()
					throws MissingObjectException, IOException {
				int bufsz = buffer().length;
				packOut.seek(pos);

				InputStream fileStream = new FilterInputStream(
						Channels.newInputStream(packOut.file.getChannel())) {
					// atEnd was already set to false by the previous seek, but
					// it's technically possible for a caller to call insert on
					// the inserter in the middle of reading from this stream.
					// Behavior is undefined in this case, so it would arguably
					// be ok to ignore, but it's not hard to at least make an
					// attempt to not corrupt the data.
					@Override
					public int read() throws IOException {
						packOut.atEnd = false;
						return super.read();
					}

					@Override
					public int read(byte[] b) throws IOException {
						packOut.atEnd = false;
						return super.read(b);
					}

					@Override
					public int read(byte[] b, int off, int len)
							throws IOException {
						packOut.atEnd = false;
						return super.read(b, off, len);
					}

					@Override
					public void close() {
						// Never close the underlying RandomAccessFile, which
						// lasts the lifetime of the enclosing PackStream.
					}
				};
				return new ObjectStream.Filter(
						type, size,
						new BufferedInputStream(
								new InflaterInputStream(fileStream, inflater(),
										bufsz), bufsz));
			}

			@Override
			public int getType() {
				return type;
			}

			@Override
			public long getSize() {
				return size;
			}

			@Override
			public byte[] getCachedBytes() throws LargeObjectException {
				throw new LargeObjectException.ExceedsLimit(
						getStreamFileThreshold(), size);
			}
		}
	}
}