1   /*
2    * Copyright (C) 2008, Shawn O. Pearce <spearce@spearce.org>
3    * Copyright (C) 2010, Christian Halstrick <christian.halstrick@sap.com>
4    * Copyright (C) 2010, Matthias Sohn <matthias.sohn@sap.com>
5    * Copyright (C) 2012-2013, Robin Rosenberg
6    * and other copyright owners as documented in the project's IP log.
7    *
8    * This program and the accompanying materials are made available
9    * under the terms of the Eclipse Distribution License v1.0 which
10   * accompanies this distribution, is reproduced below, and is
11   * available at http://www.eclipse.org/org/documents/edl-v10.php
12   *
13   * All rights reserved.
14   *
15   * Redistribution and use in source and binary forms, with or
16   * without modification, are permitted provided that the following
17   * conditions are met:
18   *
19   * - Redistributions of source code must retain the above copyright
20   *   notice, this list of conditions and the following disclaimer.
21   *
22   * - Redistributions in binary form must reproduce the above
23   *   copyright notice, this list of conditions and the following
24   *   disclaimer in the documentation and/or other materials provided
25   *   with the distribution.
26   *
27   * - Neither the name of the Eclipse Foundation, Inc. nor the
28   *   names of its contributors may be used to endorse or promote
29   *   products derived from this software without specific prior
30   *   written permission.
31   *
32   * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
33   * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
34   * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
35   * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
36   * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
37   * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
38   * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
39   * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
40   * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
41   * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
42   * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
43   * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
44   * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
45   */
46  
47  package org.eclipse.jgit.treewalk;
48  
49  import java.io.ByteArrayInputStream;
50  import java.io.File;
51  import java.io.FileInputStream;
52  import java.io.FileNotFoundException;
53  import java.io.IOException;
54  import java.io.InputStream;
55  import java.nio.ByteBuffer;
56  import java.nio.CharBuffer;
57  import java.nio.charset.CharacterCodingException;
58  import java.nio.charset.CharsetEncoder;
59  import java.security.MessageDigest;
60  import java.text.MessageFormat;
61  import java.util.Arrays;
62  import java.util.Collections;
63  import java.util.Comparator;
64  
65  import org.eclipse.jgit.attributes.AttributesNode;
66  import org.eclipse.jgit.attributes.AttributesRule;
67  import org.eclipse.jgit.diff.RawText;
68  import org.eclipse.jgit.dircache.DirCache;
69  import org.eclipse.jgit.dircache.DirCacheEntry;
70  import org.eclipse.jgit.dircache.DirCacheIterator;
71  import org.eclipse.jgit.errors.CorruptObjectException;
72  import org.eclipse.jgit.errors.MissingObjectException;
73  import org.eclipse.jgit.errors.NoWorkTreeException;
74  import org.eclipse.jgit.ignore.FastIgnoreRule;
75  import org.eclipse.jgit.ignore.IgnoreNode;
76  import org.eclipse.jgit.internal.JGitText;
77  import org.eclipse.jgit.lib.Constants;
78  import org.eclipse.jgit.lib.CoreConfig;
79  import org.eclipse.jgit.lib.CoreConfig.CheckStat;
80  import org.eclipse.jgit.lib.CoreConfig.SymLinks;
81  import org.eclipse.jgit.lib.FileMode;
82  import org.eclipse.jgit.lib.ObjectId;
83  import org.eclipse.jgit.lib.ObjectLoader;
84  import org.eclipse.jgit.lib.ObjectReader;
85  import org.eclipse.jgit.lib.Repository;
86  import org.eclipse.jgit.submodule.SubmoduleWalk;
87  import org.eclipse.jgit.util.FS;
88  import org.eclipse.jgit.util.IO;
89  import org.eclipse.jgit.util.RawParseUtils;
90  import org.eclipse.jgit.util.io.EolCanonicalizingInputStream;
91  
92  /**
93   * Walks a working directory tree as part of a {@link TreeWalk}.
94   * <p>
95   * Most applications will want to use the standard implementation of this
96   * iterator, {@link FileTreeIterator}, as that does all IO through the standard
97   * <code>java.io</code> package. Plugins for a Java-based IDE may, however, wish
98   * to create their own implementations of this class to allow traversal of the
99   * IDE's project space, as well as benefit from any caching the IDE may have.
100  *
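 * <p>
 * A minimal usage sketch (assuming an existing <code>Repository</code> named
 * <code>repository</code>; exception handling is omitted):
 *
 * <pre>
 * TreeWalk walk = new TreeWalk(repository);
 * walk.addTree(new FileTreeIterator(repository));
 * walk.setRecursive(true);
 * while (walk.next())
 * 	System.out.println(walk.getPathString());
 * </pre>
 *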
101  * @see FileTreeIterator
102  */
103 public abstract class WorkingTreeIterator extends AbstractTreeIterator {
104 	/** An empty entry array, suitable for {@link #init(Entry[])}. */
105 	protected static final Entry[] EOF = {};
106 
107 	/** Size we perform file IO in if we have to read and hash a file. */
108 	static final int BUFFER_SIZE = 2048;
109 
110 	/**
111 	 * Maximum size of files which may be read fully into memory for performance
112 	 * reasons.
113 	 */
114 	private static final long MAXIMUM_FILE_SIZE_TO_READ_FULLY = 65536;
115 
116 	/** Inherited state of this iterator, describing working tree, etc. */
117 	private final IteratorState state;
118 
119 	/** The {@link #idBuffer()} for the current entry. */
120 	private byte[] contentId;
121 
122 	/** Index within {@link #entries} that {@link #contentId} came from. */
123 	private int contentIdFromPtr;
124 
125 	/** List of entries obtained from the subclass. */
126 	private Entry[] entries;
127 
128 	/** Total number of entries in {@link #entries} that are valid. */
129 	private int entryCnt;
130 
131 	/** Current position within {@link #entries}. */
132 	private int ptr;
133 
134 	/** If there is a .gitignore file present, the parsed rules from it. */
135 	private IgnoreNode ignoreNode;
136 
137 	/** If there is a .gitattributes file present, the parsed rules from it. */
138 	private AttributesNode attributesNode;
139 
140 	/** Repository that is the root level being iterated over */
141 	protected Repository repository;
142 
143 	/** Cached canonical length, initialized from {@link #idBuffer()} */
144 	private long canonLen = -1;
145 
146 	/** The offset of the content id in {@link #idBuffer()} */
147 	private int contentIdOffset;
148 
149 	/**
150 	 * Holds the {@link AttributesNode} that is stored in
151 	 * the $GIT_DIR/info/attributes file.
152 	 */
153 	private AttributesNode infoAttributeNode;
154 
155 	/**
156 	 * Holds the {@link AttributesNode} that is stored in the global attributes file.
157 	 *
158 	 * @see CoreConfig#getAttributesFile()
159 	 */
160 	private AttributesNode globalAttributeNode;
161 
162 	/**
163 	 * Create a new iterator with no parent.
164 	 *
165 	 * @param options
166 	 *            working tree options to be used
167 	 */
168 	protected WorkingTreeIterator(WorkingTreeOptions options) {
169 		super();
170 		state = new IteratorState(options);
171 	}
172 
173 	/**
174 	 * Create a new iterator with no parent and a prefix.
175 	 * <p>
176 	 * The prefix path supplied is inserted in front of all paths generated by
177 	 * this iterator. It is intended to be used when an iterator is being
178 	 * created for a subsection of an overall repository and needs to be
179 	 * combined with other iterators that are created to run over the entire
180 	 * repository namespace.
181 	 *
182 	 * @param prefix
183 	 *            position of this iterator in the repository tree. The value
184 	 *            may be null or the empty string to indicate the prefix is the
185 	 *            root of the repository. A trailing slash ('/') is
186 	 *            automatically appended if the prefix does not end in '/'.
187 	 * @param options
188 	 *            working tree options to be used
189 	 */
190 	protected WorkingTreeIterator(final String prefix,
191 			WorkingTreeOptions options) {
192 		super(prefix);
193 		state = new IteratorState(options);
194 	}
195 
196 	/**
197 	 * Create an iterator for a subtree of an existing iterator.
198 	 *
199 	 * @param p
200 	 *            parent tree iterator.
201 	 */
202 	protected WorkingTreeIterator(final WorkingTreeIterator p) {
203 		super(p);
204 		state = p.state;
205 		infoAttributeNode = p.infoAttributeNode;
206 		globalAttributeNode = p.globalAttributeNode;
207 	}
208 
209 	/**
210 	 * Initialize this iterator for the root level of a repository.
211 	 * <p>
212 	 * This method should only be invoked after calling {@link #init(Entry[])},
213 	 * and only for the root iterator.
214 	 *
215 	 * @param repo
216 	 *            the repository.
217 	 */
218 	protected void initRootIterator(Repository repo) {
219 		repository = repo;
220 		Entry entry;
221 		if (ignoreNode instanceof PerDirectoryIgnoreNode)
222 			entry = ((PerDirectoryIgnoreNode) ignoreNode).entry;
223 		else
224 			entry = null;
225 		ignoreNode = new RootIgnoreNode(entry, repo);
226 
227 		infoAttributeNode = new InfoAttributesNode(repo);
228 
229 		globalAttributeNode = new GlobalAttributesNode(repo);
230 	}
231 
232 	/**
233 	 * Define the matching {@link DirCacheIterator}, to reuse its ObjectIds for unmodified entries.
234 	 *
235 	 * Once the DirCacheIterator has been set this iterator must only be
236 	 * advanced by the TreeWalk that is supplied, as it assumes that itself and
237 	 * the corresponding DirCacheIterator are positioned on the same file path
238 	 * whenever {@link #idBuffer()} is invoked.
239 	 *
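	 * A typical pairing, sketched for illustration (the <code>repository</code>
	 * instance and the tree order are assumptions of this example):
	 *
	 * <pre>
	 * TreeWalk walk = new TreeWalk(repository);
	 * int dcTree = walk.addTree(new DirCacheIterator(repository.readDirCache()));
	 * FileTreeIterator fileIter = new FileTreeIterator(repository);
	 * walk.addTree(fileIter);
	 * fileIter.setDirCacheIterator(walk, dcTree);
	 * </pre>
	 *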
240 	 * @param walk
241 	 *            the walk that will be advancing this iterator.
242 	 * @param treeId
243 	 *            index of the matching {@link DirCacheIterator}.
244 	 */
245 	public void setDirCacheIterator(TreeWalk walk, int treeId) {
246 		state.walk = walk;
247 		state.dirCacheTree = treeId;
248 	}
249 
250 	@Override
251 	public boolean hasId() {
252 		if (contentIdFromPtr == ptr)
253 			return true;
254 		return (mode & FileMode.TYPE_MASK) == FileMode.TYPE_FILE;
255 	}
256 
257 	@Override
258 	public byte[] idBuffer() {
259 		if (contentIdFromPtr == ptr)
260 			return contentId;
261 
262 		if (state.walk != null) {
263 			// If there is a matching DirCacheIterator, we can reuse
264 			// its idBuffer, but only if we appear to be clean against
265 			// the cached index information for the path.
266 			//
267 			DirCacheIterator i = state.walk.getTree(state.dirCacheTree,
268 					DirCacheIterator.class);
269 			if (i != null) {
270 				DirCacheEntry ent = i.getDirCacheEntry();
271 				if (ent != null && compareMetadata(ent) == MetadataDiff.EQUAL) {
272 					contentIdOffset = i.idOffset();
273 					contentIdFromPtr = ptr;
274 					return contentId = i.idBuffer();
275 				}
276 				contentIdOffset = 0;
277 			} else {
278 				contentIdOffset = 0;
279 			}
280 		}
281 		switch (mode & FileMode.TYPE_MASK) {
282 		case FileMode.TYPE_SYMLINK:
283 		case FileMode.TYPE_FILE:
284 			contentIdFromPtr = ptr;
285 			return contentId = idBufferBlob(entries[ptr]);
286 		case FileMode.TYPE_GITLINK:
287 			contentIdFromPtr = ptr;
288 			return contentId = idSubmodule(entries[ptr]);
289 		}
290 		return zeroid;
291 	}
292 
293 	/**
294 	 * Get submodule id for given entry.
295 	 *
296 	 * @param e
297 	 * @return non-null submodule id
298 	 */
299 	protected byte[] idSubmodule(Entry e) {
300 		if (repository == null)
301 			return zeroid;
302 		File directory;
303 		try {
304 			directory = repository.getWorkTree();
305 		} catch (NoWorkTreeException nwte) {
306 			return zeroid;
307 		}
308 		return idSubmodule(directory, e);
309 	}
310 
311 	/**
312 	 * Get submodule id using the repository at the location of the entry
313 	 * relative to the directory.
314 	 *
315 	 * @param directory
316 	 * @param e
317 	 * @return non-null submodule id
318 	 */
319 	protected byte[] idSubmodule(File directory, Entry e) {
320 		final Repository submoduleRepo;
321 		try {
322 			submoduleRepo = SubmoduleWalk.getSubmoduleRepository(directory,
323 					e.getName());
324 		} catch (IOException exception) {
325 			return zeroid;
326 		}
327 		if (submoduleRepo == null)
328 			return zeroid;
329 
330 		final ObjectId head;
331 		try {
332 			head = submoduleRepo.resolve(Constants.HEAD);
333 		} catch (IOException exception) {
334 			return zeroid;
335 		} finally {
336 			submoduleRepo.close();
337 		}
338 		if (head == null)
339 			return zeroid;
340 		final byte[] id = new byte[Constants.OBJECT_ID_LENGTH];
341 		head.copyRawTo(id, 0);
342 		return id;
343 	}
344 
345 	private static final byte[] digits = { '0', '1', '2', '3', '4', '5', '6',
346 			'7', '8', '9' };
347 
348 	private static final byte[] hblob = Constants
349 			.encodedTypeString(Constants.OBJ_BLOB);
350 
351 	private byte[] idBufferBlob(final Entry e) {
352 		try {
353 			final InputStream is = e.openInputStream();
354 			if (is == null)
355 				return zeroid;
356 			try {
357 				state.initializeDigestAndReadBuffer();
358 
359 				final long len = e.getLength();
360 				InputStream filteredIs = possiblyFilteredInputStream(e, is, len);
361 				return computeHash(filteredIs, canonLen);
362 			} finally {
363 				safeClose(is);
364 			}
365 		} catch (IOException err) {
366 			// Can't read the file? Don't report the failure either.
367 			return zeroid;
368 		}
369 	}
370 
371 	private InputStream possiblyFilteredInputStream(final Entry e,
372 			final InputStream is, final long len) throws IOException {
373 		if (!mightNeedCleaning()) {
374 			canonLen = len;
375 			return is;
376 		}
377 
378 		if (len <= MAXIMUM_FILE_SIZE_TO_READ_FULLY) {
379 			ByteBuffer rawbuf = IO.readWholeStream(is, (int) len);
380 			byte[] raw = rawbuf.array();
381 			int n = rawbuf.limit();
382 			if (!isBinary(raw, n)) {
383 				rawbuf = filterClean(raw, n);
384 				raw = rawbuf.array();
385 				n = rawbuf.limit();
386 			}
387 			canonLen = n;
388 			return new ByteArrayInputStream(raw, 0, n);
389 		}
390 
391 		if (isBinary(e)) {
392 			canonLen = len;
393 			return is;
394 		}
395 
396 		final InputStream lenIs = filterClean(e.openInputStream());
397 		try {
398 			canonLen = computeLength(lenIs);
399 		} finally {
400 			safeClose(lenIs);
401 		}
402 		return filterClean(is);
403 	}
404 
405 	private static void safeClose(final InputStream in) {
406 		try {
407 			in.close();
408 		} catch (IOException err2) {
409 			// Suppress any error related to closing an input
410 			// stream. We don't care, we should not have any
411 			// outstanding data to flush or anything like that.
412 		}
413 	}
414 
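	// When core.autocrlf is true or input, file content must be passed through
	// the CRLF-to-LF ("clean") filter before hashing, so the computed id
	// matches the LF-normalized blob that would be stored in the repository.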
415 	private boolean mightNeedCleaning() {
416 		switch (getOptions().getAutoCRLF()) {
417 		case FALSE:
418 		default:
419 			return false;
420 
421 		case TRUE:
422 		case INPUT:
423 			return true;
424 		}
425 	}
426 
427 	private static boolean isBinary(byte[] content, int sz) {
428 		return RawText.isBinary(content, sz);
429 	}
430 
431 	private static boolean isBinary(Entry entry) throws IOException {
432 		InputStream in = entry.openInputStream();
433 		try {
434 			return RawText.isBinary(in);
435 		} finally {
436 			safeClose(in);
437 		}
438 	}
439 
440 	private static ByteBuffer filterClean(byte[] src, int n)
441 			throws IOException {
442 		InputStream in = new ByteArrayInputStream(src);
443 		try {
444 			return IO.readWholeStream(filterClean(in), n);
445 		} finally {
446 			safeClose(in);
447 		}
448 	}
449 
450 	private static InputStream filterClean(InputStream in) {
451 		return new EolCanonicalizingInputStream(in, true);
452 	}
453 
454 	/**
455 	 * Returns the working tree options used by this iterator.
456 	 *
457 	 * @return working tree options
458 	 */
459 	public WorkingTreeOptions getOptions() {
460 		return state.options;
461 	}
462 
463 	@Override
464 	public int idOffset() {
465 		return contentIdOffset;
466 	}
467 
468 	@Override
469 	public void reset() {
470 		if (!first()) {
471 			ptr = 0;
472 			if (!eof())
473 				parseEntry();
474 		}
475 	}
476 
477 	@Override
478 	public boolean first() {
479 		return ptr == 0;
480 	}
481 
482 	@Override
483 	public boolean eof() {
484 		return ptr == entryCnt;
485 	}
486 
487 	@Override
488 	public void next(final int delta) throws CorruptObjectException {
489 		ptr += delta;
490 		if (!eof()) {
491 			parseEntry();
492 		}
493 	}
494 
495 	@Override
496 	public void back(final int delta) throws CorruptObjectException {
497 		ptr -= delta;
498 		parseEntry();
499 	}
500 
501 	private void parseEntry() {
502 		final Entry e = entries[ptr];
503 		mode = e.getMode().getBits();
504 
505 		final int nameLen = e.encodedNameLen;
506 		ensurePathCapacity(pathOffset + nameLen, pathOffset);
507 		System.arraycopy(e.encodedName, 0, path, pathOffset, nameLen);
508 		pathLen = pathOffset + nameLen;
509 		canonLen = -1;
510 	}
511 
512 	/**
513 	 * Get the raw byte length of this entry.
514 	 *
515 	 * @return size of this file, in bytes.
516 	 */
517 	public long getEntryLength() {
518 		return current().getLength();
519 	}
520 
521 	/**
522 	 * Get the filtered input length of this entry
523 	 *
524 	 * @return size of the content, in bytes
525 	 * @throws IOException
526 	 */
527 	public long getEntryContentLength() throws IOException {
528 		if (canonLen == -1) {
529 			long rawLen = getEntryLength();
530 			if (rawLen == 0)
531 				canonLen = 0;
532 			InputStream is = current().openInputStream();
533 			try {
534 				// canonLen gets updated here
535 				possiblyFilteredInputStream(current(), is, current()
536 						.getLength());
537 			} finally {
538 				safeClose(is);
539 			}
540 		}
541 		return canonLen;
542 	}
543 
544 	/**
545 	 * Get the last modified time of this entry.
546 	 *
547 	 * @return last modified time of this file, in milliseconds since the epoch
548 	 *         (Jan 1, 1970 UTC).
549 	 */
550 	public long getEntryLastModified() {
551 		return current().getLastModified();
552 	}
553 
554 	/**
555 	 * Obtain an input stream to read the file content.
556 	 * <p>
557 	 * Efficient implementations are not required. The caller will usually
558 	 * obtain the stream only once per entry, if at all.
559 	 * <p>
560 	 * The input stream should not use buffering if the implementation can avoid
561 	 * it. The caller will buffer as necessary to perform efficient block IO
562 	 * operations.
563 	 * <p>
564 	 * The caller will close the stream once complete.
565 	 *
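	 * An illustrative read loop (the 4096 byte buffer size is an arbitrary
	 * choice and error handling is up to the caller):
	 *
	 * <pre>
	 * InputStream in = workingTreeIterator.openEntryStream();
	 * try {
	 * 	byte[] buf = new byte[4096];
	 * 	int n;
	 * 	while ((n = in.read(buf)) != -1) {
	 * 		// process buf[0..n)
	 * 	}
	 * } finally {
	 * 	in.close();
	 * }
	 * </pre>
	 *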
566 	 * @return a stream to read from the file.
567 	 * @throws IOException
568 	 *             the file could not be opened for reading.
569 	 */
570 	public InputStream openEntryStream() throws IOException {
571 		InputStream rawis = current().openInputStream();
572 		if (mightNeedCleaning())
573 			return filterClean(rawis);
574 		else
575 			return rawis;
576 	}
577 
578 	/**
579 	 * Determine if the current entry path is ignored by an ignore rule.
580 	 *
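	 * A common pattern is to skip ignored paths while walking; this sketch
	 * assumes the working tree iterator was added to the walk as tree 0:
	 *
	 * <pre>
	 * WorkingTreeIterator wt = walk.getTree(0, WorkingTreeIterator.class);
	 * if (wt != null) {
	 * 	if (wt.isEntryIgnored())
	 * 		continue; // skip paths matched by ignore rules
	 * }
	 * </pre>
	 *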
581 	 * @return true if the entry was ignored by an ignore rule file.
582 	 * @throws IOException
583 	 *             a relevant ignore rule file exists but cannot be read.
584 	 */
585 	public boolean isEntryIgnored() throws IOException {
586 		return isEntryIgnored(pathLen);
587 	}
588 
589 	/**
590 	 * Determine if the entry path is ignored by an ignore rule.
591 	 *
592 	 * @param pLen
593 	 *            the length of the path in the path buffer.
594 	 * @return true if the entry is ignored by an ignore rule.
595 	 * @throws IOException
596 	 *             a relevant ignore rule file exists but cannot be read.
597 	 */
598 	protected boolean isEntryIgnored(final int pLen) throws IOException {
599 		return isEntryIgnored(pLen, mode, false);
600 	}
601 
602 	/**
603 	 * Determine if the entry path is ignored by an ignore rule. Consider
604 	 * possible rule negation from child iterator.
605 	 *
606 	 * @param pLen
607 	 *            the length of the path in the path buffer.
608 	 * @param fileMode
609 	 *            the original iterator file mode
610 	 * @param negatePrevious
611 	 *            true if the previous matching iterator rule was negation
612 	 * @return true if the entry is ignored by an ignore rule.
613 	 * @throws IOException
614 	 *             a relevant ignore rule file exists but cannot be read.
615 	 */
616 	private boolean isEntryIgnored(final int pLen, int fileMode,
617 			boolean negatePrevious)
618 			throws IOException {
619 		IgnoreNode rules = getIgnoreNode();
620 		if (rules != null) {
621 			// The ignore code wants path to start with a '/' if possible.
622 			// If we have the '/' in our path buffer because we are inside
623 			// a subdirectory, include it in the range we convert to string.
624 			//
625 			int pOff = pathOffset;
626 			if (0 < pOff)
627 				pOff--;
628 			String p = TreeWalk.pathOf(path, pOff, pLen);
629 			switch (rules.isIgnored(p, FileMode.TREE.equals(fileMode),
630 					negatePrevious)) {
631 			case IGNORED:
632 				return true;
633 			case NOT_IGNORED:
634 				return false;
635 			case CHECK_PARENT:
636 				negatePrevious = false;
637 				break;
638 			case CHECK_PARENT_NEGATE_FIRST_MATCH:
639 				negatePrevious = true;
640 				break;
641 			}
642 		}
643 		if (parent instanceof WorkingTreeIterator)
644 			return ((WorkingTreeIterator) parent).isEntryIgnored(pLen, fileMode,
645 					negatePrevious);
646 		return false;
647 	}
648 
649 	private IgnoreNode getIgnoreNode() throws IOException {
650 		if (ignoreNode instanceof PerDirectoryIgnoreNode)
651 			ignoreNode = ((PerDirectoryIgnoreNode) ignoreNode).load();
652 		return ignoreNode;
653 	}
654 
655 	/**
656 	 * Retrieves the {@link AttributesNode} for the current entry.
657 	 *
658 	 * @return {@link AttributesNode} for the current entry.
659 	 * @throws IOException
660 	 *             if an error is raised while parsing the .gitattributes file
661 	 * @since 3.7
662 	 */
663 	public AttributesNode getEntryAttributesNode() throws IOException {
664 		if (attributesNode instanceof PerDirectoryAttributesNode)
665 			attributesNode = ((PerDirectoryAttributesNode) attributesNode)
666 					.load();
667 		return attributesNode;
668 	}
669 
670 	/**
671 	 * Retrieves the {@link AttributesNode} that holds the information located
672 	 * in the $GIT_DIR/info/attributes file.
673 	 *
674 	 * @return the {@link AttributesNode} that holds the information located in
675 	 *         the $GIT_DIR/info/attributes file.
676 	 * @throws IOException
677 	 *             if an error is raised while parsing the attributes file
678 	 * @since 3.7
679 	 */
680 	public AttributesNode getInfoAttributesNode() throws IOException {
681 		if (infoAttributeNode instanceof InfoAttributesNode)
682 			infoAttributeNode = ((InfoAttributesNode) infoAttributeNode).load();
683 		return infoAttributeNode;
684 	}
685 
686 	/**
687 	 * Retrieves the {@link AttributesNode} that holds the information located
688 	 * in the system-wide attributes file.
689 	 *
690 	 * @return the {@link AttributesNode} that holds the information located in
691 	 *         the system-wide attributes file.
692 	 * @throws IOException
693 	 *             if an error is raised while parsing the
694 	 *             attributes file
695 	 * @see CoreConfig#getAttributesFile()
696 	 * @since 3.7
697 	 */
698 	public AttributesNode getGlobalAttributesNode() throws IOException {
699 		if (globalAttributeNode instanceof GlobalAttributesNode)
700 			globalAttributeNode = ((GlobalAttributesNode) globalAttributeNode)
701 					.load();
702 		return globalAttributeNode;
703 	}
704 
705 	private static final Comparator<Entry> ENTRY_CMP = new Comparator<Entry>() {
706 		public int compare(final Entry o1, final Entry o2) {
707 			final byte[] a = o1.encodedName;
708 			final byte[] b = o2.encodedName;
709 			final int aLen = o1.encodedNameLen;
710 			final int bLen = o2.encodedNameLen;
711 			int cPos;
712 
713 			for (cPos = 0; cPos < aLen && cPos < bLen; cPos++) {
714 				final int cmp = (a[cPos] & 0xff) - (b[cPos] & 0xff);
715 				if (cmp != 0)
716 					return cmp;
717 			}
718 
719 			if (cPos < aLen)
720 				return (a[cPos] & 0xff) - lastPathChar(o2);
721 			if (cPos < bLen)
722 				return lastPathChar(o1) - (b[cPos] & 0xff);
723 			return lastPathChar(o1) - lastPathChar(o2);
724 		}
725 	};
726 
727 	static int lastPathChar(final Entry e) {
728 		return e.getMode() == FileMode.TREE ? '/' : '\0';
729 	}
730 
731 	/**
732 	 * Constructor helper.
733 	 *
734 	 * @param list
735 	 *            files in the subtree of the work tree this iterator operates
736 	 *            on
737 	 */
738 	protected void init(final Entry[] list) {
739 		// Filter out nulls, . and .. as these are not valid tree entries;
740 		// also cache the encoded forms of the path names for efficient use
741 		// later on during sorting and iteration.
742 		//
743 		entries = list;
744 		int i, o;
745 
746 		final CharsetEncoder nameEncoder = state.nameEncoder;
747 		for (i = 0, o = 0; i < entries.length; i++) {
748 			final Entry e = entries[i];
749 			if (e == null)
750 				continue;
751 			final String name = e.getName();
752 			if (".".equals(name) || "..".equals(name)) //$NON-NLS-1$ //$NON-NLS-2$
753 				continue;
754 			if (Constants.DOT_GIT.equals(name))
755 				continue;
756 			if (Constants.DOT_GIT_IGNORE.equals(name))
757 				ignoreNode = new PerDirectoryIgnoreNode(e);
758 			if (Constants.DOT_GIT_ATTRIBUTES.equals(name))
759 				attributesNode = new PerDirectoryAttributesNode(e);
760 			if (i != o)
761 				entries[o] = e;
762 			e.encodeName(nameEncoder);
763 			o++;
764 		}
765 		entryCnt = o;
766 		Arrays.sort(entries, 0, entryCnt, ENTRY_CMP);
767 
768 		contentIdFromPtr = -1;
769 		ptr = 0;
770 		if (!eof())
771 			parseEntry();
772 		else if (pathLen == 0) // see bug 445363
773 			pathLen = pathOffset;
774 	}
775 
776 	/**
777 	 * Obtain the current entry from this iterator.
778 	 *
779 	 * @return the currently selected entry.
780 	 */
781 	protected Entry current() {
782 		return entries[ptr];
783 	}
784 
785 	/**
786 	 * The result of a metadata-comparison between the current entry and a
787 	 * {@link DirCacheEntry}
788 	 */
789 	public enum MetadataDiff {
790 		/**
791 		 * The entries are equal by metadata (mode, length,
792 		 * modification-timestamp) or the <code>assumeValid</code> attribute of
793 		 * the index entry is set
794 		 */
795 		EQUAL,
796 
797 		/**
798 		 * The entries are not equal by metadata (mode, length) or the
799 		 * <code>isUpdateNeeded</code> attribute of the index entry is set
800 		 */
801 		DIFFER_BY_METADATA,
802 
803 		/** index entry is smudged - can't use that entry for comparison */
804 		SMUDGED,
805 
806 		/**
807 		 * The entries are equal by metadata (mode, length) but differ by
808 		 * modification-timestamp.
809 		 */
810 		DIFFER_BY_TIMESTAMP
811 	}
812 
813 	/**
814 	 * Is the file mode of the current entry different than the given raw mode?
815 	 *
816 	 * @param rawMode
817 	 * @return true if different, false otherwise
818 	 */
819 	public boolean isModeDifferent(final int rawMode) {
820 		// Determine difference in mode-bits of file and index-entry. In the
821 		// bitwise representation of modeDiff we'll have a '1' when the two modes
822 		// differ at this position.
823 		int modeDiff = getEntryRawMode() ^ rawMode;
824 
825 		if (modeDiff == 0)
826 			return false;
827 
828 		// Do not rely on filemode differences in case of symbolic links
829 		if (getOptions().getSymLinks() == SymLinks.FALSE)
830 			if (FileMode.SYMLINK.equals(rawMode))
831 				return false;
832 
833 		// Ignore the executable file bits if WorkingTreeOptions tell me to
834 		// do so. Ignoring is done by setting the bits representing an
835 		// EXECUTABLE_FILE to '0' in modeDiff
836 		if (!state.options.isFileMode())
837 			modeDiff &= ~FileMode.EXECUTABLE_FILE.getBits();
838 		return modeDiff != 0;
839 	}
840 
841 	/**
842 	 * Compare the metadata (mode, length, modification-timestamp) of the
843 	 * current entry and a {@link DirCacheEntry}
844 	 *
845 	 * @param entry
846 	 *            the {@link DirCacheEntry} to compare with
847 	 * @return a {@link MetadataDiff} which tells whether and how the entries
848 	 *         metadata differ
849 	 */
850 	public MetadataDiff compareMetadata(DirCacheEntry entry) {
851 		if (entry.isAssumeValid())
852 			return MetadataDiff.EQUAL;
853 
854 		if (entry.isUpdateNeeded())
855 			return MetadataDiff.DIFFER_BY_METADATA;
856 
857 		if (!entry.isSmudged() && entry.getLength() != (int) getEntryLength())
858 			return MetadataDiff.DIFFER_BY_METADATA;
859 
860 		if (isModeDifferent(entry.getRawMode()))
861 			return MetadataDiff.DIFFER_BY_METADATA;
862 
863 		// Git under Windows only stores seconds, so we round the timestamp
864 		// Java gives us if it looks like the timestamp in the index is seconds
865 		// only. Otherwise we compare the timestamp at millisecond precision,
866 		// unless core.checkstat is set to "minimal", in which case we only
867 		// compare the whole second part.
868 		long cacheLastModified = entry.getLastModified();
869 		long fileLastModified = getEntryLastModified();
870 		long lastModifiedMillis = fileLastModified % 1000;
871 		long cacheMillis = cacheLastModified % 1000;
872 		if (getOptions().getCheckStat() == CheckStat.MINIMAL) {
873 			fileLastModified = fileLastModified - lastModifiedMillis;
874 			cacheLastModified = cacheLastModified - cacheMillis;
875 		} else if (cacheMillis == 0)
876 			fileLastModified = fileLastModified - lastModifiedMillis;
877 		// Some Java versions on Linux return whole seconds only even when
878 		// the file system supports more precision.
879 		else if (lastModifiedMillis == 0)
880 			cacheLastModified = cacheLastModified - cacheMillis;
881 
882 		if (fileLastModified != cacheLastModified)
883 			return MetadataDiff.DIFFER_BY_TIMESTAMP;
884 		else if (!entry.isSmudged())
885 			// The file is clean when you look at timestamps.
886 			return MetadataDiff.EQUAL;
887 		else
888 			return MetadataDiff.SMUDGED;
889 	}
890 
891 	/**
892 	 * Checks whether this entry differs from a given entry from the
893 	 * {@link DirCache}.
894 	 *
895 	 * File status information is used, and if the status is the same we
896 	 * consider the file identical to the state in the working directory.
897 	 * Native git uses more stat fields than we have access to in Java.
898 	 *
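	 * An illustrative status-style check, assuming a walk set up with a
	 * {@link DirCacheIterator} as tree 0, this iterator as tree 1, and an
	 * existing <code>reader</code>:
	 *
	 * <pre>
	 * DirCacheIterator dc = walk.getTree(0, DirCacheIterator.class);
	 * WorkingTreeIterator wt = walk.getTree(1, WorkingTreeIterator.class);
	 * DirCacheEntry dcEntry = dc != null ? dc.getDirCacheEntry() : null;
	 * if (wt.isModified(dcEntry, true, reader))
	 * 	System.out.println("modified: " + walk.getPathString());
	 * </pre>
	 *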
899 	 * @param entry
900 	 *            the entry from the dircache we want to compare against
901 	 * @param forceContentCheck
902 	 *            True if the actual file content should be checked if
903 	 *            modification time differs.
904 	 * @param reader
905 	 *            access to repository objects if necessary. Should not be null.
906 	 * @return true if content is most likely different.
907 	 * @throws IOException
908 	 * @since 3.3
909 	 */
910 	public boolean isModified(DirCacheEntry entry, boolean forceContentCheck,
911 			ObjectReader reader) throws IOException {
912 		if (entry == null)
913 			return !FileMode.MISSING.equals(getEntryFileMode());
914 		MetadataDiff diff = compareMetadata(entry);
915 		switch (diff) {
916 		case DIFFER_BY_TIMESTAMP:
917 			if (forceContentCheck)
918 				// But we are told to look at content even though timestamps
919 				// tell us about modification
920 				return contentCheck(entry, reader);
921 			else
922 				// We are told to assume a modification if timestamps differ
923 				return true;
924 		case SMUDGED:
925 			// The file is clean by timestamps but the entry was smudged.
926 			// Let's do a content check
927 			return contentCheck(entry, reader);
928 		case EQUAL:
929 			return false;
930 		case DIFFER_BY_METADATA:
931 			if (mode == FileMode.SYMLINK.getBits())
932 				return contentCheck(entry, reader);
933 			return true;
934 		default:
935 			throw new IllegalStateException(MessageFormat.format(
936 					JGitText.get().unexpectedCompareResult, diff.name()));
937 		}
938 	}
939 
940 	/**
941 	 * Get the file mode to use for the current entry when it is to be updated
942 	 * in the index.
943 	 *
944 	 * @param indexIter
945 	 *            {@link DirCacheIterator} positioned at the same entry as this
946 	 *            iterator or null if no {@link DirCacheIterator} is available
947 	 *            at this iterator's current entry
948 	 * @return index file mode
949 	 */
950 	public FileMode getIndexFileMode(final DirCacheIterator indexIter) {
951 		final FileMode wtMode = getEntryFileMode();
952 		if (indexIter == null)
953 			return wtMode;
954 		if (getOptions().isFileMode())
955 			return wtMode;
956 		final FileMode iMode = indexIter.getEntryFileMode();
957 		if (FileMode.REGULAR_FILE == wtMode
958 				&& FileMode.EXECUTABLE_FILE == iMode)
959 			return iMode;
960 		if (FileMode.EXECUTABLE_FILE == wtMode
961 				&& FileMode.REGULAR_FILE == iMode)
962 			return iMode;
963 		return wtMode;
964 	}
965 
966 	/**
967 	 * Compares the entry's content with the content in the filesystem.
968 	 * Unsmudges the entry when it is detected to be clean.
969 	 *
970 	 * @param entry
971 	 *            the entry to be checked
972 	 * @param reader
973 	 *            access to repository data if necessary
974 	 * @return <code>true</code> if the content doesn't match,
975 	 *         <code>false</code> if it matches
976 	 * @throws IOException
977 	 */
978 	private boolean contentCheck(DirCacheEntry entry, ObjectReader reader)
979 			throws IOException {
980 		if (getEntryObjectId().equals(entry.getObjectId())) {
981 			// Content has not changed
982 
983 			// We know the entry can't be racily clean because it's still clean.
984 			// Therefore we unsmudge the entry!
985 			// If by any chance we now unsmudge although we are still in the
986 			// same time-slot as the last modification to the index file the
987 			// next index write operation will smudge again.
988 			// Caution: we are unsmudging just by setting the length of the
989 			// in-memory entry object. It's the caller's task to detect that we
990 			// have modified the entry and to persist the modified index.
991 			entry.setLength((int) getEntryLength());
992 
993 			return false;
994 		} else {
995 			if (mode == FileMode.SYMLINK.getBits())
996 				return !new File(readContentAsNormalizedString(current()))
997 						.equals(new File((readContentAsNormalizedString(entry,
998 								reader))));
999 			// Content differs: that's a real change, perhaps
1000 			if (reader == null) // deprecated use, do no further checks
1001 				return true;
1002 			switch (getOptions().getAutoCRLF()) {
1003 			case INPUT:
1004 			case TRUE:
1005 				InputStream dcIn = null;
1006 				try {
1007 					ObjectLoader loader = reader.open(entry.getObjectId());
1008 					if (loader == null)
1009 						return true;
1010 
1011 					// We need to compute the length, but only if it is not
1012 					// a binary stream.
1013 					dcIn = new EolCanonicalizingInputStream(
1014 							loader.openStream(), true, true /* abort if binary */);
1015 					long dcInLen;
1016 					try {
1017 						dcInLen = computeLength(dcIn);
1018 					} catch (EolCanonicalizingInputStream.IsBinaryException e) {
1019 						return true;
1020 					} finally {
1021 						dcIn.close();
1022 					}
1023 
1024 					dcIn = new EolCanonicalizingInputStream(
1025 							loader.openStream(), true);
1026 					byte[] autoCrLfHash = computeHash(dcIn, dcInLen);
1027 					boolean changed = getEntryObjectId().compareTo(
1028 							autoCrLfHash, 0) != 0;
1029 					return changed;
1030 				} catch (IOException e) {
1031 					return true;
1032 				} finally {
1033 					if (dcIn != null)
1034 						try {
1035 							dcIn.close();
1036 						} catch (IOException e) {
1037 							// empty
1038 						}
1039 				}
1040 			case FALSE:
1041 				break;
1042 			}
1043 			return true;
1044 		}
1045 	}
1046 
1047 	private static String readContentAsNormalizedString(DirCacheEntry entry,
1048 			ObjectReader reader) throws MissingObjectException, IOException {
1049 		ObjectLoader open = reader.open(entry.getObjectId());
1050 		byte[] cachedBytes = open.getCachedBytes();
1051 		return FS.detect().normalize(RawParseUtils.decode(cachedBytes));
1052 	}
1053 
1054 	private static String readContentAsNormalizedString(Entry entry) throws IOException {
1055 		long length = entry.getLength();
1056 		byte[] content = new byte[(int) length];
1057 		InputStream is = entry.openInputStream();
1058 		IO.readFully(is, content, 0, (int) length);
1059 		return FS.detect().normalize(RawParseUtils.decode(content));
1060 	}
1061 
1062 	private static long computeLength(InputStream in) throws IOException {
1063 		// Since we only care about the length, use skip. The stream
1064 		// may be able to more efficiently wade through its data.
1065 		//
1066 		long length = 0;
1067 		for (;;) {
1068 			long n = in.skip(1 << 20);
1069 			if (n <= 0)
1070 				break;
1071 			length += n;
1072 		}
1073 		return length;
1074 	}
1075 
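	// Computes the SHA-1 a blob of the given length would have in the object
	// database: the digest covers the canonical header ("blob", a space, the
	// decimal length in ASCII, a NUL byte) followed by the content bytes.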
1076 	private byte[] computeHash(InputStream in, long length) throws IOException {
1077 		final MessageDigest contentDigest = state.contentDigest;
1078 		final byte[] contentReadBuffer = state.contentReadBuffer;
1079 
1080 		contentDigest.reset();
1081 		contentDigest.update(hblob);
1082 		contentDigest.update((byte) ' ');
1083 
1084 		long sz = length;
1085 		if (sz == 0) {
1086 			contentDigest.update((byte) '0');
1087 		} else {
1088 			final int bufn = contentReadBuffer.length;
1089 			int p = bufn;
1090 			do {
1091 				contentReadBuffer[--p] = digits[(int) (sz % 10)];
1092 				sz /= 10;
1093 			} while (sz > 0);
1094 			contentDigest.update(contentReadBuffer, p, bufn - p);
1095 		}
1096 		contentDigest.update((byte) 0);
1097 
1098 		for (;;) {
1099 			final int r = in.read(contentReadBuffer);
1100 			if (r <= 0)
1101 				break;
1102 			contentDigest.update(contentReadBuffer, 0, r);
1103 			sz += r;
1104 		}
1105 		if (sz != length)
1106 			return zeroid;
1107 		return contentDigest.digest();
1108 	}
1109 
1110 	/** A single entry within a working directory tree. */
1111 	protected static abstract class Entry {
1112 		byte[] encodedName;
1113 
1114 		int encodedNameLen;
1115 
1116 		void encodeName(final CharsetEncoder enc) {
1117 			final ByteBuffer b;
1118 			try {
1119 				b = enc.encode(CharBuffer.wrap(getName()));
1120 			} catch (CharacterCodingException e) {
1121 				// This should never happen.
1122 				throw new RuntimeException(MessageFormat.format(
1123 						JGitText.get().unencodeableFile, getName()));
1124 			}
1125 
1126 			encodedNameLen = b.limit();
1127 			if (b.hasArray() && b.arrayOffset() == 0)
1128 				encodedName = b.array();
1129 			else
1130 				b.get(encodedName = new byte[encodedNameLen]);
1131 		}
1132 
1133 		public String toString() {
1134 			return getMode().toString() + " " + getName(); //$NON-NLS-1$
1135 		}
1136 
1137 		/**
1138 		 * Get the type of this entry.
1139 		 * <p>
1140 		 * <b>Note: Efficient implementation required.</b>
1141 		 * <p>
1142 		 * The implementation of this method must be efficient. If a subclass
1143 		 * needs to compute the value it should cache the reference within an
1144 		 * instance member instead.
1145 		 *
1146 		 * @return a file mode constant from {@link FileMode}.
1147 		 */
1148 		public abstract FileMode getMode();
1149 
1150 		/**
1151 		 * Get the byte length of this entry.
1152 		 * <p>
1153 		 * <b>Note: Efficient implementation required.</b>
1154 		 * <p>
1155 		 * The implementation of this method must be efficient. If a subclass
1156 		 * needs to compute the value it should cache the reference within an
1157 		 * instance member instead.
1158 		 *
1159 		 * @return size of this file, in bytes.
1160 		 */
1161 		public abstract long getLength();
1162 
1163 		/**
1164 		 * Get the last modified time of this entry.
1165 		 * <p>
1166 		 * <b>Note: Efficient implementation required.</b>
1167 		 * <p>
1168 		 * The implementation of this method must be efficient. If a subclass
1169 		 * needs to compute the value it should cache the reference within an
1170 		 * instance member instead.
1171 		 *
1172 		 * @return time since the epoch (in ms) of the last change.
1173 		 */
1174 		public abstract long getLastModified();
1175 
1176 		/**
1177 		 * Get the name of this entry within its directory.
1178 		 * <p>
1179 		 * Efficient implementations are not required. The caller will obtain
1180 		 * the name only once and cache it once obtained.
1181 		 *
1182 		 * @return name of the entry.
1183 		 */
1184 		public abstract String getName();
1185 
1186 		/**
1187 		 * Obtain an input stream to read the file content.
1188 		 * <p>
1189 		 * Efficient implementations are not required. The caller will usually
1190 		 * obtain the stream only once per entry, if at all.
1191 		 * <p>
1192 		 * The input stream should not use buffering if the implementation can
1193 		 * avoid it. The caller will buffer as necessary to perform efficient
1194 		 * block IO operations.
1195 		 * <p>
1196 		 * The caller will close the stream once complete.
1197 		 *
1198 		 * @return a stream to read from the file.
1199 		 * @throws IOException
1200 		 *             the file could not be opened for reading.
1201 		 */
1202 		public abstract InputStream openInputStream() throws IOException;
1203 	}
1204 
1205 	/** Magic type indicating we know rules exist, but they aren't loaded. */
1206 	private static class PerDirectoryIgnoreNode extends IgnoreNode {
1207 		final Entry entry;
1208 
1209 		PerDirectoryIgnoreNode(Entry entry) {
1210 			super(Collections.<FastIgnoreRule> emptyList());
1211 			this.entry = entry;
1212 		}
1213 
1214 		IgnoreNode load() throws IOException {
1215 			IgnoreNode r = new IgnoreNode();
1216 			InputStream in = entry.openInputStream();
1217 			try {
1218 				r.parse(in);
1219 			} finally {
1220 				in.close();
1221 			}
1222 			return r.getRules().isEmpty() ? null : r;
1223 		}
1224 	}
1225 
1226 	/** Magic type indicating there may be rules for the top level. */
1227 	private static class RootIgnoreNode extends PerDirectoryIgnoreNode {
1228 		final Repository repository;
1229 
1230 		RootIgnoreNode(Entry entry, Repository repository) {
1231 			super(entry);
1232 			this.repository = repository;
1233 		}
1234 
1235 		@Override
1236 		IgnoreNode load() throws IOException {
1237 			IgnoreNode r;
1238 			if (entry != null) {
1239 				r = super.load();
1240 				if (r == null)
1241 					r = new IgnoreNode();
1242 			} else {
1243 				r = new IgnoreNode();
1244 			}
1245 
1246 			FS fs = repository.getFS();
1247 			String path = repository.getConfig().get(CoreConfig.KEY)
1248 					.getExcludesFile();
1249 			if (path != null) {
1250 				File excludesfile;
1251 				if (path.startsWith("~/")) //$NON-NLS-1$
1252 					excludesfile = fs.resolve(fs.userHome(), path.substring(2));
1253 				else
1254 					excludesfile = fs.resolve(null, path);
1255 				loadRulesFromFile(r, excludesfile);
1256 			}
1257 
1258 			File exclude = fs.resolve(repository.getDirectory(),
1259 					Constants.INFO_EXCLUDE);
1260 			loadRulesFromFile(r, exclude);
1261 
1262 			return r.getRules().isEmpty() ? null : r;
1263 		}
1264 
1265 		private static void loadRulesFromFile(IgnoreNode r, File exclude)
1266 				throws FileNotFoundException, IOException {
1267 			if (FS.DETECTED.exists(exclude)) {
1268 				FileInputStream in = new FileInputStream(exclude);
1269 				try {
1270 					r.parse(in);
1271 				} finally {
1272 					in.close();
1273 				}
1274 			}
1275 		}
1276 	}
1277 
1278 	/** Magic type indicating we know rules exist, but they aren't loaded. */
1279 	private static class PerDirectoryAttributesNode extends AttributesNode {
1280 		final Entry entry;
1281 
1282 		PerDirectoryAttributesNode(Entry entry) {
1283 			super(Collections.<AttributesRule> emptyList());
1284 			this.entry = entry;
1285 		}
1286 
1287 		AttributesNode load() throws IOException {
1288 			AttributesNode r = new AttributesNode();
1289 			InputStream in = entry.openInputStream();
1290 			try {
1291 				r.parse(in);
1292 			} finally {
1293 				in.close();
1294 			}
1295 			return r.getRules().isEmpty() ? null : r;
1296 		}
1297 	}
1298 
1299 	/**
1300 	 * Attributes node loaded from global system-wide file.
1301 	 */
1302 	private static class GlobalAttributesNode extends AttributesNode {
1303 		final Repository repository;
1304 
1305 		GlobalAttributesNode(Repository repository) {
1306 			this.repository = repository;
1307 		}
1308 
1309 		AttributesNode load() throws IOException {
1310 			AttributesNode r = new AttributesNode();
1311 
1312 			FS fs = repository.getFS();
1313 			String path = repository.getConfig().get(CoreConfig.KEY)
1314 					.getAttributesFile();
1315 			if (path != null) {
1316 				File attributesFile;
1317 				if (path.startsWith("~/")) //$NON-NLS-1$
1318 					attributesFile = fs.resolve(fs.userHome(),
1319 							path.substring(2));
1320 				else
1321 					attributesFile = fs.resolve(null, path);
1322 				loadRulesFromFile(r, attributesFile);
1323 			}
1324 			return r.getRules().isEmpty() ? null : r;
1325 		}
1326 	}
1327 
1328 	/** Magic type indicating there may be rules for the top level. */
1329 	private static class InfoAttributesNode extends AttributesNode {
1330 		final Repository repository;
1331 
1332 		InfoAttributesNode(Repository repository) {
1333 			this.repository = repository;
1334 		}
1335 
1336 		AttributesNode load() throws IOException {
1337 			AttributesNode r = new AttributesNode();
1338 
1339 			FS fs = repository.getFS();
1340 
1341 			File attributes = fs.resolve(repository.getDirectory(),
1342 					"info/attributes"); //$NON-NLS-1$
1343 			loadRulesFromFile(r, attributes);
1344 
1345 			return r.getRules().isEmpty() ? null : r;
1346 		}
1347 
1348 	}
1349 
1350 	private static void loadRulesFromFile(AttributesNode r, File attrs)
1351 			throws FileNotFoundException, IOException {
1352 		if (attrs.exists()) {
1353 			FileInputStream in = new FileInputStream(attrs);
1354 			try {
1355 				r.parse(in);
1356 			} finally {
1357 				in.close();
1358 			}
1359 		}
1360 	}
1361 
1362 	private static final class IteratorState {
1363 		/** Options used to process the working tree. */
1364 		final WorkingTreeOptions options;
1365 
1366 		/** File name character encoder. */
1367 		final CharsetEncoder nameEncoder;
1368 
1369 		/** Digest computer for {@link #contentId} computations. */
1370 		MessageDigest contentDigest;
1371 
1372 		/** Buffer used to perform {@link #contentId} computations. */
1373 		byte[] contentReadBuffer;
1374 
1375 		/** TreeWalk with a (supposedly) matching DirCacheIterator. */
1376 		TreeWalk walk;
1377 
1378 		/** Position of the matching {@link DirCacheIterator}. */
1379 		int dirCacheTree;
1380 
1381 		IteratorState(WorkingTreeOptions options) {
1382 			this.options = options;
1383 			this.nameEncoder = Constants.CHARSET.newEncoder();
1384 		}
1385 
1386 		void initializeDigestAndReadBuffer() {
1387 			if (contentDigest == null) {
1388 				contentDigest = Constants.newMessageDigest();
1389 				contentReadBuffer = new byte[BUFFER_SIZE];
1390 			}
1391 		}
1392 	}
1393 }