From bf936aa074777e6d9af999f929a774779c332025 Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Wed, 1 May 2019 07:36:14 +1000 Subject: [PATCH 01/35] HFS+: Support listing all attributes for a file. --- .../jnode/fs/hfsplus/HfsPlusDirectory.java | 2 +- .../org/jnode/fs/hfsplus/HfsPlusForkData.java | 4 +- .../jnode/fs/hfsplus/HfsUnicodeString.java | 2 +- .../fs/hfsplus/attributes/AttributeKey.java | 3 +- .../fs/hfsplus/attributes/Attributes.java | 77 ++++++++++++++---- .../org/jnode/fs/hfsplus/catalog/Catalog.java | 12 +-- .../org/jnode/fs/hfsplus/extent/Extent.java | 10 +-- .../fs/hfsplus/extent/ExtentLeafNode.java | 4 +- .../fs/hfsplus/tree/AbstractLeafNode.java | 4 +- .../fs/hfsplus/HfsPlusFileSystemTest.java | 57 ++++++++++++- .../jnode/test/fs/hfsplus/attributes.dd.gz | Bin 0 -> 106179 bytes 11 files changed, 136 insertions(+), 39 deletions(-) create mode 100644 fs/src/test/org/jnode/test/fs/hfsplus/attributes.dd.gz diff --git a/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusDirectory.java b/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusDirectory.java index 6a88e5f960..bccd78da85 100644 --- a/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusDirectory.java +++ b/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusDirectory.java @@ -218,7 +218,7 @@ private FSEntryTable readEntries() throws IOException { List pathList = new LinkedList(); HfsPlusFileSystem fs = getFileSystem(); if (fs.getVolumeHeader().getFolderCount() > 0) { - LeafRecord[] records; + List records; if ((folder.getFlags() & CatalogFile.FLAGS_HARDLINK_CHAIN) != 0) { records = fs.getCatalog().getRecords(getHardLinkFolder().getFolderId()); diff --git a/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusForkData.java b/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusForkData.java index 5052e2ecf3..d3005ab642 100755 --- a/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusForkData.java +++ b/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusForkData.java @@ -59,7 +59,7 @@ public class HfsPlusForkData { /** * Overflow extents. */ - private ExtentDescriptor[] overflowExtents; + private List overflowExtents; /** * The catalog node ID that owns this fork. @@ -178,7 +178,7 @@ public Collection getAllExtents(HfsPlusFileSystem fileSystem) // Add the overflow extents if the exist if (overflowExtents != null) { - Collections.addAll(allExtents, overflowExtents); + allExtents.addAll(overflowExtents); } return allExtents; diff --git a/fs/src/fs/org/jnode/fs/hfsplus/HfsUnicodeString.java b/fs/src/fs/org/jnode/fs/hfsplus/HfsUnicodeString.java index 5f808f49eb..98d6731807 100755 --- a/fs/src/fs/org/jnode/fs/hfsplus/HfsUnicodeString.java +++ b/fs/src/fs/org/jnode/fs/hfsplus/HfsUnicodeString.java @@ -55,7 +55,7 @@ public HfsUnicodeString(final byte[] src, final int offset) { */ public HfsUnicodeString(String string) { this.string = string; - this.length = string.length(); + this.length = string == null ? 
0 : string.length(); } public final int getLength() { diff --git a/fs/src/fs/org/jnode/fs/hfsplus/attributes/AttributeKey.java b/fs/src/fs/org/jnode/fs/hfsplus/attributes/AttributeKey.java index f885b78580..432e23bdd0 100644 --- a/fs/src/fs/org/jnode/fs/hfsplus/attributes/AttributeKey.java +++ b/fs/src/fs/org/jnode/fs/hfsplus/attributes/AttributeKey.java @@ -103,7 +103,8 @@ public boolean equals(Object obj) { return fileId.getId() == otherKey.fileId.getId() && - attributeName.getUnicodeString().equals(otherKey.getAttributeName().getUnicodeString()); + (attributeName.getUnicodeString() == null || otherKey.getAttributeName().getUnicodeString() == null || + attributeName.getUnicodeString().equals(otherKey.getAttributeName().getUnicodeString())); } @Override diff --git a/fs/src/fs/org/jnode/fs/hfsplus/attributes/Attributes.java b/fs/src/fs/org/jnode/fs/hfsplus/attributes/Attributes.java index 312a86783e..9a5f0c4f78 100644 --- a/fs/src/fs/org/jnode/fs/hfsplus/attributes/Attributes.java +++ b/fs/src/fs/org/jnode/fs/hfsplus/attributes/Attributes.java @@ -22,6 +22,10 @@ import java.io.IOException; import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedList; +import java.util.List; import org.apache.log4j.Logger; import org.jnode.fs.hfsplus.HfsPlusFileSystem; import org.jnode.fs.hfsplus.HfsPlusForkData; @@ -89,12 +93,35 @@ public Attributes(HfsPlusFileSystem fs) throws IOException { } } + /** + * Gets all attributes for the given file. + * + * @param fileId the ID of the file to look up the attributes. + * @return the list of attributes. + * @throws IOException if an error occurs. + */ + public List getAllAttributes(CatalogNodeId fileId) throws IOException { + if (bthr == null) { + return null; + } + + List attributes = new ArrayList(); + + for (LeafRecord record : getAttributeLeafRecords(fileId, null, bthr.getRootNode())) { + if (record != null) { + attributes.add(((AttributeKey) record.getKey()).getAttributeName().getUnicodeString()); + } + } + + return attributes; + } + /** * Looks up an attribute in the attributes file. * - * @param fileId the ID of the file to look up the attribute on. + * @param fileId the ID of the file to look up the attribute on. * @param attributeName the name of the attribute to lookup. - * @return the leaf record, or possibly {code null}. + * @return the attribute data, or possibly {code null}. * @throws IOException if an error occurs. */ public AttributeData getAttribute(CatalogNodeId fileId, String attributeName) throws IOException { @@ -102,25 +129,34 @@ public AttributeData getAttribute(CatalogNodeId fileId, String attributeName) th return null; } - return getAttribute(fileId, attributeName, bthr.getRootNode()); + List records = getAttributeLeafRecords(fileId, attributeName, bthr.getRootNode()); + if (records.isEmpty()) { + return null; + } + + if (records.size() > 1) { + log.warn("Expected a single attribute but got: " + records); + } + + return toAttributeData(fileId, records.get(0)); } /** * Looks up an attribute in the attributes file. * - * @param fileId the ID of the file to look up the attribute on. + * @param fileId the ID of the file to look up the attribute on. * @param attributeName the name of the attribute to lookup. - * @param nodeNumber the index of node where the search begin. - * @return the leaf record, or possibly {code null}. + * @param nodeNumber the index of node where the search begin. + * @return the attribute data, or possibly {code null}. * @throws IOException if an error occurs. 
*/ - public AttributeData getAttribute(CatalogNodeId fileId, String attributeName, long nodeNumber) throws IOException { + public List getAttributeLeafRecords(CatalogNodeId fileId, String attributeName, long nodeNumber) + throws IOException { if (attributesFile.getExtent(0).isEmpty()) { // No attributes - return null; + return Collections.emptyList(); } - LeafRecord leafRecord = null; int nodeSize = bthr.getNodeSize(); ByteBuffer nodeData = ByteBuffer.allocate(nodeSize); attributesFile.read(fs, (nodeNumber * nodeSize), nodeData); @@ -132,21 +168,28 @@ public AttributeData getAttribute(CatalogNodeId fileId, String attributeName, lo AttributeIndexNode node = new AttributeIndexNode(data, nodeSize); IndexRecord[] records = node.findAll(new AttributeKey(fileId, attributeName)); + List leafRecords = new LinkedList(); for (IndexRecord indexRecord : records) { - AttributeData attributeData = getAttribute(fileId, attributeName, indexRecord.getIndex()); - if (attributeData != null) { - return attributeData; - } + leafRecords.addAll(getAttributeLeafRecords(fileId, attributeName, indexRecord.getIndex())); } + return leafRecords; } else if (nodeDescriptor.isLeafNode()) { AttributeLeafNode node = new AttributeLeafNode(data, nodeSize); - leafRecord = node.find(new AttributeKey(fileId, attributeName)); + return node.findAll(new AttributeKey(fileId, attributeName)); + } else { + return Collections.emptyList(); } + } - if (leafRecord == null) { - return null; - } + /** + * Converts a leaf record into an attribute data. + * + * @param fileId the file ID. + * @param leafRecord the leaf record. + * @return the attribute data, or {@code null} if the record cannot be converted. + */ + public AttributeData toAttributeData(CatalogNodeId fileId, LeafRecord leafRecord) { long type = BigEndian.getUInt32(leafRecord.getData(), 0); diff --git a/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java b/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java index 9006b528c7..6efbc744ae 100755 --- a/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java +++ b/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java @@ -246,7 +246,7 @@ public final LeafRecord getRecord(final CatalogNodeId parentID) throws IOExcepti * @return Array of LeafRecord * @throws IOException */ - public final LeafRecord[] getRecords(final CatalogNodeId parentID) throws IOException { + public final List getRecords(final CatalogNodeId parentID) throws IOException { return getRecords(parentID, getBTHeaderRecord().getRootNode()); } @@ -259,7 +259,7 @@ public final LeafRecord[] getRecords(final CatalogNodeId parentID) throws IOExce * @return Array of LeafRecord * @throws IOException */ - public final LeafRecord[] getRecords(final CatalogNodeId parentID, final long nodeNumber) + public final List getRecords(final CatalogNodeId parentID, final long nodeNumber) throws IOException { try { long currentNodeNumber = nodeNumber; @@ -273,16 +273,16 @@ public final LeafRecord[] getRecords(final CatalogNodeId parentID, final long no IndexRecord[] records = node.findAll(new CatalogKey(parentID)); List lfList = new LinkedList(); for (IndexRecord rec : records) { - LeafRecord[] lfr = getRecords(parentID, rec.getIndex()); - Collections.addAll(lfList, lfr); + List lfr = getRecords(parentID, rec.getIndex()); + lfList.addAll(lfr); } - return lfList.toArray(new LeafRecord[lfList.size()]); + return lfList; } else if (nd.isLeafNode()) { CatalogLeafNode node = new CatalogLeafNode(nodeData.array(), nodeSize); return node.findAll(new CatalogKey(parentID)); } else { 
log.info(String.format("Node %d wasn't a leaf or index: %s\n%s", nodeNumber, nd, NumberUtils.hex(datas))); - return new LeafRecord[0]; + return Collections.emptyList(); } } catch (Exception e) { diff --git a/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java b/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java index 074538cb40..5cde603d87 100644 --- a/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java +++ b/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java @@ -102,7 +102,7 @@ public Extent(HfsPlusFileSystem fs) throws IOException { * @return the overflow extents. * @throws IOException if an error occurs. */ - public final ExtentDescriptor[] getOverflowExtents(final ExtentKey key) throws IOException { + public final List getOverflowExtents(final ExtentKey key) throws IOException { return getOverflowExtents(key, bthr.getRootNode()); } @@ -114,7 +114,7 @@ public final ExtentDescriptor[] getOverflowExtents(final ExtentKey key) throws I * @return the overflow extents. * @throws IOException if an error occurs. */ - public final ExtentDescriptor[] getOverflowExtents(final ExtentKey key, long nodeNumber) throws IOException { + public final List getOverflowExtents(final ExtentKey key, long nodeNumber) throws IOException { try { long currentNodeNumber = nodeNumber; int nodeSize = bthr.getNodeSize(); @@ -129,10 +129,10 @@ public final ExtentDescriptor[] getOverflowExtents(final ExtentKey key, long nod IndexRecord[] records = extentNode.findAll(key); List overflowExtents = new LinkedList(); for (IndexRecord record : records) { - Collections.addAll(overflowExtents, getOverflowExtents(key, record.getIndex())); + overflowExtents.addAll(getOverflowExtents(key, record.getIndex())); } - return overflowExtents.toArray(new ExtentDescriptor[overflowExtents.size()]); + return overflowExtents; } else if (nd.isLeafNode()) { ExtentLeafNode node = new ExtentLeafNode(nodeData.array(), nodeSize); @@ -140,7 +140,7 @@ public final ExtentDescriptor[] getOverflowExtents(final ExtentKey key, long nod } else { log.info(String.format("Node %d wasn't a leaf or index: %s\n%s", nodeNumber, nd, NumberUtils.hex(data))); - return new ExtentDescriptor[0]; + return Collections.emptyList(); } } catch (Exception e) { diff --git a/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentLeafNode.java b/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentLeafNode.java index 1df4f170b3..b41c16276f 100644 --- a/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentLeafNode.java +++ b/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentLeafNode.java @@ -69,7 +69,7 @@ protected LeafRecord createRecord(Key key, byte[] nodeData, int offset, int reco * @param key the key to match. * @return the overflow extents. 
*/ - public ExtentDescriptor[] getOverflowExtents(ExtentKey key) { + public List getOverflowExtents(ExtentKey key) { List overflowExtents = new LinkedList(); for (LeafRecord record : findAll(key)) { @@ -80,6 +80,6 @@ public ExtentDescriptor[] getOverflowExtents(ExtentKey key) { } } - return overflowExtents.toArray(new ExtentDescriptor[overflowExtents.size()]); + return overflowExtents; } } diff --git a/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractLeafNode.java b/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractLeafNode.java index 0b48ca6902..0476c459b1 100644 --- a/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractLeafNode.java +++ b/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractLeafNode.java @@ -53,7 +53,7 @@ protected LeafRecord createRecord(Key key, byte[] nodeData, int offset, int reco return new LeafRecord(key, nodeData, offset, recordSize); } - public final LeafRecord[] findAll(K key) { + public final List findAll(K key) { List list = new LinkedList(); for (LeafRecord record : records) { log.debug("Record: " + record.toString() + " Key: " + key); @@ -62,7 +62,7 @@ public final LeafRecord[] findAll(K key) { list.add(record); } } - return list.toArray(new LeafRecord[list.size()]); + return list; } } diff --git a/fs/src/test/org/jnode/test/fs/hfsplus/HfsPlusFileSystemTest.java b/fs/src/test/org/jnode/test/fs/hfsplus/HfsPlusFileSystemTest.java index 72c95329ff..5c352d0d61 100644 --- a/fs/src/test/org/jnode/test/fs/hfsplus/HfsPlusFileSystemTest.java +++ b/fs/src/test/org/jnode/test/fs/hfsplus/HfsPlusFileSystemTest.java @@ -22,16 +22,21 @@ import java.io.File; import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; import org.jnode.driver.Device; import org.jnode.driver.block.FileDevice; +import org.jnode.fs.FSDirectory; import org.jnode.fs.hfsplus.HFSPlusParams; +import org.jnode.fs.hfsplus.HfsPlusEntry; +import org.jnode.fs.hfsplus.HfsPlusFile; import org.jnode.fs.hfsplus.HfsPlusFileSystem; import org.jnode.fs.hfsplus.HfsPlusFileSystemType; import org.jnode.fs.hfsplus.SuperBlock; +import org.jnode.fs.hfsplus.attributes.AttributeData; +import org.jnode.fs.service.FileSystemService; import org.jnode.test.fs.DataStructureAsserts; -import org.jnode.fs.FSDirectory; import org.jnode.test.fs.FileSystemTestUtils; -import org.jnode.fs.service.FileSystemService; import org.jnode.test.support.TestUtils; import org.junit.Assert; import org.junit.Before; @@ -172,6 +177,54 @@ public void testDiskWithIncorrectCompressedFileOnFile() throws Exception { DataStructureAsserts.assertStructure(fs, expectedStructure); } + @Test + public void testDiskAttributes() throws Exception { + device = new FileDevice(FileSystemTestUtils.getTestFile("test/fs/hfsplus/attributes.dd"), "r"); + HfsPlusFileSystemType type = fss.getFileSystemType(HfsPlusFileSystemType.ID); + HfsPlusFileSystem fs = type.create(device, true); + + String expectedStructure = + "type: HFS+ vol:attributes total:99983360 free:97062912\n" + + " /; \n" + + " .DS_Store; 6148; 7e2a612ff5e605e73b2078557c9aa5c5\n" + + " .fseventsd; \n" + + " 0000000000219f17; 135; b518731969693d10590124b27c8b50cf\n" + + " 0000000000219f18; 70; e899edc609e2d4bac697bc207b7ee5ad\n" + + " 0000000000219f19; 38; ebb12e59f60c213f883d07e17ff3f2d5\n" + + " 0000000000219f1a; 71; 4732d018d8011a207fce3f127a2e6e5d\n" + + " 0000000000219f1b; 38; ebb12e59f60c213f883d07e17ff3f2d5\n" + + " 0000000000219f1c; 71; b619c8d189af0a632e91dec707774d44\n" + + " fseventsd-uuid; 36; 8f91e1c548e1eed2edb44694d8a5c5db\n" + + " .HFS+ Private Directory Data\r; \n" + + " .journal; 524288; 
3e87e55c8d321c611f18197770a523e7\n" + + " .journal_info_block; 4096; 469270564228a832e83d2ad16e6d8edc\n" + + " isoparser-1.1.22.pom; 6652; 6c04a5fb9540f1b558dc9465ab3a7ad4\n" + + " \u0000\u0000\u0000\u0000HFS+ Private Data; \n"; + + DataStructureAsserts.assertStructure(fs, expectedStructure); + + HfsPlusEntry entry = (HfsPlusEntry) fs.getRootEntry().getDirectory().getEntry("isoparser-1.1.22.pom"); + HfsPlusFile file = (HfsPlusFile) entry.getFile(); + + List attributes = fs.getAttributes().getAllAttributes(file.getCatalogFile().getFileId()); + AttributeData whereFromAttr = fs.getAttributes().getAttribute(file.getCatalogFile().getFileId(), + "com.apple.metadata:kMDItemWhereFroms"); + ByteBuffer whereFromBuffer = ByteBuffer.allocate(6); + whereFromAttr.read(fs, 0, whereFromBuffer); + String whereFrom = new String(whereFromBuffer.array(), "UTF-8"); + + AttributeData quarantineAttr = fs.getAttributes().getAttribute(file.getCatalogFile().getFileId(), + "com.apple.quarantine"); + ByteBuffer quarantineBuffer = ByteBuffer.allocate((int) quarantineAttr.getSize()); + quarantineAttr.read(fs, 0, quarantineBuffer); + String quarantine = new String(quarantineBuffer.array(), "UTF-8"); + + Assert.assertEquals("[com.apple.metadata:kMDItemWhereFroms, com.apple.quarantine]", attributes.toString()); + Assert.assertEquals(144, whereFromAttr.getSize()); + Assert.assertEquals("bplist", whereFrom); + Assert.assertEquals("0081;5cc8ba96;Chrome;3792716B-4DE2-4A43-BFA3-516714DD5764", quarantine); + } + @Test public void testDiskCompressedHardlinks() throws Exception { device = new FileDevice(FileSystemTestUtils.getTestFile("test/fs/hfsplus/compressed-hardlinks.dd"), "r"); diff --git a/fs/src/test/org/jnode/test/fs/hfsplus/attributes.dd.gz b/fs/src/test/org/jnode/test/fs/hfsplus/attributes.dd.gz new file mode 100644 index 0000000000000000000000000000000000000000..917efa1e1a4276fb6b3e0dcc8e8f35acf0e0636c GIT binary patch literal 106179 zcmeI)cT`hZyD)GZW@Ho*D<}xjK@^lKy=DeR5K)3kktQO&i*!hcGYSk!5fD&X1{IJR zdgxIQ5NT3E3nUZ)A%sp6NI4gM=l#C>uJx_E)?Mp<_pUqc{STy^>^#4H&U4N_EBk>x z{_#iC`uEmc+;)C`_gwGz`#Ji`I5==Ej`Mlg455y#tY=DujUE#3En9r2JT4@9!Q$z! zy$Oz3VeyVf3A4H-^{R&Haz`#&z*u_?3}k-)KDzJB`0jyX+@DrBJKcC?J+pydemfPh z;9%6}nxeu@j1&z+{*Y*GzBF^vAq_NhQlo`o|R;h1!!V(?NE!ie=t=ja{FqdK9T+u8z6 z3V2Z+_dto3-s`oGn0H2>VT(=NHOQY)ls4IP?NCi&_n!VvOE(#ugr~QIrZP*mKBy)R#cv=DHu~vb|uPAGno5Kvm{X#)PDJ99J5ssdidukln?_byKtlHKDtVUJ}!yG*f0gbi}d9-sOFhX|^0=`#%!#^tI<9 zN8%|iwF3v5NCz4rY8$-Xt=pFz#bn;VKZ~n~ELn~VqesOFw8FPzLc{QDUIru>8nvqt{y8jey`=-wGNcc@>Eul(z^W-pZD@KaqdcZ?9Z6It==>5$9b#T zC>tBomi3#ewoqi4WJa^*ky3R(orOLszN8 z*(ozb3pMr~m9ND$sr4Y$M;Lp?m2>n;HkSF0@&04!%fo5>GB4Kl22Ng?QBtXV){@+! 
zdnzxEFNL6QJEkEv>5@JuNT(Qg@-(&UNEf9AdUrm%EShX)sGy-ao;Gv0(q}g|`%^&v zy=r?WWLBU*ZGUkidPtVVpNhdwrz>$Y(l=_>;s!U9rj* z0lIc01!`-%xF(YOSEeJ-gDY2>x%p;Hdy+6xR6g18a=WWDBiCt|0C9m(cgJe2r-{&Z zV$e)GZs_!*Eq)zZ#$*J|a%aGITE}f_%UyX*!NE>4WEF5xzNfukEcP5*rF}Q`fzMaiFrY1}#!@Wzh$0G{6VKUYf-*g;`4e#;7VD=iS)j5j3 zU%x)u?JwS7l_+$Dc8j*Z`nt5ZulGIzy#tBK;Cgd4cm8@mMw#t3%^P!8XxWCHQh=}8 zHW!vDsx{WOT{@RxCCAYZb-c;q-;t~KF`YtRvCYm4!9SVt!(4uCZS)=2?G{Q=;I8{g zjgoPU~tz5N9!Tvk~*dm!aQj zUKhvtC8(}l}vOr`Jpo%8cy&whT_bv$sZOV zpIQ?m{15Dl%l&h6t!T!}l%9uo>LamiHu|IaGVRl0?lmL5_A{h;ktXcW{nN&&(1k;Yu{vNuZnE9Cj`M}DZ|HI!l_O}*)gtbnQjgLh)H?PXq zAT}?B;TK*`Ixubz3zAYuMD-Jy``h~JPVqEz7|*2O2lwLc>&u=H6;{`lukj>$Cw)Ls zF`CM1YP8RTMcfa>5~ta^HqGo@9{pOqHq3bHet&O~V#e0NpUUsKJW3*r+hml8r+rjXR!5f9cuEjG^7>jGyi^36vvODRHD*x+%>LXy z4J~FrQ$MjTl4q_yf1FsQRmLujB%Hii$KvyX!b!$d@p9&N?r<8@WbV|CfiOhY{+~hX3f0tNN%3ao59FJ#BTmeap7) zLPYNi(UH@WcU6=0Xld`6E*^VbW#i9sPR@&8K!}W}UxxD;u_{LJI`9YhvJ-XzvNtuJJ7KFe0^^Jrw2uL z$ybwRmr5TNEw>xiUO3InKhZ(_XGXpJ;;_*}u~LCt9kZ7rTaBaX*%kUd#>3{=aG?h8 z)~{(*d29J9E*LR3`&$Wo=|NYP!ig*Nfq6eHU+XH$M`}ctdrNdX+s{%!dW}|Hbh*L% z{bjSO<;u2U$m*MA4}AI?cnPilk2AH_peSzDEn(L7t%;@d*;f6CSdoljJ z=Xun;7O5&J(Y*#JkLp70?Y_@R`qLGSL4JHK$dl<(1AVo@^w#LSPj;@ACVYt&ixz&4 zpZ6L>so%-C@-Fq5p*_;*)v6)xA$4`+w0|4Em|=bBXvK-fXR^W0nwf*Ml4?A~U1xf< zhdOcXLDpIr{%+U1{zJsApVg>#f#MnNRpGA`13RN-{WhHSLpD`y@hd*0G))KToM+4wVS2B>t-1O={^^`g&7gE;GWKx`p+_uyyN0Pg z(8JM0EaVmpuKBO}o(eW~pAB~QjMD#*$JnoR{dJm#6C_yrsy2@$C{OpG{hZl0Gl;6$ zd2jrQ-`aaI8NU>p7`BfI>uw?y$k6M*($!g`vQIYQW!77NAd;&?_ZVuHd2H^rcE%}H zG>~ZCqp?*~Pl`*;FfwaCDMv7krc$ZgNMdZZ^8DX39j|OlIr9n&x=tH0{m|d~;clqJ z*}3@BdZ@LAJZj8=oouG?iH};jbNKAE^XGj$G*}4w5_S< zAB9XuE~Yfbg|Bp`^pnfZylP& zR!{vieOkV@&cMiopZ@*%YXjcHO0IS<&PaRsZ2DFk!|CtTgDakuwLi|s_-;+;EMusD z?bAPJg)KGg-@Ji@{Dv$WWk?nSMS<5djtU8Gb19~rC;ud}wmEN0Y@Xl{0yYUrh(wTIQ~o~50eTuNkw zFYh^@hoCvw5*9@2kL2i7Vbz^*yNWj&wac-$c+I`IH4j(F$D9iFZ%8v|hLcO~;#cDM z1q}mQ!X{NBwn87s_AHjts$msi-Z3s zZBCJnM=d|=RrOPijcaB5l^GjSX2*pUauOo534NXW5p*ZD!dK+oiFJ>Y`-s&GN}aTAp?KC&jee$-)vFWeD3yYG2n!ON23t)7*~O!2U9Z}opdON}* zwEA@hJ^!xGTdf&U!>_bo4TomQhzlUnJ63iZisx zwP!Sr{9~Z+TbazK81fM)|GaDPzhnHtw*&(JX#w1mKGF^Ss>qL`JZZYgDOR(XN;O1^ zq~O~1ve@2cpU=6GZ4t4EE4S7U`D!^ZizzG5qe`1~Ti7^HV)*r7)8>WNoY~~TaQ$Hq+|S4GgHuj01yBIK;Sf00e*l5C8%|00;m9AOHk_z~5cqXuy(+UTlFJ7h`N>*m+u2@Q}`T2E)fvBU*9p z8b(tOr|+EgxpbLhU2{OL-wUQ~Q?uHxdwkJtD9V3L(^ z|K#uak1T)Li+pWrdK9*u=XQ|IHKl8{?GzB;P4im5x8JAxCp<f00e*l5cu{4t|=b4nAYSb=Z3|;ui}Y#`L@fBU0I?jl58FZf0W5f zkrp=X7VO|xsr^E;a!3_A+lzQ{xVPIQa3zXHZ zyD;bPoE295VbFm6)a)Nm&RDap0|XZ4ibmA)Fj@uK|F7K_&yBy$93xy@I4{&KOK^XqWc{9oIeuE$y0g{lih2{dEe zhAs2c%rTs|z1aFUwZXnsYSoJ$h0Faf1-{k)(shD&A9jDotl#pMn`9KqM}>B~Li(H@ z?&9Iz_>zhjdIS6N5>&D6Ej1FaqMJ*@SH5@8ne&QJSmKjRbhKsM*w-f1i%ebXv$n{@ zUL-%s38EfpS_;}O`@}=}!p`9sgD_P=NqBOMf2v1@S=WI`C zakuz!FtHP5TabG{n>yJ=-iuDPt5-PQYFH%K7gZil8Z6o8H2vXm`OJ%ezi}HQYtB;l|El9FRh-uugcvf|$&=-ix(<%`s8OqF6I?w%`3e%Te` z>2;>j30YLHp1dG#P@m_U;M=q$kr&5nKF}6_H*g`VMX_nnVc7d2bHlheS>|)7O0AJN zI>SQR-#aKoh3BQOYxgsxjb>u-`0WiViw{E&IeuYPr+eP46W=!Uq=s2Ugl+PS%$6n9ISI@u2v$jn1f(!q9p51(F5u?q>!#`sz(M=QrJRT zi)*D%NWEdR!wO!%Nh>1akbxb7Ro0bTkaC77GN43t#cyL??!xd$%y8zeLL~?>RY)iE z9NV*Vn7*l*9KR;7va`|+G$8PI76{rpR_*8$;(dfKvMcL#gXq{Ge8IS?Q0|!(|LHlG zD-lJot1<)mK9q~H^3J+U($TYVZM=zSgplm#nOIaT56Pkkp%P2(yIF{6TW%M*glp?4swvUhh9LW zprE0s=;y#lh=euLwuJ)(Y1D?L`0?;a?`pfAr6!I$7X2yy?na+&J)4SeWHh#AvWCyX zH|Nl+G2T_!fZHeQ7I~9Ro5__0lf%E3-*O*t?$^4Hu^JKc`kW+ewQu6YQ%%_3k?q@6 z+JsyD4sAT;o3qxblO8tj2Y3F>qlKJ&tn#c##hH@jp_N|5`_~(t`HOz@Sd)J;qxq!E zcy~m(|0ye&=1aqfl$fhj!re6vyndSnW2h;YY*j@Oy`s#Z+4y>QUc2Cu#FGmmKRVc@ zl~=bHFcPeTcDCyr%pG;--dWueBpFRY-f1*bHFIqx=&_~iXae-bb0!NnV&>gYH0e5$ 
z51A^)6ELnBK%(`AJ59Sd=9~W5OIfuF=&H2jRY|(MK4YI0Qd)1EnObl}xND@3(GZwI z6QrNcHLxf7{Zt%a4}*u*2eC#KNsU^ev}BS$-RAJANo4RV%Q&lcVj|x;;TqkT| z^6s6NCagP|-IUf7lDp>t#cS!)jv(Ik*EMAy+52)mX8tmjL%SH4?R-pn^I>+WQ(8<* zxJGVUj?j9`h@O3w*`CeolHIb4wh}7SmwhV+qT=;rRjdfl*G$eYnosMUy)2^`V=CKY z-nx`tjg!l~c!|4Dcf{rx(Q3?U4Ch_pCuRF={<0>aQ0DZ}3jwd?tn$oL33)miDLOUU zHman;s;1d1VvB=r;in(({ECc;Y&Qbtbib#08|bc>UxQ~_yYji_{OU($6;34h89Ma#=Sg!VIYDF?9 zgo{0RHsOIGRfX2sGe1!FL}^@IxtTq9^T(pH#6saRlh%cen`WVt-B)D8%@fvRuA1n$ zdzf}?PGu=}zYSPTv~L%WP9E)5&tA|%d%@3ND^fSjYfJJ9E@L~IhDxABkDL)wD4L#e z%D!u^VP^O0i9fxm@zbu4Rzo#T7|p7CJx`5tvg?-WH&?U{N9yY=+@bvHo~KknIb&j7 zu3qzu_R7gI>);~{|AOAWp$x@TRX<95OSL8T-Rk=*<(7GfAtQmMjAs%K$0L*nqek*D ztjz#=S#4Bq?bSNd%&to8qJef_aOY|Lo)W=X;jpuq-u4zZ&yYOF&ZmOzML6XTm8Bb# zw+h5@-jZ^UhYH`tw9J~&1!jM*J#|V-v!SV3TCuIv%vXNkU0;|?pI_zi;(}gHH35~} z1e%qsKLTSCHs_*}+gDbY{9UbSy|Gm~-%QWuTg8(Y&pGh}qk8heI^9Wv(^JT4mzQ+* zla}rV{tv4<61~0gG{bz<)KS@Jsgb_A$y$=2ZfnqvRbKT*y&7r?1UXM&WAj{kN zd!isoC&=dEn(TO+mB{<5q!)%*=~VJ zLS9Njl|f0VHfD7^kC@v8Z{}owu3yarsbkN0t@e!!8;@l3fv~=5K{tX!y|4M&&+g=b zncuV|%bZg6ta~;3L`msI6{BtCROE4Hig^8ZA;P|x){KJk00qyv#_qvdY&F`^v++-l zIuYM-HTwKnS&6wq1;wF^)k=2SM@zn`6SWP#GxG}7buA6fkn2-{c`2>OgA1>^F4veo zcfr-#%BYm9^gNo#{IyF?)3wJk+4kzv#}5>130xDUki-n4d|&)WhS~d#AepQ5mini) zDt&7qR@rh}+^f%3O1r(3zmt@X`7vSMUY&cs(VWQCnr?j(DBfwRH?`n_dey&?%qO<~ zAet7H-0-LGZ|?QSMq^L`b^+9!%rjOYYSv_kP!jTBS*vQwSlRj(``&trqF}z0l!I-C z+VKUoamwK$wElwVWd#Z`rB4F2eM19Dh$Z4}H-i~c-Xhc|nez6MWQp3)@%FYyLmv25 z9gIP!d}nN0%)-WpUAjHSS%>owy6;tf6YJ7O(B+2rmnW1OPuUYbQWAdgDZ?vnsmYsK zMW+Pjd%1Xb&Fp=$kD>b_-qK##WNb0NND)nF*s~q|vmtxUM&B+W>aB@%r=ew!ZS2M` zgP9ky+nY2ht|4+{WZj~4M24K*#b&}6wl8mdv@XBqU0>~}DsStK=2KM+n_IB&^pZC+ z87+?&EdQRDcc@rwT5)i@tSel)OUc}?N~wx6uIc(U^IBZwmG8;gNT z%v+wY(p4t$@Skq4*Q0u~fZ=G4&eb<9X&rQmtY>1!G}Tv-;=f&t``v zOo!ueJSukMYD6L1bRi$>#dJ7(_DH5|TmFrt>laiDlFI56&gUr^Rl)BqpP#|Q#kp%7 zvqfVlGvPtSy&<6xrCrY!C3E|y^9ETc@xvp*Q%W+oCFxTpYF>m7iPYs>yJI7JP3!zL zw+aU&Z`Lm!GLv3$8K#J+%b>;|#Sxg+P2}~Sy!5SD> zAL35iLiVd8*(%!UD8zXu&$W0*!CpzX~XV!z_}~}J^D$1e~#{}JE3Mls!T^G zDYo4ik)Mi0b2T1Ws{5#19M<+`>Or*c-?Rp^mmQ%?v?df0%_8MyhOlaD9;@&{OxzMi zTPcVjj?<>|^0YRxTCuk5`11uV#9}n7M^vj~I^`Dq(_u7g^{n=GK^%Iq9ie#>9#U}& zT0G3J$(a;GZ@+_uvEmw6J4r+6slhny&CgjK+UpT`wUx~#K{#r{Fp#y*@SX0_3T>O? 
zRe}4sv^i`0O=g%!oIr2)U+rbuZsMmAUM^qO*1hTY*^7gpZd)!&*4|(Uo`E4(FrF`do9VIfh<}N}&T0#|NS$X2%0e-LV-;C5U#2{n zw#&;oN0`U}_`v;=3|CQW9v+8}nyAXg~l600AHX1b_e#00KY&2mk>f00e-*-(8?7wt;ebU&O9s zUh!E`hWc$Xu}Zgpuq((8;wIgH5cT-&yI<#S)!xSM;@h{t)jMa9igSfBlQaK^Ze4(G zfdCKy0zd!=00AHX1c1PQyTAlbPX7$`b*fGBOCJSUY-;DKpfR&6p|O3d*az0%LLBc{m=E4OW*G7SVL!-Zl!46-}m`oUh*k>0yq}r1Ud8X|NhGd-)Gqh|MGWVHxA+h0zd!=00AHX1b_e#00KY&2mk>f z00d0KL5X#+7XSeu00e*l5C8%|00;m9AOHk_01)`51dh@se*XG{gZEJ7L6dJPbPy~M z00KY&2mk>f00e*l5C8%|00;nqZ&P6S!I$ddTwEuiAwA=7lK?~w1b_e#00KY&2mk>f z00e*l5C8%|;F}V#0|f)VsqqHE0s$ZZ1b_e#00KY&2mk>f00e*l5C8=Oz&-#3fB+Bx z0zd!=00AHX1b_e#00KbZ+Y$f;1HP^C2GIfmAOHk_01yBIKmZ5;0U!VbfB+CM1qB1Z zP5=ae01yBIKmZ5;0U!VbfB+Bx0zlx~5QzOC^F(Ef00e*l5C8%|;M)=a1p~gV@dnWX0U!VbfB+Bx0zd!= z00AHX1b_e#00jfUJ^%!O01yBIKmZ5;0U!VbfB+Bx0zlv!695GRzOexZ;Q|3500e*l z5C8%|00;m9AOHk_z&9@d3I=@hy8r+H0zd!=00AHX1b_e#00KY&2mpa^OaK%N_{Iht zgbM_K01yBIKmZ5;0U!VbfB+Bx0zhE+0rxJhopur$(lZ7eKmZ5;0U!VbfB+Bx0zd!= z00AHX1c1QbUH}vf_}dRL2oMMW0U!VbfB+Bx0zd!=00AHX1c1Q52!MhC|H1(t00AHX z1b_e#00KY&2mk>f00e*l5ct~*B++J%DP^7fk$YuP>}pBy?Cw177b<^bILrNBS&9>A zxF2J)6#vtN+O6k56;-*~ygVd)apkuz`dhhu3QXBAYYRXF0zd!=00AHX1b_e#00KY& z2mk>f00jP*3DA2(qas~3^>fGF=1N~H_X$ literal 0 HcmV?d00001 From f701ecd17b6d3d217371c4d4d755f49f4f9c8a0a Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Tue, 28 May 2019 08:06:57 +1000 Subject: [PATCH 02/35] EXT4: Fix an out-of-bounds exception when the inode data was only 128 bytes, but EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE was set. --- fs/src/fs/org/jnode/fs/ext2/INode.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/src/fs/org/jnode/fs/ext2/INode.java b/fs/src/fs/org/jnode/fs/ext2/INode.java index 690f939f6b..e61f29787a 100644 --- a/fs/src/fs/org/jnode/fs/ext2/INode.java +++ b/fs/src/fs/org/jnode/fs/ext2/INode.java @@ -166,7 +166,7 @@ public Ext2FileSystem getExt2FileSystem() { * @return the extra size. */ public int getExtraISize() { - if (getExt2FileSystem().hasROFeature(Ext2Constants.EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE)) { + if (getExt2FileSystem().hasROFeature(Ext2Constants.EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE) && data.length > 0x82) { return LittleEndian.getInt16(data, 0x80); } From 30d0cda5cea44cb0cc5b7c0f34c85877921fa3f0 Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Fri, 31 May 2019 16:02:55 +1000 Subject: [PATCH 03/35] NTFS: fix a stack overflow error getting the file record's file name. 
--- fs/src/fs/org/jnode/fs/ntfs/FileRecord.java | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java index dcc5d4b05d..bbf2b08f6c 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java +++ b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java @@ -649,11 +649,15 @@ public void readData(int attributeType, String streamName, long fileOffset, byte @Override public String toString() { - if (isInUse()) { - return String.format("FileRecord [%d fileName='%s']", referenceNumber, getFileName()); - } else { - return String.format("FileRecord [%d unused]", referenceNumber); + try { + if (isInUse()) { + return String.format("FileRecord [%d fileName='%s']", referenceNumber, getFileName()); + } + } catch (Exception e) { + log.debug("Error getting file name for file record: " + referenceNumber, e); } + + return String.format("FileRecord [%d unused]", referenceNumber); } /** From d1626fcd4fa3310d0683627c969969bfabdff972 Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Mon, 3 Jun 2019 10:10:31 +1000 Subject: [PATCH 04/35] NTFS: fix another stack overflow error getting the file record's file name. --- fs/src/fs/org/jnode/fs/ntfs/FileRecord.java | 30 ++++++++++++--------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java index bbf2b08f6c..abe5ceca76 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java +++ b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java @@ -129,20 +129,22 @@ public FileRecord(NTFSVolume volume, int bytesPerSector, int clusterSize, boolea * @throws IOException if an error occurs. */ public void checkIfValid() throws IOException { - // check for the magic number to see if we have a filerecord + // check for the magic number to see if we have a file record if (getMagic() != Magic.FILE) { - log.debug("Invalid magic number found for FILE record: " + getMagic() + " -- dumping buffer"); - for (int off = 0; off < getBuffer().length; off += 32) { - StringBuilder builder = new StringBuilder(); - for (int i = off; i < off + 32 && i < getBuffer().length; i++) { - String hex = Integer.toHexString(getBuffer()[i]); - while (hex.length() < 2) { - hex = '0' + hex; - } + if (log.isDebugEnabled()) { + log.debug("Invalid magic number found for FILE record: " + getMagic() + " -- dumping buffer"); + for (int off = 0; off < getBuffer().length; off += 32) { + StringBuilder builder = new StringBuilder(); + for (int i = off; i < off + 32 && i < getBuffer().length; i++) { + String hex = Integer.toHexString(getBuffer()[i]); + while (hex.length() < 2) { + hex = '0' + hex; + } - builder.append(' ').append(hex); + builder.append(' ').append(hex); + } + log.debug(builder.toString()); } - log.debug(builder.toString()); } throw new IOException("Invalid magic found: " + getMagic()); @@ -410,7 +412,7 @@ public synchronized List getAllAttributes() { readAttributeListAttributes(); } } catch (Exception e) { - log.error("Error getting attributes for entry: " + this, e); + log.error("Error getting attributes for file record: " + referenceNumber, e); } } @@ -686,7 +688,9 @@ private synchronized void readAttributeListAttributes() { attribute = findStoredAttributeByID(entry.getAttributeID()); attributeListBuilder.add(attribute); } else { - log.debug("Looking up MFT entry for: " + entry.getFileReferenceNumber()); + if (log.isDebugEnabled()) { + log.debug("Looking up MFT entry for: " + entry.getFileReferenceNumber()); + } // 
When reading the MFT itself don't attempt to check the index is in range (we won't know the total // MFT length yet) From 925ab0f630414adfb817c82921f8de7934c8d4ee Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Mon, 3 Jun 2019 11:32:51 +1000 Subject: [PATCH 05/35] NTFS: Always log the attribute entry reference. --- .../fs/ntfs/attribute/AttributeListEntry.java | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/ntfs/attribute/AttributeListEntry.java b/fs/src/fs/org/jnode/fs/ntfs/attribute/AttributeListEntry.java index df95abd3eb..d70598671c 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/attribute/AttributeListEntry.java +++ b/fs/src/fs/org/jnode/fs/ntfs/attribute/AttributeListEntry.java @@ -131,16 +131,11 @@ public String getName() { @Override public String toString() { - StringBuilder builder = new StringBuilder(super.toString()); - builder.append("[type=").append(getType()); - builder.append(",name=").append(getName()); - if (getStartingVCN() == 0) { - builder.append(",resident"); - } else { - builder.append(",ref=").append(getFileReferenceNumber()); - builder.append(",vcn=").append(getStartingVCN()); - } - builder.append(",id=").append(getAttributeID()).append("]"); - return builder.toString(); + return String.format("attr-list-entry:[type=0x%x,name='%s',ref=%d,%s,id=0x%x]", + getType(), + getName(), + getFileReferenceNumber(), + getStartingVCN() == 0 ? "resident" : "vcn=" + getStartingVCN(), + getAttributeID()); } } From 906dbe2a1d584a7ce45c1ac5ae7885f8389b73d8 Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Mon, 3 Jun 2019 13:50:23 +1000 Subject: [PATCH 06/35] NTFS: Log the entry reference ID on error. --- fs/src/fs/org/jnode/fs/ntfs/FileRecord.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java index abe5ceca76..a8bf12aad5 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java +++ b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java @@ -702,8 +702,8 @@ private synchronized void readAttributeListAttributes() { attribute = holdingRecord.findStoredAttributeByID(entry.getAttributeID()); if (attribute == null) { - log.error(String.format("Failed to find an attribute matching entry '%s' in the holding record", - entry)); + log.error(String.format("Failed to find an attribute matching entry '%s' in the holding record, ref=%d", + entry, referenceNumber)); } else { attributeListBuilder.add(attribute); } From 373268bcebd15f978669056929976327d3359819 Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Mon, 3 Jun 2019 17:10:59 +1000 Subject: [PATCH 07/35] NTFS: fix a stack overflow, take 3. Fix a possible NPE. 
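Two small hardening changes are visible in the diff below: the failure message in readAttributeListAttributes() is now built from the plain reference number instead of the record itself, so constructing the message cannot call back into toString() while the record is still in a failing state, and AttributeListBuilder.add() now returns early for null attributes instead of dereferencing them. A minimal sketch of the null-tolerant collector, with hypothetical names (the real builder additionally separates resident and non-resident attributes):

    import java.util.ArrayList;
    import java.util.List;

    // Sketch only: a collector that skips null lookups so one missing attribute
    // does not turn into a NullPointerException when it is added or used later.
    class AttributeCollector<T> {
        private final List<T> attributes = new ArrayList<>();

        void add(T attribute) {
            if (attribute == null) {
                return; // the failed lookup has already been logged; just skip it
            }
            attributes.add(attribute);
        }

        List<T> toList() {
            return attributes;
        }
    }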
--- fs/src/fs/org/jnode/fs/ntfs/FileRecord.java | 4 ++-- .../fs/org/jnode/fs/ntfs/attribute/AttributeListBuilder.java | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java index a8bf12aad5..70d635a630 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java +++ b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java @@ -671,8 +671,8 @@ private synchronized void readAttributeListAttributes() { try { entryIterator = attributeListAttribute.getAllEntries(); } catch (Exception e) { - throw new IllegalStateException("Error getting attributes from attribute list, file record " + - FileRecord.this, e); + throw new IllegalStateException("Error getting attributes from attribute list, file record: " + + referenceNumber, e); } AttributeListBuilder attributeListBuilder = new AttributeListBuilder(); diff --git a/fs/src/fs/org/jnode/fs/ntfs/attribute/AttributeListBuilder.java b/fs/src/fs/org/jnode/fs/ntfs/attribute/AttributeListBuilder.java index 4c7fcbed5e..d5e2a2a48a 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/attribute/AttributeListBuilder.java +++ b/fs/src/fs/org/jnode/fs/ntfs/attribute/AttributeListBuilder.java @@ -58,6 +58,10 @@ public class AttributeListBuilder { * @param attribute the attribute to add. */ public void add(NTFSAttribute attribute) { + if (attribute == null) { + return; + } + if (attribute.isResident()) { attributeList.add(attribute); } else { From 4237134a8ff53a879420a6b1363c1ed303d59e3d Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Mon, 3 Jun 2019 17:24:05 +1000 Subject: [PATCH 08/35] NTFS: fallback to stored attributes if all attributes can't be read. --- fs/src/fs/org/jnode/fs/ntfs/FileRecord.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java index 70d635a630..a992f04409 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java +++ b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java @@ -412,7 +412,9 @@ public synchronized List getAllAttributes() { readAttributeListAttributes(); } } catch (Exception e) { - log.error("Error getting attributes for file record: " + referenceNumber, e); + log.error("Error getting attributes for file record: " + referenceNumber + + ", returning stored attributes", e); + attributeList = new ArrayList(getAllStoredAttributes()); } } From 492c0bb9127316ce5afe5fce3dde36da0c0f812e Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Fri, 21 Jun 2019 10:16:20 +1000 Subject: [PATCH 09/35] HFS+: Fix a null pointer exception. --- .../jnode/fs/hfsplus/HfsUnicodeString.java | 30 ++++++++++++++++- .../fs/hfsplus/attributes/AttributeKey.java | 3 +- .../test/fs/hfsplus/HfsUnicodeStringTest.java | 33 +++++++++++++++++++ 3 files changed, 63 insertions(+), 3 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/hfsplus/HfsUnicodeString.java b/fs/src/fs/org/jnode/fs/hfsplus/HfsUnicodeString.java index 98d6731807..e0124acb0c 100755 --- a/fs/src/fs/org/jnode/fs/hfsplus/HfsUnicodeString.java +++ b/fs/src/fs/org/jnode/fs/hfsplus/HfsUnicodeString.java @@ -22,7 +22,7 @@ import org.jnode.util.BigEndian; -public class HfsUnicodeString { +public class HfsUnicodeString implements Comparable { /** * Length of string in characters. */ @@ -83,4 +83,32 @@ public final byte[] getBytes() { public String toString() { return string; } + + @Override + public int hashCode() { + return string == null ? 
0 : string.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (!(obj instanceof HfsUnicodeString)) { + return false; + } + + HfsUnicodeString other = (HfsUnicodeString) obj; + return compareTo(other) == 0; + } + + @Override + public int compareTo(HfsUnicodeString other) { + if (string == null && other.string == null) { + return 0; + } else if (string == null) { + return -1; + } else if (other.string == null) { + return 1; + } + + return string.compareTo(other.string); + } } diff --git a/fs/src/fs/org/jnode/fs/hfsplus/attributes/AttributeKey.java b/fs/src/fs/org/jnode/fs/hfsplus/attributes/AttributeKey.java index 432e23bdd0..fa270f45a9 100644 --- a/fs/src/fs/org/jnode/fs/hfsplus/attributes/AttributeKey.java +++ b/fs/src/fs/org/jnode/fs/hfsplus/attributes/AttributeKey.java @@ -81,8 +81,7 @@ public int compareTo(Key key) { // Note: this is unlikely to be correct. See TN1150 section "Unicode Subtleties" for details // For reading in data is should be safe since the B-Tree will be pre-sorted, but for adding new entries // it will cause the order to be wrong. - result = this.getAttributeName().getUnicodeString() - .compareTo(otherKey.getAttributeName().getUnicodeString()); + result = this.getAttributeName().compareTo(otherKey.getAttributeName()); } } return result; diff --git a/fs/src/test/org/jnode/test/fs/hfsplus/HfsUnicodeStringTest.java b/fs/src/test/org/jnode/test/fs/hfsplus/HfsUnicodeStringTest.java index 89bc4cb5c5..d75099a3f9 100644 --- a/fs/src/test/org/jnode/test/fs/hfsplus/HfsUnicodeStringTest.java +++ b/fs/src/test/org/jnode/test/fs/hfsplus/HfsUnicodeStringTest.java @@ -24,6 +24,8 @@ import org.junit.Test; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; public class HfsUnicodeStringTest { private byte[] STRING_AS_BYTES_ARRAY = @@ -49,4 +51,35 @@ public void testConstructAsString() { } } + @Test + public void testEquals() { + HfsUnicodeString string1 = new HfsUnicodeString(STRING_AS_TEXT); + HfsUnicodeString string2 = new HfsUnicodeString(STRING_AS_TEXT); + HfsUnicodeString string3 = new HfsUnicodeString(null); + HfsUnicodeString string4 = new HfsUnicodeString(null); + + assertEquals(string1, string2); + assertEquals(string3, string4); + assertFalse(string1.equals(string3)); + assertFalse(string4.equals(string2)); + } + + @Test + public void testCompareTo() { + HfsUnicodeString nullStr = new HfsUnicodeString(null); + HfsUnicodeString emptyStr = new HfsUnicodeString(""); + HfsUnicodeString string1 = new HfsUnicodeString("test"); + HfsUnicodeString string2 = new HfsUnicodeString("test"); + HfsUnicodeString longerStr = new HfsUnicodeString("testzzz"); + + assertEquals(-1, nullStr.compareTo(emptyStr)); + assertEquals(1, emptyStr.compareTo(nullStr)); + + assertEquals(0, string1.compareTo(string2)); + assertTrue(string1.compareTo(longerStr) < 0); + assertTrue(longerStr.compareTo(string1) > 0); + + assertEquals(1, string1.compareTo(nullStr)); + assertEquals(-1, nullStr.compareTo(string1)); + } } From fb5a9e188fbbc7f5bab08ba0746ea8c388daf1de Mon Sep 17 00:00:00 2001 From: Jessica Knight Date: Fri, 2 Aug 2019 17:22:20 +1000 Subject: [PATCH 10/35] TRIAGE-1037: stop throwing IOException on empty reads in readVCN for NTFS --- .../org/jnode/fs/ntfs/attribute/NTFSNonResidentAttribute.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSNonResidentAttribute.java b/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSNonResidentAttribute.java 
index fc8bf8bd6c..a49b64ca67 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSNonResidentAttribute.java +++ b/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSNonResidentAttribute.java @@ -159,10 +159,10 @@ public int readVCN(long vcn, byte[] dst, int dstOffset, int nrClusters) throws I final int clusterSize = volume.getClusterSize(); int readClusters = 0; for (DataRunInterface dataRun : getDataRuns()) { - readClusters += dataRun.readClusters(vcn, dst, dstOffset, nrClusters, clusterSize, volume); - if (readClusters == nrClusters) { + if (readClusters >= nrClusters) { break; } + readClusters += dataRun.readClusters(vcn, dst, dstOffset, nrClusters, clusterSize, volume); } if (log.isDebugEnabled()) { From 5d530bdd6a30f75ddcc934d09dbd777497b5c7c4 Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Thu, 22 Aug 2019 11:50:43 +1000 Subject: [PATCH 11/35] FAT: expose base name. --- fs/src/fs/org/jnode/fs/jfat/FatShortDirEntry.java | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/fs/src/fs/org/jnode/fs/jfat/FatShortDirEntry.java b/fs/src/fs/org/jnode/fs/jfat/FatShortDirEntry.java index 041368beb5..d4a07a19ea 100644 --- a/fs/src/fs/org/jnode/fs/jfat/FatShortDirEntry.java +++ b/fs/src/fs/org/jnode/fs/jfat/FatShortDirEntry.java @@ -308,7 +308,7 @@ public boolean isShortDirEntry() { return true; } - private FatCase getNameCase() { + public FatCase getNameCase() { return ncase; } @@ -411,6 +411,10 @@ public String getBase() { return base; } + protected void setBase(String base) { + this.base = base; + } + public String getExt() { return ext; } From 485b9c8cd3b4101b7d90a69a6035bdbefc74bb28 Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Thu, 22 Aug 2019 11:51:23 +1000 Subject: [PATCH 12/35] exFAT: fixes around allocated size vs file size. --- .../org/jnode/fs/exfat/DirectoryParser.java | 10 +++---- fs/src/fs/org/jnode/fs/exfat/Node.java | 26 ++++++++++++++++++- 2 files changed, 28 insertions(+), 8 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/exfat/DirectoryParser.java b/fs/src/fs/org/jnode/fs/exfat/DirectoryParser.java index 843668913a..f1292119b9 100644 --- a/fs/src/fs/org/jnode/fs/exfat/DirectoryParser.java +++ b/fs/src/fs/org/jnode/fs/exfat/DirectoryParser.java @@ -232,14 +232,10 @@ private void parseFile(Visitor v, boolean deleted) throws IOException { int nameLen = DeviceAccess.getUint8(chunk); final int nameHash = DeviceAccess.getUint16(chunk); skip(2); /* unknown */ - final long realSize = DeviceAccess.getUint64(chunk); + final long size = DeviceAccess.getUint64(chunk); skip(4); /* unknown */ final long startCluster = DeviceAccess.getUint32(chunk); - final long size = DeviceAccess.getUint64(chunk); - - if (realSize != size) { - throw new IOException("real size does not equal size"); - } + final long allocatedSize = DeviceAccess.getUint64(chunk); conts--; @@ -288,7 +284,7 @@ private void parseFile(Visitor v, boolean deleted) throws IOException { " != " + Integer.toHexString(nameHash) + ")"); } - v.foundNode(Node.create(sb, startCluster, attrib, name, (flag == FLAG_CONTIGUOUS), realSize, times, deleted), + v.foundNode(Node.create(sb, startCluster, attrib, name, (flag == FLAG_CONTIGUOUS), size, allocatedSize, times, deleted), index); } diff --git a/fs/src/fs/org/jnode/fs/exfat/Node.java b/fs/src/fs/org/jnode/fs/exfat/Node.java index b94df689c2..731a651882 100644 --- a/fs/src/fs/org/jnode/fs/exfat/Node.java +++ b/fs/src/fs/org/jnode/fs/exfat/Node.java @@ -21,6 +21,7 @@ package org.jnode.fs.exfat; import java.io.IOException; +import org.apache.log4j.Logger; /** * @author 
Matthias Treydte <waldheinz at gmail.com> @@ -47,16 +48,21 @@ public static Node createRoot(ExFatSuperBlock sb) public static Node create( ExFatSuperBlock sb, long startCluster, int flags, - String name, boolean isContiguous, long size, EntryTimes times, boolean deleted) { + String name, boolean isContiguous, long size, long allocatedSize, EntryTimes times, boolean deleted) { final Node result = new Node(sb, startCluster, times); result.name = name; result.isContiguous = isContiguous; result.size = size; + result.allocatedSize = allocatedSize; result.flags = flags; result.deleted = deleted; + if (allocatedSize < size) { + Logger.getLogger(Node.class).warn("Allocated size less than file size: " + result); + } + return result; } @@ -69,7 +75,17 @@ public static Node create( private long clusterCount; private int flags; private String name; + + /** + * The size of the file in bytes. + */ private long size; + + /** + * The size allocated for the file in bytes. This may be larger than {@code size} if the OS has reserved some space + * for the file to grow into. + */ + private long allocatedSize; private boolean deleted; private Node(ExFatSuperBlock sb, long startCluster, EntryTimes times) { @@ -125,6 +141,10 @@ public long getSize() { return size; } + public long getAllocatedSize() { + return allocatedSize; + } + public boolean isDeleted() { return deleted; } @@ -170,6 +190,10 @@ public String toString() { result.append(this.name); result.append(", contiguous="); result.append(this.isContiguous); + result.append(", size="); + result.append(size); + result.append(", allocated-size="); + result.append(allocatedSize); result.append("]"); return result.toString(); From 0112ad612bf2cf3e7f66f3fd80ab6b28c78a62a2 Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Thu, 22 Aug 2019 12:06:07 +1000 Subject: [PATCH 13/35] exFAT: expose the file's node. --- fs/src/fs/org/jnode/fs/exfat/NodeFile.java | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/fs/src/fs/org/jnode/fs/exfat/NodeFile.java b/fs/src/fs/org/jnode/fs/exfat/NodeFile.java index 97357388c7..c64d4df4bf 100644 --- a/fs/src/fs/org/jnode/fs/exfat/NodeFile.java +++ b/fs/src/fs/org/jnode/fs/exfat/NodeFile.java @@ -39,6 +39,10 @@ public NodeFile(ExFatFileSystem fs, Node node) { this.node = node; } + public Node getNode() { + return node; + } + @Override public long getLength() { return this.node.getSize(); From 3786512c2caa9bba9ec12e117883ec20176efdc7 Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Thu, 5 Sep 2019 16:43:19 +1000 Subject: [PATCH 14/35] NTFS: Add support for 5-byte cluster offsets in data runs. --- .../src/core/org/jnode/util/LittleEndian.java | 17 ++++++++ .../fs/org/jnode/fs/ntfs/NTFSStructure.java | 16 +++++-- .../fs/ntfs/datarun/CompressedDataRun.java | 5 ++- .../fs/org/jnode/fs/ntfs/datarun/DataRun.java | 20 +++++---- .../jnode/fs/ntfs/datarun/DataRunDecoder.java | 8 ++-- .../fs/ntfs/datarun/DataRunInterface.java | 2 +- .../test/fs/ntfs/NTFSDataRunDecoderTest.java | 42 +++++++++++++++++++ 7 files changed, 92 insertions(+), 18 deletions(-) diff --git a/core/src/core/org/jnode/util/LittleEndian.java b/core/src/core/org/jnode/util/LittleEndian.java index 036ccafc44..c3afd8c5fb 100644 --- a/core/src/core/org/jnode/util/LittleEndian.java +++ b/core/src/core/org/jnode/util/LittleEndian.java @@ -126,6 +126,23 @@ public static int getInt32(byte[] src, int offset) { return ((v3 << 24) | (v2 << 16) | (v1 << 8) | v0); } + /** + * Gets a 40-bit signed integer from the given byte array at the given offset. 
+ * + * @param src + * @param offset + */ + public static long getInt40(byte[] src, int offset) { + final long v0 = src[offset + 0] & 0xFF; + final long v1 = src[offset + 1] & 0xFF; + final long v2 = src[offset + 2] & 0xFF; + final long v3 = src[offset + 3] & 0xFF; + final long v4 = src[offset + 4] & 0xFF; + long tmp = (v4 << 32) | (v3 << 24) | (v2 << 16) | (v1 << 8) | v0; + tmp <<= 24; // Shift the value to the top of the 8 bytes in the long, and back to extend any -ve sign + return tmp >> 24; + } + /** * Gets a 48-bit unsigned integer from the given byte array at the given offset. * diff --git a/fs/src/fs/org/jnode/fs/ntfs/NTFSStructure.java b/fs/src/fs/org/jnode/fs/ntfs/NTFSStructure.java index a9f2c758d4..b70dd6e435 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/NTFSStructure.java +++ b/fs/src/fs/org/jnode/fs/ntfs/NTFSStructure.java @@ -181,7 +181,7 @@ public final int getInt24(int offset) { } /** - * Read n signed 32-bit integer from a given offset. + * Read a signed 32-bit integer from a given offset. * * @param offset * @return @@ -191,7 +191,17 @@ public final int getInt32(int offset) { } /** - * Read n signed 48-bit integer from a given offset. + * Read a signed 40-bit integer from a given offset. + * + * @param offset + * @return + */ + public final long getInt40(int offset) { + return LittleEndian.getInt40(buffer, this.offset + offset); + } + + /** + * Read a signed 48-bit integer from a given offset. * * @param offset * @return @@ -201,7 +211,7 @@ public final long getInt48(int offset) { } /** - * Read n signed 64-bit integer from a given offset. + * Read a signed 64-bit integer from a given offset. * * @param offset * @return diff --git a/fs/src/fs/org/jnode/fs/ntfs/datarun/CompressedDataRun.java b/fs/src/fs/org/jnode/fs/ntfs/datarun/CompressedDataRun.java index e4f41e5946..b9ef4b371f 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/datarun/CompressedDataRun.java +++ b/fs/src/fs/org/jnode/fs/ntfs/datarun/CompressedDataRun.java @@ -26,6 +26,7 @@ import java.util.List; import org.apache.log4j.Logger; import org.jnode.fs.ntfs.NTFSVolume; +import org.jnode.fs.util.FSUtils; import org.jnode.util.LittleEndian; /** @@ -70,7 +71,7 @@ public CompressedDataRun(DataRun compressedRun, int compressionUnitSize) { * * @return the length of the run in clusters. */ - public int getLength() { + public long getLength() { return compressionUnitSize; } @@ -116,7 +117,7 @@ public int readClusters(long vcn, byte[] dst, int dstOffset, int nrClusters, int // This is the actual number of stored clusters after compression. // If the number of stored clusters is the same as the compression unit size, // then the data can be read directly without decompressing it. 
- final int compClusters = compressedRun.getLength(); + int compClusters = FSUtils.checkedCast(compressedRun.getLength()); if (compClusters == compressionUnitSize) { return compressedRun.readClusters(vcn, dst, dstOffset, compClusters, clusterSize, volume); } diff --git a/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRun.java b/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRun.java index 015b01e86e..206be9702a 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRun.java +++ b/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRun.java @@ -25,6 +25,7 @@ import org.apache.log4j.Logger; import org.jnode.fs.ntfs.NTFSStructure; import org.jnode.fs.ntfs.NTFSVolume; +import org.jnode.fs.util.FSUtils; /** * @author Ewout Prangsma (epr@users.sourceforge.net) @@ -48,7 +49,7 @@ public final class DataRun implements DataRunInterface { /** * Length of datarun in clusters */ - private final int length; + private final long length; /** * Flag indicating that the data is not stored on disk but is all zero. @@ -74,7 +75,7 @@ public final class DataRun implements DataRunInterface { * @param size Size in bytes of this datarun descriptor * @param vcn First VCN of this datarun. */ - public DataRun(long cluster, int length, boolean sparse, int size, long vcn) { + public DataRun(long cluster, long length, boolean sparse, int size, long vcn) { this.cluster = cluster; this.length = length; this.sparse = sparse; @@ -115,12 +116,12 @@ public DataRun(NTFSStructure attr, int offset, long vcn, long previousLCN) { length = dataRunStructure.getUInt24(1); break; case 0x04: - length = dataRunStructure.getUInt32AsInt(1); + length = dataRunStructure.getUInt32(1); break; default: throw new IllegalArgumentException("Invalid length length " + lenlen); } - final int cluster; + final long cluster; switch (clusterlen) { case 0x00: sparse = true; @@ -138,6 +139,9 @@ public DataRun(NTFSStructure attr, int offset, long vcn, long previousLCN) { case 0x04: cluster = dataRunStructure.getInt32(1 + lenlen); break; + case 0x05: + cluster = dataRunStructure.getInt40(1 + lenlen); + break; default: throw new IllegalArgumentException("Unknown cluster length " + clusterlen); } @@ -176,7 +180,7 @@ public int getSize() { * * @return Returns the length. 
*/ - public int getLength() { + public long getLength() { return length; } @@ -216,7 +220,7 @@ public int readClusters(long vcn, byte[] dst, int dstOffset, int nrClusters, int NTFSVolume volume) throws IOException { final long myFirstVcn = getFirstVcn(); - final int myLength = getLength(); + final long myLength = getLength(); final long myLastVcn = getLastVcn(); final long reqLastVcn = vcn + nrClusters - 1; @@ -235,13 +239,13 @@ public int readClusters(long vcn, byte[] dst, int dstOffset, int nrClusters, int final int actDstOffset; // Actual dst offset if (vcn < myFirstVcn) { final int vcnDelta = (int) (myFirstVcn - vcn); - count = Math.min(nrClusters - vcnDelta, myLength); + count = FSUtils.checkedCast(Math.min(nrClusters - vcnDelta, myLength)); actDstOffset = dstOffset + (vcnDelta * clusterSize); actCluster = getCluster(); } else { // vcn >= myFirstVcn final int vcnDelta = (int) (vcn - myFirstVcn); - count = Math.min(nrClusters, myLength - vcnDelta); + count = FSUtils.checkedCast(Math.min(nrClusters, myLength - vcnDelta)); actDstOffset = dstOffset; actCluster = getCluster() + vcnDelta; } diff --git a/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunDecoder.java b/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunDecoder.java index 484d17fbe1..95fda6be06 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunDecoder.java +++ b/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunDecoder.java @@ -59,7 +59,7 @@ public class DataRunDecoder { /** * The last compressed run size. */ - private int lastCompressedSize = 0; + private long lastCompressedSize = 0; /** * The last compressed run to append to. @@ -106,7 +106,7 @@ public void readDataRuns(NTFSStructure parent, int offsetInParent) { // Also the sparse run following a compressed run can be coalesced with a subsequent 'real' sparse // run. So add that in if we hit one if (dataRun.getLength() + lastCompressedSize > compressionUnit) { - int length = dataRun.getLength() - (compressionUnit - lastCompressedSize); + long length = dataRun.getLength() - (compressionUnit - lastCompressedSize); dataRuns.add(new DataRun(0, length, true, 0, vcn)); this.numberOfVCNs += length; @@ -120,11 +120,11 @@ public void readDataRuns(NTFSStructure parent, int offsetInParent) { // coalesced into a single run and even coalesced into the next compressed run. In that case the // compressed run needs to be split off - int remainder = dataRun.getLength() % compressionUnit; + long remainder = dataRun.getLength() % compressionUnit; if (remainder != 0) { // Uncompressed run coalesced with compressed run. First add in the uncompressed portion: - int uncompressedLength = dataRun.getLength() - remainder; + long uncompressedLength = dataRun.getLength() - remainder; DataRun uncompressed = new DataRun(dataRun.getCluster(), uncompressedLength, false, 0, vcn); dataRuns.add(uncompressed); vcn += uncompressedLength; diff --git a/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunInterface.java b/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunInterface.java index 989a18d21d..6686b427fc 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunInterface.java +++ b/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunInterface.java @@ -33,7 +33,7 @@ public interface DataRunInterface { * * @return the length of the run in clusters. */ - int getLength(); + long getLength(); /** * Reads clusters from this datarun. 
diff --git a/fs/src/test/org/jnode/test/fs/ntfs/NTFSDataRunDecoderTest.java b/fs/src/test/org/jnode/test/fs/ntfs/NTFSDataRunDecoderTest.java index 6ac751e871..97562c4db4 100644 --- a/fs/src/test/org/jnode/test/fs/ntfs/NTFSDataRunDecoderTest.java +++ b/fs/src/test/org/jnode/test/fs/ntfs/NTFSDataRunDecoderTest.java @@ -414,6 +414,48 @@ public void testCombiningSubsequentAttributesRuns() { assertDataRuns(dataRuns, expectedRuns); } + @Test + public void testDataRunWithLargeNegativeOffset() { + // Arrange + byte[] buffer = toByteArray( + "33 C0 3B 01 00 00 0C 43 14 C8 00 2C 43 F5 1E 43 F1 15 01 63 63 EB 25 42 A7 77 FA 5E E8 " + + "0E 42 94 4A 6E 7B BA 0D 43 70 CA 00 09 FF 50 19 43 A5 0F 01 FC FF D1 B3 42 16 65 AE 99 F8 2A 43 " + + "6C C8 00 EA 1D 94 15 43 0C C8 00 BF CB 9F B3 43 1D D2 00 71 EE BA 0D 43 03 C8 00 D9 43 B2 00 43 " + + "32 C9 00 6C F8 B1 5D 43 08 C8 00 AF E4 2A 8E 43 06 C8 00 E2 CB 2F 1F 43 25 C8 00 66 A1 F0 30 43 " + + "0F C8 00 2B 04 B4 08 43 2D C9 00 D0 A6 87 E0 43 1A C8 00 0B 97 E0 29 52 C2 08 C9 D2 B8 7F FF 43 " + + "00 88 00 68 5C CC 6B 00"); + DataRunDecoder dataRunDecoder = new DataRunDecoder(false, 1); + + // Act + dataRunDecoder.readDataRuns(new NTFSStructure(buffer, 0), 0); + List dataRuns = dataRunDecoder.getDataRuns(); + + // Assert + String expectedRuns = + "[data-run vcn:0-80831 cluster:786432]\n" + + "[data-run vcn:80832-132051 cluster:520176428]\n" + + "[data-run vcn:132052-203204 cluster:1156359823]\n" + + "[data-run vcn:203205-233835 cluster:1406469513]\n" + + "[data-run vcn:233836-252927 cluster:1636794615]\n" + + "[data-run vcn:252928-304751 cluster:2061533184]\n" + + "[data-run vcn:304752-374292 cluster:783450108]\n" + + "[data-run vcn:374293-400170 cluster:1504385450]\n" + + "[data-run vcn:400171-451478 cluster:1866413972]\n" + + "[data-run vcn:451479-502690 cluster:585040723]\n" + + "[data-run vcn:502691-556479 cluster:815395268]\n" + + "[data-run vcn:556480-607682 cluster:827078045]\n" + + "[data-run vcn:607683-659188 cluster:2399022601]\n" + + "[data-run vcn:659189-710396 cluster:489231032]\n" + + "[data-run vcn:710397-761602 cluster:1012457114]\n" + + "[data-run vcn:761603-812839 cluster:1833533440]\n" + + "[data-run vcn:812840-864054 cluster:1979548715]\n" + + "[data-run vcn:864055-915555 cluster:1451567867]\n" + + "[data-run vcn:915556-966781 cluster:2154152454]\n" + + "[data-run vcn:966782-969023 cluster:2004175]\n" + + "[data-run vcn:969024-1003839 cluster:1810559287]\n"; + assertDataRuns(dataRuns, expectedRuns); + } + /** * Asserts the list of data runs is correct. * From d47b3db984fd15bd52e66623655670ac0cac3647 Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Thu, 19 Sep 2019 16:38:40 +1000 Subject: [PATCH 15/35] NTFS: Fix a NPE decoding data runs. 
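When an uncompressed run is coalesced with the start of the following compression unit, the decoder splits it and emits a new CompressedDataRun for the remainder. On that branch the run was added to the output list but never remembered in lastCompressedRun, even though expectingSparseRunNext was set, so the sparse run that arrives next (and is presumably folded into the last compressed run) dereferenced a null field. The fix assigns lastCompressedRun before adding the run, and the new testCompressedExpectingSparseAfterMerge case exercises that path. A simplified sketch of the state the decoder has to carry between runs, using hypothetical names:

    import java.util.ArrayList;
    import java.util.List;

    // Sketch only: every branch that emits a compressed unit must record it,
    // because the next (sparse) run is appended to that same unit.
    class UnitDecoder {
        private CompressedUnit lastUnit;
        private final List<CompressedUnit> units = new ArrayList<>();

        void emitCompressed(long cluster, long length) {
            CompressedUnit unit = new CompressedUnit(cluster, length);
            lastUnit = unit;      // the fix in this patch is the equivalent of this assignment
            units.add(unit);
        }

        void appendSparse(long length) {
            lastUnit.extend(length); // relies on the previous branch having set lastUnit
        }

        static class CompressedUnit {
            long cluster;
            long length;

            CompressedUnit(long cluster, long length) {
                this.cluster = cluster;
                this.length = length;
            }

            void extend(long extra) {
                length += extra;
            }
        }
    }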
--- .../fs/org/jnode/fs/ntfs/datarun/DataRun.java | 3 --- .../jnode/fs/ntfs/datarun/DataRunDecoder.java | 3 ++- .../test/fs/ntfs/NTFSDataRunDecoderTest.java | 18 ++++++++++++++++++ 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRun.java b/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRun.java index 206be9702a..3d10b31b39 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRun.java +++ b/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRun.java @@ -32,9 +32,6 @@ */ public final class DataRun implements DataRunInterface { - /** - * Type of this datarun - */ /** * logger */ diff --git a/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunDecoder.java b/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunDecoder.java index 95fda6be06..0d21276415 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunDecoder.java +++ b/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunDecoder.java @@ -133,7 +133,8 @@ public void readDataRuns(NTFSStructure parent, int offsetInParent) { // Next add in the compressed portion DataRun compressedRun = new DataRun(dataRun.getCluster() + uncompressedLength, remainder, false, 0, vcn); - dataRuns.add(new CompressedDataRun(compressedRun, compressionUnit)); + lastCompressedRun = new CompressedDataRun(compressedRun, compressionUnit); + dataRuns.add(lastCompressedRun); expectingSparseRunNext = true; lastCompressedSize = remainder; diff --git a/fs/src/test/org/jnode/test/fs/ntfs/NTFSDataRunDecoderTest.java b/fs/src/test/org/jnode/test/fs/ntfs/NTFSDataRunDecoderTest.java index 97562c4db4..1eb9028990 100644 --- a/fs/src/test/org/jnode/test/fs/ntfs/NTFSDataRunDecoderTest.java +++ b/fs/src/test/org/jnode/test/fs/ntfs/NTFSDataRunDecoderTest.java @@ -456,6 +456,24 @@ public void testDataRunWithLargeNegativeOffset() { assertDataRuns(dataRuns, expectedRuns); } + @Test + public void testCompressedExpectingSparseAfterMerge() { + // Arrange + byte[] buffer = toByteArray( + "41 13 D5 68 A2 0B 21 09 68 FF 01 04 00"); + DataRunDecoder dataRunDecoder = new DataRunDecoder(true, 16); + + // Act + dataRunDecoder.readDataRuns(new NTFSStructure(buffer, 0), 0); + List dataRuns = dataRunDecoder.getDataRuns(); + + // Assert + String expectedRuns = + "[data-run vcn:0-15 cluster:195193045]\n" + + "[compressed-run vcn:16-31 [[data-run vcn:16-18 cluster:195193061], [data-run vcn:19-27 cluster:195192893]]]\n"; + assertDataRuns(dataRuns, expectedRuns); + } + /** * Asserts the list of data runs is correct. * From 224ec6352236de82b34923db5a748b9877d57aac Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Tue, 24 Sep 2019 12:19:10 +1000 Subject: [PATCH 16/35] NTFS: lazily evaluate stored attributes. --- fs/src/fs/org/jnode/fs/ntfs/FileRecord.java | 31 +++++++++++++++------ 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java index a992f04409..dff48ecad4 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java +++ b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java @@ -116,11 +116,6 @@ public FileRecord(NTFSVolume volume, int bytesPerSector, int clusterSize, boolea this.volume = volume; this.clusterSize = clusterSize; this.referenceNumber = referenceNumber; - - storedAttributeList = readStoredAttributes(); - - // Linux NTFS docs say there can only be one of these, so I'll believe them. 
- attributeListAttribute = (AttributeListAttribute) findStoredAttributeByType(NTFSAttribute.Types.ATTRIBUTE_LIST); } /** @@ -361,6 +356,9 @@ public FileNameAttribute getFileNameAttribute() { * @return an iterator over attributes stored in this file record. */ public List getAllStoredAttributes() { + if (storedAttributeList == null) { + storedAttributeList = readStoredAttributes(); + } return storedAttributeList; } @@ -371,7 +369,7 @@ public List getAllStoredAttributes() { * @return the attribute found, or {@code null} if not found. */ private NTFSAttribute findStoredAttributeByID(int id) { - for (NTFSAttribute attr : storedAttributeList) { + for (NTFSAttribute attr : getAllStoredAttributes()) { if (attr != null && attr.getAttributeID() == id) { return attr; } @@ -387,7 +385,7 @@ private NTFSAttribute findStoredAttributeByID(int id) { * @see NTFSAttribute.Types */ private NTFSAttribute findStoredAttributeByType(int typeID) { - for (NTFSAttribute attr : storedAttributeList) { + for (NTFSAttribute attr : getAllStoredAttributes()) { if (attr != null && attr.getAttributeType() == typeID) { return attr; } @@ -395,6 +393,21 @@ private NTFSAttribute findStoredAttributeByType(int typeID) { return null; } + /** + * Gets the attributes list attribute, if the record has one. + * + * @return the attribute, or {@code null}. + */ + public AttributeListAttribute getAttributeListAttribute() { + if (attributeListAttribute == null) { + // Linux NTFS docs say there can only be one of these, so I'll believe them. + attributeListAttribute = + (AttributeListAttribute) findStoredAttributeByType(NTFSAttribute.Types.ATTRIBUTE_LIST); + } + + return attributeListAttribute; + } + /** * Gets a collection of all attributes in this file record, including any attributes * which are stored in other file records referenced from an $ATTRIBUTE_LIST attribute. @@ -404,7 +417,7 @@ private NTFSAttribute findStoredAttributeByType(int typeID) { public synchronized List getAllAttributes() { if (attributeList == null) { try { - if (attributeListAttribute == null) { + if (getAttributeListAttribute() == null) { log.debug("All attributes stored"); attributeList = new ArrayList(getAllStoredAttributes()); } else { @@ -671,7 +684,7 @@ private synchronized void readAttributeListAttributes() { Iterator entryIterator; try { - entryIterator = attributeListAttribute.getAllEntries(); + entryIterator = getAttributeListAttribute().getAllEntries(); } catch (Exception e) { throw new IllegalStateException("Error getting attributes from attribute list, file record: " + referenceNumber, e); From 95bfcb8235e72696b33e5efefc3d8bc994c9f385 Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Thu, 26 Sep 2019 15:56:46 +1000 Subject: [PATCH 17/35] NTFS: improve attribute debug logging. 
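Resident and non-resident attributes now share a single hexDump() helper built on FSUtils.toString, so any attribute can be dumped while debugging. The exact dump format is not shown in this patch; a rough stand-in that produces a conventional hex listing could look like this (HexDumpSketch and its helper are illustrative only):

    public class HexDumpSketch {
        static String hexDump(byte[] data) {
            StringBuilder sb = new StringBuilder();
            for (int i = 0; i < data.length; i++) {
                sb.append(String.format("%02x", data[i] & 0xff));
                sb.append((i % 16 == 15) ? '\n' : ' ');
            }
            return sb.toString();
        }

        public static void main(String[] args) {
            byte[] magic = {0x46, 0x49, 0x4c, 0x45};          // the bytes of "FILE"
            System.out.println(hexDump(magic));               // prints: 46 49 4c 45
        }
    }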
--- .../org/jnode/fs/ntfs/attribute/NTFSAttribute.java | 13 +++++++++++++ .../fs/ntfs/attribute/NTFSNonResidentAttribute.java | 13 ------------- .../fs/ntfs/attribute/NTFSResidentAttribute.java | 12 ------------ 3 files changed, 13 insertions(+), 25 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSAttribute.java b/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSAttribute.java index 900fba8069..92a4f89cae 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSAttribute.java +++ b/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSAttribute.java @@ -26,6 +26,7 @@ import org.jnode.fs.ntfs.StandardInformationAttribute; import org.jnode.fs.ntfs.index.IndexAllocationAttribute; import org.jnode.fs.ntfs.index.IndexRootAttribute; +import org.jnode.fs.util.FSUtils; /** * @author Chira @@ -173,6 +174,18 @@ public int getSize() { return getUInt32AsInt(4); } + /** + * Generates a hex dump of the attribute's data. + * + * @return the hex dump. + */ + public String hexDump() { + int length = getBuffer().length - getOffset(); + byte[] data = new byte[length]; + getData(0, data, 0, data.length); + return FSUtils.toString(data); + } + /** * Generates a debug string for the attribute. * diff --git a/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSNonResidentAttribute.java b/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSNonResidentAttribute.java index a49b64ca67..4b80d78715 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSNonResidentAttribute.java +++ b/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSNonResidentAttribute.java @@ -26,7 +26,6 @@ import org.jnode.fs.ntfs.NTFSVolume; import org.jnode.fs.ntfs.datarun.DataRunDecoder; import org.jnode.fs.ntfs.datarun.DataRunInterface; -import org.jnode.fs.util.FSUtils; /** * An NTFS file attribute that has its data stored outside the attribute. @@ -172,18 +171,6 @@ public int readVCN(long vcn, byte[] dst, int dstOffset, int nrClusters) throws I return readClusters; } - /** - * Generates a hex dump of the attribute's data. - * - * @return the hex dump. - */ - public String hexDump() { - int length = getBuffer().length - getOffset(); - byte[] data = new byte[length]; - getData(0, data, 0, data.length); - return FSUtils.toString(data); - } - @Override public String toString() { return String.format("[attribute (non-res) type=x%x name'%s' size=%d runs=%d]", getAttributeType(), diff --git a/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSResidentAttribute.java b/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSResidentAttribute.java index 1069f65ae5..4ca936b91c 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSResidentAttribute.java +++ b/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSResidentAttribute.java @@ -21,7 +21,6 @@ package org.jnode.fs.ntfs.attribute; import org.jnode.fs.ntfs.FileRecord; -import org.jnode.fs.util.FSUtils; /** * An NTFS file attribute that has its data stored inside the attribute. @@ -59,17 +58,6 @@ public int getAttributeLength() { return (int) getUInt32(0x10); } - /** - * Generates a hex dump of the attribute's data. - * - * @return the hex dump. 
- */ - public String hexDump() { - byte[] attributeData = new byte[getAttributeLength()]; - getData(getAttributeOffset(), attributeData, 0, attributeData.length); - return FSUtils.toString(attributeData); - } - @Override public String toString() { return String.format("[attribute (res) type=x%x name'%s' size=%d]", getAttributeType(), getAttributeName(), From 067c20b8744104c5d44d7bccf8ae523fd8435a9d Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Tue, 29 Oct 2019 08:59:10 +1100 Subject: [PATCH 18/35] NTFS: Fix a possible stack overflow. --- fs/src/fs/org/jnode/fs/ntfs/FileRecord.java | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java index dcc5d4b05d..538a000d44 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java +++ b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java @@ -649,10 +649,21 @@ public void readData(int attributeType, String streamName, long fileOffset, byte @Override public String toString() { + // Only look at stored attributes to determine the file name to avoid a possible stack overflow + String fileName = null; + for (NTFSAttribute attribute : getAllStoredAttributes()) { + if (attribute.getAttributeType() == NTFSAttribute.Types.FILE_NAME) { + FileNameAttribute fileNameAttribute = (FileNameAttribute) attribute; + if (fileName == null || fileNameAttribute.getNameSpace() == FileNameAttribute.NameSpace.WIN32) { + fileName = fileNameAttribute.getFileName(); + } + } + } + if (isInUse()) { - return String.format("FileRecord [%d fileName='%s']", referenceNumber, getFileName()); + return String.format("FileRecord [%d name='%s']", referenceNumber, fileName); } else { - return String.format("FileRecord [%d unused]", referenceNumber); + return String.format("FileRecord [%d unused name='%s']", referenceNumber, fileName); } } From fa67b952d629576784811906f634897860600bc0 Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Tue, 29 Oct 2019 17:20:53 +1100 Subject: [PATCH 19/35] NTFS: Fix a possible stack overflow. 
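This extends the previous fix: the name lookup in toString() still only consults stored attributes, and any failure while resolving the name is now logged at debug level rather than escaping from toString(), which is called from log statements on records that may be only partially parsed. A small stand-alone sketch of the shape of the change (the names are hypothetical, not the JNode API):

    public class SafeToStringSketch {
        private final long referenceNumber = 42;

        private String lookUpNameFromStoredAttributes() {
            throw new IllegalStateException("record not fully parsed");   // simulated failure
        }

        @Override
        public String toString() {
            String fileName = null;
            try {
                fileName = lookUpNameFromStoredAttributes();
            } catch (Exception e) {
                // a toString() used from log statements must never propagate failures
            }
            return "FileRecord [" + referenceNumber + " name='" + fileName + "']";
        }

        public static void main(String[] args) {
            System.out.println(new SafeToStringSketch());     // FileRecord [42 name='null']
        }
    }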
--- fs/src/fs/org/jnode/fs/ntfs/FileRecord.java | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java index 031548950b..a56dc4bb0e 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java +++ b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java @@ -666,15 +666,20 @@ public void readData(int attributeType, String streamName, long fileOffset, byte @Override public String toString() { - // Only look at stored attributes to determine the file name to avoid a possible stack overflow String fileName = null; - for (NTFSAttribute attribute : getAllStoredAttributes()) { - if (attribute.getAttributeType() == NTFSAttribute.Types.FILE_NAME) { - FileNameAttribute fileNameAttribute = (FileNameAttribute) attribute; - if (fileName == null || fileNameAttribute.getNameSpace() == FileNameAttribute.NameSpace.WIN32) { - fileName = fileNameAttribute.getFileName(); + + try { + // Only look at stored attributes to determine the file name to avoid a possible stack overflow + for (NTFSAttribute attribute : getAllStoredAttributes()) { + if (attribute.getAttributeType() == NTFSAttribute.Types.FILE_NAME) { + FileNameAttribute fileNameAttribute = (FileNameAttribute) attribute; + if (fileName == null || fileNameAttribute.getNameSpace() == FileNameAttribute.NameSpace.WIN32) { + fileName = fileNameAttribute.getFileName(); + } } } + } catch (Exception e) { + log.debug("Error getting file name for file record: " + referenceNumber, e); } if (isInUse()) { From 1d02fc41cf29c6f1f0ef42fb9ce4f241c80620ce Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Tue, 29 Oct 2019 17:58:18 +1100 Subject: [PATCH 20/35] FAT: fix logging. --- fs/src/fs/org/jnode/fs/jfat/FatChain.java | 75 +++++++++++------------ 1 file changed, 35 insertions(+), 40 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/jfat/FatChain.java b/fs/src/fs/org/jnode/fs/jfat/FatChain.java index bef938269b..64836c6981 100644 --- a/fs/src/fs/org/jnode/fs/jfat/FatChain.java +++ b/fs/src/fs/org/jnode/fs/jfat/FatChain.java @@ -42,8 +42,6 @@ public class FatChain { private int head; private boolean dirty; - private boolean dolog = false; - private ChainPosition position; private ChainIterator iterator; @@ -70,10 +68,6 @@ public void validate() { } } - private void mylog(String msg) { - log.debug(msg); - } - public FatFileSystem getFatFileSystem() { return fs; } @@ -140,8 +134,9 @@ private int allocateTail(int n, int m, int offset, boolean zero) throws IOExcept if (offset < 0) throw new IllegalArgumentException("offset<0"); - if (dolog) - mylog("n[" + n + "] m[" + m + "] offset[" + offset + "]"); + if (log.isDebugEnabled()) { + log.debug("n[" + n + "] m[" + m + "] offset[" + offset + "]"); + } final int last; int i, found = 0, l = 0; @@ -170,16 +165,16 @@ private int allocateTail(int n, int m, int offset, boolean zero) throws IOExcept last = l; - if (dolog) - mylog("found[" + found + "] last[" + last + "]"); + if (log.isDebugEnabled()) + log.debug("found[" + found + "] last[" + last + "]"); fat.set(last, fat.eofChain()); - if (dolog) - mylog(n + "\t|allo|\t" + last + " " + fat.eofChain()); + if (log.isDebugEnabled()) + log.debug(n + "\t|allo|\t" + last + " " + fat.eofChain()); if (zero) { - if (dolog) - mylog(n + "\t|ZERO|\t" + last + " " + fat.eofChain()); + if (log.isDebugEnabled()) + log.debug(n + "\t|ZERO|\t" + last + " " + fat.eofChain()); fat.clearCluster(last); } @@ -191,8 +186,8 @@ private int allocateTail(int n, int m, int offset, boolean zero) throws IOExcept 
for (; found < (n - m - k); i--) { if (fat.isFreeEntry(i)) { fat.set(i, l); - if (dolog) - mylog((n - found - 1) + "\t|allo|\t" + i + " " + l); + if (log.isDebugEnabled()) + log.debug((n - found - 1) + "\t|allo|\t" + i + " " + l); l = i; found++; } @@ -203,8 +198,8 @@ private int allocateTail(int n, int m, int offset, boolean zero) throws IOExcept if (fat.isFreeEntry(i)) { fat.clearCluster(i, 0, offset); fat.set(i, l); - if (dolog) - mylog((n - found - 1) + "\t|part|\t" + i + " " + l); + if (log.isDebugEnabled()) + log.debug((n - found - 1) + "\t|part|\t" + i + " " + l); l = i; found++; break; @@ -217,8 +212,8 @@ private int allocateTail(int n, int m, int offset, boolean zero) throws IOExcept if (fat.isFreeEntry(i)) { fat.clearCluster(i); fat.set(i, l); - if (dolog) - mylog((n - found - 1) + "\t|zero|\t" + i + " " + l); + if (log.isDebugEnabled()) + log.debug((n - found - 1) + "\t|zero|\t" + i + " " + l); l = i; found++; } @@ -234,8 +229,8 @@ private int allocateTail(int n, int m, int offset, boolean zero) throws IOExcept } } - if (dolog) - mylog("LastFree: " + fat.getLastFree()); + if (log.isDebugEnabled()) + log.debug("LastFree: " + fat.getLastFree()); return l; } @@ -263,14 +258,14 @@ public void allocateAndClear(int n) throws IOException { int last = allocateTail(n, n - 1, 0, true); int first = getEndCluster(); - if (dolog) - mylog(first + ":" + last); + if (log.isDebugEnabled()) + log.debug(first + ":" + last); if (first != 0) fat.set(first, last); else { - if (dolog) - mylog("allocate chain"); + if (log.isDebugEnabled()) + log.debug("allocate chain"); setStartCluster(last); } } finally { @@ -287,8 +282,8 @@ public void free(int n) throws IOException { if (count < n) throw new IOException("not enough cluster: count[" + count + "] n[" + n + "]"); - if (dolog) - mylog("count[" + count + "] n[" + n + "]"); + if (log.isDebugEnabled()) + log.debug("count[" + count + "] n[" + n + "]"); ChainIterator i; @@ -297,16 +292,16 @@ public void free(int n) throws IOException { i = listIterator(count - n - 1); int l = i.next(); fat.set(l, fat.eofChain()); - if (dolog) - mylog(l + ":" + fat.eofChain()); + if (log.isDebugEnabled()) + log.debug(l + ":" + fat.eofChain()); } else i = listIterator(0); while (i.hasNext()) { int l = i.next(); fat.set(l, fat.freeEntry()); - if (dolog) - mylog(l + ":" + fat.freeEntry()); + if (log.isDebugEnabled()) + log.debug(l + ":" + fat.freeEntry()); } } finally { fat.flush(); @@ -314,8 +309,8 @@ public void free(int n) throws IOException { if (count == n) { setStartCluster(0); - if (dolog) - mylog("zero"); + if (log.isDebugEnabled()) + log.debug("zero"); } } @@ -356,15 +351,15 @@ public void read(long offset, ByteBuffer dst) throws IOException { throw new IOException("attempt to seek after End Of Chain " + offset, ex); } - for (int l = dst.remaining(), sz = p.getPartial(), ofs = p.getOffset(), size; l > 0; l -= - size, sz = p.getSize(), ofs = 0) { + for (int l = dst.remaining(), sz = p.getPartial(), ofs = p.getOffset(), size; l > 0; + l -= size, sz = p.getSize(), ofs = 0) { int cluster = i.next(); size = Math.min(sz, l); - if (dolog) - mylog("read " + size + " bytes from cluster " + cluster + " at offset " + ofs); + if (log.isDebugEnabled()) + log.debug("read " + size + " bytes from cluster " + cluster + " at offset " + ofs); int limit = dst.limit(); @@ -474,8 +469,8 @@ public void write(long length, long offset, ByteBuffer src) throws IOException { size = Math.min(sz, l); - if (dolog) - mylog("write " + size + " bytes to cluster " + cluster + " at offset " + ofs); + if 
(log.isDebugEnabled()) + log.debug("write " + size + " bytes to cluster " + cluster + " at offset " + ofs); int limit = src.limit(); From 3ca02ac4e60cdd7061da2db659bc49bb8d8368a5 Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Wed, 30 Oct 2019 09:44:33 +1100 Subject: [PATCH 21/35] FAT: code tidy up to help track down a defect. --- fs/src/fs/org/jnode/fs/jfat/FatDirEntry.java | 8 +-- .../fs/org/jnode/fs/jfat/FatDotDirEntry.java | 5 +- .../org/jnode/fs/jfat/FatEntriesFactory.java | 64 ++++++++----------- 3 files changed, 29 insertions(+), 48 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/jfat/FatDirEntry.java b/fs/src/fs/org/jnode/fs/jfat/FatDirEntry.java index 3a180a8aae..e752cb0ac0 100644 --- a/fs/src/fs/org/jnode/fs/jfat/FatDirEntry.java +++ b/fs/src/fs/org/jnode/fs/jfat/FatDirEntry.java @@ -38,7 +38,7 @@ public class FatDirEntry { protected final FatFileSystem fs; protected final FatMarshal entry; - protected int index; + protected final int index; private boolean lastDirEntry = false; private boolean freeDirEntry = false; @@ -70,12 +70,6 @@ public int getIndex() { return index; } - protected void setIndex(int value) { - if (value < 0) - throw new IllegalArgumentException("value<0"); - index = value; - } - public int length() { return entry.length(); } diff --git a/fs/src/fs/org/jnode/fs/jfat/FatDotDirEntry.java b/fs/src/fs/org/jnode/fs/jfat/FatDotDirEntry.java index 8499361066..f2483c6366 100644 --- a/fs/src/fs/org/jnode/fs/jfat/FatDotDirEntry.java +++ b/fs/src/fs/org/jnode/fs/jfat/FatDotDirEntry.java @@ -40,10 +40,7 @@ public FatDotDirEntry(FatFileSystem fs, boolean dotDot, FatShortDirEntry parent, int startCluster) throws IOException { super(fs); init(parent, startCluster); - if (!dotDot) { - setIndex(0); - } else { - setIndex(1); + if (dotDot) { lName[1] = dot; } encodeName(); diff --git a/fs/src/fs/org/jnode/fs/jfat/FatEntriesFactory.java b/fs/src/fs/org/jnode/fs/jfat/FatEntriesFactory.java index b5776f0391..3cb90da7e9 100644 --- a/fs/src/fs/org/jnode/fs/jfat/FatEntriesFactory.java +++ b/fs/src/fs/org/jnode/fs/jfat/FatEntriesFactory.java @@ -11,7 +11,6 @@ public class FatEntriesFactory implements Iterator { private boolean label; private int index; - private int next; private FatEntry entry; /** @@ -25,42 +24,39 @@ public class FatEntriesFactory implements Iterator { private FatDirectory directory; public FatEntriesFactory(FatDirectory directory, boolean includeDeleted) { - label = false; - index = 0; - next = 0; - entry = null; this.includeDeleted = includeDeleted; this.directory = directory; } + @Override + public boolean hasNext() { + if (entry == null) { + fetchNext(); + } + + return entry != null; + } + /** - * Returns the index of the entry the factory is up to. - * - * @return the index. + * Fetches the next entry into {@link #entry}. 
*/ - public int getIndex() { - return index; - } + protected void fetchNext() { + if (index > FatDirectory.MAXENTRIES) { + log.debug("Full Directory: invalid index " + index); + } - @Override - public boolean hasNext() { - int i; FatDirEntry dirEntry; FatRecord record = new FatRecord(); + int i = index; - if (index > FatDirectory.MAXENTRIES) - log.debug("Full Directory: invalid index " + index); - - for (i = index;; ) { - /* - * create a new entry from the chain - */ + while (true) { try { + // Read the next entry dirEntry = directory.getFatDirEntry(i, includeDeleted); i++; } catch (NoSuchElementException ex) { entry = null; - return false; + return; } catch (IOException ex) { log.debug("cannot read entry " + i); i++; @@ -101,30 +97,25 @@ public boolean hasNext() { } } else if (dirEntry.isLastDirEntry()) { entry = null; - return false; - } else - throw new UnsupportedOperationException( - "FatDirEntry is of unknown type, shouldn't happen"); + return; + } else { + throw new UnsupportedOperationException("FatDirEntry is of unknown type, shouldn't happen"); + } } - if (!dirEntry.isShortDirEntry()) + if (!dirEntry.isShortDirEntry()) { throw new UnsupportedOperationException("shouldn't happen"); + } record.close((FatShortDirEntry) dirEntry); - /* - * here recursion is in action for the entries factory it creates - * directory nodes and file leafs - */ if (((FatShortDirEntry) dirEntry).isDirectory()) { this.entry = createFatDirectory(record); } else { this.entry = createFatFile(record); } - this.next = i; - - return true; + index = i; } /** @@ -149,15 +140,14 @@ protected FatEntry createFatFile(FatRecord record) { @Override public FatEntry next() { - if (index == next) { - hasNext(); + if (entry == null) { + fetchNext(); } if (entry == null) { throw new NoSuchElementException(); } - index = next; return entry; } From 7ef2b8735369bd705c221742b3d4b517447467d4 Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Tue, 5 Nov 2019 08:02:27 +1100 Subject: [PATCH 22/35] FAT: attempt to fix a race condition getting entries by ID. --- fs/src/fs/org/jnode/fs/jfat/FatDirectory.java | 46 +++++++++++-------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/jfat/FatDirectory.java b/fs/src/fs/org/jnode/fs/jfat/FatDirectory.java index 6a10c29bd8..ae83283b23 100644 --- a/fs/src/fs/org/jnode/fs/jfat/FatDirectory.java +++ b/fs/src/fs/org/jnode/fs/jfat/FatDirectory.java @@ -22,8 +22,8 @@ import java.io.FileNotFoundException; import java.io.IOException; -import java.util.HashMap; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.Map; import java.util.NoSuchElementException; import org.jnode.fs.FSDirectory; @@ -38,7 +38,7 @@ public class FatDirectory extends FatEntry implements FSDirectory, FSDirectoryId /** * The map of ID -> entry. 
*/ - private final Map idMap = new HashMap(); + private final Map idMap = new LinkedHashMap(); /* * for root directory @@ -282,21 +282,23 @@ public synchronized FSEntry getEntry(String name) { } @Override - public FSEntry getEntryById(String id) throws IOException { - FatEntry child = idMap.get(id); + public FSEntry getEntryById(String id) { + synchronized (idMap) { + FatEntry child = idMap.get(id); - if (child == null) { - FatEntriesFactory f = createEntriesFactory(true); + if (child == null) { + FatEntriesFactory f = createEntriesFactory(true); - while (f.hasNext()) { - FatEntry entry = f.next(); - idMap.put(entry.getId(), entry); + while (f.hasNext()) { + FatEntry entry = f.next(); + idMap.put(entry.getId(), entry); + } + + return idMap.get(id); } - return idMap.get(id); + return child; } - - return child; } public FatEntry getEntryByShortName(byte[] shortName) { @@ -358,9 +360,11 @@ public synchronized FSEntry addFile(String name) throws IOException { FatFile file = new FatFile(getFatFileSystem(), this, record); file.flush(); - FatEntry entry = children.put(file); - idMap.put(entry.getId(), entry); - return entry; + synchronized (idMap) { + FatEntry entry = children.put(file); + idMap.put(entry.getId(), entry); + return entry; + } } public synchronized FSEntry addDirectory(String name) throws IOException { @@ -374,9 +378,11 @@ public synchronized FSEntry addDirectory(String name) throws IOException { dir.initialize(); dir.flush(); - FatEntry entry = children.put(dir); - idMap.put(entry.getId(), entry); - return entry; + synchronized (idMap) { + FatEntry entry = children.put(dir); + idMap.put(entry.getId(), entry); + return entry; + } } public synchronized void remove(String name) throws IOException { @@ -399,7 +405,9 @@ public synchronized void remove(String name) throws IOException { dir.flush(); } - idMap.remove(entry.getId()); + synchronized (idMap) { + idMap.remove(entry.getId()); + } } @Override From cf568d58d4b10023b612426b2372dc2922bf572b Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Thu, 7 Nov 2019 11:21:59 +1100 Subject: [PATCH 23/35] FAT: some additional debug logging. --- fs/src/fs/org/jnode/fs/jfat/FatDirectory.java | 32 ++++++++++++++++--- fs/src/fs/org/jnode/fs/jfat/FatEntry.java | 3 +- 2 files changed, 30 insertions(+), 5 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/jfat/FatDirectory.java b/fs/src/fs/org/jnode/fs/jfat/FatDirectory.java index ae83283b23..52299e84a4 100644 --- a/fs/src/fs/org/jnode/fs/jfat/FatDirectory.java +++ b/fs/src/fs/org/jnode/fs/jfat/FatDirectory.java @@ -8,29 +8,35 @@ * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * - * This library is distributed in the hope that it will be useful, but + * This library is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public * License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with this library; If not, write to the Free Software Foundation, Inc., + * along with this library; If not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ - + package org.jnode.fs.jfat; import java.io.FileNotFoundException; import java.io.IOException; import java.util.Iterator; import java.util.LinkedHashMap; +import java.util.Locale; import java.util.Map; import java.util.NoSuchElementException; +import org.apache.log4j.Logger; import org.jnode.fs.FSDirectory; import org.jnode.fs.FSDirectoryId; import org.jnode.fs.FSEntry; public class FatDirectory extends FatEntry implements FSDirectory, FSDirectoryId { + private static final Logger log = Logger.getLogger(FatEntriesFactory.class); + + private static final boolean debugEntries = Boolean.getBoolean("org.jnode.fs.jfat.dir.debugEntries"); + public static final int MAXENTRIES = 65535; // 2^16-1; fatgen 1.03, page 33 private final FatTable children = new FatTable(); @@ -287,6 +293,10 @@ public FSEntry getEntryById(String id) { FatEntry child = idMap.get(id); if (child == null) { + if (debugEntries) { + dumpEntriesToLog(); + } + FatEntriesFactory f = createEntriesFactory(true); while (f.hasNext()) { @@ -301,6 +311,20 @@ public FSEntry getEntryById(String id) { } } + private void dumpEntriesToLog() { + StringBuilder builder = new StringBuilder(); + FatEntriesFactory factory = createEntriesFactory(true); + + while (factory.hasNext()) { + FatEntry entry = factory.next(); + builder.append(String.format(Locale.ROOT, "%s index:%d\n", entry, entry.getIndex())); + } + + log.info("Directory Entries for: " + this + "\n" + + "--------------------------------------------------------------------------\n" + + builder + "\n\n\n"); + } + public FatEntry getEntryByShortName(byte[] shortName) { FatEntry child = null; FatEntriesFactory f = createEntriesFactory(false); diff --git a/fs/src/fs/org/jnode/fs/jfat/FatEntry.java b/fs/src/fs/org/jnode/fs/jfat/FatEntry.java index 41e9f6a3ad..9f4d8887d3 100644 --- a/fs/src/fs/org/jnode/fs/jfat/FatEntry.java +++ b/fs/src/fs/org/jnode/fs/jfat/FatEntry.java @@ -270,7 +270,8 @@ public String toStringValue() { } public String toString() { - return String.format("FatEntry:[dir:%b start-cluster:%d]:%s", isDirectory(), getStartCluster(), getName()); + return String.format("FatEntry:[dir:%b del:%b start-cluster:%d]:%s", isDirectory(), entry.isFreeDirEntry(), + getStartCluster(), getName()); } public String toDebugString() { From 3adefca6b0b2bfe24feb573ffe64bd7d497f521a Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Thu, 7 Nov 2019 11:58:08 +1100 Subject: [PATCH 24/35] FAT: remove the cache which had some bugs. 
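With the cache gone, every FAT entry access reads (and, on update, rewrites) the 512-byte sector containing it, decoded with the LittleEndian helpers, which is why Fat.flush() becomes a no-op. A self-contained sketch of that access pattern, using an in-memory byte array as a stand-in for the BlockDeviceAPI (UncachedFatSketch is illustrative only):

    public class UncachedFatSketch {
        private final byte[] device = new byte[4096];         // stand-in for the block device

        byte[] readSector(long sector) {
            byte[] buffer = new byte[512];
            System.arraycopy(device, (int) sector * 512, buffer, 0, 512);
            return buffer;
        }

        void writeSector(long sector, byte[] data) {
            System.arraycopy(data, 0, device, (int) sector * 512, 512);
        }

        long getUInt16(long position) {
            byte[] data = readSector(position / 512);
            int offset = (int) (position % 512);
            return (data[offset] & 0xff) | ((data[offset + 1] & 0xff) << 8);
        }

        void setInt16(long position, int value) {
            long sector = position / 512;
            byte[] data = readSector(sector);
            int offset = (int) (position % 512);
            data[offset] = (byte) (value & 0xff);
            data[offset + 1] = (byte) ((value >> 8) & 0xff);
            writeSector(sector, data);                        // every update goes straight to the device
        }

        public static void main(String[] args) {
            UncachedFatSketch fat = new UncachedFatSketch();
            fat.setInt16(1000, 0xFFF8);                       // FAT-16 end-of-chain marker
            System.out.println(Long.toHexString(fat.getUInt16(1000)));   // fff8
        }
    }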
--- fs/src/fs/org/jnode/fs/jfat/Fat.java | 51 +- fs/src/fs/org/jnode/fs/jfat/FatCache.java | 442 ------------------ .../org/jnode/fs/jfat/FatEntriesFactory.java | 4 +- .../fs/org/jnode/fs/jfat/FatFileSystem.java | 1 - 4 files changed, 35 insertions(+), 463 deletions(-) delete mode 100644 fs/src/fs/org/jnode/fs/jfat/FatCache.java diff --git a/fs/src/fs/org/jnode/fs/jfat/Fat.java b/fs/src/fs/org/jnode/fs/jfat/Fat.java index cbbbc56e30..1556ff8763 100644 --- a/fs/src/fs/org/jnode/fs/jfat/Fat.java +++ b/fs/src/fs/org/jnode/fs/jfat/Fat.java @@ -25,6 +25,7 @@ import java.util.Arrays; import org.jnode.driver.block.BlockDeviceAPI; import org.jnode.fs.FileSystemException; +import org.jnode.util.LittleEndian; /** @@ -35,8 +36,6 @@ public abstract class Fat { private final BlockDeviceAPI api; private final BootSector bs; - private final FatCache cache; - private int lastfree; private final ByteBuffer clearbuf; @@ -45,11 +44,6 @@ protected Fat(BootSector bs, BlockDeviceAPI api) { this.bs = bs; this.api = api; - /* - * create a suitable cache - */ - cache = new FatCache(this, 8192, 512); - /* * set lastfree */ @@ -223,20 +217,46 @@ public final boolean isFree(int entry) { return (entry == freeEntry()); } + byte[] readSector(long sector) throws IOException { + byte[] buffer = new byte[512]; + api.read(sector * 512, ByteBuffer.wrap(buffer)); + return buffer; + } + public long getUInt16(int index) throws IOException { - return cache.getUInt16(index); + long position = position(0, index); + int offset = (int) (position % 512); + byte[] data = readSector(position / 512); + return LittleEndian.getUInt16(data, offset); } public long getUInt32(int index) throws IOException { - return cache.getUInt32(index); + long position = position(0, index); + int offset = (int) (position % 512); + byte[] data = readSector(position / 512); + return LittleEndian.getUInt32(data, offset); + } + + void writeSector(long sector, byte[] data) throws IOException { + api.write(sector * 512, ByteBuffer.wrap(data)); } public void setInt16(int index, int element) throws IOException { - cache.setInt16(index, element); + long position = position(0, index); + int offset = (int) (position % 512); + byte[] data = readSector(position / 512); + + LittleEndian.setInt16(data, offset, element); + writeSector(position / 512, data); } public void setInt32(int index, int element) throws IOException { - cache.setInt32(index, element); + long position = position(0, index); + int offset = (int) (position % 512); + byte[] data = readSector(position / 512); + + LittleEndian.setInt32(data, offset, element); + writeSector(position / 512, data); } public abstract int get(int index) throws IOException; @@ -244,7 +264,7 @@ public void setInt32(int index, int element) throws IOException { public abstract int set(int index, int element) throws IOException; public void flush() throws IOException { - cache.flush(); + // Ignore, currently flushing each value as it is set } public final boolean isFreeEntry(int entry) throws IOException { @@ -285,13 +305,6 @@ public final boolean isFat12() { return getBootSector().isFat12(); } - public String getCacheStat() { - StrWriter out = new StrWriter(); - out.println("Access: " + cache.getAccess() + " Hits: " + cache.getHit() + " Ratio: " + - cache.getRatio() * 100 + "%"); - return out.toString(); - } - public String toString() { return String.format("FAT cluster:%d boot sector: %s", getClusterSize(), getBootSector()); } diff --git a/fs/src/fs/org/jnode/fs/jfat/FatCache.java b/fs/src/fs/org/jnode/fs/jfat/FatCache.java 
deleted file mode 100644 index 459443a2a8..0000000000 --- a/fs/src/fs/org/jnode/fs/jfat/FatCache.java +++ /dev/null @@ -1,442 +0,0 @@ -/* - * $Id$ - * - * Copyright (C) 2003-2015 JNode.org - * - * This library is free software; you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; either version 2.1 of the License, or - * (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public - * License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this library; If not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - */ - -package org.jnode.fs.jfat; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.Stack; -import org.jnode.driver.block.BlockDeviceAPI; -import org.jnode.util.LittleEndian; - - -public class FatCache { - - private final float loadFactor = 0.75f; - - private final Fat fat; - private final BlockDeviceAPI api; - private final long fatsize; - private final int nrfats; - - private int elementSize; - - private CacheMap map; - - private long access = 0; - private long hit = 0; - - public FatCache(Fat fat, int cacheSize, int elementSize) { - this.fat = fat; - this.api = fat.getApi(); - this.fatsize = - fat.getBootSector().getSectorsPerFat() * fat.getBootSector().getBytesPerSector(); - this.nrfats = fat.getBootSector().getNrFats(); - this.elementSize = elementSize; - - // allocate the LinkedHashMap - // that do the dirty LRU job - this.map = new CacheMap(cacheSize); - } - - public int getCacheSize() { - return map.getCacheSize(); - } - - public int usedEntries() { - return map.usedEntries(); - } - - public int freeEntries() { - return map.freeEntries(); - } - - private CacheElement put(long address) throws IOException { - /** - * get a CacheElement from the stack object pool - */ - CacheElement c = map.pop(); - - /** - * read the element from the device - */ - c.read(address); - - /** - * and insert the element into the LinkedHashMap - */ - map.put(c); - - /** - * stack "must" contains at least one entry the placeholder ... so let - * it throw an exception if this is false - */ - CacheElement e = map.peek(); - // if an element was discarded from the LRU cache - // now we can free it ... 
this will send the element - // to storage if is marked as dirty - if (!e.isFree()) - e.free(); - - return c; - } - - private CacheElement get(long address) throws IOException { - CacheElement c = map.get(address); - access++; - - // if the cache contains the element just return it, we have a cache hit - // this will update the LRU order: the LinkedHashMap will make it the - // newest - // - // the cache element cannot be null so we can avoid to call - // containsKey(); - if (c != null) - hit++; - // otherwise put a new element inside the cache - // possibly flushing and discarding the eldest element - else - c = put(address); - - return c; - } - - private long getUInt16(long offset) throws IOException { - long addr = offset / elementSize; - int ofs = (int) (offset % elementSize); - - byte[] data = get(addr).getData(); - return LittleEndian.getUInt16(data, ofs); - } - - private long getUInt32(long offset) throws IOException { - long addr = (long) (offset / elementSize); - int ofs = (int) (offset % elementSize); - - byte[] data = get(addr).getData(); - return LittleEndian.getUInt32(data, ofs); - } - - private void setInt16(long offset, int value) throws IOException { - long addr = offset / elementSize; - int ofs = (int) (offset % elementSize); - - CacheElement c = get(addr); - byte[] data = c.getData(); - - LittleEndian.setInt16(data, ofs, value); - - c.setDirty(); - } - - private void setInt32(long offset, int value) throws IOException { - long addr = (long) (offset / elementSize); - int ofs = (int) (offset % elementSize); - - CacheElement c = get(addr); - byte[] data = c.getData(); - - LittleEndian.setInt32(data, ofs, value); - - c.setDirty(); - } - - public long getUInt16(int index) throws IOException { - return getUInt16(fat.position(0, index)); - } - - public long getUInt32(int index) throws IOException { - return getUInt32(fat.position(0, index)); - } - - public void setInt16(int index, int element) throws IOException { - setInt16(fat.position(0, index), element); - } - - public void setInt32(int index, int element) throws IOException { - setInt32(fat.position(0, index), element); - } - - public void flush(long address) throws IOException { - CacheElement c = map.get(address); - if (c != null) - c.flush(); - } - - public void flush() throws IOException { - for (CacheElement c : map.values()) { - c.flush(); - } - } - - public long getHit() { - return hit; - } - - public long getAccess() { - return access; - } - - public double getRatio() { - if (access > 0) - return ((double) hit / (double) access); - else - return 0.0f; - } - - public String flushOrder() { - return map.flushOrder(); - } - - public String toString() { - StrWriter out = new StrWriter(); - - out.print(map); - out.println("size=" + getCacheSize() + " used=" + usedEntries() + " free=" + freeEntries()); - - return out.toString(); - } - - private class CacheMap extends LinkedHashMap { - private static final long serialVersionUID = 1L; - private final int cacheSize; - private final CacheKey key = new CacheKey(); - private final Stack free = new Stack(); - - private CacheMap(int cacheSize) { - super((int) Math.ceil(cacheSize / loadFactor) + 1, loadFactor, true); - this.cacheSize = cacheSize; - - for (int i = 0; i < cacheSize + 1; i++) - free.push(new CacheElement()); - } - - private int getCacheSize() { - return cacheSize; - } - - private int usedEntries() { - return size(); - } - - private int freeEntries() { - return (free.size() - 1); - } - - private CacheElement peek() { - return free.peek(); - } - - private CacheElement 
push(CacheElement c) { - return free.push(c); - } - - private CacheElement pop() { - return free.pop(); - } - - private CacheElement get(long address) { - key.set(address); - return get(key); - } - - private CacheElement put(CacheElement c) { - return put(c.getAddress(), c); - } - - /** - * discard the eldest element when the cache is full - */ - protected boolean removeEldestEntry(Map.Entry eldest) { - boolean remove = (size() > cacheSize); - - /** - * before going to discard the eldest push it back on the stacked - * object pool - */ - if (remove) - push(eldest.getValue()); - - return remove; - } - - public String flushOrder() { - StrWriter out = new StrWriter(); - - for (CacheElement c : values()) { - if (c.isDirty()) - out.print("<" + c.getAddress().get() + ">"); - } - - return out.toString(); - } - - public String toString() { - StrWriter out = new StrWriter(); - - for (CacheElement c : values()) - out.println(c); - - return out.toString(); - } - } - - /** - * Here we need to "wrap" a long because Java Long wrapper is an "immutable" - * type - */ - private class CacheKey { - private static final long FREE = -1; - - private long key; - - private CacheKey(long key) { - this.key = key; - } - - private CacheKey() { - free(); - } - - private void free() { - key = FREE; - } - - private boolean isFree() { - return (key == FREE); - } - - private long get() { - return key; - } - - private void set(long value) { - key = value; - } - - public int hashCode() { - return (int) (key ^ (key >>> 32)); - } - - public boolean equals(Object obj) { - return obj instanceof CacheKey && key == ((CacheKey) obj).get(); - } - - public String toString() { - return String.valueOf(key); - } - } - - private class CacheElement { - /** - * CacheKey element is allocated and its reference is stored here to - * avoid to allocate new CacheKey objects at runtime - *

- * In this way .. just one global key will be enough to access - * CacheElements - */ - private boolean dirty; - private CacheKey address; - private final ByteBuffer elem; - - private CacheElement() { - this.dirty = false; - this.address = new CacheKey(); - - // FAT-12 reads in two byte chunks so add an extra element to prevent an array index out of bounds exception - // when reading in the last element - this.elem = ByteBuffer.wrap(new byte[elementSize + 1]); - } - - private boolean isFree() { - return address.isFree(); - } - - private CacheKey getAddress() { - return address; - } - - private byte[] getData() { - return elem.array(); - } - - /** - * some more work is needed in read and write to handle the multiple fat - * availability we have to correcly handle the exception to be sure that - * if we have at least a correct fat we get it - gvt - */ - private void read(long address) throws IOException { - if (!isFree()) - throw new IllegalArgumentException("cannot read a busy element"); - - this.address.set(address); - elem.clear(); - api.read(address * elementSize, elem); - elem.clear(); - } - - private void write() throws IOException { - if (isFree()) - throw new IllegalArgumentException("cannot write a free element"); - - elem.clear(); - - long addr = address.get() * elementSize; - - for (int i = 0; i < nrfats; i++) { - api.write(addr, elem); - addr += fatsize; - elem.clear(); - } - } - - private boolean isDirty() { - return dirty; - } - - private void setDirty() { - dirty = true; - } - - private void flush() throws IOException { - if (isDirty()) { - write(); - dirty = false; - } - } - - private void free() throws IOException { - if (isFree()) - throw new IllegalArgumentException("cannot free a free element"); - flush(); - address.free(); - } - - public String toString() { - StrWriter out = new StrWriter(); - - out.print("address=" + address.get() + " dirty=" + dirty); - - return out.toString(); - } - } -} diff --git a/fs/src/fs/org/jnode/fs/jfat/FatEntriesFactory.java b/fs/src/fs/org/jnode/fs/jfat/FatEntriesFactory.java index 3cb90da7e9..fe58ba1e48 100644 --- a/fs/src/fs/org/jnode/fs/jfat/FatEntriesFactory.java +++ b/fs/src/fs/org/jnode/fs/jfat/FatEntriesFactory.java @@ -148,7 +148,9 @@ public FatEntry next() { throw new NoSuchElementException(); } - return entry; + FatEntry result = entry; + entry = null; + return result; } @Override diff --git a/fs/src/fs/org/jnode/fs/jfat/FatFileSystem.java b/fs/src/fs/org/jnode/fs/jfat/FatFileSystem.java index a8874332f8..d9c4a6404a 100644 --- a/fs/src/fs/org/jnode/fs/jfat/FatFileSystem.java +++ b/fs/src/fs/org/jnode/fs/jfat/FatFileSystem.java @@ -88,7 +88,6 @@ protected FatRootDirectory createRootEntry() throws IOException { public void flush() throws IOException { super.flush(); fat.flush(); - log.debug(getFat().getCacheStat()); } @Override From d8b8ef4c0daacc23c4c9ccdf5e3cefb03d651dac Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Thu, 7 Nov 2019 15:38:32 +1100 Subject: [PATCH 25/35] FAT: Fix a bug introduced in the removal of the cache. 
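FAT-12 entries are decoded as two-byte little-endian reads, so an entry whose first byte sits at offset 511 of a sector reaches one byte past a 512-byte buffer; padding the buffer by one byte avoids the ArrayIndexOutOfBoundsException. A minimal illustration of the edge case (Fat12EdgeSketch is a stand-alone example, not JNode code):

    public class Fat12EdgeSketch {
        static int getUInt16(byte[] data, int offset) {
            return (data[offset] & 0xff) | ((data[offset + 1] & 0xff) << 8);
        }

        public static void main(String[] args) {
            byte[] padded = new byte[512 + 1];                // what the patch now allocates
            byte[] exact = new byte[512];                     // what it allocated before the fix
            System.out.println(getUInt16(padded, 511));       // fine: reads indexes 511 and 512
            System.out.println(getUInt16(exact, 511));        // throws ArrayIndexOutOfBoundsException
        }
    }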
--- fs/src/fs/org/jnode/fs/jfat/Fat.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/src/fs/org/jnode/fs/jfat/Fat.java b/fs/src/fs/org/jnode/fs/jfat/Fat.java index 1556ff8763..9b5fc7206b 100644 --- a/fs/src/fs/org/jnode/fs/jfat/Fat.java +++ b/fs/src/fs/org/jnode/fs/jfat/Fat.java @@ -218,7 +218,9 @@ public final boolean isFree(int entry) { } byte[] readSector(long sector) throws IOException { - byte[] buffer = new byte[512]; + // FAT-12 reads in two byte chunks so add an extra element to prevent an array index out of bounds exception + // when reading in the last element + byte[] buffer = new byte[512 + 1]; api.read(sector * 512, ByteBuffer.wrap(buffer)); return buffer; } From 4dfa831ce82db72db89621e0837ec368958e09eb Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Mon, 16 Dec 2019 18:30:07 +1100 Subject: [PATCH 26/35] NTFS: fix an issue where trying to read the MFT would cause an infinite loop in some rare cases. --- fs/src/fs/org/jnode/fs/ntfs/FileRecord.java | 70 ++++++++++++++----- .../org/jnode/fs/ntfs/FileRecordSupplier.java | 17 +++++ 2 files changed, 69 insertions(+), 18 deletions(-) create mode 100644 fs/src/fs/org/jnode/fs/ntfs/FileRecordSupplier.java diff --git a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java index a56dc4bb0e..e12e79e826 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java +++ b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.NoSuchElementException; @@ -422,7 +423,17 @@ public synchronized List getAllAttributes() { attributeList = new ArrayList(getAllStoredAttributes()); } else { log.debug("Attributes in attribute list"); - readAttributeListAttributes(); + attributeList = readAttributeListAttributes(new FileRecordSupplier() { + @Override + public FileRecord getRecord(long referenceNumber) throws IOException { + // When reading the MFT itself don't attempt to check the index is in range + // (we won't know the total MFT length yet) + MasterFileTable mft = getVolume().getMFT(); + return getReferenceNumber() == MasterFileTable.SystemFiles.MFT + ? 
mft.getRecordUnchecked(referenceNumber) + : mft.getRecord(referenceNumber); + } + }); } } catch (Exception e) { log.error("Error getting attributes for file record: " + referenceNumber + @@ -490,7 +501,24 @@ public Iterator findAttributesByTypeAndName(final int attrTypeID, if (log.isDebugEnabled()) { log.debug("findAttributesByTypeAndName(0x" + NumberUtils.hex(attrTypeID, 4) + "," + name + ")"); } - return new FilteredAttributeIterator(getAllAttributes().iterator()) { + + Iterator attributeIterator = getAllAttributes().iterator(); + + if (attrTypeID == NTFSAttribute.Types.DATA && referenceNumber == MasterFileTable.SystemFiles.MFT) { + List attributes = new ArrayList(); + attributes.addAll(getAllStoredAttributes()); + attributes.addAll(readAttributeListAttributes(new FileRecordSupplier() { + @Override + public FileRecord getRecord(long referenceNumber) { + // When trying to get the $DATA attribute of the MFT, don't attempt to look up any other records + // to avoid possible infinite recursion + return null; + } + })); + attributeIterator = attributes.iterator(); + } + + return new FilteredAttributeIterator(attributeIterator) { @Override protected boolean matches(NTFSAttribute attr) { if (attr.getAttributeType() == attrTypeID) { @@ -691,12 +719,19 @@ public String toString() { /** * Reads in all attributes referenced by the attribute-list attribute. + * + * @param recordSupplier the FILE record supplier. + * @return the list of attributes. */ - private synchronized void readAttributeListAttributes() { + private List readAttributeListAttributes(FileRecordSupplier recordSupplier) { Iterator entryIterator; try { - entryIterator = getAttributeListAttribute().getAllEntries(); + AttributeListAttribute attributeListAttribute = getAttributeListAttribute(); + if (attributeListAttribute == null) { + return Collections.emptyList(); + } + entryIterator = attributeListAttribute.getAllEntries(); } catch (Exception e) { throw new IllegalStateException("Error getting attributes from attribute list, file record: " + referenceNumber, e); @@ -719,20 +754,19 @@ private synchronized void readAttributeListAttributes() { log.debug("Looking up MFT entry for: " + entry.getFileReferenceNumber()); } - // When reading the MFT itself don't attempt to check the index is in range (we won't know the total - // MFT length yet) - MasterFileTable mft = getVolume().getMFT(); - FileRecord holdingRecord = getReferenceNumber() == MasterFileTable.SystemFiles.MFT - ? 
mft.getRecordUnchecked(entry.getFileReferenceNumber()) - : mft.getRecord(entry.getFileReferenceNumber()); - - attribute = holdingRecord.findStoredAttributeByID(entry.getAttributeID()); - - if (attribute == null) { - log.error(String.format("Failed to find an attribute matching entry '%s' in the holding record, ref=%d", - entry, referenceNumber)); + FileRecord holdingRecord = recordSupplier.getRecord(entry.getFileReferenceNumber()); + if (holdingRecord == null) { + log.error(String.format("Failed to look up holding record %d for entry '%s'", referenceNumber, + entry)); } else { - attributeListBuilder.add(attribute); + attribute = holdingRecord.findStoredAttributeByID(entry.getAttributeID()); + + if (attribute == null) { + log.error(String.format("Failed to find an attribute matching entry '%s' in the holding " + + "record, ref=%d", entry, referenceNumber)); + } else { + attributeListBuilder.add(attribute); + } } } } catch (Exception e) { @@ -741,7 +775,7 @@ private synchronized void readAttributeListAttributes() { } } - attributeList = attributeListBuilder.toList(); + return attributeListBuilder.toList(); } /** diff --git a/fs/src/fs/org/jnode/fs/ntfs/FileRecordSupplier.java b/fs/src/fs/org/jnode/fs/ntfs/FileRecordSupplier.java new file mode 100644 index 0000000000..061067a986 --- /dev/null +++ b/fs/src/fs/org/jnode/fs/ntfs/FileRecordSupplier.java @@ -0,0 +1,17 @@ +package org.jnode.fs.ntfs; + +import java.io.IOException; + +/** + * A FILE record supplier. + */ +public interface FileRecordSupplier { + /** + * Gets a record. + * + * @param referenceNumber the reference number. + * @return the record, or {@code null} if the record cannot be looked up. + * @throws IOException if an error occurs. + */ + FileRecord getRecord(long referenceNumber) throws IOException; +} From 1b60b3b7ae5f9ab869fc94c648a120adce25bf2e Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Thu, 23 Jan 2020 14:55:08 +1100 Subject: [PATCH 27/35] NTFS: fix an issue where trying to read the MFT would cause an infinite loop in some rare cases --- fs/src/fs/org/jnode/fs/ntfs/FileRecord.java | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java index e12e79e826..cb2ef7913c 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java +++ b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java @@ -502,7 +502,7 @@ public Iterator findAttributesByTypeAndName(final int attrTypeID, log.debug("findAttributesByTypeAndName(0x" + NumberUtils.hex(attrTypeID, 4) + "," + name + ")"); } - Iterator attributeIterator = getAllAttributes().iterator(); + Iterator attributeIterator; if (attrTypeID == NTFSAttribute.Types.DATA && referenceNumber == MasterFileTable.SystemFiles.MFT) { List attributes = new ArrayList(); @@ -516,6 +516,8 @@ public FileRecord getRecord(long referenceNumber) { } })); attributeIterator = attributes.iterator(); + } else { + attributeIterator = getAllAttributes().iterator(); } return new FilteredAttributeIterator(attributeIterator) { From e83ab788315dbbc34d3bc5e66e0b080653bae424 Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Wed, 12 Feb 2020 11:51:36 +1100 Subject: [PATCH 28/35] FS: remove usage of an internal Sun class. 
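UnixFSPrincipal is a plain java.security.Principal, so the call site that previously used com.sun.security.auth.UserPrincipal essentially only for its name works unchanged. A short usage sketch (PrincipalSketch is illustrative; the constructor, getName() and equals() are those added in this patch):

    import java.security.Principal;

    import org.jnode.fs.spi.UnixFSPrincipal;

    public class PrincipalSketch {
        public static void main(String[] args) {
            Principal owner = new UnixFSPrincipal("root");
            System.out.println(owner.getName());                            // root
            System.out.println(owner.equals(new UnixFSPrincipal("root")));  // true
        }
    }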
--- .../org/jnode/fs/spi/UnixFSAccessRights.java | 5 +- .../fs/org/jnode/fs/spi/UnixFSPrincipal.java | 58 +++++++++++++++++++ 2 files changed, 59 insertions(+), 4 deletions(-) create mode 100644 fs/src/fs/org/jnode/fs/spi/UnixFSPrincipal.java diff --git a/fs/src/fs/org/jnode/fs/spi/UnixFSAccessRights.java b/fs/src/fs/org/jnode/fs/spi/UnixFSAccessRights.java index 6b18842daa..f1ba63a752 100644 --- a/fs/src/fs/org/jnode/fs/spi/UnixFSAccessRights.java +++ b/fs/src/fs/org/jnode/fs/spi/UnixFSAccessRights.java @@ -22,12 +22,9 @@ import java.security.Principal; import java.security.acl.Group; - import org.jnode.fs.FSAccessRights; import org.jnode.fs.FileSystem; -import com.sun.security.auth.UserPrincipal; - /** * * @author Fabien DUMINY (fduminy at jnode.org) @@ -50,7 +47,7 @@ public UnixFSAccessRights(FileSystem filesystem) { this.filesystem = filesystem; // TODO manages users & groups in JNode - owner = new UserPrincipal("root"); + owner = new UnixFSPrincipal("root"); group = new UnixFSGroup("admins"); group.addMember(owner); } diff --git a/fs/src/fs/org/jnode/fs/spi/UnixFSPrincipal.java b/fs/src/fs/org/jnode/fs/spi/UnixFSPrincipal.java new file mode 100644 index 0000000000..173b38c7f7 --- /dev/null +++ b/fs/src/fs/org/jnode/fs/spi/UnixFSPrincipal.java @@ -0,0 +1,58 @@ +/* + * $Id$ + * + * Copyright (C) 2003-2015 JNode.org + * + * This library is free software; you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; either version 2.1 of the License, or + * (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this library; If not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + */ + +package org.jnode.fs.spi; + +import java.security.Principal; + +public class UnixFSPrincipal implements Principal { + + private final String name; + + public UnixFSPrincipal(String name) { + this.name = name; + } + + @Override + public String getName() { + return name; + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + if (object instanceof UnixFSPrincipal) { + return name.equals(((UnixFSPrincipal) object).getName()); + } + return false; + } + + @Override + public int hashCode() { + return name.hashCode(); + } + + @Override + public String toString() { + return name; + } +} From 33adc9db26370090646ffe1a3ff2fd562eb7b959 Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Wed, 12 Feb 2020 11:52:11 +1100 Subject: [PATCH 29/35] NTFS: Skip lookups for invalid entries. 
--- fs/src/fs/org/jnode/fs/ntfs/FileRecord.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java index cb2ef7913c..19b5dc829b 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java +++ b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java @@ -752,6 +752,11 @@ private List readAttributeListAttributes(FileRecordSupplier recor attribute = findStoredAttributeByID(entry.getAttributeID()); attributeListBuilder.add(attribute); } else { + if (entry.getFileReferenceNumber() == 0) { + log.debug("Skipping lookup for entry: " + entry); + continue; + } + if (log.isDebugEnabled()) { log.debug("Looking up MFT entry for: " + entry.getFileReferenceNumber()); } From 7ea7b8c260dd882f40cc8f18387a4f98e1d1a44a Mon Sep 17 00:00:00 2001 From: vtan01 Date: Tue, 16 Jun 2020 12:40:58 +1000 Subject: [PATCH 30/35] ignore CRC and name hash checks --- fs/src/fs/org/jnode/fs/exfat/DirectoryParser.java | 14 ++++++++------ fs/src/fs/org/jnode/fs/exfat/NodeDirectory.java | 8 ++++---- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/exfat/DirectoryParser.java b/fs/src/fs/org/jnode/fs/exfat/DirectoryParser.java index f1292119b9..38fad78ddd 100644 --- a/fs/src/fs/org/jnode/fs/exfat/DirectoryParser.java +++ b/fs/src/fs/org/jnode/fs/exfat/DirectoryParser.java @@ -51,13 +51,13 @@ public class DirectoryParser { private static final int FLAG_CONTIGUOUS = 3; public static DirectoryParser create(Node node) throws IOException { - return create(node, false); + return create(node, false, false); } - public static DirectoryParser create(Node node, boolean showDeleted) throws IOException { + public static DirectoryParser create(Node node, boolean showDeleted, boolean performChecks) throws IOException { assert (node.isDirectory()) : "not a directory"; //NOI18N - final DirectoryParser result = new DirectoryParser(node, showDeleted); + final DirectoryParser result = new DirectoryParser(node, showDeleted, performChecks); result.init(); return result; } @@ -66,13 +66,15 @@ public static DirectoryParser create(Node node, boolean showDeleted) throws IOEx private final ByteBuffer chunk; private final Node node; private boolean showDeleted; + private boolean performChecks; private long cluster; private UpcaseTable upcase; private int index; - private DirectoryParser(Node node, boolean showDeleted) { + private DirectoryParser(Node node, boolean showDeleted, boolean performChecks) { this.node = node; this.showDeleted = showDeleted; + this.performChecks = performChecks; this.sb = node.getSuperBlock(); this.chunk = ByteBuffer.allocate(sb.getBytesPerCluster()); this.chunk.order(ByteOrder.LITTLE_ENDIAN); @@ -272,13 +274,13 @@ private void parseFile(Visitor v, boolean deleted) throws IOException { } } - if (!deleted && referenceChecksum != actualChecksum) { + if (performChecks && !deleted && referenceChecksum != actualChecksum) { throw new IOException("checksum mismatch"); } final String name = nameBuilder.toString(); - if ((this.upcase != null) && (hashName(name) != nameHash)) { + if (performChecks && (this.upcase != null) && (hashName(name) != nameHash)) { throw new IOException("name hash mismatch (" + Integer.toHexString(hashName(name)) + " != " + Integer.toHexString(nameHash) + ")"); diff --git a/fs/src/fs/org/jnode/fs/exfat/NodeDirectory.java b/fs/src/fs/org/jnode/fs/exfat/NodeDirectory.java index 4259090231..c6fcb7d302 100644 --- a/fs/src/fs/org/jnode/fs/exfat/NodeDirectory.java +++ 
b/fs/src/fs/org/jnode/fs/exfat/NodeDirectory.java @@ -43,10 +43,10 @@ public class NodeDirectory extends AbstractFSObject implements FSDirectory, FSDi public NodeDirectory(ExFatFileSystem fs, NodeEntry nodeEntry) throws IOException { - this(fs, nodeEntry, false); + this(fs, nodeEntry, false, false); } - public NodeDirectory(ExFatFileSystem fs, NodeEntry nodeEntry, boolean showDeleted) + public NodeDirectory(ExFatFileSystem fs, NodeEntry nodeEntry, boolean showDeleted, boolean performChecks) throws IOException { super(fs); @@ -57,7 +57,7 @@ public NodeDirectory(ExFatFileSystem fs, NodeEntry nodeEntry, boolean showDelete this.idToNode = new LinkedHashMap(); DirectoryParser. - create(nodeEntry.getNode(), showDeleted). + create(nodeEntry.getNode(), showDeleted, performChecks). setUpcase(this.upcase). parse(new VisitorImpl()); @@ -145,7 +145,7 @@ public void foundBitmap( @Override public void foundUpcaseTable(DirectoryParser parser, long checksum, long startCluster, long size) { - + /* ignore */ } From 6c066d4f3d846f4d8defdbfd1c8a41a2da051619 Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Thu, 25 Jun 2020 10:14:21 +1000 Subject: [PATCH 31/35] NTFS: Fix an issue with 4k sector sizes. The fix-up should always apply on 512 byte sectors. --- fs/src/fs/org/jnode/fs/ntfs/FileRecord.java | 7 +++---- .../fs/org/jnode/fs/ntfs/MasterFileTable.java | 5 ++--- fs/src/fs/org/jnode/fs/ntfs/NTFSRecord.java | 15 +++------------ .../fs/org/jnode/fs/ntfs/index/IndexBlock.java | 2 +- .../fs/org/jnode/fs/ntfs/logfile/LogFile.java | 17 +++++++---------- .../jnode/fs/ntfs/logfile/RecordPageHeader.java | 6 ++---- .../fs/ntfs/logfile/RestartPageHeader.java | 6 ++---- 7 files changed, 20 insertions(+), 38 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java index 19b5dc829b..6e61329cc4 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java +++ b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java @@ -94,7 +94,7 @@ public class FileRecord extends NTFSRecord { * @param offset offset into the buffer. */ public FileRecord(NTFSVolume volume, long referenceNumber, byte[] buffer, int offset) throws IOException { - this(volume, volume.getBootRecord().getBytesPerSector(), volume.getClusterSize(), true, referenceNumber, + this(volume, volume.getClusterSize(), true, referenceNumber, buffer, offset); } @@ -102,17 +102,16 @@ public FileRecord(NTFSVolume volume, long referenceNumber, byte[] buffer, int of * Initialize this instance. * * @param volume reference to the NTFS volume. - * @param bytesPerSector the number of bytes-per-sector in this volume. * @param clusterSize the cluster size for the volume containing this record. * @param strictFixUp indicates whether an exception should be throw if fix-up values don't match. * @param referenceNumber the reference number of the file within the MFT. * @param buffer data buffer. * @param offset offset into the buffer. 
*/ - public FileRecord(NTFSVolume volume, int bytesPerSector, int clusterSize, boolean strictFixUp, long referenceNumber, + public FileRecord(NTFSVolume volume, int clusterSize, boolean strictFixUp, long referenceNumber, byte[] buffer, int offset) throws IOException { - super(bytesPerSector, strictFixUp, buffer, offset); + super(strictFixUp, buffer, offset); this.volume = volume; this.clusterSize = clusterSize; diff --git a/fs/src/fs/org/jnode/fs/ntfs/MasterFileTable.java b/fs/src/fs/org/jnode/fs/ntfs/MasterFileTable.java index 6c671867ef..ffedd04357 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/MasterFileTable.java +++ b/fs/src/fs/org/jnode/fs/ntfs/MasterFileTable.java @@ -142,15 +142,14 @@ public MasterFileTable(NTFSVolume volume, byte[] buffer, int offset) throws IOEx * Creates a new MFT instance. * * @param volume the NTFS volume. - * @param bytesPerSector the bytes per-sector. * @param clusterSize the cluster size. * @param strictFixUp indicates whether to throw an exception if a fix-up error is detected. * @param buffer the buffer to read from. * @param offset the offset to read at. * @throws IOException if an error occurs creating the MFT. */ - public MasterFileTable(NTFSVolume volume, int bytesPerSector, int clusterSize, boolean strictFixUp, byte[] buffer, int offset) throws IOException { - super(volume, bytesPerSector, clusterSize, strictFixUp, SystemFiles.MFT, buffer, offset); + public MasterFileTable(NTFSVolume volume, int clusterSize, boolean strictFixUp, byte[] buffer, int offset) throws IOException { + super(volume, clusterSize, strictFixUp, SystemFiles.MFT, buffer, offset); } /** diff --git a/fs/src/fs/org/jnode/fs/ntfs/NTFSRecord.java b/fs/src/fs/org/jnode/fs/ntfs/NTFSRecord.java index d58bb1d224..4014d519c8 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/NTFSRecord.java +++ b/fs/src/fs/org/jnode/fs/ntfs/NTFSRecord.java @@ -62,36 +62,27 @@ public static class Magic { public static final int INDX = 0x58444e49; } - /** - * The bytes-pre-sector in this NTFS volume. - */ - private final int bytesPerSector; - /** * Creates a new record. * - * @param bytesPerSector the bytes-pre-sector in this NTFS volume. * @param strictFixUp indicates whether an exception should be throw if fix-up values don't match. * @param buffer the buffer to read from. * @param offset the offset in the buffer to read from. */ - public NTFSRecord(int bytesPerSector, boolean strictFixUp, byte[] buffer, int offset) throws IOException { + public NTFSRecord(boolean strictFixUp, byte[] buffer, int offset) throws IOException { super(buffer, offset); - this.bytesPerSector = bytesPerSector; fixUp(strictFixUp); } /** * Creates a new record. * - * @param bytesPerSector the bytes-pre-sector in this NTFS volume. * @param strictFixUp indicates whether an exception should be throw if fix-up values don't match. * @param parent the parent structure. * @param offset the offset in the parent to read from. 
*/ - public NTFSRecord(int bytesPerSector, boolean strictFixUp, NTFSStructure parent, int offset) throws IOException { + public NTFSRecord(boolean strictFixUp, NTFSStructure parent, int offset) throws IOException { super(parent, offset); - this.bytesPerSector = bytesPerSector; fixUp(strictFixUp); } @@ -138,7 +129,7 @@ private void fixUp(boolean strictFixUp) throws IOException { // header for (int i = 1/* intended */; i < usnCount; i++) { - final int bufOffset = (i * bytesPerSector) - 2; + final int bufOffset = (i * 512) - 2; final int usnOffset = updateSequenceOffset + (i * 2); if (getUInt16(bufOffset) == usn) { setUInt16(bufOffset, getUInt16(usnOffset)); diff --git a/fs/src/fs/org/jnode/fs/ntfs/index/IndexBlock.java b/fs/src/fs/org/jnode/fs/ntfs/index/IndexBlock.java index 64e95f7e05..3e20e0beb7 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/index/IndexBlock.java +++ b/fs/src/fs/org/jnode/fs/ntfs/index/IndexBlock.java @@ -42,7 +42,7 @@ final class IndexBlock extends NTFSRecord { * @param offset */ public IndexBlock(FileRecord parentFileRecord, byte[] buffer, int offset) throws IOException { - super(parentFileRecord.getVolume().getBootRecord().getBytesPerSector(), true, buffer, offset); + super(true, buffer, offset); this.parentFileRecord = parentFileRecord; } diff --git a/fs/src/fs/org/jnode/fs/ntfs/logfile/LogFile.java b/fs/src/fs/org/jnode/fs/ntfs/logfile/LogFile.java index 918607f4af..7f4b0d5dd1 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/logfile/LogFile.java +++ b/fs/src/fs/org/jnode/fs/ntfs/logfile/LogFile.java @@ -11,7 +11,6 @@ import java.util.TreeMap; import org.apache.log4j.Logger; import org.jnode.fs.ntfs.FileRecord; -import org.jnode.fs.ntfs.NTFSVolume; import org.jnode.fs.ntfs.attribute.NTFSAttribute; import org.jnode.fs.util.FSUtils; import org.jnode.util.LittleEndian; @@ -96,7 +95,7 @@ public LogFile(FileRecord fileRecord) throws IOException { fileRecord.readData(0, logFileBuffer, 0, (int) logFileLength); // Read in the restart area info - restartPageHeader = getNewestRestartPageHeader(fileRecord.getVolume(), logFileBuffer); + restartPageHeader = getNewestRestartPageHeader(logFileBuffer); int restartAreaOffset = restartPageHeader.getOffset() + restartPageHeader.getRestartOffset(); logPageSize = restartPageHeader.getLogPageSize(); restartArea = new RestartArea(logFileBuffer, restartAreaOffset); @@ -125,7 +124,7 @@ public LogFile(FileRecord fileRecord) throws IOException { } } - oldestPageOffset = findOldestPageOffset(fileRecord.getVolume()); + oldestPageOffset = findOldestPageOffset(); } /** @@ -266,11 +265,10 @@ private long getNextRecordOffset(LogRecord logRecord, long recordOffset) { /** * Finds the offset to the oldest page, i.e. the one with the lowest LSN. * - * @param volume the volume that holds the log file. * @return the offset to the oldest page. * @throws IOException if an error occurs. */ - private int findOldestPageOffset(NTFSVolume volume) throws IOException { + private int findOldestPageOffset() throws IOException { TreeMap lsnPageMap = new TreeMap(); Map pageOffsetMap = new HashMap(); @@ -286,7 +284,7 @@ private int findOldestPageOffset(NTFSVolume volume) throws IOException { continue; } - RecordPageHeader pageHeader = new RecordPageHeader(volume, logFileBuffer, offset); + RecordPageHeader pageHeader = new RecordPageHeader(logFileBuffer, offset); offsetPageMap.put(offset, pageHeader); // If the last-end-LSN is zero then the page only contains data from the log record on the last page. I.e. 
@@ -304,20 +302,19 @@ private int findOldestPageOffset(NTFSVolume volume) throws IOException { /** * Gets the restart page header that corresponds to the restart page with the highest current LSN. * - * @param volume the volume that holds the log file. * @param buffer the buffer to read from. * @return the header. * @throws IOException if an error occurs. */ - private RestartPageHeader getNewestRestartPageHeader(NTFSVolume volume, byte[] buffer) throws IOException { - RestartPageHeader restartPageHeader1 = new RestartPageHeader(volume, buffer, 0); + private RestartPageHeader getNewestRestartPageHeader(byte[] buffer) throws IOException { + RestartPageHeader restartPageHeader1 = new RestartPageHeader(buffer, 0); if (!restartPageHeader1.isValid()) { throw new IllegalStateException("Restart header has invalid magic: " + restartPageHeader1.getMagic()); } else if (restartPageHeader1.getMagic() == RestartPageHeader.Magic.CHKD) { log.warn("First $LogFile restart header has check disk magic"); } - RestartPageHeader restartPageHeader2 = new RestartPageHeader(volume, buffer, restartPageHeader1.getLogPageSize()); + RestartPageHeader restartPageHeader2 = new RestartPageHeader(buffer, restartPageHeader1.getLogPageSize()); if (!restartPageHeader2.isValid()) { throw new IllegalStateException("Second restart header has invalid magic: " + restartPageHeader2.getMagic()); } else if (restartPageHeader2.getMagic() == RestartPageHeader.Magic.CHKD) { diff --git a/fs/src/fs/org/jnode/fs/ntfs/logfile/RecordPageHeader.java b/fs/src/fs/org/jnode/fs/ntfs/logfile/RecordPageHeader.java index d5e15514fb..106891048b 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/logfile/RecordPageHeader.java +++ b/fs/src/fs/org/jnode/fs/ntfs/logfile/RecordPageHeader.java @@ -2,7 +2,6 @@ import java.io.IOException; import org.jnode.fs.ntfs.NTFSRecord; -import org.jnode.fs.ntfs.NTFSVolume; /** * $LogFile record page header. @@ -29,13 +28,12 @@ public static class Magic { /** * Creates a new log file record page header. * - * @param volume the volume that contains this record. * @param buffer the buffer. * @param offset the offset. * @throws IOException if an error occurs during fixup. */ - public RecordPageHeader(NTFSVolume volume, byte[] buffer, int offset) throws IOException { - super(volume.getBootRecord().getBytesPerSector(), true, buffer, offset); + public RecordPageHeader(byte[] buffer, int offset) throws IOException { + super(true, buffer, offset); } /** diff --git a/fs/src/fs/org/jnode/fs/ntfs/logfile/RestartPageHeader.java b/fs/src/fs/org/jnode/fs/ntfs/logfile/RestartPageHeader.java index b8b29435fb..99f19e7676 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/logfile/RestartPageHeader.java +++ b/fs/src/fs/org/jnode/fs/ntfs/logfile/RestartPageHeader.java @@ -2,7 +2,6 @@ import java.io.IOException; import org.jnode.fs.ntfs.NTFSRecord; -import org.jnode.fs.ntfs.NTFSVolume; /** * $LogFile restart page header @@ -29,13 +28,12 @@ public static class Magic { /** * Creates a new log file restart page header. * - * @param volume the volume that contains this record. * @param buffer the buffer. * @param offset the offset to create the structure at. * @throws IOException if an error occurs during fixup. 
*/ - public RestartPageHeader(NTFSVolume volume, byte[] buffer, int offset) throws IOException { - super(volume.getBootRecord().getBytesPerSector(), true, buffer, offset); + public RestartPageHeader(byte[] buffer, int offset) throws IOException { + super(true, buffer, offset); } /** From e8ce74dc89b20863aaeb9faa8e1b5365b7d5819c Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Thu, 25 Jun 2020 23:50:16 +1000 Subject: [PATCH 32/35] HFS+: remove final from some methods. --- fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemType.java | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemType.java b/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemType.java index 44d5c51de2..a5bb66d0f6 100755 --- a/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemType.java +++ b/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemType.java @@ -32,18 +32,17 @@ public class HfsPlusFileSystemType implements BlockDeviceFileSystemType { public static final Class ID = HfsPlusFileSystemType.class; - public final HfsPlusFileSystem create(final Device device, final boolean readOnly) throws FileSystemException { + public HfsPlusFileSystem create(final Device device, final boolean readOnly) throws FileSystemException { HfsPlusFileSystem fs = new HfsPlusFileSystem(device, readOnly, this); fs.read(); return fs; } - public final String getName() { + public String getName() { return "HFS+"; } - public final boolean supports(final PartitionTableEntry pte, final byte[] firstSector, - final FSBlockDeviceAPI devApi) { + public boolean supports(final PartitionTableEntry pte, final byte[] firstSector, final FSBlockDeviceAPI devApi) { /* * if (pte != null) { if (pte instanceof IBMPartitionTableEntry) { if (((IBMPartitionTableEntry) * pte).getSystemIndicator() != IBMPartitionTypes.PARTTYPE_LINUXNATIVE) { return false; } } } @@ -62,5 +61,4 @@ public final boolean supports(final PartitionTableEntry pte, final byte[] firstS return (magicNumber == SuperBlock.HFSPLUS_SUPER_MAGIC && version == 4) || (magicNumber == SuperBlock.HFSX_SUPER_MAGIC && version == 5); } - } From 9a51e3aefee470122d90c137cc7cdf16c13632cf Mon Sep 17 00:00:00 2001 From: Jessica Knight Date: Thu, 27 Aug 2020 11:57:58 +1000 Subject: [PATCH 33/35] TRIAGE-2820: fix arrayindexoutofbounds when ext2 data is shorter than it says it is --- fs/src/fs/org/jnode/fs/ext2/Ext2DirectoryRecord.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/ext2/Ext2DirectoryRecord.java b/fs/src/fs/org/jnode/fs/ext2/Ext2DirectoryRecord.java index f770af7a64..e6df500843 100644 --- a/fs/src/fs/org/jnode/fs/ext2/Ext2DirectoryRecord.java +++ b/fs/src/fs/org/jnode/fs/ext2/Ext2DirectoryRecord.java @@ -59,8 +59,8 @@ public Ext2DirectoryRecord(Ext2FileSystem fs, byte[] data, int offset, int fileO synchronized (data) { byte[] newData = new byte[Math.max(8, getRecLen())]; int copySize = getRecLen(); - if (copySize + offset > data.length) { - copySize = Math.max(0, copySize - offset); + if (offset + copySize > data.length) { + copySize = data.length - offset; } System.arraycopy(data, offset, newData, 0, copySize); this.data = newData; From 7a1663d60ca7c445b0ca92726388e6fd7c032961 Mon Sep 17 00:00:00 2001 From: ivieira01 <432#@fdSA!!!> Date: Fri, 22 Jan 2021 14:27:53 +1100 Subject: [PATCH 34/35] Reverting change that introduced a regression where $MFTs with AttributeLists were failing to parse. 
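The reverted change routed $MFT $DATA lookups through a FileRecordSupplier that always returned
null to avoid recursion, but that also meant attributes reached through attribute list entries in
other records could not be resolved, so $MFTs that use attribute lists failed to parse. Restoring
the plain getAllAttributes() path, as in the diff below, removes that special case. A hedged,
excerpt-style sketch of the supplier being removed (type and method names follow the deleted code;
illustration only, not runnable on its own):

    // The recursion guard that proved too aggressive: it never resolves child records,
    // so attributes held in other MFT records are lost for attribute-list-backed $MFTs.
    FileRecordSupplier noChildLookups = new FileRecordSupplier() {
        @Override
        public FileRecord getRecord(long referenceNumber) {
            return null; // never looks up the holding record
        }
    };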
--- fs/src/fs/org/jnode/fs/ntfs/FileRecord.java | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) diff --git a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java index 6e61329cc4..fb5df7365a 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java +++ b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java @@ -500,26 +500,7 @@ public Iterator findAttributesByTypeAndName(final int attrTypeID, if (log.isDebugEnabled()) { log.debug("findAttributesByTypeAndName(0x" + NumberUtils.hex(attrTypeID, 4) + "," + name + ")"); } - - Iterator attributeIterator; - - if (attrTypeID == NTFSAttribute.Types.DATA && referenceNumber == MasterFileTable.SystemFiles.MFT) { - List attributes = new ArrayList(); - attributes.addAll(getAllStoredAttributes()); - attributes.addAll(readAttributeListAttributes(new FileRecordSupplier() { - @Override - public FileRecord getRecord(long referenceNumber) { - // When trying to get the $DATA attribute of the MFT, don't attempt to look up any other records - // to avoid possible infinite recursion - return null; - } - })); - attributeIterator = attributes.iterator(); - } else { - attributeIterator = getAllAttributes().iterator(); - } - - return new FilteredAttributeIterator(attributeIterator) { + return new FilteredAttributeIterator(getAllAttributes().iterator()) { @Override protected boolean matches(NTFSAttribute attr) { if (attr.getAttributeType() == attrTypeID) { From 3b147ffd989cc2d08e610b15fa0515c3a0ede7dd Mon Sep 17 00:00:00 2001 From: Luke Quinane Date: Mon, 22 Mar 2021 15:05:45 +1100 Subject: [PATCH 35/35] Add archival notice --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 3429573cf0..8eb8a45992 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,9 @@ # Welcome to JNode! -[![Build Status](https://travis-ci.org/jnode/jnode.svg?branch=master)](https://travis-ci.org/jnode/jnode) +## Archived + +This project has been archived, and moved to https://github.com/Nuix/jnode-fs -In this file, you find the instructions needed to setup a JNode development environment. ## Sub-Projects