diff --git a/README.md b/README.md
index 3429573cf0..8eb8a45992 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,9 @@
 # Welcome to JNode!
-[![Build Status](https://travis-ci.org/jnode/jnode.svg?branch=master)](https://travis-ci.org/jnode/jnode)
+## Archived
+
+This project has been archived and moved to https://github.com/Nuix/jnode-fs
-In this file, you find the instructions needed to setup a JNode development environment.
 
 ## Sub-Projects
diff --git a/core/src/core/org/jnode/util/LittleEndian.java b/core/src/core/org/jnode/util/LittleEndian.java
index 036ccafc44..c3afd8c5fb 100644
--- a/core/src/core/org/jnode/util/LittleEndian.java
+++ b/core/src/core/org/jnode/util/LittleEndian.java
@@ -126,6 +126,23 @@ public static int getInt32(byte[] src, int offset) {
         return ((v3 << 24) | (v2 << 16) | (v1 << 8) | v0);
     }
 
+    /**
+     * Gets a 40-bit signed integer from the given byte array at the given offset.
+     *
+     * @param src the source byte array.
+     * @param offset the offset to start reading at.
+     * @return the 40-bit value, sign-extended to a {@code long}.
+     */
+    public static long getInt40(byte[] src, int offset) {
+        final long v0 = src[offset + 0] & 0xFF;
+        final long v1 = src[offset + 1] & 0xFF;
+        final long v2 = src[offset + 2] & 0xFF;
+        final long v3 = src[offset + 3] & 0xFF;
+        final long v4 = src[offset + 4] & 0xFF;
+        long tmp = (v4 << 32) | (v3 << 24) | (v2 << 16) | (v1 << 8) | v0;
+        tmp <<= 24; // Shift the 40-bit value to the top of the long, then arithmetic-shift back to extend any negative sign
+        return tmp >> 24;
+    }
+
     /**
      * Gets a 48-bit unsigned integer from the given byte array at the given offset.
     *
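A quick standalone check of the sign-extension idiom used by getInt40 above (illustrative only, not part of the patch; the byte values are made up):

    public class GetInt40Check {
        public static void main(String[] args) {
            byte[] bytes = {0x01, 0x00, 0x00, 0x00, (byte) 0x80}; // little-endian 0x8000000001
            long value = org.jnode.util.LittleEndian.getInt40(bytes, 0);
            // The top bit of the 40-bit value is set, so the result must be negative:
            System.out.println(value == 0xFFFFFF8000000001L); // true (-549755813887)
        }
    }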
diff --git a/fs/src/fs/org/jnode/fs/exfat/DirectoryParser.java b/fs/src/fs/org/jnode/fs/exfat/DirectoryParser.java
index 843668913a..38fad78ddd 100644
--- a/fs/src/fs/org/jnode/fs/exfat/DirectoryParser.java
+++ b/fs/src/fs/org/jnode/fs/exfat/DirectoryParser.java
@@ -51,13 +51,13 @@ public class DirectoryParser {
     private static final int FLAG_CONTIGUOUS = 3;
 
     public static DirectoryParser create(Node node) throws IOException {
-        return create(node, false);
+        return create(node, false, false);
     }
 
-    public static DirectoryParser create(Node node, boolean showDeleted) throws IOException {
+    public static DirectoryParser create(Node node, boolean showDeleted, boolean performChecks) throws IOException {
         assert (node.isDirectory()) : "not a directory"; //NOI18N
 
-        final DirectoryParser result = new DirectoryParser(node, showDeleted);
+        final DirectoryParser result = new DirectoryParser(node, showDeleted, performChecks);
         result.init();
         return result;
     }
@@ -66,13 +66,15 @@ public static DirectoryParser create(Node node, boolean showDeleted) throws IOEx
     private final ByteBuffer chunk;
     private final Node node;
     private boolean showDeleted;
+    private boolean performChecks;
     private long cluster;
     private UpcaseTable upcase;
     private int index;
 
-    private DirectoryParser(Node node, boolean showDeleted) {
+    private DirectoryParser(Node node, boolean showDeleted, boolean performChecks) {
         this.node = node;
         this.showDeleted = showDeleted;
+        this.performChecks = performChecks;
         this.sb = node.getSuperBlock();
         this.chunk = ByteBuffer.allocate(sb.getBytesPerCluster());
         this.chunk.order(ByteOrder.LITTLE_ENDIAN);
@@ -232,14 +234,10 @@ private void parseFile(Visitor v, boolean deleted) throws IOException {
         int nameLen = DeviceAccess.getUint8(chunk);
         final int nameHash = DeviceAccess.getUint16(chunk);
         skip(2); /* unknown */
-        final long realSize = DeviceAccess.getUint64(chunk);
+        final long size = DeviceAccess.getUint64(chunk);
         skip(4); /* unknown */
         final long startCluster = DeviceAccess.getUint32(chunk);
-        final long size = DeviceAccess.getUint64(chunk);
-
-        if (realSize != size) {
-            throw new IOException("real size does not equal size");
-        }
+        final long allocatedSize = DeviceAccess.getUint64(chunk);
 
         conts--;
 
@@ -276,19 +274,19 @@ private void parseFile(Visitor v, boolean deleted) throws IOException {
             }
         }
 
-        if (!deleted && referenceChecksum != actualChecksum) {
+        if (performChecks && !deleted && referenceChecksum != actualChecksum) {
             throw new IOException("checksum mismatch");
         }
 
         final String name = nameBuilder.toString();
 
-        if ((this.upcase != null) && (hashName(name) != nameHash)) {
+        if (performChecks && (this.upcase != null) && (hashName(name) != nameHash)) {
             throw new IOException("name hash mismatch ("
                 + Integer.toHexString(hashName(name)) + " != "
                 + Integer.toHexString(nameHash) + ")");
         }
 
-        v.foundNode(Node.create(sb, startCluster, attrib, name, (flag == FLAG_CONTIGUOUS), realSize, times, deleted),
+        v.foundNode(Node.create(sb, startCluster, attrib, name, (flag == FLAG_CONTIGUOUS), size, allocatedSize, times, deleted),
             index);
     }
 
diff --git a/fs/src/fs/org/jnode/fs/exfat/Node.java b/fs/src/fs/org/jnode/fs/exfat/Node.java
index b94df689c2..731a651882 100644
--- a/fs/src/fs/org/jnode/fs/exfat/Node.java
+++ b/fs/src/fs/org/jnode/fs/exfat/Node.java
@@ -21,6 +21,7 @@
 package org.jnode.fs.exfat;
 
 import java.io.IOException;
+import org.apache.log4j.Logger;
 
 /**
  * @author Matthias Treydte <waldheinz at gmail.com>
 */
@@ -47,16 +48,21 @@ public static Node createRoot(ExFatSuperBlock sb)
     public static Node create(
         ExFatSuperBlock sb, long startCluster, int flags,
-        String name, boolean isContiguous, long size, EntryTimes times, boolean deleted) {
+        String name, boolean isContiguous, long size, long allocatedSize, EntryTimes times, boolean deleted) {
 
         final Node result = new Node(sb, startCluster, times);
         result.name = name;
         result.isContiguous = isContiguous;
         result.size = size;
+        result.allocatedSize = allocatedSize;
         result.flags = flags;
         result.deleted = deleted;
 
+        if (allocatedSize < size) {
+            Logger.getLogger(Node.class).warn("Allocated size less than file size: " + result);
+        }
+
         return result;
     }
 
@@ -69,7 +75,17 @@ public static Node create(
     private long clusterCount;
     private int flags;
     private String name;
+
+    /**
+     * The size of the file in bytes.
+     */
     private long size;
+
+    /**
+     * The size allocated for the file in bytes. This may be larger than {@code size} if the OS has reserved some space
+     * for the file to grow into.
+ */ + private long allocatedSize; private boolean deleted; private Node(ExFatSuperBlock sb, long startCluster, EntryTimes times) { @@ -125,6 +141,10 @@ public long getSize() { return size; } + public long getAllocatedSize() { + return allocatedSize; + } + public boolean isDeleted() { return deleted; } @@ -170,6 +190,10 @@ public String toString() { result.append(this.name); result.append(", contiguous="); result.append(this.isContiguous); + result.append(", size="); + result.append(size); + result.append(", allocated-size="); + result.append(allocatedSize); result.append("]"); return result.toString(); diff --git a/fs/src/fs/org/jnode/fs/exfat/NodeDirectory.java b/fs/src/fs/org/jnode/fs/exfat/NodeDirectory.java index 4259090231..c6fcb7d302 100644 --- a/fs/src/fs/org/jnode/fs/exfat/NodeDirectory.java +++ b/fs/src/fs/org/jnode/fs/exfat/NodeDirectory.java @@ -43,10 +43,10 @@ public class NodeDirectory extends AbstractFSObject implements FSDirectory, FSDi public NodeDirectory(ExFatFileSystem fs, NodeEntry nodeEntry) throws IOException { - this(fs, nodeEntry, false); + this(fs, nodeEntry, false, false); } - public NodeDirectory(ExFatFileSystem fs, NodeEntry nodeEntry, boolean showDeleted) + public NodeDirectory(ExFatFileSystem fs, NodeEntry nodeEntry, boolean showDeleted, boolean performChecks) throws IOException { super(fs); @@ -57,7 +57,7 @@ public NodeDirectory(ExFatFileSystem fs, NodeEntry nodeEntry, boolean showDelete this.idToNode = new LinkedHashMap(); DirectoryParser. - create(nodeEntry.getNode(), showDeleted). + create(nodeEntry.getNode(), showDeleted, performChecks). setUpcase(this.upcase). parse(new VisitorImpl()); @@ -145,7 +145,7 @@ public void foundBitmap( @Override public void foundUpcaseTable(DirectoryParser parser, long checksum, long startCluster, long size) { - + /* ignore */ } diff --git a/fs/src/fs/org/jnode/fs/exfat/NodeFile.java b/fs/src/fs/org/jnode/fs/exfat/NodeFile.java index 97357388c7..c64d4df4bf 100644 --- a/fs/src/fs/org/jnode/fs/exfat/NodeFile.java +++ b/fs/src/fs/org/jnode/fs/exfat/NodeFile.java @@ -39,6 +39,10 @@ public NodeFile(ExFatFileSystem fs, Node node) { this.node = node; } + public Node getNode() { + return node; + } + @Override public long getLength() { return this.node.getSize(); diff --git a/fs/src/fs/org/jnode/fs/ext2/Ext2DirectoryRecord.java b/fs/src/fs/org/jnode/fs/ext2/Ext2DirectoryRecord.java index f770af7a64..e6df500843 100644 --- a/fs/src/fs/org/jnode/fs/ext2/Ext2DirectoryRecord.java +++ b/fs/src/fs/org/jnode/fs/ext2/Ext2DirectoryRecord.java @@ -59,8 +59,8 @@ public Ext2DirectoryRecord(Ext2FileSystem fs, byte[] data, int offset, int fileO synchronized (data) { byte[] newData = new byte[Math.max(8, getRecLen())]; int copySize = getRecLen(); - if (copySize + offset > data.length) { - copySize = Math.max(0, copySize - offset); + if (offset + copySize > data.length) { + copySize = data.length - offset; } System.arraycopy(data, offset, newData, 0, copySize); this.data = newData; diff --git a/fs/src/fs/org/jnode/fs/ext2/INode.java b/fs/src/fs/org/jnode/fs/ext2/INode.java index 690f939f6b..e61f29787a 100644 --- a/fs/src/fs/org/jnode/fs/ext2/INode.java +++ b/fs/src/fs/org/jnode/fs/ext2/INode.java @@ -166,7 +166,7 @@ public Ext2FileSystem getExt2FileSystem() { * @return the extra size. 
     */
     public int getExtraISize() {
-        if (getExt2FileSystem().hasROFeature(Ext2Constants.EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE)) {
+        if (getExt2FileSystem().hasROFeature(Ext2Constants.EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE) && data.length > 0x82) {
             return LittleEndian.getInt16(data, 0x80);
         }
diff --git a/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusDirectory.java b/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusDirectory.java
index 6a88e5f960..bccd78da85 100644
--- a/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusDirectory.java
+++ b/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusDirectory.java
@@ -218,7 +218,7 @@ private FSEntryTable readEntries() throws IOException {
         List pathList = new LinkedList();
         HfsPlusFileSystem fs = getFileSystem();
         if (fs.getVolumeHeader().getFolderCount() > 0) {
-            LeafRecord[] records;
+            List<LeafRecord> records;
 
             if ((folder.getFlags() & CatalogFile.FLAGS_HARDLINK_CHAIN) != 0) {
                 records = fs.getCatalog().getRecords(getHardLinkFolder().getFolderId());
diff --git a/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemType.java b/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemType.java
index 44d5c51de2..a5bb66d0f6 100755
--- a/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemType.java
+++ b/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemType.java
@@ -32,18 +32,17 @@ public class HfsPlusFileSystemType implements BlockDeviceFileSystemType {
 
     public static final Class ID = HfsPlusFileSystemType.class;
 
-    public final HfsPlusFileSystem create(final Device device, final boolean readOnly) throws FileSystemException {
+    public HfsPlusFileSystem create(final Device device, final boolean readOnly) throws FileSystemException {
         HfsPlusFileSystem fs = new HfsPlusFileSystem(device, readOnly, this);
         fs.read();
         return fs;
     }
 
-    public final String getName() {
+    public String getName() {
         return "HFS+";
     }
 
-    public final boolean supports(final PartitionTableEntry pte, final byte[] firstSector,
-        final FSBlockDeviceAPI devApi) {
+    public boolean supports(final PartitionTableEntry pte, final byte[] firstSector, final FSBlockDeviceAPI devApi) {
         /*
          * if (pte != null) { if (pte instanceof IBMPartitionTableEntry) { if (((IBMPartitionTableEntry)
          * pte).getSystemIndicator() != IBMPartitionTypes.PARTTYPE_LINUXNATIVE) { return false; } } }
@@ -62,5 +61,4 @@ public final boolean supports(final PartitionTableEntry pte, final byte[] firstS
         return (magicNumber == SuperBlock.HFSPLUS_SUPER_MAGIC && version == 4) ||
             (magicNumber == SuperBlock.HFSX_SUPER_MAGIC && version == 5);
     }
-
 }
diff --git a/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusForkData.java b/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusForkData.java
index 5052e2ecf3..d3005ab642 100755
--- a/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusForkData.java
+++ b/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusForkData.java
@@ -59,7 +59,7 @@ public class HfsPlusForkData {
     /**
      * Overflow extents.
     */
-    private ExtentDescriptor[] overflowExtents;
+    private List<ExtentDescriptor> overflowExtents;
 
     /**
      * The catalog node ID that owns this fork.
@@ -178,7 +178,7 @@ public Collection<ExtentDescriptor> getAllExtents(HfsPlusFileSystem fileSystem)
 
         // Add the overflow extents if they exist
         if (overflowExtents != null) {
-            Collections.addAll(allExtents, overflowExtents);
+            allExtents.addAll(overflowExtents);
         }
 
         return allExtents;
diff --git a/fs/src/fs/org/jnode/fs/hfsplus/HfsUnicodeString.java b/fs/src/fs/org/jnode/fs/hfsplus/HfsUnicodeString.java
index 5f808f49eb..e0124acb0c 100755
--- a/fs/src/fs/org/jnode/fs/hfsplus/HfsUnicodeString.java
+++ b/fs/src/fs/org/jnode/fs/hfsplus/HfsUnicodeString.java
@@ -22,7 +22,7 @@
 
 import org.jnode.util.BigEndian;
 
-public class HfsUnicodeString {
+public class HfsUnicodeString implements Comparable<HfsUnicodeString> {
     /**
      * Length of string in characters.
     */
@@ -55,7 +55,7 @@ public HfsUnicodeString(final byte[] src, final int offset) {
     */
    public HfsUnicodeString(String string) {
        this.string = string;
-        this.length = string.length();
+        this.length = string == null ? 0 : string.length();
    }
 
    public final int getLength() {
@@ -83,4 +83,32 @@ public final byte[] getBytes() {
    public String toString() {
        return string;
    }
+
+    @Override
+    public int hashCode() {
+        return string == null ? 0 : string.hashCode();
+    }
+
+    @Override
+    public boolean equals(Object obj) {
+        if (!(obj instanceof HfsUnicodeString)) {
+            return false;
+        }
+
+        HfsUnicodeString other = (HfsUnicodeString) obj;
+        return compareTo(other) == 0;
+    }
+
+    @Override
+    public int compareTo(HfsUnicodeString other) {
+        if (string == null && other.string == null) {
+            return 0;
+        } else if (string == null) {
+            return -1;
+        } else if (other.string == null) {
+            return 1;
+        }
+
+        return string.compareTo(other.string);
+    }
 }
diff --git a/fs/src/fs/org/jnode/fs/hfsplus/attributes/AttributeKey.java b/fs/src/fs/org/jnode/fs/hfsplus/attributes/AttributeKey.java
index f885b78580..fa270f45a9 100644
--- a/fs/src/fs/org/jnode/fs/hfsplus/attributes/AttributeKey.java
+++ b/fs/src/fs/org/jnode/fs/hfsplus/attributes/AttributeKey.java
@@ -81,8 +81,7 @@ public int compareTo(Key key) {
             // Note: this is unlikely to be correct. See TN1150 section "Unicode Subtleties" for details
             // For reading in data it should be safe since the B-Tree will be pre-sorted, but for adding new entries
             // it will cause the order to be wrong.
-            result = this.getAttributeName().getUnicodeString()
-                .compareTo(otherKey.getAttributeName().getUnicodeString());
+            result = this.getAttributeName().compareTo(otherKey.getAttributeName());
             }
         }
         return result;
     }
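The TN1150 caveat above is easy to demonstrate with plain String comparison (an illustration, not part of the patch): HFS+ collates names case-insensitively using its own folding table, while String.compareTo orders by raw UTF-16 code units.

    public class CollationCheck {
        public static void main(String[] args) {
            System.out.println("FOO".compareTo("foo"));           // negative: 'F' (0x46) sorts before 'f' (0x66)
            System.out.println("FOO".compareToIgnoreCase("foo")); // 0, closer to (but still not the same as) HFS+ folding
        }
    }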
@@ -103,7 +102,8 @@ public boolean equals(Object obj) {
 
         return
             fileId.getId() == otherKey.fileId.getId() &&
-            attributeName.getUnicodeString().equals(otherKey.getAttributeName().getUnicodeString());
+            (attributeName.getUnicodeString() == null || otherKey.getAttributeName().getUnicodeString() == null ||
+                attributeName.getUnicodeString().equals(otherKey.getAttributeName().getUnicodeString()));
     }
 
     @Override
diff --git a/fs/src/fs/org/jnode/fs/hfsplus/attributes/Attributes.java b/fs/src/fs/org/jnode/fs/hfsplus/attributes/Attributes.java
index 312a86783e..9a5f0c4f78 100644
--- a/fs/src/fs/org/jnode/fs/hfsplus/attributes/Attributes.java
+++ b/fs/src/fs/org/jnode/fs/hfsplus/attributes/Attributes.java
@@ -22,6 +22,10 @@
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.LinkedList;
+import java.util.List;
 import org.apache.log4j.Logger;
 import org.jnode.fs.hfsplus.HfsPlusFileSystem;
 import org.jnode.fs.hfsplus.HfsPlusForkData;
@@ -89,12 +93,35 @@ public Attributes(HfsPlusFileSystem fs) throws IOException {
         }
     }
 
+    /**
+     * Gets all attributes for the given file.
+     *
+     * @param fileId the ID of the file to look up attributes for.
+     * @return the list of attribute names.
+     * @throws IOException if an error occurs.
+     */
+    public List<String> getAllAttributes(CatalogNodeId fileId) throws IOException {
+        if (bthr == null) {
+            return null;
+        }
+
+        List<String> attributes = new ArrayList<String>();
+
+        for (LeafRecord record : getAttributeLeafRecords(fileId, null, bthr.getRootNode())) {
+            if (record != null) {
+                attributes.add(((AttributeKey) record.getKey()).getAttributeName().getUnicodeString());
+            }
+        }
+
+        return attributes;
+    }
+
     /**
      * Looks up an attribute in the attributes file.
      *
-     * @param fileId the ID of the file to look up the attribute on.
+     * @param fileId        the ID of the file to look up the attribute on.
      * @param attributeName the name of the attribute to lookup.
-     * @return the leaf record, or possibly {code null}.
+     * @return the attribute data, or possibly {@code null}.
      * @throws IOException if an error occurs.
     */
    public AttributeData getAttribute(CatalogNodeId fileId, String attributeName) throws IOException {
@@ -102,25 +129,34 @@ public AttributeData getAttribute(CatalogNodeId fileId, String attributeName) th
         return null;
     }
 
-        return getAttribute(fileId, attributeName, bthr.getRootNode());
+        List<LeafRecord> records = getAttributeLeafRecords(fileId, attributeName, bthr.getRootNode());
+        if (records.isEmpty()) {
+            return null;
+        }
+
+        if (records.size() > 1) {
+            log.warn("Expected a single attribute but got: " + records);
+        }
+
+        return toAttributeData(fileId, records.get(0));
     }
 
     /**
      * Looks up an attribute in the attributes file.
      *
-     * @param fileId the ID of the file to look up the attribute on.
+     * @param fileId        the ID of the file to look up the attribute on.
      * @param attributeName the name of the attribute to lookup.
-     * @param nodeNumber the index of node where the search begin.
-     * @return the leaf record, or possibly {code null}.
+     * @param nodeNumber    the index of the node where the search begins.
+     * @return the attribute data, or possibly {@code null}.
      * @throws IOException if an error occurs.
     */
-    public AttributeData getAttribute(CatalogNodeId fileId, String attributeName, long nodeNumber) throws IOException {
+    public List<LeafRecord> getAttributeLeafRecords(CatalogNodeId fileId, String attributeName, long nodeNumber)
+        throws IOException {
         if (attributesFile.getExtent(0).isEmpty()) {
             // No attributes
-            return null;
+            return Collections.emptyList();
         }
 
-        LeafRecord leafRecord = null;
         int nodeSize = bthr.getNodeSize();
         ByteBuffer nodeData = ByteBuffer.allocate(nodeSize);
         attributesFile.read(fs, (nodeNumber * nodeSize), nodeData);
@@ -132,21 +168,28 @@ public AttributeData getAttribute(CatalogNodeId fileId, String attributeName, lo
             AttributeIndexNode node = new AttributeIndexNode(data, nodeSize);
             IndexRecord[] records = node.findAll(new AttributeKey(fileId, attributeName));
 
+            List<LeafRecord> leafRecords = new LinkedList<LeafRecord>();
             for (IndexRecord indexRecord : records) {
-                AttributeData attributeData = getAttribute(fileId, attributeName, indexRecord.getIndex());
-                if (attributeData != null) {
-                    return attributeData;
-                }
+                leafRecords.addAll(getAttributeLeafRecords(fileId, attributeName, indexRecord.getIndex()));
             }
+            return leafRecords;
 
         } else if (nodeDescriptor.isLeafNode()) {
             AttributeLeafNode node = new AttributeLeafNode(data, nodeSize);
-            leafRecord = node.find(new AttributeKey(fileId, attributeName));
+            return node.findAll(new AttributeKey(fileId, attributeName));
+        } else {
+            return Collections.emptyList();
         }
+    }
 
-        if (leafRecord == null) {
-            return null;
-        }
+    /**
+     * Converts a leaf record into attribute data.
+     *
+     * @param fileId the file ID.
+     * @param leafRecord the leaf record.
+     * @return the attribute data, or {@code null} if the record cannot be converted.
+     */
+    public AttributeData toAttributeData(CatalogNodeId fileId, LeafRecord leafRecord) {
         long type = BigEndian.getUInt32(leafRecord.getData(), 0);
diff --git a/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java b/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
index 9006b528c7..6efbc744ae 100755
--- a/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
+++ b/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
@@ -246,7 +246,7 @@ public final LeafRecord getRecord(final CatalogNodeId parentID) throws IOExcepti
      * @return Array of LeafRecord
      * @throws IOException
     */
-    public final LeafRecord[] getRecords(final CatalogNodeId parentID) throws IOException {
+    public final List<LeafRecord> getRecords(final CatalogNodeId parentID) throws IOException {
         return getRecords(parentID, getBTHeaderRecord().getRootNode());
     }
 
@@ -259,7 +259,7 @@ public final LeafRecord[] getRecords(final CatalogNodeId parentID) throws IOExce
      * @return Array of LeafRecord
      * @throws IOException
     */
-    public final LeafRecord[] getRecords(final CatalogNodeId parentID, final long nodeNumber)
+    public final List<LeafRecord> getRecords(final CatalogNodeId parentID, final long nodeNumber)
         throws IOException {
         try {
             long currentNodeNumber = nodeNumber;
@@ -273,16 +273,16 @@ public final LeafRecord[] getRecords(final CatalogNodeId parentID, final long no
                 IndexRecord[] records = node.findAll(new CatalogKey(parentID));
                 List<LeafRecord> lfList = new LinkedList<LeafRecord>();
                 for (IndexRecord rec : records) {
-                    LeafRecord[] lfr = getRecords(parentID, rec.getIndex());
-                    Collections.addAll(lfList, lfr);
+                    List<LeafRecord> lfr = getRecords(parentID, rec.getIndex());
+                    lfList.addAll(lfr);
                 }
-                return lfList.toArray(new LeafRecord[lfList.size()]);
+                return lfList;
             } else if (nd.isLeafNode()) {
                 CatalogLeafNode node = new CatalogLeafNode(nodeData.array(), nodeSize);
                 return node.findAll(new CatalogKey(parentID));
             } else {
                 log.info(String.format("Node %d wasn't a leaf or index: %s\n%s", nodeNumber, nd,
                     NumberUtils.hex(datas)));
-                return new LeafRecord[0];
+                return Collections.emptyList();
             }
         } catch (Exception e) {
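The same recursive descent is applied here and in the Extent changes below. A self-contained sketch of the shape (illustrative only; NodeSource and its methods are assumed stand-ins, not real JNode classes):

    import java.io.IOException;
    import java.util.Collections;
    import java.util.LinkedList;
    import java.util.List;

    class BTreeLookupSketch<K> {
        interface NodeSource<K> {
            boolean isIndexNode(long nodeNumber) throws IOException;
            boolean isLeafNode(long nodeNumber) throws IOException;
            List<Long> childrenMatching(long nodeNumber, K key) throws IOException;    // index records
            List<Object> leafRecordsMatching(long nodeNumber, K key) throws IOException;
        }

        List<Object> find(NodeSource<K> source, K key, long nodeNumber) throws IOException {
            if (source.isIndexNode(nodeNumber)) {
                // Index node: recurse into every child whose key range matches and collect the results
                List<Object> result = new LinkedList<Object>();
                for (long child : source.childrenMatching(nodeNumber, key)) {
                    result.addAll(find(source, key, child));
                }
                return result;
            } else if (source.isLeafNode(nodeNumber)) {
                // Leaf node: return the matching records directly
                return source.leafRecordsMatching(nodeNumber, key);
            } else {
                // Neither a leaf nor an index node: nothing to return
                return Collections.emptyList();
            }
        }
    }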
log.info(String.format("Node %d wasn't a leaf or index: %s\n%s", nodeNumber, nd, NumberUtils.hex(datas))); - return new LeafRecord[0]; + return Collections.emptyList(); } } catch (Exception e) { diff --git a/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java b/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java index 074538cb40..5cde603d87 100644 --- a/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java +++ b/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java @@ -102,7 +102,7 @@ public Extent(HfsPlusFileSystem fs) throws IOException { * @return the overflow extents. * @throws IOException if an error occurs. */ - public final ExtentDescriptor[] getOverflowExtents(final ExtentKey key) throws IOException { + public final List getOverflowExtents(final ExtentKey key) throws IOException { return getOverflowExtents(key, bthr.getRootNode()); } @@ -114,7 +114,7 @@ public final ExtentDescriptor[] getOverflowExtents(final ExtentKey key) throws I * @return the overflow extents. * @throws IOException if an error occurs. */ - public final ExtentDescriptor[] getOverflowExtents(final ExtentKey key, long nodeNumber) throws IOException { + public final List getOverflowExtents(final ExtentKey key, long nodeNumber) throws IOException { try { long currentNodeNumber = nodeNumber; int nodeSize = bthr.getNodeSize(); @@ -129,10 +129,10 @@ public final ExtentDescriptor[] getOverflowExtents(final ExtentKey key, long nod IndexRecord[] records = extentNode.findAll(key); List overflowExtents = new LinkedList(); for (IndexRecord record : records) { - Collections.addAll(overflowExtents, getOverflowExtents(key, record.getIndex())); + overflowExtents.addAll(getOverflowExtents(key, record.getIndex())); } - return overflowExtents.toArray(new ExtentDescriptor[overflowExtents.size()]); + return overflowExtents; } else if (nd.isLeafNode()) { ExtentLeafNode node = new ExtentLeafNode(nodeData.array(), nodeSize); @@ -140,7 +140,7 @@ public final ExtentDescriptor[] getOverflowExtents(final ExtentKey key, long nod } else { log.info(String.format("Node %d wasn't a leaf or index: %s\n%s", nodeNumber, nd, NumberUtils.hex(data))); - return new ExtentDescriptor[0]; + return Collections.emptyList(); } } catch (Exception e) { diff --git a/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentLeafNode.java b/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentLeafNode.java index 1df4f170b3..b41c16276f 100644 --- a/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentLeafNode.java +++ b/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentLeafNode.java @@ -69,7 +69,7 @@ protected LeafRecord createRecord(Key key, byte[] nodeData, int offset, int reco * @param key the key to match. * @return the overflow extents. 
     */
-    public ExtentDescriptor[] getOverflowExtents(ExtentKey key) {
+    public List<ExtentDescriptor> getOverflowExtents(ExtentKey key) {
         List<ExtentDescriptor> overflowExtents = new LinkedList<ExtentDescriptor>();
 
         for (LeafRecord record : findAll(key)) {
@@ -80,6 +80,6 @@ public ExtentDescriptor[] getOverflowExtents(ExtentKey key) {
             }
         }
 
-        return overflowExtents.toArray(new ExtentDescriptor[overflowExtents.size()]);
+        return overflowExtents;
     }
 }
diff --git a/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractLeafNode.java b/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractLeafNode.java
index 0b48ca6902..0476c459b1 100644
--- a/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractLeafNode.java
+++ b/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractLeafNode.java
@@ -53,7 +53,7 @@ protected LeafRecord createRecord(Key key, byte[] nodeData, int offset, int reco
         return new LeafRecord(key, nodeData, offset, recordSize);
     }
 
-    public final LeafRecord[] findAll(K key) {
+    public final List<LeafRecord> findAll(K key) {
         List<LeafRecord> list = new LinkedList<LeafRecord>();
         for (LeafRecord record : records) {
             log.debug("Record: " + record.toString() + " Key: " + key);
@@ -62,7 +62,7 @@ public final LeafRecord[] findAll(K key) {
                 list.add(record);
             }
         }
-        return list.toArray(new LeafRecord[list.size()]);
+        return list;
     }
 }
diff --git a/fs/src/fs/org/jnode/fs/jfat/Fat.java b/fs/src/fs/org/jnode/fs/jfat/Fat.java
index cbbbc56e30..9b5fc7206b 100644
--- a/fs/src/fs/org/jnode/fs/jfat/Fat.java
+++ b/fs/src/fs/org/jnode/fs/jfat/Fat.java
@@ -25,6 +25,7 @@
 import java.util.Arrays;
 import org.jnode.driver.block.BlockDeviceAPI;
 import org.jnode.fs.FileSystemException;
+import org.jnode.util.LittleEndian;
 
 
 /**
@@ -35,8 +36,6 @@ public abstract class Fat {
     private final BlockDeviceAPI api;
     private final BootSector bs;
 
-    private final FatCache cache;
-
     private int lastfree;
 
     private final ByteBuffer clearbuf;
@@ -45,11 +44,6 @@ protected Fat(BootSector bs, BlockDeviceAPI api) {
         this.bs = bs;
         this.api = api;
 
-        /*
-         * create a suitable cache
-         */
-        cache = new FatCache(this, 8192, 512);
-
         /*
         * set lastfree
         */
@@ -223,20 +217,48 @@ public final boolean isFree(int entry) {
         return (entry == freeEntry());
     }
 
+    byte[] readSector(long sector) throws IOException {
+        // FAT-12 reads in two byte chunks so add an extra element to prevent an array index out of bounds exception
+        // when reading in the last element
+        byte[] buffer = new byte[512 + 1];
+        api.read(sector * 512, ByteBuffer.wrap(buffer));
+        return buffer;
+    }
+
     public long getUInt16(int index) throws IOException {
-        return cache.getUInt16(index);
+        long position = position(0, index);
+        int offset = (int) (position % 512);
+        byte[] data = readSector(position / 512);
+        return LittleEndian.getUInt16(data, offset);
     }
 
     public long getUInt32(int index) throws IOException {
-        return cache.getUInt32(index);
+        long position = position(0, index);
+        int offset = (int) (position % 512);
+        byte[] data = readSector(position / 512);
+        return LittleEndian.getUInt32(data, offset);
+    }
+
+    void writeSector(long sector, byte[] data) throws IOException {
+        api.write(sector * 512, ByteBuffer.wrap(data));
     }
 
     public void setInt16(int index, int element) throws IOException {
-        cache.setInt16(index, element);
+        long position = position(0, index);
+        int offset = (int) (position % 512);
+        byte[] data = readSector(position / 512);
+
+        LittleEndian.setInt16(data, offset, element);
+        writeSector(position / 512, data);
     }
 
     public void setInt32(int index, int element) throws IOException {
-        cache.setInt32(index, element);
+        long position = position(0, index);
+        int offset = (int) (position % 512);
+        byte[] data = 
readSector(position / 512); + + LittleEndian.setInt32(data, offset, element); + writeSector(position / 512, data); } public abstract int get(int index) throws IOException; @@ -244,7 +266,7 @@ public void setInt32(int index, int element) throws IOException { public abstract int set(int index, int element) throws IOException; public void flush() throws IOException { - cache.flush(); + // Ignore, currently flushing each value as it is set } public final boolean isFreeEntry(int entry) throws IOException { @@ -285,13 +307,6 @@ public final boolean isFat12() { return getBootSector().isFat12(); } - public String getCacheStat() { - StrWriter out = new StrWriter(); - out.println("Access: " + cache.getAccess() + " Hits: " + cache.getHit() + " Ratio: " + - cache.getRatio() * 100 + "%"); - return out.toString(); - } - public String toString() { return String.format("FAT cluster:%d boot sector: %s", getClusterSize(), getBootSector()); } diff --git a/fs/src/fs/org/jnode/fs/jfat/FatCache.java b/fs/src/fs/org/jnode/fs/jfat/FatCache.java deleted file mode 100644 index 459443a2a8..0000000000 --- a/fs/src/fs/org/jnode/fs/jfat/FatCache.java +++ /dev/null @@ -1,442 +0,0 @@ -/* - * $Id$ - * - * Copyright (C) 2003-2015 JNode.org - * - * This library is free software; you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published - * by the Free Software Foundation; either version 2.1 of the License, or - * (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY - * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public - * License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with this library; If not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - */ - -package org.jnode.fs.jfat; - -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.Stack; -import org.jnode.driver.block.BlockDeviceAPI; -import org.jnode.util.LittleEndian; - - -public class FatCache { - - private final float loadFactor = 0.75f; - - private final Fat fat; - private final BlockDeviceAPI api; - private final long fatsize; - private final int nrfats; - - private int elementSize; - - private CacheMap map; - - private long access = 0; - private long hit = 0; - - public FatCache(Fat fat, int cacheSize, int elementSize) { - this.fat = fat; - this.api = fat.getApi(); - this.fatsize = - fat.getBootSector().getSectorsPerFat() * fat.getBootSector().getBytesPerSector(); - this.nrfats = fat.getBootSector().getNrFats(); - this.elementSize = elementSize; - - // allocate the LinkedHashMap - // that do the dirty LRU job - this.map = new CacheMap(cacheSize); - } - - public int getCacheSize() { - return map.getCacheSize(); - } - - public int usedEntries() { - return map.usedEntries(); - } - - public int freeEntries() { - return map.freeEntries(); - } - - private CacheElement put(long address) throws IOException { - /** - * get a CacheElement from the stack object pool - */ - CacheElement c = map.pop(); - - /** - * read the element from the device - */ - c.read(address); - - /** - * and insert the element into the LinkedHashMap - */ - map.put(c); - - /** - * stack "must" contains at least one entry the placeholder ... 
so let - * it throw an exception if this is false - */ - CacheElement e = map.peek(); - // if an element was discarded from the LRU cache - // now we can free it ... this will send the element - // to storage if is marked as dirty - if (!e.isFree()) - e.free(); - - return c; - } - - private CacheElement get(long address) throws IOException { - CacheElement c = map.get(address); - access++; - - // if the cache contains the element just return it, we have a cache hit - // this will update the LRU order: the LinkedHashMap will make it the - // newest - // - // the cache element cannot be null so we can avoid to call - // containsKey(); - if (c != null) - hit++; - // otherwise put a new element inside the cache - // possibly flushing and discarding the eldest element - else - c = put(address); - - return c; - } - - private long getUInt16(long offset) throws IOException { - long addr = offset / elementSize; - int ofs = (int) (offset % elementSize); - - byte[] data = get(addr).getData(); - return LittleEndian.getUInt16(data, ofs); - } - - private long getUInt32(long offset) throws IOException { - long addr = (long) (offset / elementSize); - int ofs = (int) (offset % elementSize); - - byte[] data = get(addr).getData(); - return LittleEndian.getUInt32(data, ofs); - } - - private void setInt16(long offset, int value) throws IOException { - long addr = offset / elementSize; - int ofs = (int) (offset % elementSize); - - CacheElement c = get(addr); - byte[] data = c.getData(); - - LittleEndian.setInt16(data, ofs, value); - - c.setDirty(); - } - - private void setInt32(long offset, int value) throws IOException { - long addr = (long) (offset / elementSize); - int ofs = (int) (offset % elementSize); - - CacheElement c = get(addr); - byte[] data = c.getData(); - - LittleEndian.setInt32(data, ofs, value); - - c.setDirty(); - } - - public long getUInt16(int index) throws IOException { - return getUInt16(fat.position(0, index)); - } - - public long getUInt32(int index) throws IOException { - return getUInt32(fat.position(0, index)); - } - - public void setInt16(int index, int element) throws IOException { - setInt16(fat.position(0, index), element); - } - - public void setInt32(int index, int element) throws IOException { - setInt32(fat.position(0, index), element); - } - - public void flush(long address) throws IOException { - CacheElement c = map.get(address); - if (c != null) - c.flush(); - } - - public void flush() throws IOException { - for (CacheElement c : map.values()) { - c.flush(); - } - } - - public long getHit() { - return hit; - } - - public long getAccess() { - return access; - } - - public double getRatio() { - if (access > 0) - return ((double) hit / (double) access); - else - return 0.0f; - } - - public String flushOrder() { - return map.flushOrder(); - } - - public String toString() { - StrWriter out = new StrWriter(); - - out.print(map); - out.println("size=" + getCacheSize() + " used=" + usedEntries() + " free=" + freeEntries()); - - return out.toString(); - } - - private class CacheMap extends LinkedHashMap { - private static final long serialVersionUID = 1L; - private final int cacheSize; - private final CacheKey key = new CacheKey(); - private final Stack free = new Stack(); - - private CacheMap(int cacheSize) { - super((int) Math.ceil(cacheSize / loadFactor) + 1, loadFactor, true); - this.cacheSize = cacheSize; - - for (int i = 0; i < cacheSize + 1; i++) - free.push(new CacheElement()); - } - - private int getCacheSize() { - return cacheSize; - } - - private int usedEntries() { - 
return size(); - } - - private int freeEntries() { - return (free.size() - 1); - } - - private CacheElement peek() { - return free.peek(); - } - - private CacheElement push(CacheElement c) { - return free.push(c); - } - - private CacheElement pop() { - return free.pop(); - } - - private CacheElement get(long address) { - key.set(address); - return get(key); - } - - private CacheElement put(CacheElement c) { - return put(c.getAddress(), c); - } - - /** - * discard the eldest element when the cache is full - */ - protected boolean removeEldestEntry(Map.Entry eldest) { - boolean remove = (size() > cacheSize); - - /** - * before going to discard the eldest push it back on the stacked - * object pool - */ - if (remove) - push(eldest.getValue()); - - return remove; - } - - public String flushOrder() { - StrWriter out = new StrWriter(); - - for (CacheElement c : values()) { - if (c.isDirty()) - out.print("<" + c.getAddress().get() + ">"); - } - - return out.toString(); - } - - public String toString() { - StrWriter out = new StrWriter(); - - for (CacheElement c : values()) - out.println(c); - - return out.toString(); - } - } - - /** - * Here we need to "wrap" a long because Java Long wrapper is an "immutable" - * type - */ - private class CacheKey { - private static final long FREE = -1; - - private long key; - - private CacheKey(long key) { - this.key = key; - } - - private CacheKey() { - free(); - } - - private void free() { - key = FREE; - } - - private boolean isFree() { - return (key == FREE); - } - - private long get() { - return key; - } - - private void set(long value) { - key = value; - } - - public int hashCode() { - return (int) (key ^ (key >>> 32)); - } - - public boolean equals(Object obj) { - return obj instanceof CacheKey && key == ((CacheKey) obj).get(); - } - - public String toString() { - return String.valueOf(key); - } - } - - private class CacheElement { - /** - * CacheKey element is allocated and its reference is stored here to - * avoid to allocate new CacheKey objects at runtime - *

- * In this way .. just one global key will be enough to access - * CacheElements - */ - private boolean dirty; - private CacheKey address; - private final ByteBuffer elem; - - private CacheElement() { - this.dirty = false; - this.address = new CacheKey(); - - // FAT-12 reads in two byte chunks so add an extra element to prevent an array index out of bounds exception - // when reading in the last element - this.elem = ByteBuffer.wrap(new byte[elementSize + 1]); - } - - private boolean isFree() { - return address.isFree(); - } - - private CacheKey getAddress() { - return address; - } - - private byte[] getData() { - return elem.array(); - } - - /** - * some more work is needed in read and write to handle the multiple fat - * availability we have to correcly handle the exception to be sure that - * if we have at least a correct fat we get it - gvt - */ - private void read(long address) throws IOException { - if (!isFree()) - throw new IllegalArgumentException("cannot read a busy element"); - - this.address.set(address); - elem.clear(); - api.read(address * elementSize, elem); - elem.clear(); - } - - private void write() throws IOException { - if (isFree()) - throw new IllegalArgumentException("cannot write a free element"); - - elem.clear(); - - long addr = address.get() * elementSize; - - for (int i = 0; i < nrfats; i++) { - api.write(addr, elem); - addr += fatsize; - elem.clear(); - } - } - - private boolean isDirty() { - return dirty; - } - - private void setDirty() { - dirty = true; - } - - private void flush() throws IOException { - if (isDirty()) { - write(); - dirty = false; - } - } - - private void free() throws IOException { - if (isFree()) - throw new IllegalArgumentException("cannot free a free element"); - flush(); - address.free(); - } - - public String toString() { - StrWriter out = new StrWriter(); - - out.print("address=" + address.get() + " dirty=" + dirty); - - return out.toString(); - } - } -} diff --git a/fs/src/fs/org/jnode/fs/jfat/FatChain.java b/fs/src/fs/org/jnode/fs/jfat/FatChain.java index bef938269b..64836c6981 100644 --- a/fs/src/fs/org/jnode/fs/jfat/FatChain.java +++ b/fs/src/fs/org/jnode/fs/jfat/FatChain.java @@ -42,8 +42,6 @@ public class FatChain { private int head; private boolean dirty; - private boolean dolog = false; - private ChainPosition position; private ChainIterator iterator; @@ -70,10 +68,6 @@ public void validate() { } } - private void mylog(String msg) { - log.debug(msg); - } - public FatFileSystem getFatFileSystem() { return fs; } @@ -140,8 +134,9 @@ private int allocateTail(int n, int m, int offset, boolean zero) throws IOExcept if (offset < 0) throw new IllegalArgumentException("offset<0"); - if (dolog) - mylog("n[" + n + "] m[" + m + "] offset[" + offset + "]"); + if (log.isDebugEnabled()) { + log.debug("n[" + n + "] m[" + m + "] offset[" + offset + "]"); + } final int last; int i, found = 0, l = 0; @@ -170,16 +165,16 @@ private int allocateTail(int n, int m, int offset, boolean zero) throws IOExcept last = l; - if (dolog) - mylog("found[" + found + "] last[" + last + "]"); + if (log.isDebugEnabled()) + log.debug("found[" + found + "] last[" + last + "]"); fat.set(last, fat.eofChain()); - if (dolog) - mylog(n + "\t|allo|\t" + last + " " + fat.eofChain()); + if (log.isDebugEnabled()) + log.debug(n + "\t|allo|\t" + last + " " + fat.eofChain()); if (zero) { - if (dolog) - mylog(n + "\t|ZERO|\t" + last + " " + fat.eofChain()); + if (log.isDebugEnabled()) + log.debug(n + "\t|ZERO|\t" + last + " " + fat.eofChain()); fat.clearCluster(last); } @@ 
-191,8 +186,8 @@ private int allocateTail(int n, int m, int offset, boolean zero) throws IOExcept for (; found < (n - m - k); i--) { if (fat.isFreeEntry(i)) { fat.set(i, l); - if (dolog) - mylog((n - found - 1) + "\t|allo|\t" + i + " " + l); + if (log.isDebugEnabled()) + log.debug((n - found - 1) + "\t|allo|\t" + i + " " + l); l = i; found++; } @@ -203,8 +198,8 @@ private int allocateTail(int n, int m, int offset, boolean zero) throws IOExcept if (fat.isFreeEntry(i)) { fat.clearCluster(i, 0, offset); fat.set(i, l); - if (dolog) - mylog((n - found - 1) + "\t|part|\t" + i + " " + l); + if (log.isDebugEnabled()) + log.debug((n - found - 1) + "\t|part|\t" + i + " " + l); l = i; found++; break; @@ -217,8 +212,8 @@ private int allocateTail(int n, int m, int offset, boolean zero) throws IOExcept if (fat.isFreeEntry(i)) { fat.clearCluster(i); fat.set(i, l); - if (dolog) - mylog((n - found - 1) + "\t|zero|\t" + i + " " + l); + if (log.isDebugEnabled()) + log.debug((n - found - 1) + "\t|zero|\t" + i + " " + l); l = i; found++; } @@ -234,8 +229,8 @@ private int allocateTail(int n, int m, int offset, boolean zero) throws IOExcept } } - if (dolog) - mylog("LastFree: " + fat.getLastFree()); + if (log.isDebugEnabled()) + log.debug("LastFree: " + fat.getLastFree()); return l; } @@ -263,14 +258,14 @@ public void allocateAndClear(int n) throws IOException { int last = allocateTail(n, n - 1, 0, true); int first = getEndCluster(); - if (dolog) - mylog(first + ":" + last); + if (log.isDebugEnabled()) + log.debug(first + ":" + last); if (first != 0) fat.set(first, last); else { - if (dolog) - mylog("allocate chain"); + if (log.isDebugEnabled()) + log.debug("allocate chain"); setStartCluster(last); } } finally { @@ -287,8 +282,8 @@ public void free(int n) throws IOException { if (count < n) throw new IOException("not enough cluster: count[" + count + "] n[" + n + "]"); - if (dolog) - mylog("count[" + count + "] n[" + n + "]"); + if (log.isDebugEnabled()) + log.debug("count[" + count + "] n[" + n + "]"); ChainIterator i; @@ -297,16 +292,16 @@ public void free(int n) throws IOException { i = listIterator(count - n - 1); int l = i.next(); fat.set(l, fat.eofChain()); - if (dolog) - mylog(l + ":" + fat.eofChain()); + if (log.isDebugEnabled()) + log.debug(l + ":" + fat.eofChain()); } else i = listIterator(0); while (i.hasNext()) { int l = i.next(); fat.set(l, fat.freeEntry()); - if (dolog) - mylog(l + ":" + fat.freeEntry()); + if (log.isDebugEnabled()) + log.debug(l + ":" + fat.freeEntry()); } } finally { fat.flush(); @@ -314,8 +309,8 @@ public void free(int n) throws IOException { if (count == n) { setStartCluster(0); - if (dolog) - mylog("zero"); + if (log.isDebugEnabled()) + log.debug("zero"); } } @@ -356,15 +351,15 @@ public void read(long offset, ByteBuffer dst) throws IOException { throw new IOException("attempt to seek after End Of Chain " + offset, ex); } - for (int l = dst.remaining(), sz = p.getPartial(), ofs = p.getOffset(), size; l > 0; l -= - size, sz = p.getSize(), ofs = 0) { + for (int l = dst.remaining(), sz = p.getPartial(), ofs = p.getOffset(), size; l > 0; + l -= size, sz = p.getSize(), ofs = 0) { int cluster = i.next(); size = Math.min(sz, l); - if (dolog) - mylog("read " + size + " bytes from cluster " + cluster + " at offset " + ofs); + if (log.isDebugEnabled()) + log.debug("read " + size + " bytes from cluster " + cluster + " at offset " + ofs); int limit = dst.limit(); @@ -474,8 +469,8 @@ public void write(long length, long offset, ByteBuffer src) throws IOException { size = Math.min(sz, l); - 
if (dolog)
-                mylog("write " + size + " bytes to cluster " + cluster + " at offset " + ofs);
+            if (log.isDebugEnabled())
+                log.debug("write " + size + " bytes to cluster " + cluster + " at offset " + ofs);
 
             int limit = src.limit();
 
diff --git a/fs/src/fs/org/jnode/fs/jfat/FatDirEntry.java b/fs/src/fs/org/jnode/fs/jfat/FatDirEntry.java
index 3a180a8aae..e752cb0ac0 100644
--- a/fs/src/fs/org/jnode/fs/jfat/FatDirEntry.java
+++ b/fs/src/fs/org/jnode/fs/jfat/FatDirEntry.java
@@ -38,7 +38,7 @@ public class FatDirEntry {
     protected final FatFileSystem fs;
     protected final FatMarshal entry;
 
-    protected int index;
+    protected final int index;
 
     private boolean lastDirEntry = false;
     private boolean freeDirEntry = false;
@@ -70,12 +70,6 @@ public int getIndex() {
         return index;
     }
 
-    protected void setIndex(int value) {
-        if (value < 0)
-            throw new IllegalArgumentException("value<0");
-        index = value;
-    }
-
     public int length() {
         return entry.length();
     }
diff --git a/fs/src/fs/org/jnode/fs/jfat/FatDirectory.java b/fs/src/fs/org/jnode/fs/jfat/FatDirectory.java
index 6a10c29bd8..52299e84a4 100644
--- a/fs/src/fs/org/jnode/fs/jfat/FatDirectory.java
+++ b/fs/src/fs/org/jnode/fs/jfat/FatDirectory.java
@@ -8,29 +8,35 @@
  * by the Free Software Foundation; either version 2.1 of the License, or
  * (at your option) any later version.
  *
- * This library is distributed in the hope that it will be useful, but 
+ * This library is distributed in the hope that it will be useful, but
  * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public 
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
  * License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
- * along with this library; If not, write to the Free Software Foundation, Inc., 
+ * along with this library; If not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
- 
+
 package org.jnode.fs.jfat;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.HashMap;
 import java.util.Iterator;
+import java.util.LinkedHashMap;
+import java.util.Locale;
 import java.util.Map;
 import java.util.NoSuchElementException;
+import org.apache.log4j.Logger;
 import org.jnode.fs.FSDirectory;
 import org.jnode.fs.FSDirectoryId;
 import org.jnode.fs.FSEntry;
 
 public class FatDirectory extends FatEntry implements FSDirectory, FSDirectoryId {
+    private static final Logger log = Logger.getLogger(FatDirectory.class);
+
+    private static final boolean debugEntries = Boolean.getBoolean("org.jnode.fs.jfat.dir.debugEntries");
+
     public static final int MAXENTRIES = 65535; // 2^16-1; fatgen 1.03, page 33
 
     private final FatTable children = new FatTable();
@@ -38,7 +44,7 @@ public class FatDirectory extends FatEntry implements FSDirectory, FSDirectoryId
     /**
      * The map of ID -> entry.
     */
-    private final Map<String, FatEntry> idMap = new HashMap<String, FatEntry>();
+    private final Map<String, FatEntry> idMap = new LinkedHashMap<String, FatEntry>();
 
     /*
     * for root directory
@@ -282,21 +288,41 @@ public synchronized FSEntry getEntry(String name) {
     }
 
     @Override
-    public FSEntry getEntryById(String id) throws IOException {
-        FatEntry child = idMap.get(id);
+    public FSEntry getEntryById(String id) {
+        synchronized (idMap) {
+            FatEntry child = idMap.get(id);
 
-        if (child == null) {
-            FatEntriesFactory f = createEntriesFactory(true);
+            if (child == null) {
+                if (debugEntries) {
+                    dumpEntriesToLog();
+                }
 
-            while (f.hasNext()) {
-                FatEntry entry = f.next();
-                idMap.put(entry.getId(), entry);
+                FatEntriesFactory f = createEntriesFactory(true);
+
+                while (f.hasNext()) {
+                    FatEntry entry = f.next();
+                    idMap.put(entry.getId(), entry);
+                }
+
+                return idMap.get(id);
             }
 
-            return idMap.get(id);
+            return child;
         }
+    }
 
-        return child;
+    private void dumpEntriesToLog() {
+        StringBuilder builder = new StringBuilder();
+        FatEntriesFactory factory = createEntriesFactory(true);
+
+        while (factory.hasNext()) {
+            FatEntry entry = factory.next();
+            builder.append(String.format(Locale.ROOT, "%s index:%d\n", entry, entry.getIndex()));
+        }
+
+        log.info("Directory Entries for: " + this + "\n" +
+            "--------------------------------------------------------------------------\n" +
+            builder + "\n\n\n");
     }
 
     public FatEntry getEntryByShortName(byte[] shortName) {
@@ -358,9 +384,11 @@ public synchronized FSEntry addFile(String name) throws IOException {
         FatFile file = new FatFile(getFatFileSystem(), this, record);
         file.flush();
 
-        FatEntry entry = children.put(file);
-        idMap.put(entry.getId(), entry);
-        return entry;
+        synchronized (idMap) {
+            FatEntry entry = children.put(file);
+            idMap.put(entry.getId(), entry);
+            return entry;
+        }
     }
 
     public synchronized FSEntry addDirectory(String name) throws IOException {
@@ -374,9 +402,11 @@ public synchronized FSEntry addDirectory(String name) throws IOException {
         dir.initialize();
         dir.flush();
 
-        FatEntry entry = children.put(dir);
-        idMap.put(entry.getId(), entry);
-        return entry;
+        synchronized (idMap) {
+            FatEntry entry = children.put(dir);
+            idMap.put(entry.getId(), entry);
+            return entry;
+        }
     }
 
     public synchronized void remove(String name) throws IOException {
@@ -399,7 +429,9 @@ public synchronized void remove(String name) throws IOException {
             dir.flush();
         }
 
-        idMap.remove(entry.getId());
+        synchronized (idMap) {
+            idMap.remove(entry.getId());
+        }
     }
 
     @Override
diff --git a/fs/src/fs/org/jnode/fs/jfat/FatDotDirEntry.java b/fs/src/fs/org/jnode/fs/jfat/FatDotDirEntry.java
index 8499361066..f2483c6366 100644
--- a/fs/src/fs/org/jnode/fs/jfat/FatDotDirEntry.java
+++ b/fs/src/fs/org/jnode/fs/jfat/FatDotDirEntry.java
@@ -40,10 +40,7 @@ public FatDotDirEntry(FatFileSystem fs, boolean dotDot, FatShortDirEntry parent,
         int startCluster) throws IOException {
         super(fs);
         init(parent, startCluster);
-        if (!dotDot) {
-            setIndex(0);
-        } else {
-            setIndex(1);
+        if (dotDot) {
             lName[1] = dot;
         }
         encodeName();
diff --git a/fs/src/fs/org/jnode/fs/jfat/FatEntriesFactory.java b/fs/src/fs/org/jnode/fs/jfat/FatEntriesFactory.java
index b5776f0391..fe58ba1e48 100644
--- a/fs/src/fs/org/jnode/fs/jfat/FatEntriesFactory.java
+++ b/fs/src/fs/org/jnode/fs/jfat/FatEntriesFactory.java
@@ -11,7 +11,6 @@ public class FatEntriesFactory implements Iterator<FatEntry> {
     private boolean label;
     private int index;
-    private int next;
     private FatEntry entry;
 
     /**
@@ -25,42 +24,39 @@ public class FatEntriesFactory implements Iterator<FatEntry> {
     private FatDirectory directory;
 
     public 
FatEntriesFactory(FatDirectory directory, boolean includeDeleted) { - label = false; - index = 0; - next = 0; - entry = null; this.includeDeleted = includeDeleted; this.directory = directory; } + @Override + public boolean hasNext() { + if (entry == null) { + fetchNext(); + } + + return entry != null; + } + /** - * Returns the index of the entry the factory is up to. - * - * @return the index. + * Fetches the next entry into {@link #entry}. */ - public int getIndex() { - return index; - } + protected void fetchNext() { + if (index > FatDirectory.MAXENTRIES) { + log.debug("Full Directory: invalid index " + index); + } - @Override - public boolean hasNext() { - int i; FatDirEntry dirEntry; FatRecord record = new FatRecord(); + int i = index; - if (index > FatDirectory.MAXENTRIES) - log.debug("Full Directory: invalid index " + index); - - for (i = index;; ) { - /* - * create a new entry from the chain - */ + while (true) { try { + // Read the next entry dirEntry = directory.getFatDirEntry(i, includeDeleted); i++; } catch (NoSuchElementException ex) { entry = null; - return false; + return; } catch (IOException ex) { log.debug("cannot read entry " + i); i++; @@ -101,30 +97,25 @@ public boolean hasNext() { } } else if (dirEntry.isLastDirEntry()) { entry = null; - return false; - } else - throw new UnsupportedOperationException( - "FatDirEntry is of unknown type, shouldn't happen"); + return; + } else { + throw new UnsupportedOperationException("FatDirEntry is of unknown type, shouldn't happen"); + } } - if (!dirEntry.isShortDirEntry()) + if (!dirEntry.isShortDirEntry()) { throw new UnsupportedOperationException("shouldn't happen"); + } record.close((FatShortDirEntry) dirEntry); - /* - * here recursion is in action for the entries factory it creates - * directory nodes and file leafs - */ if (((FatShortDirEntry) dirEntry).isDirectory()) { this.entry = createFatDirectory(record); } else { this.entry = createFatFile(record); } - this.next = i; - - return true; + index = i; } /** @@ -149,16 +140,17 @@ protected FatEntry createFatFile(FatRecord record) { @Override public FatEntry next() { - if (index == next) { - hasNext(); + if (entry == null) { + fetchNext(); } if (entry == null) { throw new NoSuchElementException(); } - index = next; - return entry; + FatEntry result = entry; + entry = null; + return result; } @Override diff --git a/fs/src/fs/org/jnode/fs/jfat/FatEntry.java b/fs/src/fs/org/jnode/fs/jfat/FatEntry.java index 41e9f6a3ad..9f4d8887d3 100644 --- a/fs/src/fs/org/jnode/fs/jfat/FatEntry.java +++ b/fs/src/fs/org/jnode/fs/jfat/FatEntry.java @@ -270,7 +270,8 @@ public String toStringValue() { } public String toString() { - return String.format("FatEntry:[dir:%b start-cluster:%d]:%s", isDirectory(), getStartCluster(), getName()); + return String.format("FatEntry:[dir:%b del:%b start-cluster:%d]:%s", isDirectory(), entry.isFreeDirEntry(), + getStartCluster(), getName()); } public String toDebugString() { diff --git a/fs/src/fs/org/jnode/fs/jfat/FatFileSystem.java b/fs/src/fs/org/jnode/fs/jfat/FatFileSystem.java index a8874332f8..d9c4a6404a 100644 --- a/fs/src/fs/org/jnode/fs/jfat/FatFileSystem.java +++ b/fs/src/fs/org/jnode/fs/jfat/FatFileSystem.java @@ -88,7 +88,6 @@ protected FatRootDirectory createRootEntry() throws IOException { public void flush() throws IOException { super.flush(); fat.flush(); - log.debug(getFat().getCacheStat()); } @Override diff --git a/fs/src/fs/org/jnode/fs/jfat/FatShortDirEntry.java b/fs/src/fs/org/jnode/fs/jfat/FatShortDirEntry.java index 041368beb5..d4a07a19ea 
100644 --- a/fs/src/fs/org/jnode/fs/jfat/FatShortDirEntry.java +++ b/fs/src/fs/org/jnode/fs/jfat/FatShortDirEntry.java @@ -308,7 +308,7 @@ public boolean isShortDirEntry() { return true; } - private FatCase getNameCase() { + public FatCase getNameCase() { return ncase; } @@ -411,6 +411,10 @@ public String getBase() { return base; } + protected void setBase(String base) { + this.base = base; + } + public String getExt() { return ext; } diff --git a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java index dcc5d4b05d..fb5df7365a 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java +++ b/fs/src/fs/org/jnode/fs/ntfs/FileRecord.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.NoSuchElementException; @@ -93,7 +94,7 @@ public class FileRecord extends NTFSRecord { * @param offset offset into the buffer. */ public FileRecord(NTFSVolume volume, long referenceNumber, byte[] buffer, int offset) throws IOException { - this(volume, volume.getBootRecord().getBytesPerSector(), volume.getClusterSize(), true, referenceNumber, + this(volume, volume.getClusterSize(), true, referenceNumber, buffer, offset); } @@ -101,26 +102,20 @@ public FileRecord(NTFSVolume volume, long referenceNumber, byte[] buffer, int of * Initialize this instance. * * @param volume reference to the NTFS volume. - * @param bytesPerSector the number of bytes-per-sector in this volume. * @param clusterSize the cluster size for the volume containing this record. * @param strictFixUp indicates whether an exception should be throw if fix-up values don't match. * @param referenceNumber the reference number of the file within the MFT. * @param buffer data buffer. * @param offset offset into the buffer. */ - public FileRecord(NTFSVolume volume, int bytesPerSector, int clusterSize, boolean strictFixUp, long referenceNumber, + public FileRecord(NTFSVolume volume, int clusterSize, boolean strictFixUp, long referenceNumber, byte[] buffer, int offset) throws IOException { - super(bytesPerSector, strictFixUp, buffer, offset); + super(strictFixUp, buffer, offset); this.volume = volume; this.clusterSize = clusterSize; this.referenceNumber = referenceNumber; - - storedAttributeList = readStoredAttributes(); - - // Linux NTFS docs say there can only be one of these, so I'll believe them. - attributeListAttribute = (AttributeListAttribute) findStoredAttributeByType(NTFSAttribute.Types.ATTRIBUTE_LIST); } /** @@ -129,20 +124,22 @@ public FileRecord(NTFSVolume volume, int bytesPerSector, int clusterSize, boolea * @throws IOException if an error occurs. 
     */
     public void checkIfValid() throws IOException {
-        // check for the magic number to see if we have a filerecord
+        // check for the magic number to see if we have a file record
         if (getMagic() != Magic.FILE) {
-            log.debug("Invalid magic number found for FILE record: " + getMagic() + " -- dumping buffer");
-            for (int off = 0; off < getBuffer().length; off += 32) {
-                StringBuilder builder = new StringBuilder();
-                for (int i = off; i < off + 32 && i < getBuffer().length; i++) {
-                    String hex = Integer.toHexString(getBuffer()[i]);
-                    while (hex.length() < 2) {
-                        hex = '0' + hex;
-                    }
+            if (log.isDebugEnabled()) {
+                log.debug("Invalid magic number found for FILE record: " + getMagic() + " -- dumping buffer");
+                for (int off = 0; off < getBuffer().length; off += 32) {
+                    StringBuilder builder = new StringBuilder();
+                    for (int i = off; i < off + 32 && i < getBuffer().length; i++) {
+                        String hex = Integer.toHexString(getBuffer()[i]);
+                        while (hex.length() < 2) {
+                            hex = '0' + hex;
+                        }
 
-                    builder.append(' ').append(hex);
+                        builder.append(' ').append(hex);
+                    }
+                    log.debug(builder.toString());
                 }
-                log.debug(builder.toString());
             }
 
             throw new IOException("Invalid magic found: " + getMagic());
@@ -359,6 +356,9 @@ public FileNameAttribute getFileNameAttribute() {
      * @return an iterator over attributes stored in this file record.
     */
    public List<NTFSAttribute> getAllStoredAttributes() {
+        if (storedAttributeList == null) {
+            storedAttributeList = readStoredAttributes();
+        }
        return storedAttributeList;
    }
 
@@ -369,7 +369,7 @@ public List<NTFSAttribute> getAllStoredAttributes() {
      * @return the attribute found, or {@code null} if not found.
     */
    private NTFSAttribute findStoredAttributeByID(int id) {
-        for (NTFSAttribute attr : storedAttributeList) {
+        for (NTFSAttribute attr : getAllStoredAttributes()) {
            if (attr != null && attr.getAttributeID() == id) {
                return attr;
            }
@@ -385,7 +385,7 @@ private NTFSAttribute findStoredAttributeByID(int id) {
      * @see NTFSAttribute.Types
     */
    private NTFSAttribute findStoredAttributeByType(int typeID) {
-        for (NTFSAttribute attr : storedAttributeList) {
+        for (NTFSAttribute attr : getAllStoredAttributes()) {
            if (attr != null && attr.getAttributeType() == typeID) {
                return attr;
            }
@@ -393,6 +393,21 @@ private NTFSAttribute findStoredAttributeByType(int typeID) {
        return null;
    }
 
+    /**
+     * Gets the attributes list attribute, if the record has one.
+     *
+     * @return the attribute, or {@code null}.
+     */
+    public AttributeListAttribute getAttributeListAttribute() {
+        if (attributeListAttribute == null) {
+            // Linux NTFS docs say there can only be one of these, so I'll believe them.
+            attributeListAttribute =
+                (AttributeListAttribute) findStoredAttributeByType(NTFSAttribute.Types.ATTRIBUTE_LIST);
+        }
+
+        return attributeListAttribute;
+    }
+
     /**
      * Gets a collection of all attributes in this file record, including any attributes
      * which are stored in other file records referenced from an $ATTRIBUTE_LIST attribute.
@@ -402,15 +417,27 @@ private NTFSAttribute findStoredAttributeByType(int typeID) { public synchronized List getAllAttributes() { if (attributeList == null) { try { - if (attributeListAttribute == null) { + if (getAttributeListAttribute() == null) { log.debug("All attributes stored"); attributeList = new ArrayList(getAllStoredAttributes()); } else { log.debug("Attributes in attribute list"); - readAttributeListAttributes(); + attributeList = readAttributeListAttributes(new FileRecordSupplier() { + @Override + public FileRecord getRecord(long referenceNumber) throws IOException { + // When reading the MFT itself don't attempt to check the index is in range + // (we won't know the total MFT length yet) + MasterFileTable mft = getVolume().getMFT(); + return getReferenceNumber() == MasterFileTable.SystemFiles.MFT + ? mft.getRecordUnchecked(referenceNumber) + : mft.getRecord(referenceNumber); + } + }); } } catch (Exception e) { - log.error("Error getting attributes for entry: " + this, e); + log.error("Error getting attributes for file record: " + referenceNumber + + ", returning stored attributes", e); + attributeList = new ArrayList(getAllStoredAttributes()); } } @@ -649,24 +676,47 @@ public void readData(int attributeType, String streamName, long fileOffset, byte @Override public String toString() { + String fileName = null; + + try { + // Only look at stored attributes to determine the file name to avoid a possible stack overflow + for (NTFSAttribute attribute : getAllStoredAttributes()) { + if (attribute.getAttributeType() == NTFSAttribute.Types.FILE_NAME) { + FileNameAttribute fileNameAttribute = (FileNameAttribute) attribute; + if (fileName == null || fileNameAttribute.getNameSpace() == FileNameAttribute.NameSpace.WIN32) { + fileName = fileNameAttribute.getFileName(); + } + } + } + } catch (Exception e) { + log.debug("Error getting file name for file record: " + referenceNumber, e); + } + if (isInUse()) { - return String.format("FileRecord [%d fileName='%s']", referenceNumber, getFileName()); + return String.format("FileRecord [%d name='%s']", referenceNumber, fileName); } else { - return String.format("FileRecord [%d unused]", referenceNumber); + return String.format("FileRecord [%d unused name='%s']", referenceNumber, fileName); } } /** * Reads in all attributes referenced by the attribute-list attribute. + * + * @param recordSupplier the FILE record supplier. + * @return the list of attributes. 
*/ - private synchronized void readAttributeListAttributes() { + private List readAttributeListAttributes(FileRecordSupplier recordSupplier) { Iterator entryIterator; try { + AttributeListAttribute attributeListAttribute = getAttributeListAttribute(); + if (attributeListAttribute == null) { + return Collections.emptyList(); + } entryIterator = attributeListAttribute.getAllEntries(); } catch (Exception e) { - throw new IllegalStateException("Error getting attributes from attribute list, file record " + - FileRecord.this, e); + throw new IllegalStateException("Error getting attributes from attribute list, file record: " + + referenceNumber, e); } AttributeListBuilder attributeListBuilder = new AttributeListBuilder(); @@ -682,22 +732,28 @@ private synchronized void readAttributeListAttributes() { attribute = findStoredAttributeByID(entry.getAttributeID()); attributeListBuilder.add(attribute); } else { - log.debug("Looking up MFT entry for: " + entry.getFileReferenceNumber()); - - // When reading the MFT itself don't attempt to check the index is in range (we won't know the total - // MFT length yet) - MasterFileTable mft = getVolume().getMFT(); - FileRecord holdingRecord = getReferenceNumber() == MasterFileTable.SystemFiles.MFT - ? mft.getRecordUnchecked(entry.getFileReferenceNumber()) - : mft.getRecord(entry.getFileReferenceNumber()); + if (entry.getFileReferenceNumber() == 0) { + log.debug("Skipping lookup for entry: " + entry); + continue; + } - attribute = holdingRecord.findStoredAttributeByID(entry.getAttributeID()); + if (log.isDebugEnabled()) { + log.debug("Looking up MFT entry for: " + entry.getFileReferenceNumber()); + } - if (attribute == null) { - log.error(String.format("Failed to find an attribute matching entry '%s' in the holding record", + FileRecord holdingRecord = recordSupplier.getRecord(entry.getFileReferenceNumber()); + if (holdingRecord == null) { + log.error(String.format("Failed to look up holding record %d for entry '%s'", entry.getFileReferenceNumber(), entry)); } else { - attributeListBuilder.add(attribute); + attribute = holdingRecord.findStoredAttributeByID(entry.getAttributeID()); + + if (attribute == null) { + log.error(String.format("Failed to find an attribute matching entry '%s' in the holding " + + "record, ref=%d", entry, referenceNumber)); + } else { + attributeListBuilder.add(attribute); + } + } } } catch (Exception e) { @@ -706,7 +762,7 @@ private synchronized void readAttributeListAttributes() { } } - attributeList = attributeListBuilder.toList(); + return attributeListBuilder.toList(); } /** diff --git a/fs/src/fs/org/jnode/fs/ntfs/FileRecordSupplier.java b/fs/src/fs/org/jnode/fs/ntfs/FileRecordSupplier.java new file mode 100644 index 0000000000..061067a986 --- /dev/null +++ b/fs/src/fs/org/jnode/fs/ntfs/FileRecordSupplier.java @@ -0,0 +1,17 @@ +package org.jnode.fs.ntfs; + +import java.io.IOException; + +/** + * A FILE record supplier. + */ +public interface FileRecordSupplier { + /** + * Gets a record. + * + * @param referenceNumber the reference number. + * @return the record, or {@code null} if the record cannot be looked up. + * @throws IOException if an error occurs.
+ */ + FileRecord getRecord(long referenceNumber) throws IOException; +} diff --git a/fs/src/fs/org/jnode/fs/ntfs/MasterFileTable.java b/fs/src/fs/org/jnode/fs/ntfs/MasterFileTable.java index 6c671867ef..ffedd04357 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/MasterFileTable.java +++ b/fs/src/fs/org/jnode/fs/ntfs/MasterFileTable.java @@ -142,15 +142,14 @@ public MasterFileTable(NTFSVolume volume, byte[] buffer, int offset) throws IOEx * Creates a new MFT instance. * * @param volume the NTFS volume. - * @param bytesPerSector the bytes per-sector. * @param clusterSize the cluster size. * @param strictFixUp indicates whether to throw an exception if a fix-up error is detected. * @param buffer the buffer to read from. * @param offset the offset to read at. * @throws IOException if an error occurs creating the MFT. */ - public MasterFileTable(NTFSVolume volume, int bytesPerSector, int clusterSize, boolean strictFixUp, byte[] buffer, int offset) throws IOException { - super(volume, bytesPerSector, clusterSize, strictFixUp, SystemFiles.MFT, buffer, offset); + public MasterFileTable(NTFSVolume volume, int clusterSize, boolean strictFixUp, byte[] buffer, int offset) throws IOException { + super(volume, clusterSize, strictFixUp, SystemFiles.MFT, buffer, offset); } /** diff --git a/fs/src/fs/org/jnode/fs/ntfs/NTFSRecord.java b/fs/src/fs/org/jnode/fs/ntfs/NTFSRecord.java index d58bb1d224..4014d519c8 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/NTFSRecord.java +++ b/fs/src/fs/org/jnode/fs/ntfs/NTFSRecord.java @@ -62,36 +62,27 @@ public static class Magic { public static final int INDX = 0x58444e49; } - /** - * The bytes-pre-sector in this NTFS volume. - */ - private final int bytesPerSector; - /** * Creates a new record. * - * @param bytesPerSector the bytes-pre-sector in this NTFS volume. * @param strictFixUp indicates whether an exception should be throw if fix-up values don't match. * @param buffer the buffer to read from. * @param offset the offset in the buffer to read from. */ - public NTFSRecord(int bytesPerSector, boolean strictFixUp, byte[] buffer, int offset) throws IOException { + public NTFSRecord(boolean strictFixUp, byte[] buffer, int offset) throws IOException { super(buffer, offset); - this.bytesPerSector = bytesPerSector; fixUp(strictFixUp); } /** * Creates a new record. * - * @param bytesPerSector the bytes-pre-sector in this NTFS volume. * @param strictFixUp indicates whether an exception should be throw if fix-up values don't match. * @param parent the parent structure. * @param offset the offset in the parent to read from. 
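The FileRecordSupplier callback above decouples attribute-list resolution from a live MasterFileTable, which also makes the lookup easy to stub out. A hypothetical map-backed supplier satisfying the same contract (returning null for unknown reference numbers, as the interface javadoc allows):

```java
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;

import org.jnode.fs.ntfs.FileRecord;
import org.jnode.fs.ntfs.FileRecordSupplier;

// Hypothetical test double: resolves reference numbers from a pre-built
// map instead of walking the MFT.
class MapFileRecordSupplier implements FileRecordSupplier {
    private final Map<Long, FileRecord> records = new HashMap<>();

    void put(long referenceNumber, FileRecord record) {
        records.put(referenceNumber, record);
    }

    @Override
    public FileRecord getRecord(long referenceNumber) throws IOException {
        return records.get(referenceNumber); // null when the record is unknown
    }
}
```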
*/ - public NTFSRecord(int bytesPerSector, boolean strictFixUp, NTFSStructure parent, int offset) throws IOException { + public NTFSRecord(boolean strictFixUp, NTFSStructure parent, int offset) throws IOException { super(parent, offset); - this.bytesPerSector = bytesPerSector; fixUp(strictFixUp); } @@ -138,7 +129,7 @@ private void fixUp(boolean strictFixUp) throws IOException { // header for (int i = 1/* intended */; i < usnCount; i++) { - final int bufOffset = (i * bytesPerSector) - 2; + final int bufOffset = (i * 512) - 2; final int usnOffset = updateSequenceOffset + (i * 2); if (getUInt16(bufOffset) == usn) { setUInt16(bufOffset, getUInt16(usnOffset)); diff --git a/fs/src/fs/org/jnode/fs/ntfs/NTFSStructure.java b/fs/src/fs/org/jnode/fs/ntfs/NTFSStructure.java index a9f2c758d4..b70dd6e435 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/NTFSStructure.java +++ b/fs/src/fs/org/jnode/fs/ntfs/NTFSStructure.java @@ -181,7 +181,7 @@ public final int getInt24(int offset) { } /** - * Read n signed 32-bit integer from a given offset. + * Read a signed 32-bit integer from a given offset. * * @param offset * @return @@ -191,7 +191,17 @@ public final int getInt32(int offset) { } /** - * Read n signed 48-bit integer from a given offset. + * Read a signed 40-bit integer from a given offset. + * + * @param offset + * @return + */ + public final long getInt40(int offset) { + return LittleEndian.getInt40(buffer, this.offset + offset); + } + + /** + * Read a signed 48-bit integer from a given offset. * * @param offset * @return @@ -201,7 +211,7 @@ public final long getInt48(int offset) { } /** - * Read n signed 64-bit integer from a given offset. + * Read a signed 64-bit integer from a given offset. * * @param offset * @return diff --git a/fs/src/fs/org/jnode/fs/ntfs/attribute/AttributeListBuilder.java b/fs/src/fs/org/jnode/fs/ntfs/attribute/AttributeListBuilder.java index 4c7fcbed5e..d5e2a2a48a 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/attribute/AttributeListBuilder.java +++ b/fs/src/fs/org/jnode/fs/ntfs/attribute/AttributeListBuilder.java @@ -58,6 +58,10 @@ public class AttributeListBuilder { * @param attribute the attribute to add. */ public void add(NTFSAttribute attribute) { + if (attribute == null) { + return; + } + if (attribute.isResident()) { attributeList.add(attribute); } else { diff --git a/fs/src/fs/org/jnode/fs/ntfs/attribute/AttributeListEntry.java b/fs/src/fs/org/jnode/fs/ntfs/attribute/AttributeListEntry.java index df95abd3eb..d70598671c 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/attribute/AttributeListEntry.java +++ b/fs/src/fs/org/jnode/fs/ntfs/attribute/AttributeListEntry.java @@ -131,16 +131,11 @@ public String getName() { @Override public String toString() { - StringBuilder builder = new StringBuilder(super.toString()); - builder.append("[type=").append(getType()); - builder.append(",name=").append(getName()); - if (getStartingVCN() == 0) { - builder.append(",resident"); - } else { - builder.append(",ref=").append(getFileReferenceNumber()); - builder.append(",vcn=").append(getStartingVCN()); - } - builder.append(",id=").append(getAttributeID()).append("]"); - return builder.toString(); + return String.format("attr-list-entry:[type=0x%x,name='%s',ref=%d,%s,id=0x%x]", + getType(), + getName(), + getFileReferenceNumber(), + getStartingVCN() == 0 ? 
"resident" : "vcn=" + getStartingVCN(), + getAttributeID()); } } diff --git a/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSAttribute.java b/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSAttribute.java index 900fba8069..92a4f89cae 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSAttribute.java +++ b/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSAttribute.java @@ -26,6 +26,7 @@ import org.jnode.fs.ntfs.StandardInformationAttribute; import org.jnode.fs.ntfs.index.IndexAllocationAttribute; import org.jnode.fs.ntfs.index.IndexRootAttribute; +import org.jnode.fs.util.FSUtils; /** * @author Chira @@ -173,6 +174,18 @@ public int getSize() { return getUInt32AsInt(4); } + /** + * Generates a hex dump of the attribute's data. + * + * @return the hex dump. + */ + public String hexDump() { + int length = getBuffer().length - getOffset(); + byte[] data = new byte[length]; + getData(0, data, 0, data.length); + return FSUtils.toString(data); + } + /** * Generates a debug string for the attribute. * diff --git a/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSNonResidentAttribute.java b/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSNonResidentAttribute.java index fc8bf8bd6c..4b80d78715 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSNonResidentAttribute.java +++ b/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSNonResidentAttribute.java @@ -26,7 +26,6 @@ import org.jnode.fs.ntfs.NTFSVolume; import org.jnode.fs.ntfs.datarun.DataRunDecoder; import org.jnode.fs.ntfs.datarun.DataRunInterface; -import org.jnode.fs.util.FSUtils; /** * An NTFS file attribute that has its data stored outside the attribute. @@ -159,10 +158,10 @@ public int readVCN(long vcn, byte[] dst, int dstOffset, int nrClusters) throws I final int clusterSize = volume.getClusterSize(); int readClusters = 0; for (DataRunInterface dataRun : getDataRuns()) { - readClusters += dataRun.readClusters(vcn, dst, dstOffset, nrClusters, clusterSize, volume); - if (readClusters == nrClusters) { + if (readClusters >= nrClusters) { break; } + readClusters += dataRun.readClusters(vcn, dst, dstOffset, nrClusters, clusterSize, volume); } if (log.isDebugEnabled()) { @@ -172,18 +171,6 @@ public int readVCN(long vcn, byte[] dst, int dstOffset, int nrClusters) throws I return readClusters; } - /** - * Generates a hex dump of the attribute's data. - * - * @return the hex dump. - */ - public String hexDump() { - int length = getBuffer().length - getOffset(); - byte[] data = new byte[length]; - getData(0, data, 0, data.length); - return FSUtils.toString(data); - } - @Override public String toString() { return String.format("[attribute (non-res) type=x%x name'%s' size=%d runs=%d]", getAttributeType(), diff --git a/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSResidentAttribute.java b/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSResidentAttribute.java index 1069f65ae5..4ca936b91c 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSResidentAttribute.java +++ b/fs/src/fs/org/jnode/fs/ntfs/attribute/NTFSResidentAttribute.java @@ -21,7 +21,6 @@ package org.jnode.fs.ntfs.attribute; import org.jnode.fs.ntfs.FileRecord; -import org.jnode.fs.util.FSUtils; /** * An NTFS file attribute that has its data stored inside the attribute. @@ -59,17 +58,6 @@ public int getAttributeLength() { return (int) getUInt32(0x10); } - /** - * Generates a hex dump of the attribute's data. - * - * @return the hex dump. 
- */ - public String hexDump() { - byte[] attributeData = new byte[getAttributeLength()]; - getData(getAttributeOffset(), attributeData, 0, attributeData.length); - return FSUtils.toString(attributeData); - } - @Override public String toString() { return String.format("[attribute (res) type=x%x name'%s' size=%d]", getAttributeType(), getAttributeName(), diff --git a/fs/src/fs/org/jnode/fs/ntfs/datarun/CompressedDataRun.java b/fs/src/fs/org/jnode/fs/ntfs/datarun/CompressedDataRun.java index e4f41e5946..b9ef4b371f 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/datarun/CompressedDataRun.java +++ b/fs/src/fs/org/jnode/fs/ntfs/datarun/CompressedDataRun.java @@ -26,6 +26,7 @@ import java.util.List; import org.apache.log4j.Logger; import org.jnode.fs.ntfs.NTFSVolume; +import org.jnode.fs.util.FSUtils; import org.jnode.util.LittleEndian; /** @@ -70,7 +71,7 @@ public CompressedDataRun(DataRun compressedRun, int compressionUnitSize) { * * @return the length of the run in clusters. */ - public int getLength() { + public long getLength() { return compressionUnitSize; } @@ -116,7 +117,7 @@ public int readClusters(long vcn, byte[] dst, int dstOffset, int nrClusters, int // This is the actual number of stored clusters after compression. // If the number of stored clusters is the same as the compression unit size, // then the data can be read directly without decompressing it. - final int compClusters = compressedRun.getLength(); + int compClusters = FSUtils.checkedCast(compressedRun.getLength()); if (compClusters == compressionUnitSize) { return compressedRun.readClusters(vcn, dst, dstOffset, compClusters, clusterSize, volume); } diff --git a/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRun.java b/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRun.java index 015b01e86e..3d10b31b39 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRun.java +++ b/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRun.java @@ -25,15 +25,13 @@ import org.apache.log4j.Logger; import org.jnode.fs.ntfs.NTFSStructure; import org.jnode.fs.ntfs.NTFSVolume; +import org.jnode.fs.util.FSUtils; /** * @author Ewout Prangsma (epr@users.sourceforge.net) */ public final class DataRun implements DataRunInterface { - /** - * Type of this datarun - */ /** * logger */ @@ -48,7 +46,7 @@ public final class DataRun implements DataRunInterface { /** * Length of datarun in clusters */ - private final int length; + private final long length; /** * Flag indicating that the data is not stored on disk but is all zero. @@ -74,7 +72,7 @@ public final class DataRun implements DataRunInterface { * @param size Size in bytes of this datarun descriptor * @param vcn First VCN of this datarun. 
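FSUtils.checkedCast, used above when narrowing the now-long cluster counts back to int, is presumably an overflow-checked narrowing conversion along the lines of Math.toIntExact; a hypothetical stand-in with the assumed behaviour:

```java
// Assumed behaviour only: fail loudly rather than silently truncate.
static int checkedCast(long value) {
    if (value < Integer.MIN_VALUE || value > Integer.MAX_VALUE) {
        throw new IllegalArgumentException("Value out of int range: " + value);
    }
    return (int) value;
}
```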
*/ - public DataRun(long cluster, int length, boolean sparse, int size, long vcn) { + public DataRun(long cluster, long length, boolean sparse, int size, long vcn) { this.cluster = cluster; this.length = length; this.sparse = sparse; @@ -115,12 +113,12 @@ public DataRun(NTFSStructure attr, int offset, long vcn, long previousLCN) { length = dataRunStructure.getUInt24(1); break; case 0x04: - length = dataRunStructure.getUInt32AsInt(1); + length = dataRunStructure.getUInt32(1); break; default: throw new IllegalArgumentException("Invalid length length " + lenlen); } - final int cluster; + final long cluster; switch (clusterlen) { case 0x00: sparse = true; @@ -138,6 +136,9 @@ public DataRun(NTFSStructure attr, int offset, long vcn, long previousLCN) { case 0x04: cluster = dataRunStructure.getInt32(1 + lenlen); break; + case 0x05: + cluster = dataRunStructure.getInt40(1 + lenlen); + break; default: throw new IllegalArgumentException("Unknown cluster length " + clusterlen); } @@ -176,7 +177,7 @@ public int getSize() { * * @return Returns the length. */ - public int getLength() { + public long getLength() { return length; } @@ -216,7 +217,7 @@ public int readClusters(long vcn, byte[] dst, int dstOffset, int nrClusters, int NTFSVolume volume) throws IOException { final long myFirstVcn = getFirstVcn(); - final int myLength = getLength(); + final long myLength = getLength(); final long myLastVcn = getLastVcn(); final long reqLastVcn = vcn + nrClusters - 1; @@ -235,13 +236,13 @@ public int readClusters(long vcn, byte[] dst, int dstOffset, int nrClusters, int final int actDstOffset; // Actual dst offset if (vcn < myFirstVcn) { final int vcnDelta = (int) (myFirstVcn - vcn); - count = Math.min(nrClusters - vcnDelta, myLength); + count = FSUtils.checkedCast(Math.min(nrClusters - vcnDelta, myLength)); actDstOffset = dstOffset + (vcnDelta * clusterSize); actCluster = getCluster(); } else { // vcn >= myFirstVcn final int vcnDelta = (int) (vcn - myFirstVcn); - count = Math.min(nrClusters, myLength - vcnDelta); + count = FSUtils.checkedCast(Math.min(nrClusters, myLength - vcnDelta)); actDstOffset = dstOffset; actCluster = getCluster() + vcnDelta; } diff --git a/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunDecoder.java b/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunDecoder.java index 484d17fbe1..0d21276415 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunDecoder.java +++ b/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunDecoder.java @@ -59,7 +59,7 @@ public class DataRunDecoder { /** * The last compressed run size. */ - private int lastCompressedSize = 0; + private long lastCompressedSize = 0; /** * The last compressed run to append to. @@ -106,7 +106,7 @@ public void readDataRuns(NTFSStructure parent, int offsetInParent) { // Also the sparse run following a compressed run can be coalesced with a subsequent 'real' sparse // run. So add that in if we hit one if (dataRun.getLength() + lastCompressedSize > compressionUnit) { - int length = dataRun.getLength() - (compressionUnit - lastCompressedSize); + long length = dataRun.getLength() - (compressionUnit - lastCompressedSize); dataRuns.add(new DataRun(0, length, true, 0, vcn)); this.numberOfVCNs += length; @@ -120,11 +120,11 @@ public void readDataRuns(NTFSStructure parent, int offsetInParent) { // coalesced into a single run and even coalesced into the next compressed run. 
In that case the // compressed run needs to be split off - int remainder = dataRun.getLength() % compressionUnit; + long remainder = dataRun.getLength() % compressionUnit; if (remainder != 0) { // Uncompressed run coalesced with compressed run. First add in the uncompressed portion: - int uncompressedLength = dataRun.getLength() - remainder; + long uncompressedLength = dataRun.getLength() - remainder; DataRun uncompressed = new DataRun(dataRun.getCluster(), uncompressedLength, false, 0, vcn); dataRuns.add(uncompressed); vcn += uncompressedLength; @@ -133,7 +133,8 @@ public void readDataRuns(NTFSStructure parent, int offsetInParent) { // Next add in the compressed portion DataRun compressedRun = new DataRun(dataRun.getCluster() + uncompressedLength, remainder, false, 0, vcn); - dataRuns.add(new CompressedDataRun(compressedRun, compressionUnit)); + lastCompressedRun = new CompressedDataRun(compressedRun, compressionUnit); + dataRuns.add(lastCompressedRun); expectingSparseRunNext = true; lastCompressedSize = remainder; diff --git a/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunInterface.java b/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunInterface.java index 989a18d21d..6686b427fc 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunInterface.java +++ b/fs/src/fs/org/jnode/fs/ntfs/datarun/DataRunInterface.java @@ -33,7 +33,7 @@ public interface DataRunInterface { * * @return the length of the run in clusters. */ - int getLength(); + long getLength(); /** * Reads clusters from this datarun. diff --git a/fs/src/fs/org/jnode/fs/ntfs/index/IndexBlock.java b/fs/src/fs/org/jnode/fs/ntfs/index/IndexBlock.java index 64e95f7e05..3e20e0beb7 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/index/IndexBlock.java +++ b/fs/src/fs/org/jnode/fs/ntfs/index/IndexBlock.java @@ -42,7 +42,7 @@ final class IndexBlock extends NTFSRecord { * @param offset */ public IndexBlock(FileRecord parentFileRecord, byte[] buffer, int offset) throws IOException { - super(parentFileRecord.getVolume().getBootRecord().getBytesPerSector(), true, buffer, offset); + super(true, buffer, offset); this.parentFileRecord = parentFileRecord; } diff --git a/fs/src/fs/org/jnode/fs/ntfs/logfile/LogFile.java b/fs/src/fs/org/jnode/fs/ntfs/logfile/LogFile.java index 918607f4af..7f4b0d5dd1 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/logfile/LogFile.java +++ b/fs/src/fs/org/jnode/fs/ntfs/logfile/LogFile.java @@ -11,7 +11,6 @@ import java.util.TreeMap; import org.apache.log4j.Logger; import org.jnode.fs.ntfs.FileRecord; -import org.jnode.fs.ntfs.NTFSVolume; import org.jnode.fs.ntfs.attribute.NTFSAttribute; import org.jnode.fs.util.FSUtils; import org.jnode.util.LittleEndian; @@ -96,7 +95,7 @@ public LogFile(FileRecord fileRecord) throws IOException { fileRecord.readData(0, logFileBuffer, 0, (int) logFileLength); // Read in the restart area info - restartPageHeader = getNewestRestartPageHeader(fileRecord.getVolume(), logFileBuffer); + restartPageHeader = getNewestRestartPageHeader(logFileBuffer); int restartAreaOffset = restartPageHeader.getOffset() + restartPageHeader.getRestartOffset(); logPageSize = restartPageHeader.getLogPageSize(); restartArea = new RestartArea(logFileBuffer, restartAreaOffset); @@ -125,7 +124,7 @@ public LogFile(FileRecord fileRecord) throws IOException { } } - oldestPageOffset = findOldestPageOffset(fileRecord.getVolume()); + oldestPageOffset = findOldestPageOffset(); } /** @@ -266,11 +265,10 @@ private long getNextRecordOffset(LogRecord logRecord, long recordOffset) { /** * Finds the offset to the oldest page, i.e. 
the one with the lowest LSN. * - * @param volume the volume that holds the log file. * @return the offset to the oldest page. * @throws IOException if an error occurs. */ - private int findOldestPageOffset(NTFSVolume volume) throws IOException { + private int findOldestPageOffset() throws IOException { TreeMap lsnPageMap = new TreeMap(); Map pageOffsetMap = new HashMap(); @@ -286,7 +284,7 @@ private int findOldestPageOffset(NTFSVolume volume) throws IOException { continue; } - RecordPageHeader pageHeader = new RecordPageHeader(volume, logFileBuffer, offset); + RecordPageHeader pageHeader = new RecordPageHeader(logFileBuffer, offset); offsetPageMap.put(offset, pageHeader); // If the last-end-LSN is zero then the page only contains data from the log record on the last page. I.e. @@ -304,20 +302,19 @@ private int findOldestPageOffset(NTFSVolume volume) throws IOException { /** * Gets the restart page header that corresponds to the restart page with the highest current LSN. * - * @param volume the volume that holds the log file. * @param buffer the buffer to read from. * @return the header. * @throws IOException if an error occurs. */ - private RestartPageHeader getNewestRestartPageHeader(NTFSVolume volume, byte[] buffer) throws IOException { - RestartPageHeader restartPageHeader1 = new RestartPageHeader(volume, buffer, 0); + private RestartPageHeader getNewestRestartPageHeader(byte[] buffer) throws IOException { + RestartPageHeader restartPageHeader1 = new RestartPageHeader(buffer, 0); if (!restartPageHeader1.isValid()) { throw new IllegalStateException("Restart header has invalid magic: " + restartPageHeader1.getMagic()); } else if (restartPageHeader1.getMagic() == RestartPageHeader.Magic.CHKD) { log.warn("First $LogFile restart header has check disk magic"); } - RestartPageHeader restartPageHeader2 = new RestartPageHeader(volume, buffer, restartPageHeader1.getLogPageSize()); + RestartPageHeader restartPageHeader2 = new RestartPageHeader(buffer, restartPageHeader1.getLogPageSize()); if (!restartPageHeader2.isValid()) { throw new IllegalStateException("Second restart header has invalid magic: " + restartPageHeader2.getMagic()); } else if (restartPageHeader2.getMagic() == RestartPageHeader.Magic.CHKD) { diff --git a/fs/src/fs/org/jnode/fs/ntfs/logfile/RecordPageHeader.java b/fs/src/fs/org/jnode/fs/ntfs/logfile/RecordPageHeader.java index d5e15514fb..106891048b 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/logfile/RecordPageHeader.java +++ b/fs/src/fs/org/jnode/fs/ntfs/logfile/RecordPageHeader.java @@ -2,7 +2,6 @@ import java.io.IOException; import org.jnode.fs.ntfs.NTFSRecord; -import org.jnode.fs.ntfs.NTFSVolume; /** * $LogFile record page header. @@ -29,13 +28,12 @@ public static class Magic { /** * Creates a new log file record page header. * - * @param volume the volume that contains this record. * @param buffer the buffer. * @param offset the offset. * @throws IOException if an error occurs during fixup. 
*/ - public RecordPageHeader(NTFSVolume volume, byte[] buffer, int offset) throws IOException { - super(volume.getBootRecord().getBytesPerSector(), true, buffer, offset); + public RecordPageHeader(byte[] buffer, int offset) throws IOException { + super(true, buffer, offset); } /** diff --git a/fs/src/fs/org/jnode/fs/ntfs/logfile/RestartPageHeader.java b/fs/src/fs/org/jnode/fs/ntfs/logfile/RestartPageHeader.java index b8b29435fb..99f19e7676 100644 --- a/fs/src/fs/org/jnode/fs/ntfs/logfile/RestartPageHeader.java +++ b/fs/src/fs/org/jnode/fs/ntfs/logfile/RestartPageHeader.java @@ -2,7 +2,6 @@ import java.io.IOException; import org.jnode.fs.ntfs.NTFSRecord; -import org.jnode.fs.ntfs.NTFSVolume; /** * $LogFile restart page header @@ -29,13 +28,12 @@ public static class Magic { /** * Creates a new log file restart page header. * - * @param volume the volume that contains this record. * @param buffer the buffer. * @param offset the offset to create the structure at. * @throws IOException if an error occurs during fixup. */ - public RestartPageHeader(NTFSVolume volume, byte[] buffer, int offset) throws IOException { - super(volume.getBootRecord().getBytesPerSector(), true, buffer, offset); + public RestartPageHeader(byte[] buffer, int offset) throws IOException { + super(true, buffer, offset); } /** diff --git a/fs/src/fs/org/jnode/fs/spi/UnixFSAccessRights.java b/fs/src/fs/org/jnode/fs/spi/UnixFSAccessRights.java index 6b18842daa..f1ba63a752 100644 --- a/fs/src/fs/org/jnode/fs/spi/UnixFSAccessRights.java +++ b/fs/src/fs/org/jnode/fs/spi/UnixFSAccessRights.java @@ -22,12 +22,9 @@ import java.security.Principal; import java.security.acl.Group; - import org.jnode.fs.FSAccessRights; import org.jnode.fs.FileSystem; -import com.sun.security.auth.UserPrincipal; - /** * * @author Fabien DUMINY (fduminy at jnode.org) @@ -50,7 +47,7 @@ public UnixFSAccessRights(FileSystem filesystem) { this.filesystem = filesystem; // TODO manages users & groups in JNode - owner = new UserPrincipal("root"); + owner = new UnixFSPrincipal("root"); group = new UnixFSGroup("admins"); group.addMember(owner); } diff --git a/fs/src/fs/org/jnode/fs/spi/UnixFSPrincipal.java b/fs/src/fs/org/jnode/fs/spi/UnixFSPrincipal.java new file mode 100644 index 0000000000..173b38c7f7 --- /dev/null +++ b/fs/src/fs/org/jnode/fs/spi/UnixFSPrincipal.java @@ -0,0 +1,58 @@ +/* + * $Id$ + * + * Copyright (C) 2003-2015 JNode.org + * + * This library is free software; you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published + * by the Free Software Foundation; either version 2.1 of the License, or + * (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public + * License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with this library; If not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +package org.jnode.fs.spi; + +import java.security.Principal; + +public class UnixFSPrincipal implements Principal { + + private final String name; + + public UnixFSPrincipal(String name) { + this.name = name; + } + + @Override + public String getName() { + return name; + } + + @Override + public boolean equals(Object object) { + if (this == object) { + return true; + } + if (object instanceof UnixFSPrincipal) { + return name.equals(((UnixFSPrincipal) object).getName()); + } + return false; + } + + @Override + public int hashCode() { + return name.hashCode(); + } + + @Override + public String toString() { + return name; + } +} diff --git a/fs/src/test/org/jnode/test/fs/hfsplus/HfsPlusFileSystemTest.java b/fs/src/test/org/jnode/test/fs/hfsplus/HfsPlusFileSystemTest.java index 72c95329ff..5c352d0d61 100644 --- a/fs/src/test/org/jnode/test/fs/hfsplus/HfsPlusFileSystemTest.java +++ b/fs/src/test/org/jnode/test/fs/hfsplus/HfsPlusFileSystemTest.java @@ -22,16 +22,21 @@ import java.io.File; import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; import org.jnode.driver.Device; import org.jnode.driver.block.FileDevice; +import org.jnode.fs.FSDirectory; import org.jnode.fs.hfsplus.HFSPlusParams; +import org.jnode.fs.hfsplus.HfsPlusEntry; +import org.jnode.fs.hfsplus.HfsPlusFile; import org.jnode.fs.hfsplus.HfsPlusFileSystem; import org.jnode.fs.hfsplus.HfsPlusFileSystemType; import org.jnode.fs.hfsplus.SuperBlock; +import org.jnode.fs.hfsplus.attributes.AttributeData; +import org.jnode.fs.service.FileSystemService; import org.jnode.test.fs.DataStructureAsserts; -import org.jnode.fs.FSDirectory; import org.jnode.test.fs.FileSystemTestUtils; -import org.jnode.fs.service.FileSystemService; import org.jnode.test.support.TestUtils; import org.junit.Assert; import org.junit.Before; @@ -172,6 +177,54 @@ public void testDiskWithIncorrectCompressedFileOnFile() throws Exception { DataStructureAsserts.assertStructure(fs, expectedStructure); } + @Test + public void testDiskAttributes() throws Exception { + device = new FileDevice(FileSystemTestUtils.getTestFile("test/fs/hfsplus/attributes.dd"), "r"); + HfsPlusFileSystemType type = fss.getFileSystemType(HfsPlusFileSystemType.ID); + HfsPlusFileSystem fs = type.create(device, true); + + String expectedStructure = + "type: HFS+ vol:attributes total:99983360 free:97062912\n" + + " /; \n" + + " .DS_Store; 6148; 7e2a612ff5e605e73b2078557c9aa5c5\n" + + " .fseventsd; \n" + + " 0000000000219f17; 135; b518731969693d10590124b27c8b50cf\n" + + " 0000000000219f18; 70; e899edc609e2d4bac697bc207b7ee5ad\n" + + " 0000000000219f19; 38; ebb12e59f60c213f883d07e17ff3f2d5\n" + + " 0000000000219f1a; 71; 4732d018d8011a207fce3f127a2e6e5d\n" + + " 0000000000219f1b; 38; ebb12e59f60c213f883d07e17ff3f2d5\n" + + " 0000000000219f1c; 71; b619c8d189af0a632e91dec707774d44\n" + + " fseventsd-uuid; 36; 8f91e1c548e1eed2edb44694d8a5c5db\n" + + " .HFS+ Private Directory Data\r; \n" + + " .journal; 524288; 3e87e55c8d321c611f18197770a523e7\n" + + " .journal_info_block; 4096; 469270564228a832e83d2ad16e6d8edc\n" + + " isoparser-1.1.22.pom; 6652; 6c04a5fb9540f1b558dc9465ab3a7ad4\n" + + " \u0000\u0000\u0000\u0000HFS+ Private Data; \n"; + + DataStructureAsserts.assertStructure(fs, expectedStructure); + + HfsPlusEntry entry = (HfsPlusEntry) fs.getRootEntry().getDirectory().getEntry("isoparser-1.1.22.pom"); + HfsPlusFile file = (HfsPlusFile) entry.getFile(); + + List attributes = fs.getAttributes().getAllAttributes(file.getCatalogFile().getFileId()); + AttributeData 
whereFromAttr = fs.getAttributes().getAttribute(file.getCatalogFile().getFileId(), + "com.apple.metadata:kMDItemWhereFroms"); + ByteBuffer whereFromBuffer = ByteBuffer.allocate(6); + whereFromAttr.read(fs, 0, whereFromBuffer); + String whereFrom = new String(whereFromBuffer.array(), "UTF-8"); + + AttributeData quarantineAttr = fs.getAttributes().getAttribute(file.getCatalogFile().getFileId(), + "com.apple.quarantine"); + ByteBuffer quarantineBuffer = ByteBuffer.allocate((int) quarantineAttr.getSize()); + quarantineAttr.read(fs, 0, quarantineBuffer); + String quarantine = new String(quarantineBuffer.array(), "UTF-8"); + + Assert.assertEquals("[com.apple.metadata:kMDItemWhereFroms, com.apple.quarantine]", attributes.toString()); + Assert.assertEquals(144, whereFromAttr.getSize()); + Assert.assertEquals("bplist", whereFrom); + Assert.assertEquals("0081;5cc8ba96;Chrome;3792716B-4DE2-4A43-BFA3-516714DD5764", quarantine); + } + @Test public void testDiskCompressedHardlinks() throws Exception { device = new FileDevice(FileSystemTestUtils.getTestFile("test/fs/hfsplus/compressed-hardlinks.dd"), "r"); diff --git a/fs/src/test/org/jnode/test/fs/hfsplus/HfsUnicodeStringTest.java b/fs/src/test/org/jnode/test/fs/hfsplus/HfsUnicodeStringTest.java index 89bc4cb5c5..d75099a3f9 100644 --- a/fs/src/test/org/jnode/test/fs/hfsplus/HfsUnicodeStringTest.java +++ b/fs/src/test/org/jnode/test/fs/hfsplus/HfsUnicodeStringTest.java @@ -24,6 +24,8 @@ import org.junit.Test; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; public class HfsUnicodeStringTest { private byte[] STRING_AS_BYTES_ARRAY = @@ -49,4 +51,35 @@ public void testConstructAsString() { } } + @Test + public void testEquals() { + HfsUnicodeString string1 = new HfsUnicodeString(STRING_AS_TEXT); + HfsUnicodeString string2 = new HfsUnicodeString(STRING_AS_TEXT); + HfsUnicodeString string3 = new HfsUnicodeString(null); + HfsUnicodeString string4 = new HfsUnicodeString(null); + + assertEquals(string1, string2); + assertEquals(string3, string4); + assertFalse(string1.equals(string3)); + assertFalse(string4.equals(string2)); + } + + @Test + public void testCompareTo() { + HfsUnicodeString nullStr = new HfsUnicodeString(null); + HfsUnicodeString emptyStr = new HfsUnicodeString(""); + HfsUnicodeString string1 = new HfsUnicodeString("test"); + HfsUnicodeString string2 = new HfsUnicodeString("test"); + HfsUnicodeString longerStr = new HfsUnicodeString("testzzz"); + + assertEquals(-1, nullStr.compareTo(emptyStr)); + assertEquals(1, emptyStr.compareTo(nullStr)); + + assertEquals(0, string1.compareTo(string2)); + assertTrue(string1.compareTo(longerStr) < 0); + assertTrue(longerStr.compareTo(string1) > 0); + + assertEquals(1, string1.compareTo(nullStr)); + assertEquals(-1, nullStr.compareTo(string1)); + } } diff --git a/fs/src/test/org/jnode/test/fs/hfsplus/attributes.dd.gz b/fs/src/test/org/jnode/test/fs/hfsplus/attributes.dd.gz new file mode 100644 index 0000000000..917efa1e1a Binary files /dev/null and b/fs/src/test/org/jnode/test/fs/hfsplus/attributes.dd.gz differ diff --git a/fs/src/test/org/jnode/test/fs/ntfs/NTFSDataRunDecoderTest.java b/fs/src/test/org/jnode/test/fs/ntfs/NTFSDataRunDecoderTest.java index 6ac751e871..1eb9028990 100644 --- a/fs/src/test/org/jnode/test/fs/ntfs/NTFSDataRunDecoderTest.java +++ b/fs/src/test/org/jnode/test/fs/ntfs/NTFSDataRunDecoderTest.java @@ -414,6 +414,66 @@ public void testCombiningSubsequentAttributesRuns() { assertDataRuns(dataRuns, 
expectedRuns); } + @Test + public void testDataRunWithLargeNegativeOffset() { + // Arrange + byte[] buffer = toByteArray( + "33 C0 3B 01 00 00 0C 43 14 C8 00 2C 43 F5 1E 43 F1 15 01 63 63 EB 25 42 A7 77 FA 5E E8 " + + "0E 42 94 4A 6E 7B BA 0D 43 70 CA 00 09 FF 50 19 43 A5 0F 01 FC FF D1 B3 42 16 65 AE 99 F8 2A 43 " + + "6C C8 00 EA 1D 94 15 43 0C C8 00 BF CB 9F B3 43 1D D2 00 71 EE BA 0D 43 03 C8 00 D9 43 B2 00 43 " + + "32 C9 00 6C F8 B1 5D 43 08 C8 00 AF E4 2A 8E 43 06 C8 00 E2 CB 2F 1F 43 25 C8 00 66 A1 F0 30 43 " + + "0F C8 00 2B 04 B4 08 43 2D C9 00 D0 A6 87 E0 43 1A C8 00 0B 97 E0 29 52 C2 08 C9 D2 B8 7F FF 43 " + + "00 88 00 68 5C CC 6B 00"); + DataRunDecoder dataRunDecoder = new DataRunDecoder(false, 1); + + // Act + dataRunDecoder.readDataRuns(new NTFSStructure(buffer, 0), 0); + List dataRuns = dataRunDecoder.getDataRuns(); + + // Assert + String expectedRuns = + "[data-run vcn:0-80831 cluster:786432]\n" + + "[data-run vcn:80832-132051 cluster:520176428]\n" + + "[data-run vcn:132052-203204 cluster:1156359823]\n" + + "[data-run vcn:203205-233835 cluster:1406469513]\n" + + "[data-run vcn:233836-252927 cluster:1636794615]\n" + + "[data-run vcn:252928-304751 cluster:2061533184]\n" + + "[data-run vcn:304752-374292 cluster:783450108]\n" + + "[data-run vcn:374293-400170 cluster:1504385450]\n" + + "[data-run vcn:400171-451478 cluster:1866413972]\n" + + "[data-run vcn:451479-502690 cluster:585040723]\n" + + "[data-run vcn:502691-556479 cluster:815395268]\n" + + "[data-run vcn:556480-607682 cluster:827078045]\n" + + "[data-run vcn:607683-659188 cluster:2399022601]\n" + + "[data-run vcn:659189-710396 cluster:489231032]\n" + + "[data-run vcn:710397-761602 cluster:1012457114]\n" + + "[data-run vcn:761603-812839 cluster:1833533440]\n" + + "[data-run vcn:812840-864054 cluster:1979548715]\n" + + "[data-run vcn:864055-915555 cluster:1451567867]\n" + + "[data-run vcn:915556-966781 cluster:2154152454]\n" + + "[data-run vcn:966782-969023 cluster:2004175]\n" + + "[data-run vcn:969024-1003839 cluster:1810559287]\n"; + assertDataRuns(dataRuns, expectedRuns); + } + + @Test + public void testCompressedExpectingSparseAfterMerge() { + // Arrange + byte[] buffer = toByteArray( + "41 13 D5 68 A2 0B 21 09 68 FF 01 04 00"); + DataRunDecoder dataRunDecoder = new DataRunDecoder(true, 16); + + // Act + dataRunDecoder.readDataRuns(new NTFSStructure(buffer, 0), 0); + List dataRuns = dataRunDecoder.getDataRuns(); + + // Assert + String expectedRuns = + "[data-run vcn:0-15 cluster:195193045]\n" + + "[compressed-run vcn:16-31 [[data-run vcn:16-18 cluster:195193061], [data-run vcn:19-27 cluster:195192893]]]\n"; + assertDataRuns(dataRuns, expectedRuns); + } + /** * Asserts the list of data runs is correct. *
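Working the testCompressedExpectingSparseAfterMerge input through the run-header rules sketched earlier confirms the expected runs: header 0x41 encodes a 1-byte length and a 4-byte delta, so 0x13 = 19 clusters starting at LCN 0x0BA268D5 = 195193045; with a compression unit of 16, the first 16 clusters form a complete uncompressed run (vcn 0-15) and the remaining 3 open a compressed unit at LCN 195193061 (vcn 16-18). Header 0x21 then encodes 9 clusters at a signed 2-byte delta of 0xFF68 = -152 from the previous LCN, giving 195193045 - 152 = 195192893 (vcn 19-27), and header 0x01 is a 4-cluster sparse run that completes the 16-cluster compressed unit (3 + 9 + 4 = 16).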