GP-3946 Implement SquashFS FileSystem

Ghidra78 2023-12-14 14:19:21 -05:00
parent 58e22a6f7b
commit 02642c1559
20 changed files with 2731 additions and 0 deletions

View File

@@ -0,0 +1,97 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
import java.io.IOException;
import ghidra.app.util.bin.BinaryReader;
public class SquashBasicDirectoryInode extends SquashInode {
// Offset into the directory table where the metadata block for this inode starts
protected long blockIndex;
// The number of hard links to this directory
protected long hardLinkCount;
// The total uncompressed size of this directory listing
// Basic is 16 bits, extended is 32 bits (unsigned)
// NOTE: This value is 3 bytes greater than the actual listing as Linux creates "." and ".." directories
protected long uncompressedFileSize;
// Offset into the directory table metadata block where this directory listing starts
protected int blockOffset;
// The inode number of the parent of this directory (for root directory, this should be 0)
protected int parentInodeNumber;
// Whether or not the parent directory is root
protected boolean parentIsRoot = false;
/**
* Represents a SquashFS basic directory inode
* @param reader A binary reader with pointer index at the start of the inode data
* @param superBlock The SuperBlock for the current archive
* @param isExtended True if the constructor is being called by a subclass
* @throws IOException Any read operation failure
*/
public SquashBasicDirectoryInode(BinaryReader reader, SquashSuperBlock superBlock,
boolean isExtended) throws IOException {
// Assign common inode header values
super(reader, superBlock);
// If the class is being extended, handle the directory-specific values in that constructor
if (isExtended) {
return;
}
// Assign basic directory specific values
blockIndex = reader.readNextUnsignedInt();
hardLinkCount = reader.readNextUnsignedInt();
uncompressedFileSize = reader.readNextUnsignedShort();
blockOffset = reader.readNextUnsignedShort();
parentInodeNumber = reader.readNextUnsignedIntExact();
// Determine if the parent of the current inode is root
parentIsRoot = parentInodeNumber == superBlock.getInodeCount() + 1;
}
public int getParentInodeNumber() {
return parentInodeNumber;
}
public boolean isParentRoot() {
return parentIsRoot;
}
public long getIndex() {
return blockIndex;
}
public long getHardLinkCount() {
return hardLinkCount;
}
public int getOffset() {
return blockOffset;
}
public long getUncompressedSize() {
return uncompressedFileSize;
}
}

View File

@@ -0,0 +1,149 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
import java.io.IOException;
import ghidra.app.util.bin.BinaryReader;
public class SquashBasicFileInode extends SquashInode {
// Offset into the archive where the first data block resides
// Basic is 32 bits, extended is 64 bits
protected long startBlockOffset;
// The index into the fragment table where the tail end of this file resides
// All bits are set if there is no fragment
protected int fragmentIndex;
// The offset within the uncompressed fragment block where the tail end of the file resides
protected int blockOffset;
// The total uncompressed size of this file
// Basic is 32 bits, extended is 64 bits
protected long fileSize;
// An array of per-block size headers (on-disk size plus a compression flag bit) for this file
protected int[] blockSizes;
// The size of the tail end of the file
protected int tailEndSize = 0;
// The total number of blocks comprising the file
protected int numberOfBlocks = 0;
/**
* Represents a SquashFS basic file inode
* @param reader A binary reader with pointer index at the start of the inode data
* @param superBlock The SuperBlock for the current archive
* @param isExtended True if the constructor is being called by a subclass
* @throws IOException Any read operation failure
*/
public SquashBasicFileInode(BinaryReader reader, SquashSuperBlock superBlock,
boolean isExtended) throws IOException {
// Assign common inode header values
super(reader, superBlock);
// If the class is being extended, handle the file-specific values in that constructor
if (isExtended) {
return;
}
// Assign basic file specific values
startBlockOffset = reader.readNextUnsignedInt();
// If there are no fragments, skip the next two values
if (reader.peekNextInt() == -1) {
fragmentIndex = -1;
blockOffset = -1;
// Advance the reader position
reader.setPointerIndex(reader.getPointerIndex() + (BinaryReader.SIZEOF_INT * 2));
}
else {
fragmentIndex = reader.readNextUnsignedIntExact();
blockOffset = reader.readNextUnsignedIntExact();
}
fileSize = reader.readNextUnsignedInt();
setVars(reader, superBlock);
}
/**
* Calculate the derived variables for this file
* @param reader A binary reader with pointer index at the start of the inode data
* @param superBlock The superblock for the current archive
* @throws IOException Any read operation failure
*/
protected void setVars(BinaryReader reader, SquashSuperBlock superBlock) throws IOException {
// If the current inode uses fragments, the number of blocks is calculated differently
if (fragmentIndex == SquashConstants.INODE_NO_FRAGMENTS) {
numberOfBlocks =
(int) ((fileSize + superBlock.getBlockSize() - 1) / superBlock.getBlockSize());
}
else {
numberOfBlocks = (int) (fileSize / superBlock.getBlockSize());
tailEndSize = (int) (fileSize % superBlock.getBlockSize());
}
// Fetch and store the block sizes for the file
blockSizes = reader.readNextIntArray(numberOfBlocks);
}
public long getStartBlockOffset() {
return startBlockOffset;
}
public int getFragmentIndex() {
return fragmentIndex;
}
public int getBlockOffset() {
return blockOffset;
}
public long getFileSize() {
return fileSize;
}
public int getTailEndSize() {
return tailEndSize;
}
public int getNumberOfBlocks() {
return numberOfBlocks;
}
public int[] getBlockSizes() {
return blockSizes;
}
public long getCompressedFileSize() {
long compressedSize = 0;
for (int blockHeader : blockSizes) {
int size = blockHeader & ~SquashConstants.DATABLOCK_COMPRESSED_MASK;
compressedSize += size;
}
return compressedSize + tailEndSize;
}
}
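
To make the fragment math in setVars concrete, a small sketch with hypothetical numbers (a 128 KiB block size and a 300,000-byte file whose tail end lives in a fragment):

int blockSize = 0x20000; // 131072 bytes, hypothetical superblock value
long fileSize = 300000L; // hypothetical
int numberOfBlocks = (int) (fileSize / blockSize); // 2 full data blocks
int tailEndSize = (int) (fileSize % blockSize); // 37856 bytes stored in a fragment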

View File

@@ -0,0 +1,94 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
public final class SquashConstants {
// SquashFS magic bytes ("hsqs")
public static final byte[] MAGIC = { 0x68, 0x73, 0x71, 0x73 };
/**
* Compression types
*/
public static final int COMPRESSION_TYPE_GZIP = 1;
public static final int COMPRESSION_TYPE_LZMA = 2;
public static final int COMPRESSION_TYPE_LZO = 3;
public static final int COMPRESSION_TYPE_XZ = 4;
public static final int COMPRESSION_TYPE_LZ4 = 5;
public static final int COMPRESSION_TYPE_ZSTD = 6;
/*
* Superblock flag masks
*/
public static final int UNCOMPRESSED_INODES = 0x0001;
public static final int UNCOMPRESSED_DATA_BLOCKS = 0x0002;
public static final int UNUSED_FLAG = 0x0004;
public static final int UNCOMPRESSED_FRAGMENTS = 0x0008;
public static final int NO_FRAGMENTS = 0x0010;
public static final int ALWAYS_FRAGMENT = 0x0020;
public static final int NO_DUPLICATE_DATA = 0x0040;
public static final int EXPORT_TABLE_EXISTS = 0x0080;
public static final int UNCOMPRESSED_XATTRS = 0x0100;
public static final int NO_XATTRS = 0x0200;
public static final int COMPRESSION_OPTIONS_EXIST = 0x0400;
public static final int UNCOMPRESSED_IDS = 0x0800;
/**
* Inode Types
*/
public static final int INODE_TYPE_BASIC_DIRECTORY = 0x01;
public static final int INODE_TYPE_BASIC_FILE = 0x02;
public static final int INODE_TYPE_BASIC_SYMLINK = 0x03;
public static final int INODE_TYPE_BASIC_BLOCK_DEVICE = 0x04;
public static final int INODE_TYPE_BASIC_CHAR_DEVICE = 0x05;
public static final int INODE_TYPE_BASIC_FIFO = 0x06;
public static final int INODE_TYPE_BASIC_SOCKET = 0x07;
public static final int INODE_TYPE_EXTENDED_DIRECTORY = 0x08;
public static final int INODE_TYPE_EXTENDED_FILE = 0x09;
public static final int INODE_TYPE_EXTENDED_SYMLINK = 0x0A;
public static final int INODE_TYPE_EXTENDED_BLOCK_DEVICE = 0x0B;
public static final int INODE_TYPE_EXTENDED_CHAR_DEVICE = 0x0C;
public static final int INODE_TYPE_EXTENDED_FIFO = 0x0D;
public static final int INODE_TYPE_EXTENDED_SOCKET = 0x0E;
/**
* Data sizes
*/
public static final int MAX_UNIT_BLOCK_SIZE = 0x2000; // 8192 bytes = 8KiB
public static final int FRAGMENT_ENTRY_LENGTH = 16;
public static final int MAX_SYMLINK_DEPTH = 100;
/**
* General bit masks
*/
// In the superblock, all bits are set for a reference to an omitted section
public static final int SECTION_OMITTED = 0xFFFFFFFF;
// If an inode's file index has all bits set, it indicates there are no associated fragments
public static final int INODE_NO_FRAGMENTS = 0xFFFFFFFF;
// Mask for the 25th bit of a fragment's "size" header; if set, the fragment is stored uncompressed
// Inverted, it masks out the fragment size
public static final int FRAGMENT_COMPRESSED_MASK = 1 << 24;
// Mask for the 25th bit of a data block's "size" header; if set, the block is stored uncompressed
// Inverted, it masks out the block size
public static final int DATABLOCK_COMPRESSED_MASK = 1 << 24;
// Mask for the 16th bit of a metablock's "size" header; if set, the metablock is stored uncompressed
// Inverted, it masks out the metablock size
public static final int METABLOCK_UNCOMPRESSED_MASK = 1 << 15;
}
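
For illustration, a minimal sketch (hypothetical header value, not part of the commit) of how the data block masks above are applied; a set 25th bit marks the block as stored uncompressed, and the remaining bits hold its on-disk size:

int blockSizeHeader = (1 << 24) | 0x2000; // hypothetical: uncompressed, 8192 bytes on disk
boolean isCompressed = (blockSizeHeader & SquashConstants.DATABLOCK_COMPRESSED_MASK) == 0; // false
int onDiskSize = blockSizeHeader & ~SquashConstants.DATABLOCK_COMPRESSED_MASK; // 8192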

View File

@@ -0,0 +1,170 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.*;
import ghidra.app.util.bin.BinaryReader;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
public class SquashDirectoryTable {
// A map of directory table entries listed by the offset in which they appear within
// the uncompressed directory table
private final Map<Long, SquashDirectoryTableHeader> headersByOffset;
// Map of block offsets into the original archive to the offset into the uncompressed directory table
private final Map<Long, Long> archiveToReaderOffsets;
/**
* Represents the directory table within the SquashFS archive
* @param reader A binary reader for the entire SquashFS archive
* @param superBlock The SuperBlock for the current archive
* @param fragTable The processed fragment table for the archive
* @param monitor Monitor to allow the user to cancel the load
* @throws IOException Any read operation failure
* @throws CancelledException Archive load was cancelled
*/
public SquashDirectoryTable(BinaryReader reader, SquashSuperBlock superBlock,
SquashFragmentTable fragTable, TaskMonitor monitor)
throws IOException, CancelledException {
// Read from the start of the directory table
reader.setPointerIndex(superBlock.getDirectoryTableStart());
// The end address of the directory table depends on the number of fragments in the archive
long endOfDirTable;
if (!superBlock.isFragmentsUnused() && superBlock.getTotalFragments() > 0) {
endOfDirTable = fragTable.getMinFragPointer();
}
else {
endOfDirTable = superBlock.getFragmentTableStart();
}
headersByOffset = new HashMap<Long, SquashDirectoryTableHeader>();
archiveToReaderOffsets = new HashMap<Long, Long>();
// The reader will now contain ONLY the uncompressed bytes of the directory table
reader = decompressDirectoryTable(reader, endOfDirTable, superBlock.getCompressionType(),
monitor);
// While there are still additional blocks to process
while (reader.hasNext()) {
// Check if the user cancelled the load
monitor.checkCancelled();
// Add a new header to the map, keyed by the reader's current position
headersByOffset.put(reader.getPointerIndex(),
new SquashDirectoryTableHeader(reader, superBlock, monitor));
}
}
/**
* This method will assign each directory entry to an inode
* @param inodeTable The object representing all inodes in the archive
* @param monitor Monitor to allow the user to cancel the load
* @throws CancelledException Archive load was cancelled
*/
public void assignInodes(SquashInodeTable inodeTable, TaskMonitor monitor)
throws CancelledException {
// For each of the directory headers in the table
for (long offset : headersByOffset.keySet()) {
// Check if the user cancelled the load
monitor.checkCancelled();
SquashDirectoryTableHeader header = headersByOffset.get(offset);
// Assign the proper inode to each of the directory entries under the header
for (SquashDirectoryTableEntry child : header.getEntries()) {
SquashInode inode = inodeTable.getInodeByNumber(child.getInodeNumber());
inode.setDirectoryTableEntry(child);
}
}
}
/**
* Get the headers associated with the given directory inode
* @param inode The inode to search by
* @return A list of headers that are associated with the given inode
*/
public List<SquashDirectoryTableHeader> getHeaders(SquashBasicDirectoryInode inode) {
List<SquashDirectoryTableHeader> headers = new ArrayList<SquashDirectoryTableHeader>();
// Set search boundaries
long blockStart = archiveToReaderOffsets.get((long) inode.getIndex());
long start = blockStart + inode.getOffset();
long end = start + inode.getUncompressedSize() - 3; // Account for "." and ".." entries
// Add all headers that start within the bounds given to be returned
for (long offset : headersByOffset.keySet()) {
if (offset >= start && offset < end) {
headers.add(headersByOffset.get(offset));
}
}
return headers;
}
/**
* Decompress the directory table and log block positions
* @param reader The BinaryReader pointed to the start of the section
* @param endAddress The address the section ends at
* @param compressionType The compression type of the archive
* @param monitor Monitor to allow the user to cancel the load
* @return A BinaryReader containing ONLY the uncompressed bytes of the section
* @throws IOException Any read operation failure
* @throws CancelledException Archive load was cancelled
*/
private BinaryReader decompressDirectoryTable(BinaryReader reader, long endAddress,
int compressionType, TaskMonitor monitor) throws IOException, CancelledException {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
// Keep track of how many bytes result from decompression
int totalUncompressedBytes = 0;
long directoryTableStart = reader.getPointerIndex();
// Continue reading until the end of the section is reached
while (reader.getPointerIndex() < endAddress) {
// Check if the user cancelled the load
monitor.checkCancelled();
long startOfBlockOffset = reader.getPointerIndex() - directoryTableStart;
// Decompress the current metablock
byte[] bytes = SquashUtils.decompressBlock(reader, compressionType, monitor);
bos.write(bytes);
archiveToReaderOffsets.put(startOfBlockOffset, (long) totalUncompressedBytes);
totalUncompressedBytes += bytes.length;
}
// Convert the output stream into a BinaryReader and return
return SquashUtils.byteArrayToReader(bos.toByteArray());
}
}
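
A worked example of the offset translation in getHeaders, with hypothetical numbers: if the directory table's first metablock is 4,000 compressed bytes plus its 2-byte header and inflates to 8,192 bytes, archiveToReaderOffsets maps 0 -> 0 and 4002 -> 8192. For an inode with blockIndex 4002, blockOffset 64, and an uncompressed listing size of 200:

long blockStart = 8192L; // archiveToReaderOffsets.get(4002L)
long start = blockStart + 64; // 8256: where this listing begins
long end = start + 200 - 3; // 8453: minus the "." and ".." accounting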

View File

@@ -0,0 +1,94 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
import java.io.IOException;
import ghidra.app.util.bin.BinaryReader;
public class SquashDirectoryTableEntry {
// Offset into the uncompressed directory table where this entry is
private final int addressOffset;
// Added to the base inode number to get the sub-entry inode number (note: signed short)
private final short inodeNumberOffset;
// Stores the basic inode type (i.e. if it's an "extended file" inode, it will be a "basic file" here)
private final short inodeType;
// The number of bytes that will represent the name of this sub-entry
private final short nameSize;
// The result of the addition of the base inode and the offset
private final int inodeNumber;
// Upon creation, this is just the name of this sub-entry, but will be expanded to the full path
private String path;
/**
* Represents an entry in the directory table
* @param reader A binary reader with pointer index at the start of the entry data
* @param superBlock The SuperBlock for the current archive
* @param baseInode The base inode number that is used to calculate the current number
* @throws IOException Any read operation failure
*/
public SquashDirectoryTableEntry(BinaryReader reader, SquashSuperBlock superBlock,
long baseInode) throws IOException {
addressOffset = reader.readNextUnsignedShort();
inodeNumberOffset = reader.readNextShort(); // NOTE: Signed
inodeType = reader.readNextShort();
nameSize = reader.readNextShort();
// The name is stored without a null terminator, and its size field holds one less than its length, hence the +1
// Note: Though technically 16 bits, Linux caps name size at 256 chars
path = reader.readNextAsciiString(nameSize + 1);
// Find the inode number using the base in the table entry header and the offset
inodeNumber = (int) (baseInode + inodeNumberOffset);
}
public int getAddressOffset() {
return addressOffset;
}
public short getInodeType() {
return inodeType;
}
// Extract the filename from the path
public String getFileName() {
int slashIndex = path.lastIndexOf('/');
// If the path is still just the name of the file
if (slashIndex == -1) {
return path;
}
return path.substring(slashIndex + 1);
}
public int getInodeNumber() {
return inodeNumber;
}
public String getPath() {
return path;
}
}
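
Because inodeNumberOffset is a signed short, a sub-entry may reference an inode numbered below the header's base inode. A small worked example with hypothetical values:

long baseInode = 100; // from the directory table header
short inodeNumberOffset = -3; // signed: may be negative
int inodeNumber = (int) (baseInode + inodeNumberOffset); // 97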

View File

@@ -0,0 +1,78 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import ghidra.app.util.bin.BinaryReader;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
public class SquashDirectoryTableHeader {
// The number of sub-entries (off by 1, so a "0" really means there is one sub-entry)
private final long numberOfEntries;
// Relative to the inode table start, this is the byte offset where the corresponding inode is
private final long directoryInodeOffset;
// The base inode number. Sub-entries will store their inodes as an offset to this one (+/-)
private final long baseInode;
// A list of sub-entries
private final List<SquashDirectoryTableEntry> entries;
/**
* Represents a header in the directory table
* @param reader A binary reader with pointer index at the start of the header data
* @param superBlock The SuperBlock for the current archive
* @param monitor Monitor to allow the user to cancel the load
* @throws IOException Any read operation failure
* @throws CancelledException Archive load was cancelled
*/
public SquashDirectoryTableHeader(BinaryReader reader, SquashSuperBlock superBlock,
TaskMonitor monitor) throws IOException, CancelledException {
numberOfEntries = reader.readNextUnsignedInt();
directoryInodeOffset = reader.readNextUnsignedInt();
baseInode = reader.readNextUnsignedInt();
// Create a list of entries under this header
entries = new ArrayList<SquashDirectoryTableEntry>();
for (int i = 0; i < numberOfEntries + 1; i++) {
// Check if the user cancelled the load
monitor.checkCancelled();
entries.add(new SquashDirectoryTableEntry(reader, superBlock, baseInode));
}
}
public List<SquashDirectoryTableEntry> getEntries() {
return entries;
}
public long getBaseInodeNumber() {
return baseInode;
}
public long getDirectoryInodeOffset() {
return directoryInodeOffset;
}
}

View File

@@ -0,0 +1,76 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
import java.io.IOException;
import ghidra.app.util.bin.BinaryReader;
public class SquashExtendedDirectoryInode extends SquashBasicDirectoryInode {
// The number of directory indexes that follow the main inode structure
private int indexCount;
// An index into the xattr table, or 0xFFFFFFFF if this inode has no xattrs
private long xattrIndex;
/**
* Represents a SquashFS extended directory inode
* @param reader A binary reader with pointer index at the start of the inode data
* @param superBlock The SuperBlock for the current archive
* @throws IOException Any read operation failure
*/
public SquashExtendedDirectoryInode(BinaryReader reader, SquashSuperBlock superBlock)
throws IOException {
// Assign common inode header values
super(reader, superBlock, true);
// Assign extended directory specific values
hardLinkCount = reader.readNextUnsignedInt();
uncompressedFileSize = reader.readNextUnsignedInt();
blockIndex = reader.readNextUnsignedInt();
parentInodeNumber = reader.readNextUnsignedIntExact();
indexCount = reader.readNextUnsignedShort();
blockOffset = reader.readNextUnsignedShort();
xattrIndex = reader.readNextUnsignedInt();
// Skip all directory indexes following the inode
for (int i = 0; i < indexCount; i++) {
skipDirectoryListing(reader);
}
// Determine if the parent of the current inode is root
parentIsRoot = parentInodeNumber == superBlock.getInodeCount() + 1;
}
/**
* Skip the current directory listing as this implementation does not utilize them
* @param reader A binary reader with pointer index at the start of the directory listing
* @throws IOException Any read operation failure
*/
private void skipDirectoryListing(BinaryReader reader) throws IOException {
reader.readNextUnsignedInt(); // index
reader.readNextUnsignedInt(); // start
int nameSize = reader.readNextInt();
reader.readNextAsciiString(nameSize + 1); // name
}
long getXattrIndex() {
return xattrIndex;
}
}

View File

@@ -0,0 +1,81 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
import java.io.IOException;
import ghidra.app.util.bin.BinaryReader;
public class SquashExtendedFileInode extends SquashBasicFileInode {
// The number of bytes saved by omitting zero bytes
private long sparseCount;
// The number of hard links to this inode
private long linkCount;
// An index into the xattr table, or 0xFFFFFFFF if this inode has no xattrs
private long xattrIndex;
/**
* Represents a SquashFS extended file inode
* @param reader A binary reader with pointer index at the start of the inode data
* @param superBlock The SuperBlock for the current archive
* @throws IOException Any read operation failure
*/
public SquashExtendedFileInode(BinaryReader reader, SquashSuperBlock superBlock)
throws IOException {
// Assign common inode header values
super(reader, superBlock, true);
// Assign extended file specific values
startBlockOffset = reader.readNextLong();
fileSize = reader.readNextLong();
sparseCount = reader.readNextLong();
linkCount = reader.readNextUnsignedInt();
// If there are no fragments, skip the next two values
if (reader.peekNextInt() == -1) {
fragmentIndex = -1;
blockOffset = -1;
// Advance the reader position
reader.setPointerIndex(reader.getPointerIndex() + (BinaryReader.SIZEOF_INT * 2));
}
else {
fragmentIndex = reader.readNextUnsignedIntExact();
blockOffset = reader.readNextUnsignedIntExact();
}
xattrIndex = reader.readNextUnsignedInt();
// Calculate derived variables
setVars(reader, superBlock);
}
public long getSparseCount() {
return sparseCount;
}
public long getLinkCount() {
return linkCount;
}
public long getXattrIndex() {
return xattrIndex;
}
}

View File

@@ -0,0 +1,418 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
import static ghidra.formats.gfilesystem.fileinfo.FileAttributeType.*;
import java.io.IOException;
import java.io.OutputStream;
import java.util.*;
import ghidra.app.util.bin.BinaryReader;
import ghidra.app.util.bin.ByteProvider;
import ghidra.formats.gfilesystem.*;
import ghidra.formats.gfilesystem.annotations.FileSystemInfo;
import ghidra.formats.gfilesystem.fileinfo.FileAttributes;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
@FileSystemInfo(type = "squashfs", description = "SquashFS", factory = SquashFileSystemFactory.class)
public class SquashFileSystem extends AbstractFileSystem<SquashedFile> {
private ByteProvider provider;
private BinaryReader reader;
private SquashSuperBlock superBlock;
public SquashFileSystem(FSRLRoot fsFSRL, ByteProvider provider, FileSystemService fsService) {
super(fsFSRL, fsService);
fsIndex = new FileSystemIndexHelper<>(this, fsFSRL);
this.provider = provider;
// BinaryReader representing the entire archive
// Squash versions after 3.0 (2006) should be little endian
reader = new BinaryReader(provider, true /* LE */);
}
public void mount(TaskMonitor monitor) throws IOException, CancelledException {
monitor.setMessage("Opening " + SquashFileSystem.class.getSimpleName() + "...");
// Get the super block information for how to process the archive
superBlock = new SquashSuperBlock(reader);
// Parse the fragment table
SquashFragmentTable fragmentTable = new SquashFragmentTable(reader, superBlock, monitor);
// Parse the directory table
SquashDirectoryTable directoryTable =
new SquashDirectoryTable(reader, superBlock, fragmentTable, monitor);
// Parse the inode table
SquashInodeTable inodes = new SquashInodeTable(reader, superBlock, monitor);
// Build the parent/child relationships with the inodes
inodes.buildRelationships(monitor);
// The directory table entries point to inodes for additional information. Link the inodes
// to these entries
directoryTable.assignInodes(inodes, monitor);
// Give file structure to Ghidra to present to the user
SquashUtils.buildDirectoryStructure(fragmentTable, directoryTable, inodes, fsIndex,
monitor);
}
@Override
public ByteProvider getByteProvider(GFile file, TaskMonitor monitor)
throws IOException, CancelledException {
SquashedFile squashFile = fsIndex.getMetadata(file);
long fileSize = -1;
if (squashFile != null) {
fileSize = squashFile.getUncompressedSize();
}
// Decompress the file either to memory or storage and return a ByteProvider of the resulting file
return fsService.getDerivedByteProviderPush(provider.getFSRL(), file.getFSRL(),
file.getName(), fileSize, (os) -> {
extractFileToStream(os, file, monitor);
}, monitor);
}
/**
* Convert the given SquashFS file into a stream of bytes
* @param os The stream to write file data to
* @param file The file to convert
* @param monitor Monitor to allow the user to cancel the load
* @throws IOException Any read operation failure
* @throws CancelledException File load was cancelled
*/
public void extractFileToStream(OutputStream os, GFile file, TaskMonitor monitor)
throws IOException, CancelledException {
// If the current file is a symlink, try to follow it
file = followSymLink(file, 0);
SquashedFile squashedFile = fsIndex.getMetadata(file);
if (squashedFile == null) {
throw new IOException("Could not find SquashedFile associated with the symlink target");
}
// Stop if the associated inode is not a file
SquashInode inode = squashedFile.getInode();
if (!(inode.isFile())) {
throw new IOException("Inode is not a file");
}
// Get the associated file inode
SquashBasicFileInode fileInode = (SquashBasicFileInode) inode;
// Keep track of the total number of decompressed bytes for progress tracking reasons
long totalUncompressedBytes = 0;
// Set the monitor's completion point to be all bytes processed
monitor.initialize(fileInode.getFileSize());
// Process all the blocks comprising the file
totalUncompressedBytes += processFileBlocks(squashedFile, fileInode, os, monitor);
// Grab the tail end of the file if it exists
if (squashedFile.hasFragment()) {
totalUncompressedBytes += processTailEnd(squashedFile, fileInode, os, monitor);
}
// Monitor should be 100% at this point
monitor.setProgress(totalUncompressedBytes);
}
/**
* Given a GFile representing a symlink, return the destination GFile, recursing into referenced
* symlinks as needed. If the given file is not a symlink, it will be returned
* @param symLinkFile The file representing a symlink containing the target
* @param depth The current recursion depth to prevent recursing too far
* @return The destination file
* @throws IOException Issues relating to locating a symlink target
*/
private GFile followSymLink(GFile symLinkFile, int depth) throws IOException {
// Check if a file was supplied properly
if (symLinkFile == null) {
return null;
}
// Get the path associated with the given symlink
String path = getSymLinkPath(symLinkFile);
// If path is null, then the given file is not a symlink and should be returned as the destination
if (path == null) {
return symLinkFile;
}
// Make sure to not follow symlinks too far
if (depth > SquashConstants.MAX_SYMLINK_DEPTH) {
throw new IOException("Did not find symlink destination after max traversal");
}
// Start at the symlink's parent directory; path parts are resolved relative to it
GFile currentFile = symLinkFile.getParentFile();
// Split up the path into its parts
List<String> pathParts = new ArrayList<String>(Arrays.asList(path.split("/")));
// References to "." are redundant, so remove them along with any blank path segments
pathParts.removeIf(part -> part.contentEquals(".") || part.isBlank());
// Iterate over all parts of the input path, removing portions as ".." appears
ListIterator<String> iterator = pathParts.listIterator();
while (iterator.hasNext()) {
// Get the next portion of the path
String currentPart = iterator.next();
// If the link references up a directory
if (currentPart.equals("..")) {
// Move up a directory
currentFile = currentFile.getParentFile();
}
else {
// Get the file representing the next portion of the path
currentFile = fsIndex.lookup(currentFile, currentPart, null);
// Determine if the current file is a symlink and follow it if so
currentFile = followSymLink(currentFile, depth + 1);
}
// Check if the lookup failed
if (currentFile == null) {
throw new IOException("Could not find file within the given parent directory");
}
// Keep track of the depth
depth++;
}
// Return GFile representing the destination of the symlink
return currentFile;
}
/**
* If the given file is a symlink, return the path it points to (null if file is not a symlink)
* @param file The file to check
* @return The symlink path
* @throws IOException There was no SquashedFile for the given file
*/
private String getSymLinkPath(GFile file) throws IOException {
// Get the associated SquashedFile and make sure it is not null
SquashedFile possibleSymLinkFile = fsIndex.getMetadata(file);
if (possibleSymLinkFile == null) {
throw new IOException("Cannot retrieve SquashedFile associated with the given file");
}
// Check if the current part is a symlink
if (possibleSymLinkFile.getInode().isSymLink()) {
// Get and convert the associated inode
SquashSymlinkInode symLinkInode = (SquashSymlinkInode) possibleSymLinkFile.getInode();
// Get the target path
return symLinkInode.getPath();
}
// If the file is not a symlink, return null
return null;
}
/**
* Decompress (if needed) all data blocks associated with the given file and write them to the OutputStream
* @param squashedFile The file to process
* @param fileInode The inode associated with the file
* @param os The stream to write to
* @param monitor The monitor to keep track of the progress with
* @return The number of uncompressed bytes the blocks used
* @throws CancelledException The user cancelled the file read
* @throws IOException Any read error
*/
private int processFileBlocks(SquashedFile squashedFile, SquashBasicFileInode fileInode,
OutputStream os, TaskMonitor monitor) throws CancelledException, IOException {
int[] blockSizes = fileInode.getBlockSizes();
// Location of starting block
long location = fileInode.getStartBlockOffset();
int blockUncompressedBytes = 0;
// Handle the primary bytes of the file
for (int blockSizeHeader : blockSizes) {
// Check if the user cancelled the load
monitor.checkCancelled();
// Set the monitor's progress
monitor.setProgress(blockUncompressedBytes);
// Extract data from the block size header
boolean isCompressed =
(blockSizeHeader & SquashConstants.DATABLOCK_COMPRESSED_MASK) == 0;
long size = blockSizeHeader & ~SquashConstants.DATABLOCK_COMPRESSED_MASK;
// If we encounter a block with size zero, we write a full block of zeros to the output
if (size <= 0) {
// Write all zeroes for the given blockSize
size = superBlock.getBlockSize();
os.write(new byte[(int) size]);
// Increment the progress
blockUncompressedBytes += size;
continue;
}
// Set the reader to read from the block start location
reader.setPointerIndex(location);
// Move location to the start of the next block for next iteration
location += size;
byte[] buffer = null;
// Check for compression
if (isCompressed) {
buffer = SquashUtils.decompressBytes(reader, (int) size,
superBlock.getCompressionType(), monitor);
}
else {
buffer = reader.readNextByteArray((int) size);
}
// Write to the output and increment progress
os.write(buffer);
blockUncompressedBytes += buffer.length;
}
return blockUncompressedBytes;
}
/**
* Decompress (if needed) the tail end of the given file and write to OutputStream
* @param squashedFile The file to process
* @param fileInode The inode associated with the file
* @param os The stream to write to
* @param monitor The monitor to keep track of the progress with
* @return The number of uncompressed bytes the tail end used
* @throws CancelledException The user cancelled the file read
* @throws IOException Any read error
*/
private int processTailEnd(SquashedFile squashedFile, SquashBasicFileInode fileInode,
OutputStream os, TaskMonitor monitor) throws CancelledException, IOException {
SquashFragment fragment = squashedFile.getFragment();
byte[] buffer = null;
if (fragment.isCompressed()) {
// Set the pointer to where (relative to the start of the archive) the fragment starts
reader.setPointerIndex(fragment.getFragmentOffset());
// Decompress the fragment into a byte array
buffer = SquashUtils.decompressBytes(reader, (int) fragment.getFragmentSize(),
superBlock.getCompressionType(), monitor);
// Remove non-relevant portion of the fragment block
buffer = Arrays.copyOfRange(buffer, fileInode.getBlockOffset(),
fileInode.getBlockOffset() + fileInode.getTailEndSize());
}
else {
// Set the pointer to start of the tail end of file within the fragment
reader.setPointerIndex(fragment.getFragmentOffset() + fileInode.getBlockOffset());
// Read only the relevant portion of the fragment
buffer = reader.readNextByteArray(fileInode.getTailEndSize());
}
// Write to the output and increment progress
os.write(buffer);
return buffer.length;
}
@Override
public boolean isClosed() {
return provider == null;
}
@Override
public FileAttributes getFileAttributes(GFile file, TaskMonitor monitor) {
FileAttributes result = new FileAttributes();
SquashedFile squashedFile = fsIndex.getMetadata(file);
if (squashedFile != null) {
SquashInode inode = squashedFile.getInode();
// Add additional attributes to the root directory
if (fsIndex.getRootDir().equals(file)) {
result.add("Compression used", superBlock.getCompressionTypeString());
result.add("Block size", superBlock.getBlockSize());
result.add("Inode count", superBlock.getInodeCount());
result.add("Fragment count", superBlock.getTotalFragments());
result.add("SquashFS version", superBlock.getVersionString());
result.add(MODIFIED_DATE_ATTR, new Date(superBlock.getModTime() * 1000)); // seconds -> milliseconds
}
else {
result.add(MODIFIED_DATE_ATTR, new Date(inode.getModTime() * 1000)); // seconds -> milliseconds
}
// Add general attributes
result.add(NAME_ATTR, squashedFile.getName());
result.add(FSRL_ATTR, file.getFSRL());
// Add file-related attributes
if (inode.isFile()) {
SquashBasicFileInode fileInode = (SquashBasicFileInode) inode;
result.add(SIZE_ATTR, squashedFile.getUncompressedSize());
result.add(COMPRESSED_SIZE_ATTR, fileInode.getCompressedFileSize());
}
else if (inode.isSymLink()) {
SquashSymlinkInode symLinkInode = (SquashSymlinkInode) inode;
result.add(SYMLINK_DEST_ATTR, symLinkInode.getPath());
}
}
return result;
}
@Override
public void close() throws IOException {
refManager.onClose();
fsIndex.clear();
if (provider != null) {
provider.close();
provider = null;
}
}
}

View File

@@ -0,0 +1,52 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
import java.io.IOException;
import ghidra.app.util.bin.ByteProvider;
import ghidra.formats.gfilesystem.*;
import ghidra.formats.gfilesystem.factory.GFileSystemFactoryByteProvider;
import ghidra.formats.gfilesystem.factory.GFileSystemProbeBytesOnly;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
public class SquashFileSystemFactory
implements GFileSystemFactoryByteProvider<SquashFileSystem>, GFileSystemProbeBytesOnly {
public static final int PROBE_BYTES_REQUIRED = SquashConstants.MAGIC.length;
@Override
public SquashFileSystem create(FSRLRoot targetFSRL, ByteProvider byteProvider,
FileSystemService fsService, TaskMonitor monitor)
throws IOException, CancelledException {
SquashFileSystem fs = new SquashFileSystem(targetFSRL, byteProvider, fsService);
fs.mount(monitor);
return fs;
}
@Override
public int getBytesRequired() {
return PROBE_BYTES_REQUIRED;
}
@Override
public boolean probeStartBytes(FSRL containerFSRL, byte[] startBytes) {
return SquashUtils.isSquashFS(startBytes);
}
}
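
SquashUtils.isSquashFS is not among the files excerpted here, but given PROBE_BYTES_REQUIRED it presumably just compares the leading bytes of the container against SquashConstants.MAGIC; a sketch of probeStartBytes under that assumption (requires java.util.Arrays):

// Sketch only; the actual check lives in SquashUtils
return startBytes.length >= SquashConstants.MAGIC.length &&
Arrays.equals(startBytes, 0, SquashConstants.MAGIC.length,
SquashConstants.MAGIC, 0, SquashConstants.MAGIC.length);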

View File

@@ -0,0 +1,71 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
import java.io.IOException;
import ghidra.app.util.bin.BinaryReader;
import ghidra.util.Msg;
public class SquashFragment {
// Offset within the archive where the fragment starts
private final long fragmentOffset;
// Header for the fragment which contains two fields:
// isCompressed - If the 1 << 24 bit is cleared, the fragment is compressed
// fragmentSize - The size of the fragment in bytes (lower 24 bits)
private final int header;
// This field is unused as of 4.0
private final int unusedField;
/**
* Represents a SquashFS fragment
* @param reader A binary reader with pointer index at the start of the fragment data
* @throws IOException Any read operation failure
*/
public SquashFragment(BinaryReader reader) throws IOException {
fragmentOffset = reader.readNextLong();
// The next integer contains both size and compression info to be masked out
header = reader.readNextInt();
// This field should be zero; getUnusedField() warns if it is not
unusedField = reader.readNextInt();
}
public long getFragmentOffset() {
return fragmentOffset;
}
public boolean isCompressed() {
return (header & SquashConstants.FRAGMENT_COMPRESSED_MASK) == 0;
}
public long getFragmentSize() {
return header & ~SquashConstants.FRAGMENT_COMPRESSED_MASK;
}
public int getUnusedField() {
if (unusedField != 0) {
Msg.warn(this, "Fragment has non-zero \"unused\" field");
}
return unusedField;
}
}

View File

@@ -0,0 +1,106 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import ghidra.app.util.bin.BinaryReader;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
public class SquashFragmentTable {
// A list of fragments within the archive
private final List<SquashFragment> fragmentEntries;
// The lowest fragment pointer in the archive. Used for locating the end of the directory table
private long minFragPointer = Long.MAX_VALUE;
/**
* Represents the fragment table within the SquashFS archive
* @param reader A binary reader for the entire SquashFS archive
* @param superBlock The SuperBlock for the current archive
* @param monitor Monitor to allow the user to cancel the load
* @throws IOException Any read operation failure
* @throws CancelledException The user cancelled the archive load
*/
public SquashFragmentTable(BinaryReader reader, SquashSuperBlock superBlock,
TaskMonitor monitor) throws IOException, CancelledException {
// Check if the user cancelled the load
monitor.checkCancelled();
// Read from the start of the fragment table
reader.setPointerIndex(superBlock.getFragmentTableStart());
fragmentEntries = new ArrayList<SquashFragment>();
// From the combined size of all fragment entries, calculate how many metablock pointers
// the fragment table index contains
long numFragments =
((superBlock.getTotalFragments() * SquashConstants.FRAGMENT_ENTRY_LENGTH) +
SquashConstants.MAX_UNIT_BLOCK_SIZE - 1) / SquashConstants.MAX_UNIT_BLOCK_SIZE;
// Store the list of fragment pointers
long[] fragmentPointers = reader.readNextLongArray((int) numFragments);
// For each pointer to a fragment, move to that fragment and get the data from it
for (int i = 0; i < fragmentPointers.length; i++) {
// Check if the user cancelled the load
monitor.checkCancelled();
// Assign the smallest fragment pointer
minFragPointer = Math.min(minFragPointer, fragmentPointers[i]);
// Read from the start of the fragment
reader.setPointerIndex(fragmentPointers[i]);
// If needed, decompress the fragment
byte[] uncompressedBytes =
SquashUtils.decompressBlock(reader, superBlock.getCompressionType(), monitor);
// This reader will only hold the uncompressed bytes
BinaryReader fragmentReader = SquashUtils.byteArrayToReader(uncompressedBytes);
// Add all fragments to the entry list
while (fragmentReader.hasNext()) {
// Check if the user cancelled the load
monitor.checkCancelled();
fragmentEntries.add(new SquashFragment(fragmentReader));
}
}
}
public List<SquashFragment> getFragments() {
return fragmentEntries;
}
public SquashFragment getFragment(int index) {
if (index >= 0 && index < fragmentEntries.size()) {
return fragmentEntries.get(index);
}
return null;
}
public long getMinFragPointer() {
return minFragPointer;
}
}
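
The index count computed above is a ceiling division: each 8 KiB metablock holds 8192 / 16 = 512 fragment entries, so, for example, an archive with 513 fragments needs two pointers:

long totalFragments = 513; // hypothetical
long numPointers = ((totalFragments * SquashConstants.FRAGMENT_ENTRY_LENGTH) +
SquashConstants.MAX_UNIT_BLOCK_SIZE - 1) / SquashConstants.MAX_UNIT_BLOCK_SIZE; // 2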

View File

@@ -0,0 +1,122 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
import java.io.IOException;
import ghidra.app.util.bin.BinaryReader;
public class SquashInode {
// The type of inode as an integer
private final short inodeType;
// Unix file permissions bitmask
private final short permissions;
// Index into the ID table where the user ID of the owner resides
private final int userID;
// Index into the ID table where the group ID of the owner resides
private final int groupID;
// Unix timestamp of the last time the inode was modified (not counting leap seconds)
private final long modTime;
// A unique number for this inode. Must be at least 1 and no greater than the total number of inodes
private final int inodeNumber;
// The parent of this inode
private SquashInode parent = null;
// The directory table entry that refers to this inode
private SquashDirectoryTableEntry directoryTableEntry;
/**
* Represents a generic SquashFS inode
* @param reader A binary reader with pointer index at the start of the inode data
* @param superBlock The SuperBlock for the current archive
* @throws IOException Any read operation failure
*/
public SquashInode(BinaryReader reader, SquashSuperBlock superBlock) throws IOException {
// Assign common inode header values
inodeType = reader.readNextShort();
permissions = reader.readNextShort();
userID = reader.readNextUnsignedShort();
groupID = reader.readNextUnsignedShort();
modTime = reader.readNextUnsignedInt();
inodeNumber = reader.readNextUnsignedIntExact();
}
public short getPermissions() {
return permissions;
}
public short getType() {
return inodeType;
}
public int getUserID() {
return userID;
}
public int getGroupID() {
return groupID;
}
public long getModTime() {
return modTime;
}
public int getNumber() {
return inodeNumber;
}
void setParent(SquashInode parentInode) {
parent = parentInode;
}
public SquashBasicDirectoryInode getParent() {
if (parent == null || !parent.isDir()) {
return null;
}
return (SquashBasicDirectoryInode) parent;
}
void setDirectoryTableEntry(SquashDirectoryTableEntry entry) {
directoryTableEntry = entry;
}
public SquashDirectoryTableEntry getDirectoryTableEntry() {
return directoryTableEntry;
}
public boolean isDir() {
return inodeType == SquashConstants.INODE_TYPE_BASIC_DIRECTORY ||
inodeType == SquashConstants.INODE_TYPE_EXTENDED_DIRECTORY;
}
public boolean isFile() {
return inodeType == SquashConstants.INODE_TYPE_BASIC_FILE ||
inodeType == SquashConstants.INODE_TYPE_EXTENDED_FILE;
}
public boolean isSymLink() {
return inodeType == SquashConstants.INODE_TYPE_BASIC_SYMLINK ||
inodeType == SquashConstants.INODE_TYPE_EXTENDED_SYMLINK;
}
}

View File

@@ -0,0 +1,196 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import ghidra.app.util.bin.BinaryReader;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
public class SquashInodeTable {
// An array of inodes indexed by their inode number
private final SquashInode[] inodes;
// The offset in the uncompressed inode table where the root inode begins
private long rootInodeOffset;
// The root inode of the archive
private SquashInode rootInode;
/**
* Represents the inode table within the SquashFS archive
* @param reader A binary reader for the entire SquashFS archive
* @param superBlock The SuperBlock for the current archive
* @param monitor Monitor to allow the user to cancel the load
* @throws IOException Any read operation failure
* @throws CancelledException Archive load was cancelled
*/
public SquashInodeTable(BinaryReader reader, SquashSuperBlock superBlock, TaskMonitor monitor)
throws IOException, CancelledException {
// Read from the start of the inode table
reader.setPointerIndex(superBlock.getInodeTableStart());
// The reader will now contain ONLY the uncompressed bytes of the inode table
reader =
decompressInodeTable(reader, superBlock.getDirectoryTableStart(), superBlock, monitor);
// Create the inode array, sized one larger than the inode count since numbering starts at 1
inodes = new SquashInode[(int) superBlock.getInodeCount() + 1];
// inodes begin indexing at 1, so 0th inode is null
inodes[0] = null;
// While there are still inodes to process in the decompressed stream
while (reader.hasNext()) {
// Check if the user cancelled the load
monitor.checkCancelled();
boolean isRootInode = reader.getPointerIndex() == rootInodeOffset;
// Get the inode type without advancing the reader
short inodeType = reader.peekNextShort();
SquashInode tempInode;
// Create a new inode based on the inode type
switch (inodeType) {
case SquashConstants.INODE_TYPE_BASIC_FILE:
tempInode = new SquashBasicFileInode(reader, superBlock, false);
break;
case SquashConstants.INODE_TYPE_EXTENDED_FILE:
tempInode = new SquashExtendedFileInode(reader, superBlock);
break;
case SquashConstants.INODE_TYPE_BASIC_DIRECTORY:
tempInode = new SquashBasicDirectoryInode(reader, superBlock, false);
break;
case SquashConstants.INODE_TYPE_EXTENDED_DIRECTORY:
tempInode = new SquashExtendedDirectoryInode(reader, superBlock);
break;
case SquashConstants.INODE_TYPE_BASIC_SYMLINK:
tempInode = new SquashSymlinkInode(reader, superBlock, false);
break;
case SquashConstants.INODE_TYPE_EXTENDED_SYMLINK:
tempInode = new SquashSymlinkInode(reader, superBlock, true);
break;
default:
// All other inode types are effectively skipped, but processed for info
tempInode = new SquashOtherInode(reader, superBlock, inodeType);
}
// Validate the inode number, then add the given inode to the list (indexed by its number)
int tempInodeNumber = tempInode.getNumber();
if (tempInodeNumber == 0 || tempInodeNumber > superBlock.getInodeCount()) {
throw new IOException("Invalid inode number found: " + tempInodeNumber);
}
inodes[tempInode.getNumber()] = tempInode;
// Record root inode if needed
if (isRootInode) {
rootInode = tempInode;
}
}
}
public SquashInode[] getInodes() {
return inodes;
}
public SquashInode getInodeByNumber(int inodeNumber) {
return inodes[inodeNumber];
}
public SquashInode getRootInode() {
return rootInode;
}
/**
* Build the parent/child relationships between inodes
* @param monitor Monitor to allow the user to cancel the load
* @throws CancelledException Archive load was cancelled
*/
public void buildRelationships(TaskMonitor monitor) throws CancelledException {
// Work backwards (last inode is root) and skip the first inode
for (int i = inodes.length - 1; i > 0; i--) {
// Check if the user cancelled the load
monitor.checkCancelled();
SquashInode currentInode = inodes[i];
// Only directory inodes have parent/child relationships
if (currentInode.isDir()) {
SquashBasicDirectoryInode dirInode = (SquashBasicDirectoryInode) currentInode;
// Check if the parent of the current node is the root node
if (!dirInode.isParentRoot()) {
dirInode.setParent(inodes[dirInode.getParentInodeNumber()]);
}
}
}
}
/**
* Decompress the inode table and record the root inode
* @param reader The BinaryReader pointed to the start of the section
* @param endAddress The address the section ends at
* @param superBlock The SuperBlock for the current archive
* @param monitor Monitor to allow the user to cancel the load
* @return A BinaryReader containing ONLY the uncompressed bytes of the section
* @throws IOException Any read operation failure
* @throws CancelledException Archive load was cancelled
*/
private BinaryReader decompressInodeTable(BinaryReader reader, long endAddress,
SquashSuperBlock superBlock, TaskMonitor monitor)
throws IOException, CancelledException {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
// Keep track of how many bytes result from decompression
int totalUncompressedBytes = 0;
// Continue reading until the end of the section is reached
while (reader.getPointerIndex() < endAddress) {
// Check if the user cancelled the load
monitor.checkCancelled();
// Check if the current metadata block contains the root inode
if ((reader.getPointerIndex() - superBlock.getInodeTableStart()) == superBlock
.getRootInodeBlockLocation()) {
// Tell the inode table the root inode location within the uncompressed bytes
rootInodeOffset = totalUncompressedBytes + superBlock.getRootInodeOffset();
}
// Decompress the current metablock
byte[] bytes =
SquashUtils.decompressBlock(reader, superBlock.getCompressionType(), monitor);
// Add bytes to the stream
bos.write(bytes);
totalUncompressedBytes += bytes.length;
}
// Convert the output stream into a BinaryReader and return
return SquashUtils.byteArrayToReader(bos.toByteArray());
}
}
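
The block location and offset consumed above come from the superblock's 64-bit root inode reference, which the SquashFS format packs as a metablock offset plus an offset into the uncompressed block. SquashSuperBlock is not shown in this excerpt, so the following decode is a sketch of the likely implementation:

long rootInodeRef = 0x1234_0010L; // hypothetical packed reference
long blockLocation = rootInodeRef >>> 16; // 0x1234: metablock start, relative to the inode table
int offsetInBlock = (int) (rootInodeRef & 0xFFFF); // 0x0010: offset within the uncompressed block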

View File

@@ -0,0 +1,58 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
import java.io.IOException;
import ghidra.app.util.bin.BinaryReader;
import ghidra.util.Msg;
public class SquashMetablock {
// Header for the metablock, which packs two fields into one short:
// isCompressed - The metablock is compressed if the 1 << 15 bit is cleared
// blockSize - The size of the metablock in bytes (lower 15 bits)
private final short header;
/**
* Represents the header preceding a metadata block within the SquashFS archive
* @param reader A binary reader with pointer index at the start of the metadata
* @throws IOException Any read operation failure
*/
public SquashMetablock(BinaryReader reader) throws IOException {
// The metadata short contains both size and compression info to be masked out
header = reader.readNextShort();
}
public boolean isCompressed() {
return (header & SquashConstants.METABLOCK_UNCOMPRESSED_MASK) == 0;
}
public short getBlockSize() {
short blockSize = (short) (header & ~SquashConstants.METABLOCK_UNCOMPRESSED_MASK);
// Let the user know if the current block size exceeds what is allowed per standard
if (blockSize > SquashConstants.MAX_UNIT_BLOCK_SIZE) {
Msg.warn(this, "Unit block size is too large!");
}
return blockSize;
}
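// Worked example (illustrative values, not from a real archive): a raw header of
// 0x802A has the 1 << 15 bit set, so 0x002A (42) bytes follow stored uncompressed,
// while a raw header of 0x002A would mean 42 compressed bytes follow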
}

View File

@ -0,0 +1,94 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
import java.io.IOException;
import java.util.HexFormat;
import ghidra.app.util.bin.BinaryReader;
public class SquashOtherInode extends SquashInode {
// The number of hard links to this inode
private long linkCount;
// The size of the target path in bytes
private int targetSize;
// Index into the xattr table
private long xattrIndex;
// System specific device number (on Linux, this contains both a major and a minor device number)
// major = (deviceNumber & 0xFFF00) >> 8
// minor = (deviceNumber & 0x000FF)
private long deviceNumber;
/**
* Represents a SquashFS inode that is not a file, directory, or symlink (i.e. a device, FIFO, or socket inode)
* @param reader A binary reader with pointer index at the start of the inode data
* @param superBlock The SuperBlock for the current archive
* @param inodeType The type of the inode
* @throws IOException Any read operation failure
*/
public SquashOtherInode(BinaryReader reader, SquashSuperBlock superBlock, int inodeType)
throws IOException {
// Assign common inode header values
super(reader, superBlock);
switch (inodeType) {
case SquashConstants.INODE_TYPE_BASIC_BLOCK_DEVICE:
case SquashConstants.INODE_TYPE_BASIC_CHAR_DEVICE:
linkCount = reader.readNextUnsignedInt();
deviceNumber = reader.readNextUnsignedInt();
break;
case SquashConstants.INODE_TYPE_EXTENDED_BLOCK_DEVICE:
case SquashConstants.INODE_TYPE_EXTENDED_CHAR_DEVICE:
linkCount = reader.readNextUnsignedInt();
deviceNumber = reader.readNextUnsignedInt();
xattrIndex = reader.readNextUnsignedInt();
break;
case SquashConstants.INODE_TYPE_BASIC_FIFO:
case SquashConstants.INODE_TYPE_BASIC_SOCKET:
linkCount = reader.readNextUnsignedInt();
break;
case SquashConstants.INODE_TYPE_EXTENDED_FIFO:
case SquashConstants.INODE_TYPE_EXTENDED_SOCKET:
linkCount = reader.readNextUnsignedInt();
xattrIndex = reader.readNextUnsignedInt();
break;
default:
throw new IOException(
"Unknown inode type: 0x" + HexFormat.of().toHexDigits(inodeType));
}
}
public long getLinkCount() {
return linkCount;
}
public int getTargetSize() {
return targetSize;
}
public long getXattrIndex() {
return xattrIndex;
}
public long getDeviceNumber() {
return deviceNumber;
}
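// A convenience sketch (not part of the original class; method names are
// illustrative): splits the stored device number into its major and minor
// parts using the masks documented above
public int getMajorDeviceNumber() {
return (int) ((deviceNumber & 0xFFF00) >> 8);
}
public int getMinorDeviceNumber() {
return (int) (deviceNumber & 0x000FF);
}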
}

View File

@ -0,0 +1,353 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
import java.io.IOException;
import ghidra.app.util.bin.BinaryReader;
import ghidra.util.Msg;
public class SquashSuperBlock {
/**
* ===== 32 BIT INTEGER VALUES =====
*/
// The magic bytes of a SquashFS archive: "hsqs" when stored little endian, "sqsh" when big endian
private final int magic;
// The number of inodes in the archive
private final long inodeCount;
// Unix timestamp of the last time the archive was modified (not counting leap seconds)
private final long modTime;
// The size of a data block in bytes (must be a power of two between 4 KiB and 1 MiB)
private final long blockSize;
// The number of entries in the fragment table
private final long totalFragments;
/**
* ===== 16 BIT SHORT VALUES =====
*/
// The type of compression used
private final int compressionType;
// This should equal log2(blockSize). If that's not the case, the archive is considered corrupt
private final int blockLog;
// Flags with additional information about the archive
private final int flags;
// The number of entries in the ID lookup table
private final int totalIDs;
// The major SquashFS version (should always be 4)
private final int majorVersion;
// The minor SquashFS version (should always be 0)
private final int minorVersion;
/**
* ===== 64 BIT LONG VALUES =====
*/
// A reference to the inode of the root directory
// The upper 48 bits are the location where the metadata block resides
// The lower 16 bits are the offset into the uncompressed metadata block where the inode starts
private final long rootInode;
// The number of bytes used by the archive. This may be less than the actual file size
// because the total file size must be padded to a multiple of the block size
private final long bytesUsed;
// The byte offset to the start of the ID table
private final long idTableStart;
// The byte offset to the start of the XATTR ID table
private final long xattrIdTableStart;
// The byte offset to the start of the inode table
private final long inodeTableStart;
// The byte offset to the start of the directory table
private final long directoryTableStart;
// The byte offset to the start of the fragment table
private final long fragmentTableStart;
// The byte offset to the start of the export table
private final long exportTableStart;
/**
* ===== FLAGS BREAKDOWN =====
* NOTE: Descriptions apply when the flag is set
* 0x0001 - Inodes are not compressed (NOTE: UID/GIDs also share this setting)
* 0x0002 - Data blocks are not compressed
* 0x0004 - Not used in SquashFS version 4+. This should never be set
* 0x0008 - Fragments are not compressed
* 0x0010 - Files are not fragmented and will be padded to reach a full block size
* 0x0020 - If last block size < block size, it will be stored as a fragment
* 0x0040 - Identical files are only stored once
* 0x0080 - The export table is populated, allowing for exporting via NFS
* 0x0100 - The Xattrs are stored uncompressed
* 0x0200 - There are no Xattrs in the archive
* 0x0400 - The compression algorithms section is present (only for certain algorithms)
* 0x0800 - The ID table is uncompressed
*/
/**
* Represents the SuperBlock (archive processing information) within the SquashFS archive
* @param reader A binary reader for the entire SquashFS archive
* @throws IOException Any read operation failure
*/
SquashSuperBlock(BinaryReader reader) throws IOException {
// Fetch the 32 bit integer fields
magic = reader.readNextUnsignedIntExact();
inodeCount = reader.readNextUnsignedInt();
modTime = reader.readNextUnsignedInt();
blockSize = reader.readNextUnsignedInt();
totalFragments = reader.readNextUnsignedInt();
// Fetch the 16 bit short fields
compressionType = reader.readNextUnsignedShort();
blockLog = reader.readNextUnsignedShort();
flags = reader.readNextUnsignedShort();
totalIDs = reader.readNextUnsignedShort();
majorVersion = reader.readNextUnsignedShort();
minorVersion = reader.readNextUnsignedShort();
// Fetch the 64 bit long fields
rootInode = reader.readNextLong();
bytesUsed = reader.readNextLong();
idTableStart = reader.readNextLong();
xattrIdTableStart = reader.readNextLong();
inodeTableStart = reader.readNextLong();
directoryTableStart = reader.readNextLong();
fragmentTableStart = reader.readNextLong();
exportTableStart = reader.readNextLong();
// Check that the SuperBlock values are what is expected by this FileSystem
checkCompatibility();
}
public long getMagicBytes() {
return magic;
}
public long getInodeCount() {
return inodeCount;
}
public long getModTime() {
return modTime;
}
public long getBlockSize() {
return blockSize;
}
public long getTotalFragments() {
return totalFragments;
}
public int getCompressionType() {
return compressionType;
}
public int getBlockLog() {
return blockLog;
}
public int getRawFlags() {
return flags;
}
public int getTotalIDs() {
return totalIDs;
}
public int getMajorVersion() {
return majorVersion;
}
public int getMinorVersion() {
return minorVersion;
}
public long getRootInode() {
return rootInode;
}
public long getRootInodeBlockLocation() {
return rootInode >> 16;
}
public long getRootInodeOffset() {
return rootInode & 0xFFFF;
}
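// Worked example (illustrative value): a root inode reference of 0x12340056L
// splits into metadata block location 0x1234 (upper 48 bits) and offset 0x56
// (lower 16 bits) into that uncompressed block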
public long getBytesUsed() {
return bytesUsed;
}
public long getIdTableStart() {
return idTableStart;
}
public long getXattrIdTableStart() {
return xattrIdTableStart;
}
public long getInodeTableStart() {
return inodeTableStart;
}
public long getDirectoryTableStart() {
return directoryTableStart;
}
public long getFragmentTableStart() {
return fragmentTableStart;
}
public long getExportTableStart() {
return exportTableStart;
}
public boolean isInodesUncompressed() {
return (flags & SquashConstants.UNCOMPRESSED_INODES) != 0;
}
public boolean isDataUncompressed() {
return (flags & SquashConstants.UNCOMPRESSED_DATA_BLOCKS) != 0;
}
public boolean isUsedFlagSet() {
return (flags & SquashConstants.UNUSED_FLAG) != 0;
}
public boolean isFragmentsUncompressed() {
return (flags & SquashConstants.UNCOMPRESSED_FRAGMENTS) != 0;
}
public boolean isFragmentsUnused() {
return (flags & SquashConstants.NO_FRAGMENTS) != 0;
}
public boolean isAlwaysFragment() {
return (flags & SquashConstants.ALWAYS_FRAGMENT) != 0;
}
public boolean allowDuplicates() {
return (flags & SquashConstants.NO_DUPLICATE_DATE) != 0;
}
public boolean isExportable() {
return (flags & SquashConstants.EXPORT_TABLE_EXISTS) != 0;
}
public boolean isXattrsUncompressed() {
return (flags & SquashConstants.UNCOMPRESSED_XATTRS) != 0;
}
public boolean hasXattrs() {
// The NO_XATTRS flag is set when the archive contains no Xattrs
return (flags & SquashConstants.NO_XATTRS) == 0;
}
public boolean isCompressionOptionsPresent() {
return (flags & SquashConstants.COMPRESSION_OPTIONS_EXIST) != 0;
}
public boolean isIDsUncompressed() {
return (flags & SquashConstants.UNCOMPRESSED_IDS) != 0;
}
public String getVersionString() {
return majorVersion + "." + minorVersion;
}
/**
* Validate the SuperBlock against expected values and warn the user of any possible issues
*/
public void checkCompatibility() {
// Verify the SquashFS version and warn the user if it isn't 4.0
if ((this.majorVersion != 4) || (this.minorVersion != 0)) {
Msg.warn(this, "SquashFS archive is version " + majorVersion + "." + minorVersion +
" but Ghidra has only been tested with version 4.0");
}
// Let the user know if the Xattr table is missing
if ((xattrIdTableStart == SquashConstants.SECTION_OMITTED)) {
Msg.info(this, "In SquashFS archive, the optional Xattr table is missing");
}
// Let the user know if the fragment table is missing
if ((fragmentTableStart == SquashConstants.SECTION_OMITTED)) {
Msg.info(this, "In SquashFS archive, the optional fragment table is missing");
}
// Let the user know if the export table is missing
if ((exportTableStart == SquashConstants.SECTION_OMITTED)) {
Msg.info(this, "In SquashFS archive, the optional export table is missing");
}
// Check if the unused flag is set and warn the user if it is
if (isUsedFlagSet() && (majorVersion >= 4)) {
Msg.warn(this,
"In SquashFS archive super block, the unused flag is set when it should " +
"be cleared. Per standard, the archive is invalid. Continue with caution!");
}
// Check if blockLog is correct and warn the user if not
if (1 << blockLog != blockSize) {
Msg.warn(this,
"In SquashFS archive super block, the blocksize does not match the blockLog value." +
" Per standard, the archive is invalid. Continue with caution!");
}
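// For example, the common default block size of 0x20000 (128 KiB) requires
// blockLog == 17, since 1 << 17 == 0x20000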
// Check if the flags for compressed inodes and compressed IDs match and warn the user if not
if ((isInodesUncompressed() != isIDsUncompressed()) && (majorVersion >= 4)) {
Msg.warn(this,
"In SquashFS archive super block, the flags for whether inodes and IDs" +
"are compressed should match. This is to maintain backwards compantability, " +
"but they differ in your archive. Continue with caution!");
}
}
public String getCompressionTypeString() {
switch (compressionType) {
case SquashConstants.COMPRESSION_TYPE_GZIP:
return "gzip";
case SquashConstants.COMPRESSION_TYPE_LZMA:
return "lzma";
case SquashConstants.COMPRESSION_TYPE_LZO:
return "lzo";
case SquashConstants.COMPRESSION_TYPE_XZ:
return "xz";
case SquashConstants.COMPRESSION_TYPE_LZ4:
return "lz4-block";
case SquashConstants.COMPRESSION_TYPE_ZSTD:
return "zstd";
default:
return "Unknown";
}
}
}

View File

@ -0,0 +1,57 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
import java.io.IOException;
import ghidra.app.util.bin.BinaryReader;
public class SquashSymlinkInode extends SquashInode {
// The number of hard links to this inode
private long linkCount;
// The path to the link target (not null terminated when stored)
private String targetPath;
// Index into the xattr table
private long xattrIndex;
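/**
* Represents a SquashFS symlink inode
* @param reader A binary reader with pointer index at the start of the inode data
* @param superBlock The SuperBlock for the current archive
* @param isExtended True if the inode is the extended symlink type
* @throws IOException Any read operation failure
*/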
public SquashSymlinkInode(BinaryReader reader, SquashSuperBlock superBlock, boolean isExtended)
throws IOException {
super(reader, superBlock);
linkCount = reader.readNextUnsignedInt();
int targetSize = reader.readNextInt();
targetPath = reader.readNextAsciiString(targetSize);
if (isExtended) {
xattrIndex = reader.readNextUnsignedInt();
}
}
public long getLinkCount() {
return linkCount;
}
public String getPath() {
return targetPath;
}
public long getXattrIndex() {
return xattrIndex;
}
}

View File

@ -0,0 +1,286 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
import java.io.IOException;
import java.io.InputStream;
import java.util.List;
import org.apache.commons.compress.compressors.deflate.DeflateCompressorInputStream;
import org.apache.commons.compress.compressors.lz4.BlockLZ4CompressorInputStream;
import org.apache.commons.compress.compressors.xz.XZCompressorInputStream;
import org.tukaani.xz.LZMAInputStream;
import ghidra.app.util.bin.*;
import ghidra.formats.gfilesystem.FileSystemIndexHelper;
import ghidra.formats.gfilesystem.GFile;
import ghidra.util.Msg;
import ghidra.util.exception.CancelledException;
import ghidra.util.task.TaskMonitor;
public class SquashUtils {
/**
* Match the first four bytes of the given array against the SquashFS magic bytes
* @param bytes The first bytes of a file (must contain at least 4 bytes)
* @return Whether or not the bytes match the SquashFS magic
*/
public static boolean isSquashFS(byte[] bytes) {
return bytes.length >= SquashConstants.MAGIC.length &&
bytes[0] == SquashConstants.MAGIC[0] && bytes[1] == SquashConstants.MAGIC[1] &&
bytes[2] == SquashConstants.MAGIC[2] && bytes[3] == SquashConstants.MAGIC[3];
}
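// A minimal usage sketch (assumes a ghidra.app.util.bin.ByteProvider named
// "provider" for the candidate file):
//   byte[] start = provider.readBytes(0, SquashConstants.MAGIC.length);
//   boolean isSquash = SquashUtils.isSquashFS(start);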
/**
* Decompress a metablock into a byte array
* @param reader The BinaryReader pointed to the start of the metablock
* @param compressionType The compression type of the archive
* @param monitor Monitor to allow the user to cancel the load
* @return A byte array containing the uncompressed bytes of the metablock
* @throws IOException Any read operation failure
* @throws CancelledException Archive load was cancelled
*/
public static byte[] decompressBlock(BinaryReader reader, int compressionType,
TaskMonitor monitor) throws IOException, CancelledException {
SquashMetablock header = new SquashMetablock(reader);
// Only perform decompression if the block is compressed
if (header.isCompressed()) {
return decompressBytes(reader, header.getBlockSize(), compressionType, monitor);
}
return reader.readNextByteArray(header.getBlockSize());
}
/**
* Create a BinaryReader from the given byte array
* @param bytes The source bytes
* @return A BinaryReader for the source byte array
*/
public static BinaryReader byteArrayToReader(byte[] bytes) {
ByteProvider newProvider = new ByteArrayProvider(bytes);
return new BinaryReader(newProvider, true /* LE */);
}
/**
* Decompress the given bytes
* @param reader A BinaryReader pointed at the start of the bytes to be decompressed
* @param length The amount of bytes to decompress
* @param compressionType The type of compression being used by the archive
* @param monitor Monitor to allow the user to cancel the load
* @return A byte array containing the decompressed bytes
* @throws IOException Any kind of decompression/read error
* @throws CancelledException Archive load was cancelled
*/
public static byte[] decompressBytes(BinaryReader reader, int length, int compressionType,
TaskMonitor monitor) throws IOException, CancelledException {
// Check if the user cancelled the load
monitor.checkCancelled();
// Create InputStream containing ONLY the source compressed bytes
InputStream is = getSubInputStream(reader, length);
// Convert the InputStream into a decompression stream
try (InputStream decompressedInputStream = getDecompressionStream(is, compressionType)) {
// Decompress and return all bytes from the stream
return decompressedInputStream.readAllBytes();
}
finally {
is.close();
}
}
/**
* Create an InputStream containing only the next n bytes from the given reader
* @param reader A BinaryReader pointed at the start of the bytes to be read
* @param length The amount of bytes to be read
* @return An InputStream containing n bytes
*/
public static InputStream getSubInputStream(BinaryReader reader, long length) {
// Get the start of the stream and advance the reader position
long start = reader.getPointerIndex();
reader.setPointerIndex(start + length);
// Create and return the bounded input stream
ByteProvider bp = reader.getByteProvider();
ByteProviderWrapper subBP = new ByteProviderWrapper(bp, start, length);
return new ByteProviderInputStream.ClosingInputStream(subBP);
}
/**
* Convert the given InputStream into the appropriate decompression InputStream for the data
* @param is InputStream containing the compressed source bytes
* @param compressionType The type of compression the archive uses
* @return An appropriate decompression InputStream for the data
* @throws IOException Conversion failed (likely due to unsupported compression algorithm)
*/
public static InputStream getDecompressionStream(InputStream is, int compressionType)
throws IOException {
// Based on the supplied compression type, return the appropriate type of CompressorInputStream
switch (compressionType) {
case SquashConstants.COMPRESSION_TYPE_GZIP:
return new DeflateCompressorInputStream(is);
case SquashConstants.COMPRESSION_TYPE_LZMA:
LZMAInputStream lzmaIn = new LZMAInputStream(is);
lzmaIn.enableRelaxedEndCondition();
return lzmaIn;
case SquashConstants.COMPRESSION_TYPE_LZO:
throw new IOException("LZO compression is not supported");
case SquashConstants.COMPRESSION_TYPE_XZ:
return new XZCompressorInputStream(is);
case SquashConstants.COMPRESSION_TYPE_LZ4:
return new BlockLZ4CompressorInputStream(is);
case SquashConstants.COMPRESSION_TYPE_ZSTD:
throw new IOException("ZSTD compression is not supported");
default:
throw new IOException("Supplied compression type (code: " + compressionType +
") was not recognized. ");
}
}
/**
* Assemble the directory structure of the archive
* @param fragTable The processed fragment table of the archive
* @param dirTable The processed directory table of the archive
* @param inodes The processed inode table of the archive
* @param fsih An index helper
* @param monitor Monitor to allow the user to cancel the load
* @throws CancelledException Archive load was cancelled
* @throws IOException Root inode was not a directory
*/
public static void buildDirectoryStructure(SquashFragmentTable fragTable,
SquashDirectoryTable dirTable, SquashInodeTable inodes,
FileSystemIndexHelper<SquashedFile> fsih, TaskMonitor monitor)
throws CancelledException, IOException {
SquashInode[] inodeArray = inodes.getInodes();
SquashInode rootInode = inodes.getRootInode();
// Make sure the root inode is a directory
if (rootInode != null && rootInode.isDir()) {
// Treat root inode as a directory inode
SquashBasicDirectoryInode dirInode = (SquashBasicDirectoryInode) rootInode;
// For each header associated with the root inode, process all entries
List<SquashDirectoryTableHeader> headers = dirTable.getHeaders(dirInode);
if (headers.isEmpty()) {
throw new IOException("Unable to find headers for the root directory");
}
for (SquashDirectoryTableHeader header : headers) {
// For all files/directories immediately under the root
List<SquashDirectoryTableEntry> entries = header.getEntries();
for (SquashDirectoryTableEntry entry : entries) {
// Recurse down the directory tree, storing directories and files
assignPathsRecursively(fragTable, dirTable, entry, inodeArray,
fsih.getRootDir(), fsih, monitor);
}
}
}
else {
// If root is NOT a directory, stop processing
throw new IOException("Root inode was not a directory!");
}
}
/**
* Recursively assign paths to each of the inodes
* @param fragTable The processed fragment table of the archive
* @param dirTable The processed directory table of the archive
* @param entry The directory table entry currently being processed
* @param inodes An array of inodes within the archive
* @param parentDir The parent of the current entry
* @param fsih An index helper
* @param monitor Monitor to allow the user to cancel the load
* @throws CancelledException Archive load was cancelled
* @throws IOException Entry found with an invalid inode number
*/
private static void assignPathsRecursively(SquashFragmentTable fragTable,
SquashDirectoryTable dirTable, SquashDirectoryTableEntry entry, SquashInode[] inodes,
GFile parentDir, FileSystemIndexHelper<SquashedFile> fsih, TaskMonitor monitor)
throws CancelledException, IOException {
// Check if the user cancelled the load
monitor.checkCancelled();
// Validate the entry itself, then its inode number (a null entry must be rejected
// first, as the error message below dereferences it)
if (entry == null) {
throw new IOException("Encountered a null directory table entry");
}
if (entry.getInodeNumber() < 1 || entry.getInodeNumber() >= inodes.length) {
throw new IOException(
"Entry found with invalid inode number: " + entry.getInodeNumber());
}
// Get the inode for the current entry
SquashInode inode = inodes[entry.getInodeNumber()];
// If the inode is a directory, recurse downward. Otherwise, just store the file
if (inode.isDir()) {
// Treat as directory inode
SquashBasicDirectoryInode dirInode = (SquashBasicDirectoryInode) inode;
// Create and store a "file" representing the current directory
SquashedFile squashedDirFile = new SquashedFile(dirInode, null);
GFile dirGFile = fsih.storeFileWithParent(entry.getFileName(), parentDir,
inode.getNumber(), true, -1, squashedDirFile);
// Get the directory headers for the current inode and process each entry within them
List<SquashDirectoryTableHeader> headers = dirTable.getHeaders(dirInode);
for (SquashDirectoryTableHeader header : headers) {
// For each sub-directory, recurse downward and add each file/directory encountered
List<SquashDirectoryTableEntry> entries = header.getEntries();
for (SquashDirectoryTableEntry currentEntry : entries) {
assignPathsRecursively(fragTable, dirTable, currentEntry, inodes, dirGFile,
fsih, monitor);
}
}
}
else if (inode.isFile()) {
// Treat as file inode
SquashBasicFileInode fileInode = (SquashBasicFileInode) inode;
SquashFragment fragment = fragTable.getFragment(fileInode.getFragmentIndex());
// Store the current file
fsih.storeFileWithParent(entry.getFileName(), parentDir, fileInode.getNumber(), false,
fileInode.getFileSize(), new SquashedFile(fileInode, fragment));
}
else if (inode.isSymLink()) {
// Treat as symbolic link inode
SquashSymlinkInode symLinkInode = (SquashSymlinkInode) inode;
// Store symlink as file. Lookup handled when getting ByteProvider
fsih.storeFileWithParent(entry.getFileName(), parentDir, symLinkInode.getNumber(),
false, 0, new SquashedFile(symLinkInode, null));
}
else {
Msg.info(SquashUtils.class,
"Inode #" + inode.getNumber() + " is not a file or directory. Skipping...");
}
}
}

View File

@ -0,0 +1,79 @@
/* ###
* IP: GHIDRA
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ghidra.file.formats.squashfs;
public class SquashedFile {
// The name of this file/directory
private final String name;
// The path to this file/directory
private final String filePath;
// The inode representing this file/directory
private final SquashInode inode;
// The fragment holding the tail end of the file (null if there is none)
private final SquashFragment fragment;
// The total uncompressed size of the file (-1 for directories)
private final long size;
/**
* Represents a file or directory within a SquashFS archive
* @param fileInode The inode representing this file/directory
* @param tailEndFragment Fragment holding the tail end of the file
*/
public SquashedFile(SquashInode fileInode, SquashFragment tailEndFragment) {
name = fileInode.getDirectoryTableEntry().getFileName();
filePath = fileInode.getDirectoryTableEntry().getPath();
inode = fileInode;
fragment = tailEndFragment;
if (inode.isFile()) {
SquashBasicFileInode castInode = (SquashBasicFileInode) inode;
size = castInode.getFileSize();
}
else {
size = -1;
}
}
public String getName() {
return name;
}
public String getPath() {
return filePath;
}
public SquashInode getInode() {
return inode;
}
public long getUncompressedSize() {
return size;
}
public boolean hasFragment() {
return fragment != null;
}
public SquashFragment getFragment() {
return fragment;
}
}