Example usage of info.ata4.io.DataReader from the disunity project (by ata4): class SerializedFileReader, method readMetadata.
/**
 * Reads the serialized file metadata section and records its byte range
 * in the file's metadata block.
 *
 * @param in data reader positioned at the start of the metadata block
 * @throws IOException if reading from the underlying source fails
 */
private void readMetadata(DataReader in) throws IOException {
    SerializedFileHeader header = serialized.header();
    SerializedFileMetadata metadata = serialized.metadata();
    DataBlock metadataBlock = serialized.metadataBlock();

    metadataBlock.markBegin(in);
    // the metadata struct needs the file version before it can parse itself
    metadata.version(header.version());
    in.readStruct(metadata);
    metadataBlock.markEnd(in);

    L.log(Level.FINER, "metadataBlock: {0}", metadataBlock);
}
Example usage of info.ata4.io.DataReader from the disunity project (by ata4): class SerializedFileReader, method readObjects.
/**
 * Reads the data buffer of every object listed in the object info table,
 * attaches the matching type tree when one is available, and records the
 * byte range covered by all object data in the file's objectDataBlock.
 *
 * @param in data reader for the serialized file
 * @throws IOException if reading from the underlying source fails
 */
private void readObjects(DataReader in) throws IOException {
    long ofsMin = Long.MAX_VALUE;
    long ofsMax = Long.MIN_VALUE;

    SerializedFileHeader header = serialized.header();
    SerializedFileMetadata metadata = serialized.metadata();
    Map<Long, ObjectInfo> objectInfoMap = metadata.objectInfoTable().infoMap();
    Map<Integer, TypeRoot<Type>> typeTreeMap = metadata.typeTree().typeMap();
    List<SerializedObjectData> objectData = serialized.objectData();

    for (Map.Entry<Long, ObjectInfo> infoEntry : objectInfoMap.entrySet()) {
        ObjectInfo info = infoEntry.getValue();
        long id = infoEntry.getKey();
        long ofs = header.dataOffset() + info.offset();

        ofsMin = Math.min(ofsMin, ofs);
        ofsMax = Math.max(ofsMax, ofs + info.length());

        SerializedObjectData object = new SerializedObjectData(id);
        object.info(info);

        // create and read object data buffer
        ByteBuffer buf = ByteBufferUtils.allocate((int) info.length());
        in.position(ofs);
        in.readBuffer(buf);
        object.buffer(buf);

        // get type tree if possible (parameterized instead of the raw TypeRoot)
        TypeRoot<Type> typeRoot = typeTreeMap.get(info.typeID());
        if (typeRoot != null) {
            object.typeTree(typeRoot.nodes());
        }

        objectData.add(object);
    }

    // an empty object table would otherwise record the Long.MAX_VALUE /
    // Long.MIN_VALUE sentinels as block bounds; use a zero-length block at
    // the data offset instead
    if (objectInfoMap.isEmpty()) {
        ofsMin = header.dataOffset();
        ofsMax = header.dataOffset();
    }

    DataBlock objectDataBlock = serialized.objectDataBlock();
    objectDataBlock.offset(ofsMin);
    objectDataBlock.endOffset(ofsMax);

    L.log(Level.FINER, "objectDataBlock: {0}", objectDataBlock);
}
Example usage of info.ata4.io.DataReader from the disunity project (by ata4): class SerializedFileReader, method readHeader.
/**
 * Reads the serialized file header struct and records its byte range
 * in the file's header block.
 *
 * @param in data reader positioned at the start of the file
 * @throws IOException if reading from the underlying source fails
 */
private void readHeader(DataReader in) throws IOException {
    SerializedFileHeader header = serialized.header();
    DataBlock headerBlock = serialized.headerBlock();

    headerBlock.markBegin(in);
    in.readStruct(header);
    headerBlock.markEnd(in);

    L.log(Level.FINER, "headerBlock: {0}", headerBlock);
}
Example usage of info.ata4.io.DataReader from the disunity project (by ata4): class BundleHeader, method read.
/**
 * Reads a bundle header from the given reader.
 *
 * Handles both the FS signature (stream version 6) and the older Web/Raw
 * signatures, which carry a different set of fields.
 *
 * @param in data reader positioned at the start of the bundle
 * @throws IOException if reading from the underlying source fails
 */
@Override
public void read(DataReader in) throws IOException {
    signature = in.readStringNull();
    streamVersion = in.readInt();
    unityVersion = new UnityVersion(in.readStringNull());
    unityRevision = new UnityVersion(in.readStringNull());

    if (signature.equals(SIGNATURE_FS)) {
        // FS signature
        // Expect streamVersion == 6
        completeFileSize = in.readLong();
        compressedDataHeaderSize = in.readInt();
        dataHeaderSize = in.readInt();
        flags = in.readInt();
        headerSize = (int) in.position();
        if ((flags & 0x80) == 0) {
            // the data header is part of the bundle header
            headerSize += compressedDataHeaderSize;
        }
        // else it's at the end of the file
    } else {
        // Web or Raw signature
        minimumStreamedBytes = in.readUnsignedInt();
        headerSize = in.readInt();
        numberOfLevelsToDownload = in.readInt();

        int numberOfLevels = in.readInt();
        levelByteEnd.clear();
        for (int i = 0; i < numberOfLevels; i++) {
            // one pair of unsigned ints per level — presumably (compressed
            // end, uncompressed end); TODO confirm against the format spec.
            // Parameterized constructor replaces the former raw ImmutablePair.
            levelByteEnd.add(new ImmutablePair<>(in.readUnsignedInt(), in.readUnsignedInt()));
        }

        if (streamVersion >= 2) {
            completeFileSize = in.readUnsignedInt();
        }

        if (streamVersion >= 3) {
            dataHeaderSize = in.readUnsignedInt();
        }

        // one trailing byte is read and discarded (value unused)
        in.readByte();
    }
}
Example usage of info.ata4.io.DataReader from the disunity project (by ata4): class BundleReader, method read.
/**
 * Reads the bundle header and entry info tables and builds the bundle's
 * entry list, sorted by file offset.
 *
 * @return the populated bundle
 * @throws BundleException if the header signature is invalid
 * @throws IOException if reading from the underlying source fails
 */
public Bundle read() throws BundleException, IOException {
    bundle = new Bundle();

    in.position(0);
    BundleHeader header = bundle.header();
    in.readStruct(header);

    // check signature
    if (!header.hasValidSignature()) {
        throw new BundleException("Invalid signature");
    }

    List<BundleEntryInfo> entryInfos = bundle.entryInfos();
    if (header.compressedDataHeaderSize() > 0) {
        readEntryInfosFS(header, entryInfos);
    } else {
        readEntryInfosRaw(header, entryInfos);
    }

    // sort entries by offset so that they're in the order in which they
    // appear in the file, which is convenient for compressed bundles
    entryInfos.sort((a, b) -> Long.compare(a.offset(), b.offset()));

    List<BundleEntry> entries = bundle.entries();
    entryInfos.forEach(entryInfo -> {
        entries.add(new BundleInternalEntry(entryInfo, this::inputStreamForEntry));
    });

    return bundle;
}

// Reads the entry info table of an FS-signature bundle, whose data header
// may be compressed and may be located at the end of the file.
private void readEntryInfosFS(BundleHeader header, List<BundleEntryInfo> entryInfos)
        throws IOException {
    if (header.dataHeaderAtEndOfFile()) {
        in.position(header.completeFileSize() - header.compressedDataHeaderSize());
    }

    // build an input stream for the uncompressed data header
    InputStream headerIn = new BoundedInputStream(in.stream(), header.compressedDataHeaderSize());
    DataReader inData;
    // NOTE(review): the original switch had no break statements, so every
    // scheme fell through to the LZ4 branch and clobbered inData.
    switch (header.dataHeaderCompressionScheme()) {
        case 1:
            // LZMA
            inData = DataReaders.forInputStream(new CountingInputStream(new LzmaInputStream(headerIn)));
            break;
        case 3:
            // LZ4
            byte[] compressed = new byte[header.compressedDataHeaderSize()];
            byte[] decompressed = new byte[(int) header.dataHeaderSize()];
            // InputStream.read() may return fewer bytes than requested;
            // loop until the whole compressed header is in memory
            readFully(headerIn, compressed);
            LZ4JavaSafeFastDecompressor.INSTANCE.decompress(compressed, decompressed);
            inData = DataReaders.forByteBuffer(ByteBuffer.wrap(decompressed));
            break;
        case 0:
        default:
            // not compressed
            inData = DataReaders.forInputStream(headerIn);
            break;
    }

    // Block info: not captured for now
    {
        // 16 bytes unknown
        byte[] unknown = new byte[16];
        inData.readBytes(unknown);

        int storageBlocks = inData.readInt();
        for (int i = 0; i < storageBlocks; ++i) {
            inData.readUnsignedInt();
            inData.readUnsignedInt();
            inData.readUnsignedShort();
        }
    }

    int files = inData.readInt();
    for (int i = 0; i < files; i++) {
        BundleEntryInfo entryInfo = new BundleEntryInfoFS();
        inData.readStruct(entryInfo);
        entryInfos.add(entryInfo);
    }
}

// Reads the entry info table of a Web/Raw-signature bundle.
private void readEntryInfosRaw(BundleHeader header, List<BundleEntryInfo> entryInfos)
        throws IOException {
    long dataHeaderSize = header.dataHeaderSize();
    if (dataHeaderSize == 0) {
        // old stream versions don't store the data header size, so use a large
        // fixed number instead
        dataHeaderSize = 4096;
    }

    InputStream is = dataInputStream(0, dataHeaderSize);
    DataReader inData = DataReaders.forInputStream(is);

    int files = inData.readInt();
    for (int i = 0; i < files; i++) {
        BundleEntryInfo entryInfo = new BundleEntryInfo();
        inData.readStruct(entryInfo);
        entryInfos.add(entryInfo);
    }
}

// Reads exactly buf.length bytes from the stream, failing on premature EOF.
private static void readFully(InputStream is, byte[] buf) throws IOException {
    int total = 0;
    while (total < buf.length) {
        int n = is.read(buf, total, buf.length - total);
        if (n < 0) {
            throw new IOException("Unexpected end of stream while reading data header");
        }
        total += n;
    }
}
Aggregations