Usage of net.runelite.cache.util.Crc32 in the runelite project — class Container, method decompress.
/**
 * Decodes a container from raw cache bytes: reads the compression header,
 * optionally decrypts the payload with the supplied keys, decompresses it,
 * and returns the result together with the CRC of the on-disk form.
 *
 * Layout of b: 1 byte compression type, 4 byte compressed length, payload
 * (for BZ2/GZ prefixed by a 4-byte decompressed length), then an optional
 * trailing 2-byte revision.
 *
 * @param b    raw container bytes as stored in the cache
 * @param keys decryption keys, or null for no decryption
 *             (presumably XTEA keys — confirm against decrypt())
 * @return the decoded container, or null if decompression failed
 * @throws IOException on read errors
 */
public static Container decompress(byte[] b, int[] keys) throws IOException {
InputStream stream = new InputStream(b);
// Header: 1 byte compression type, 4 byte compressed payload length.
int compression = stream.readUnsignedByte();
int compressedLength = stream.readInt();
// Sanity-check the length before allocating buffers from the data.
if (compressedLength < 0 || compressedLength > 1000000) {
throw new RuntimeException("Invalid data");
}
Crc32 crc32 = new Crc32();
// compression + length
crc32.update(b, 0, 5);
byte[] data;
int revision = -1;
switch(compression) {
case CompressionType.NONE:
{
byte[] encryptedData = new byte[compressedLength];
stream.readBytes(encryptedData, 0, compressedLength);
// CRC is computed over the encrypted (on-disk) form, not the plaintext.
crc32.update(encryptedData, 0, compressedLength);
byte[] decryptedData = decrypt(encryptedData, encryptedData.length, keys);
// Optional trailing 2-byte revision appended after the payload.
if (stream.remaining() >= 2) {
revision = stream.readUnsignedShort();
assert revision != -1;
}
data = decryptedData;
break;
}
case CompressionType.BZ2:
{
// Compressed payloads carry an extra 4-byte decompressed-length prefix,
// hence the +4 on the buffer size.
byte[] encryptedData = new byte[compressedLength + 4];
stream.readBytes(encryptedData);
crc32.update(encryptedData, 0, encryptedData.length);
byte[] decryptedData = decrypt(encryptedData, encryptedData.length, keys);
if (stream.remaining() >= 2) {
revision = stream.readUnsignedShort();
assert revision != -1;
}
// Re-point the stream at the decrypted payload to read its
// decompressed-length prefix followed by the compressed body.
stream = new InputStream(decryptedData);
int decompressedLength = stream.readInt();
data = BZip2.decompress(stream.getRemaining(), compressedLength);
if (data == null) {
return null;
}
assert data.length == decompressedLength;
break;
}
case CompressionType.GZ:
{
// Same layout as BZ2, but gzip-compressed.
byte[] encryptedData = new byte[compressedLength + 4];
stream.readBytes(encryptedData);
crc32.update(encryptedData, 0, encryptedData.length);
byte[] decryptedData = decrypt(encryptedData, encryptedData.length, keys);
if (stream.remaining() >= 2) {
revision = stream.readUnsignedShort();
assert revision != -1;
}
stream = new InputStream(decryptedData);
int decompressedLength = stream.readInt();
data = GZip.decompress(stream.getRemaining(), compressedLength);
if (data == null) {
return null;
}
assert data.length == decompressedLength;
break;
}
default:
throw new RuntimeException("Unknown decompression type");
}
Container container = new Container(compression, revision);
container.data = data;
container.crc = crc32.getHash();
return container;
}
Usage of net.runelite.cache.util.Crc32 in the runelite project — class DiskStorage, method saveIndex.
/**
 * Serializes and compresses an index's metadata, writes it to the master
 * (255) index file, and records the CRC of the compressed form on the index.
 *
 * @param index the index whose metadata should be persisted
 * @throws IOException if writing to disk fails
 */
private void saveIndex(Index index) throws IOException {
    // Serialize the index metadata to its wire format.
    byte[] raw = index.toIndexData().writeIndexData();

    // index data revision is always -1
    Container container = new Container(index.getCompression(), -1);
    container.compress(raw, null);
    byte[] packed = container.data;

    // Write the compressed blob and record its sector/length in the 255 index.
    DataFileWriteResult result = this.data.write(index255.getIndexFileId(), index.getId(), packed);
    index255.write(new IndexEntry(index255, index.getId(), result.sector, result.compressedLength));

    // The index CRC covers the compressed on-disk bytes.
    Crc32 checksum = new Crc32();
    checksum.update(packed, 0, packed.length);
    index.setCrc(checksum.getHash());
}
Usage of net.runelite.cache.util.Crc32 in the runelite project — class DiskStorage, method saveArchive.
/**
 * Persists an archive's raw container bytes to the data file, records the
 * write in the per-index index file, and stores the CRC of the container
 * (excluding any trailing revision) on the archive.
 *
 * @param a           the archive being saved
 * @param archiveData the archive's container bytes (header + payload)
 * @throws IOException if writing to disk fails
 */
@Override
public void saveArchive(Archive a, byte[] archiveData) throws IOException {
    Index index = a.getIndex();
    IndexFile indexFile = getIndex(index.getId());
    assert indexFile.getIndexFileId() == index.getId();

    // Write the raw bytes and record where they landed.
    DataFileWriteResult result = data.write(index.getId(), a.getArchiveId(), archiveData);
    indexFile.write(new IndexEntry(indexFile, a.getArchiveId(), result.sector, result.compressedLength));

    // Container header: 1 byte compression type, 4 byte compressed size.
    byte compressionType = archiveData[0];
    int payloadSize = Ints.fromBytes(archiveData[1], archiveData[2], archiveData[3], archiveData[4]);

    // don't crc the appended revision, if it is there
    int crcLength = 1 // compression type
        + 4           // compressed size
        + payloadSize
        + (compressionType != CompressionType.NONE ? 4 : 0);

    Crc32 checksum = new Crc32();
    checksum.update(archiveData, 0, crcLength);
    a.setCrc(checksum.getHash());

    logger.trace("Saved archive {}/{} at sector {}, compressed length {}", index.getId(), a.getArchiveId(), result.sector, result.compressedLength);
}
Usage of net.runelite.cache.util.Crc32 in the runelite project — class CacheClient, method download.
/**
 * Synchronizes the local cache store with the remote server: fetches the
 * index listing, downloads any index whose revision/CRC differs, then
 * downloads every archive that is missing or out of date, verifying each
 * downloaded archive's CRC before handing it to the watcher or storage.
 *
 * Blocks until all outstanding file requests have completed.
 *
 * @throws IOException if reading or writing the local store fails
 */
public void download() throws IOException {
    Stopwatch stopwatch = Stopwatch.createStarted();

    List<IndexInfo> indexes = requestIndexes();
    for (IndexInfo indexInfo : indexes) {
        int i = indexInfo.getId();
        int crc = indexInfo.getCrc();
        int revision = indexInfo.getRevision();

        Index index = store.findIndex(i);
        if (index == null) {
            logger.info("Index {} does not exist, creating", i);
        } else if (index.getRevision() != revision) {
            if (revision < index.getRevision()) {
                logger.warn("Index {} revision is going BACKWARDS! (our revision {}, their revision {})", index.getId(), index.getRevision(), revision);
            } else {
                logger.info("Index {} has the wrong revision (our revision {}, their revision {})", index.getId(), index.getRevision(), revision);
            }
        } else if (index.getCrc() != crc) {
            // BUG FIX: the first placeholder is the index id; the original
            // passed index.getCrc() twice and never logged the id.
            logger.warn("Index {} CRC has changed! (our crc {}, their crc {})", index.getId(), index.getCrc(), crc);
        } else {
            // despite the index being up to date, not everything
            // can be downloaded, eg. for tracks.
            logger.info("Index {} is up to date", index.getId());
        }

        logger.info("Downloading index {}", i);
        FileResult indexFileResult = requestFile(255, i, true).join();
        indexFileResult.decompress(null);
        logger.info("Downloaded index {}", i);

        // Discard the whole index if the download is corrupt.
        if (indexFileResult.getCrc() != crc) {
            logger.warn("Corrupted download for index {}", i);
            continue;
        }

        IndexData indexData = new IndexData();
        indexData.load(indexFileResult.getContents());

        if (index == null) {
            index = store.addIndex(i);
        }

        // update index settings
        index.setProtocol(indexData.getProtocol());
        index.setNamed(indexData.isNamed());
        index.setCrc(crc);
        index.setRevision(revision);

        logger.info("Index {} has {} archives", i, indexData.getArchives().length);

        for (ArchiveData ad : indexData.getArchives()) {
            Archive existing = index.getArchive(ad.getId());
            // Skip archives whose revision, CRC, and name hash all match.
            if (existing != null && existing.getRevision() == ad.getRevision() && existing.getCrc() == ad.getCrc() && existing.getNameHash() == ad.getNameHash()) {
                logger.debug("Archive {}/{} in index {} is up to date", ad.getId(), indexData.getArchives().length, index.getId());
                continue;
            }

            if (existing == null) {
                logger.info("Archive {}/{} in index {} is out of date, downloading", ad.getId(), indexData.getArchives().length, index.getId());
            } else if (ad.getRevision() < existing.getRevision()) {
                logger.warn("Archive {}/{} in index {} revision is going BACKWARDS! (our revision {}, their revision {})", ad.getId(), indexData.getArchives().length, index.getId(), existing.getRevision(), ad.getRevision());
            } else {
                logger.info("Archive {}/{} in index {} is out of date, downloading. " + "revision: ours: {} theirs: {}, crc: ours: {} theirs {}, name: ours {} theirs {}", ad.getId(), indexData.getArchives().length, index.getId(), existing.getRevision(), ad.getRevision(), existing.getCrc(), ad.getCrc(), existing.getNameHash(), ad.getNameHash());
            }

            final Archive archive = existing == null ? index.addArchive(ad.getId()) : existing;
            archive.setRevision(ad.getRevision());
            archive.setCrc(ad.getCrc());
            archive.setNameHash(ad.getNameHash());
            // Add files
            archive.setFileData(ad.getFiles());

            CompletableFuture<FileResult> future = requestFile(index.getId(), ad.getId(), false);
            future.handle((fr, ex) -> {
                // BUG FIX: handle() delivers either a result or an exception;
                // the original dereferenced fr unconditionally, throwing an
                // NPE (and masking the real cause) when the request failed.
                if (ex != null) {
                    logger.warn("unable to download archive", ex);
                    return null;
                }

                // Verify the downloaded bytes against the expected CRC
                // before accepting them.
                byte[] data = fr.getCompressedData();
                Crc32 crc32 = new Crc32();
                crc32.update(data, 0, data.length);
                int hash = crc32.getHash();
                if (hash != archive.getCrc()) {
                    logger.warn("crc mismatch on downloaded archive {}/{}: {} != {}", archive.getIndex().getId(), archive.getArchiveId(), hash, archive.getCrc());
                    throw new RuntimeException("crc mismatch");
                }

                if (watcher != null) {
                    watcher.downloadComplete(archive, data);
                } else {
                    try {
                        Storage storage = store.getStorage();
                        storage.saveArchive(archive, data);
                    } catch (IOException ex1) {
                        logger.warn("unable to save archive data", ex1);
                    }
                }
                return null;
            });
        }
    }

    // flush any pending requests
    channel.flush();

    while (!requests.isEmpty()) {
        // wait for pending requests
        synchronized (this) {
            try {
                wait();
            } catch (InterruptedException ex) {
                // NOTE(review): swallows the interrupt and keeps waiting;
                // consider re-interrupting and aborting — confirm intent.
                logger.warn(null, ex);
            }
        }
    }

    stopwatch.stop();
    logger.info("Download completed in {}", stopwatch);
}
Aggregations