Use of org.apache.cassandra.streaming.compress.CompressedInputStream in project cassandra by apache.
The class CompressedInputStreamTest, method testCompressedReadWith.
/**
 * @param valuesToCheck array of longs in the range 0-999 whose positions are read back and verified
 * @throws Exception
 */
private void testCompressedReadWith(long[] valuesToCheck, boolean testTruncate, boolean testException, double minCompressRatio) throws Exception {
    assert valuesToCheck != null && valuesToCheck.length > 0;

    // write a compressed data file of longs, remembering the uncompressed position of each value
    File parentDir = new File(System.getProperty("java.io.tmpdir"));
    Descriptor desc = new Descriptor(parentDir, "ks", "cf", 1);
    File tmp = new File(desc.filenameFor(Component.DATA));
    MetadataCollector collector = new MetadataCollector(new ClusteringComparator(BytesType.instance));
    CompressionParams param = CompressionParams.snappy(32, minCompressRatio);
    Map<Long, Long> index = new HashMap<Long, Long>();
    try (CompressedSequentialWriter writer = new CompressedSequentialWriter(tmp, desc.filenameFor(Component.COMPRESSION_INFO), null, SequentialWriterOption.DEFAULT, param, collector)) {
        for (long l = 0L; l < 1000; l++) {
            index.put(l, writer.position());
            writer.writeLong(l);
        }
        writer.finish();
    }

    CompressionMetadata comp = CompressionMetadata.create(tmp.getAbsolutePath());

    // build the (start, end) sections covering each requested value
    List<Pair<Long, Long>> sections = new ArrayList<>();
    for (long l : valuesToCheck) {
        long position = index.get(l);
        sections.add(Pair.create(position, position + 8));
    }
    CompressionMetadata.Chunk[] chunks = comp.getChunksForSections(sections);
    long totalSize = comp.getTotalSizeForSections(sections);
    long expectedSize = 0;
    for (CompressionMetadata.Chunk c : chunks)
        expectedSize += c.length + 4;
    assertEquals(expectedSize, totalSize);

    // buffer up only the relevant parts of the file (each chunk is followed by a 4-byte CRC)
    int size = 0;
    for (CompressionMetadata.Chunk c : chunks)
        size += (c.length + 4);
    byte[] toRead = new byte[size];
    try (RandomAccessFile f = new RandomAccessFile(tmp, "r")) {
        int pos = 0;
        for (CompressionMetadata.Chunk c : chunks) {
            f.seek(c.offset);
            pos += f.read(toRead, pos, c.length + 4);
        }
    }

    if (testTruncate) {
        // simulate a truncated stream by keeping only the first 50 bytes
        byte[] actuallyRead = new byte[50];
        System.arraycopy(toRead, 0, actuallyRead, 0, 50);
        toRead = actuallyRead;
    }

    // read the buffer back using CompressedInputStream
    CompressionInfo info = new CompressionInfo(chunks, param);
    if (testException) {
        testException(sections, info);
        return;
    }
    CompressedInputStream input = new CompressedInputStream(new ByteArrayInputStream(toRead), info, ChecksumType.CRC32, () -> 1.0);
    try (DataInputStream in = new DataInputStream(input)) {
        for (int i = 0; i < sections.size(); i++) {
            input.position(sections.get(i).left);
            long readValue = in.readLong();
            assertEquals("expected " + valuesToCheck[i] + " but was " + readValue, valuesToCheck[i], readValue);
        }
    }
}
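The helper above is not a test by itself; it is driven by the class's JUnit entry points, which pick one of the three paths (normal read, truncated buffer, empty-stream exception). Below is a minimal sketch of such callers, not copied from the project: the test-method names, the chosen values, and the minCompressRatio of 0 are illustrative assumptions, and org.junit.Test plus java.io.IOException are assumed to be imported.

@Test
public void testCompressedReadRoundTrip() throws Exception {
    // normal path: values written to the compressed file are read back through CompressedInputStream
    testCompressedReadWith(new long[]{ 0L }, false, false, 0);
    testCompressedReadWith(new long[]{ 100L, 600L, 999L }, false, false, 0);
}

@Test(expected = IOException.class)
public void testTruncatedCompressedRead() throws Exception {
    // truncated path: only the first 50 bytes survive, so reading a later chunk runs off the end of the stream
    testCompressedReadWith(new long[]{ 1L, 122L, 456L }, true, false, 0);
}

@Test
public void testReadFromEmptyStream() throws Exception {
    // exception path: delegates to testException(...), which reads from an empty source stream
    testCompressedReadWith(new long[]{ 42L }, false, true, 0);
}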
Use of org.apache.cassandra.streaming.compress.CompressedInputStream in project cassandra by apache.
The class CompressedInputStreamTest, method testException.
private static void testException(List<Pair<Long, Long>> sections, CompressionInfo info) throws IOException {
    // the backing stream is empty, so every attempt to read a section must fail
    CompressedInputStream input = new CompressedInputStream(new ByteArrayInputStream(new byte[0]), info, ChecksumType.CRC32, () -> 1.0);
    try (DataInputStream in = new DataInputStream(input)) {
        for (int i = 0; i < sections.size(); i++) {
            input.position(sections.get(i).left);
            try {
                in.readLong();
                fail("Should have thrown IOException");
            } catch (IOException e) {
                continue;
            }
        }
    }
}
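The catch-and-continue loop asserts that every positioned read fails, since there is no compressed data behind the stream to decompress. If the project's JUnit version provides org.junit.Assert.assertThrows (JUnit 4.13+), the same check could be written more compactly; the sketch below is a drop-in replacement for the inner loop under that assumption.

for (Pair<Long, Long> section : sections) {
    input.position(section.left);
    // each read must fail: there is nothing in the stream to decompress
    assertThrows(IOException.class, in::readLong);
}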