Example usage of io.druid.java.util.common.io.smoosh.FileSmoosher in the druid project (druid-io):
class CompressedIntsIndexedWriterTest, method checkV2SerializedSizeAndData.
/**
 * Round-trips {@code vals} through a {@link CompressedIntsIndexedWriter} into a smooshed
 * directory, then maps the directory back and verifies size and every element.
 *
 * @param chunkFactor number of ints per compression chunk for the writer under test
 * @throws Exception on any I/O or serialization failure
 */
private void checkV2SerializedSizeAndData(int chunkFactor) throws Exception {
    File tmpDirectory = Files.createTempDirectory(String.format("CompressedIntsIndexedWriterTest_%d", chunkFactor)).toFile();
    FileSmoosher smoosher = new FileSmoosher(tmpDirectory);
    final IOPeon ioPeon = new TmpFileIOPeon();
    try {
        CompressedIntsIndexedWriter writer = new CompressedIntsIndexedWriter(
            chunkFactor,
            compressionStrategy,
            new GenericIndexedWriter<>(
                ioPeon,
                "test",
                CompressedIntBufferObjectStrategy.getBufferForOrder(byteOrder, compressionStrategy, chunkFactor),
                Longs.BYTES * 10000
            )
        );
        writer.open();
        for (int val : vals) {
            writer.add(val);
        }
        writer.close();
        // The smooshed writer must be closed (and then the smoosher itself) before the
        // directory can be mapped for reading.
        final SmooshedWriter channel = smoosher.addWithSmooshedWriter("test", writer.getSerializedSize());
        try {
            writer.writeToChannel(channel, smoosher);
        } finally {
            channel.close();
        }
        smoosher.close();
        SmooshedFileMapper mapper = Smoosh.map(tmpDirectory);
        try {
            // read from ByteBuffer and check values
            CompressedIntsIndexedSupplier supplierFromByteBuffer =
                CompressedIntsIndexedSupplier.fromByteBuffer(mapper.mapFile("test"), byteOrder, mapper);
            IndexedInts indexedInts = supplierFromByteBuffer.get();
            try {
                assertEquals(vals.length, indexedInts.size());
                for (int i = 0; i < vals.length; ++i) {
                    assertEquals(vals[i], indexedInts.get(i));
                }
            } finally {
                // Close even when an assertion fails, so the mapped file is released.
                CloseQuietly.close(indexedInts);
            }
        } finally {
            mapper.close();
        }
    } finally {
        ioPeon.close();
        // NOTE(review): tmpDirectory is never deleted here; a recursive cleanup would
        // need a helper not visible in this snippet — confirm against the full test class.
    }
}
Example usage of io.druid.java.util.common.io.smoosh.FileSmoosher in the druid project (druid-io):
class CompressedVSizeIndexedV3WriterTest, method checkSerializedSizeAndData.
/**
 * Writes {@code vals} (a list of int arrays) through a {@link CompressedVSizeIndexedV3Writer},
 * checks the serialized size against the equivalent supplier built from an iterable, then
 * reads the bytes back and verifies every sub-array element.
 *
 * @param offsetChunkFactor chunk factor for the offsets section
 * @param valueChunkFactor  chunk factor for the values section
 * @throws Exception on any I/O or serialization failure
 */
private void checkSerializedSizeAndData(int offsetChunkFactor, int valueChunkFactor) throws Exception {
    FileSmoosher smoosher = new FileSmoosher(FileUtils.getTempDirectory());
    final IOPeon ioPeon = new TmpFileIOPeon();
    try {
        int maxValue = vals.size() > 0 ? getMaxValue(vals) : 0;
        CompressedIntsIndexedWriter offsetWriter =
            new CompressedIntsIndexedWriter(ioPeon, "offset", offsetChunkFactor, byteOrder, compressionStrategy);
        CompressedVSizeIntsIndexedWriter valueWriter =
            new CompressedVSizeIntsIndexedWriter(ioPeon, "value", maxValue, valueChunkFactor, byteOrder, compressionStrategy);
        CompressedVSizeIndexedV3Writer writer = new CompressedVSizeIndexedV3Writer(offsetWriter, valueWriter);
        // Reference supplier built directly from the same data; used only to cross-check
        // the serialized size reported by the writer.
        CompressedVSizeIndexedV3Supplier supplierFromIterable = CompressedVSizeIndexedV3Supplier.fromIterable(
            Iterables.transform(vals, new Function<int[], IndexedInts>() {
                @Nullable
                @Override
                public IndexedInts apply(@Nullable final int[] input) {
                    return ArrayBasedIndexedInts.of(input);
                }
            }),
            offsetChunkFactor,
            maxValue,
            byteOrder,
            compressionStrategy
        );
        writer.open();
        for (int[] val : vals) {
            writer.add(val);
        }
        writer.close();
        long writtenLength = writer.getSerializedSize();
        final WritableByteChannel outputChannel = Channels.newChannel(ioPeon.makeOutputStream("output"));
        try {
            writer.writeToChannel(outputChannel, smoosher);
        } finally {
            // Close the channel even if the write throws, so the peon's stream is released.
            outputChannel.close();
        }
        smoosher.close();
        assertEquals(writtenLength, supplierFromIterable.getSerializedSize());
        // read from ByteBuffer and check values
        CompressedVSizeIndexedV3Supplier supplierFromByteBuffer = CompressedVSizeIndexedV3Supplier.fromByteBuffer(
            ByteBuffer.wrap(IOUtils.toByteArray(ioPeon.makeInputStream("output"))),
            byteOrder,
            null
        );
        final IndexedMultivalue<IndexedInts> indexedMultivalue = supplierFromByteBuffer.get();
        try {
            assertEquals(indexedMultivalue.size(), vals.size());
            for (int i = 0; i < vals.size(); ++i) {
                IndexedInts subVals = indexedMultivalue.get(i);
                assertEquals(subVals.size(), vals.get(i).length);
                for (int j = 0; j < subVals.size(); ++j) {
                    assertEquals(subVals.get(j), vals.get(i)[j]);
                }
            }
        } finally {
            // Close even when an assertion fails, so underlying resources are released.
            CloseQuietly.close(indexedMultivalue);
        }
    } finally {
        ioPeon.close();
    }
}
Example usage of io.druid.java.util.common.io.smoosh.FileSmoosher in the druid project (druid-io):
class CompressedVSizeIntsIndexedWriterTest, method checkSerializedSizeAndData.
/**
 * Writes {@code vals} through a {@link CompressedVSizeIntsIndexedWriter}, checks the
 * serialized size against the supplier built directly from the same list, then reads the
 * bytes back and verifies size and every element.
 *
 * @param chunkSize number of ints per compression chunk for the writer under test
 * @throws Exception on any I/O or serialization failure
 */
private void checkSerializedSizeAndData(int chunkSize) throws Exception {
    FileSmoosher smoosher = new FileSmoosher(FileUtils.getTempDirectory());
    CompressedVSizeIntsIndexedWriter writer = new CompressedVSizeIntsIndexedWriter(
        ioPeon, "test", vals.length > 0 ? Ints.max(vals) : 0, chunkSize, byteOrder, compressionStrategy);
    // Reference supplier built directly from the same data; used only to cross-check
    // the serialized size reported by the writer.
    CompressedVSizeIntsIndexedSupplier supplierFromList = CompressedVSizeIntsIndexedSupplier.fromList(
        Ints.asList(vals), vals.length > 0 ? Ints.max(vals) : 0, chunkSize, byteOrder, compressionStrategy);
    writer.open();
    for (int val : vals) {
        writer.add(val);
    }
    writer.close();
    long writtenLength = writer.getSerializedSize();
    final WritableByteChannel outputChannel = Channels.newChannel(ioPeon.makeOutputStream("output"));
    try {
        writer.writeToChannel(outputChannel, smoosher);
    } finally {
        // Close the channel even if the write throws, so the peon's stream is released.
        outputChannel.close();
    }
    smoosher.close();
    assertEquals(writtenLength, supplierFromList.getSerializedSize());
    // read from ByteBuffer and check values
    CompressedVSizeIntsIndexedSupplier supplierFromByteBuffer = CompressedVSizeIntsIndexedSupplier.fromByteBuffer(
        ByteBuffer.wrap(IOUtils.toByteArray(ioPeon.makeInputStream("output"))), byteOrder, null);
    IndexedInts indexedInts = supplierFromByteBuffer.get();
    try {
        // Size check added for consistency with the sibling round-trip tests; catches
        // truncated output before the element-wise comparison.
        assertEquals(vals.length, indexedInts.size());
        for (int i = 0; i < vals.length; ++i) {
            assertEquals(vals[i], indexedInts.get(i));
        }
    } finally {
        // Close even when an assertion fails, so underlying resources are released.
        CloseQuietly.close(indexedInts);
    }
}
Aggregations