Use of org.apache.druid.java.util.common.io.smoosh.FileSmoosher in project druid by druid-io.
From the class V3CompressedVSizeColumnarMultiIntsSerializerTest, method generateV2SerializedSizeAndData:
private void generateV2SerializedSizeAndData(long numRows, int maxValue, int maxValuesPerRow, int offsetChunkFactor, int valueChunkFactor) throws Exception
{
  File tmpDirectory = FileUtils.createTempDir(
      StringUtils.format("CompressedVSizeIndexedV3WriterTest_%d_%d", offsetChunkFactor, valueChunkFactor)
  );
  FileSmoosher smoosher = new FileSmoosher(tmpDirectory);

  try (SegmentWriteOutMedium segmentWriteOutMedium =
           TmpFileSegmentWriteOutMediumFactory.instance().makeSegmentWriteOutMedium(temporaryFolder.newFolder())) {
    CompressedColumnarIntsSerializer offsetWriter = new CompressedColumnarIntsSerializer(
        TEST_COLUMN_NAME,
        segmentWriteOutMedium,
        offsetChunkFactor,
        byteOrder,
        compressionStrategy,
        GenericIndexedWriter.ofCompressedByteBuffers(segmentWriteOutMedium, "offset", compressionStrategy, Long.BYTES * 250000)
    );
    GenericIndexedWriter<ByteBuffer> genericIndexed = GenericIndexedWriter.ofCompressedByteBuffers(
        segmentWriteOutMedium,
        "value",
        compressionStrategy,
        Long.BYTES * 250000
    );
    CompressedVSizeColumnarIntsSerializer valueWriter = new CompressedVSizeColumnarIntsSerializer(
        TEST_COLUMN_NAME,
        segmentWriteOutMedium,
        maxValue,
        valueChunkFactor,
        byteOrder,
        compressionStrategy,
        genericIndexed
    );
    V3CompressedVSizeColumnarMultiIntsSerializer writer =
        new V3CompressedVSizeColumnarMultiIntsSerializer(TEST_COLUMN_NAME, offsetWriter, valueWriter);
    writer.open();
    // "rand" is the test class's Random, seeded the same as "verifier" below so the
    // expected rows can be regenerated during verification.
    for (long l = 0L; l < numRows; l++) {
      writer.addValues(new ArrayBasedIndexedInts(generateRow(rand, maxValue, maxValuesPerRow)));
    }

    final SmooshedWriter channel = smoosher.addWithSmooshedWriter("test", writer.getSerializedSize());
    writer.writeTo(channel, smoosher);
    channel.close();
    smoosher.close();

    SmooshedFileMapper mapper = Smoosh.map(tmpDirectory);
    V3CompressedVSizeColumnarMultiIntsSupplier supplierFromByteBuffer =
        V3CompressedVSizeColumnarMultiIntsSupplier.fromByteBuffer(mapper.mapFile("test"), byteOrder, mapper);
    ColumnarMultiInts columnarMultiInts = supplierFromByteBuffer.get();
    Assert.assertEquals(numRows, columnarMultiInts.size());

    Random verifier = new Random(0);
    for (int i = 0; i < numRows; ++i) {
      IndexedInts subVals = columnarMultiInts.get(i);
      int[] expected = generateRow(verifier, maxValue, maxValuesPerRow);
      Assert.assertEquals(expected.length, subVals.size());
      for (int j = 0, size = subVals.size(); j < size; ++j) {
        Assert.assertEquals(expected[j], subVals.get(j));
      }
    }
    CloseableUtils.closeAll(columnarMultiInts, mapper);
  }
}
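The write/read pattern above is the core FileSmoosher lifecycle that the other examples on this page reuse in part: declare an entry with addWithSmooshedWriter, sized by getSerializedSize, write through the returned channel, close the smoosher so that meta.smoosh is written, then map the directory back with Smoosh.map. A minimal sketch of just that round trip, with the serializer machinery stripped out (the entry name and payload are illustrative; imports from the smoosh package and JUnit are assumed, as in the tests on this page):

static void smooshRoundTrip(File dir) throws IOException
{
  byte[] payload = "hello".getBytes(StandardCharsets.UTF_8);

  FileSmoosher smoosher = new FileSmoosher(dir);
  // The declared size must match the bytes actually written through the channel.
  try (SmooshedWriter channel = smoosher.addWithSmooshedWriter("test", payload.length)) {
    channel.write(ByteBuffer.wrap(payload));
  }
  smoosher.close(); // writes meta.smoosh; the directory is now mappable

  SmooshedFileMapper mapper = Smoosh.map(dir);
  ByteBuffer mapped = mapper.mapFile("test"); // read-only view of the "test" entry
  Assert.assertEquals((byte) 'h', mapped.get(0));
  mapper.close();
}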
Use of org.apache.druid.java.util.common.io.smoosh.FileSmoosher in project druid by druid-io.
From the class CompressedVSizeColumnarIntsSerializerTest, method checkSerializedSizeAndData:
private void checkSerializedSizeAndData(int chunkSize) throws Exception
{
  FileSmoosher smoosher = new FileSmoosher(temporaryFolder.newFolder());

  final String columnName = "test";
  final int maxValue = vals.length > 0 ? Ints.max(vals) : 0;
  CompressedVSizeColumnarIntsSerializer writer = new CompressedVSizeColumnarIntsSerializer(
      columnName,
      segmentWriteOutMedium,
      "test",
      maxValue,
      chunkSize,
      byteOrder,
      compressionStrategy
  );
  // Build the same column eagerly from the list, to compare serialized sizes against the writer.
  CompressedVSizeColumnarIntsSupplier supplierFromList = CompressedVSizeColumnarIntsSupplier.fromList(
      IntArrayList.wrap(vals),
      maxValue,
      chunkSize,
      byteOrder,
      compressionStrategy,
      segmentWriteOutMedium.getCloser()
  );
  writer.open();
  for (int val : vals) {
    writer.addValue(val);
  }
  long writtenLength = writer.getSerializedSize();
  final WriteOutBytes writeOutBytes = segmentWriteOutMedium.makeWriteOutBytes();
  writer.writeTo(writeOutBytes, smoosher);
  smoosher.close();

  Assert.assertEquals(writtenLength, supplierFromList.getSerializedSize());

  // read from ByteBuffer and check values
  CompressedVSizeColumnarIntsSupplier supplierFromByteBuffer = CompressedVSizeColumnarIntsSupplier.fromByteBuffer(
      ByteBuffer.wrap(IOUtils.toByteArray(writeOutBytes.asInputStream())),
      byteOrder
  );
  ColumnarInts columnarInts = supplierFromByteBuffer.get();
  for (int i = 0; i < vals.length; ++i) {
    Assert.assertEquals(vals[i], columnarInts.get(i));
  }
  CloseableUtils.closeAndWrapExceptions(columnarInts);
}
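Unlike the first example, this test never maps a smoosh directory: it writes into an in-memory WriteOutBytes and re-reads from a plain ByteBuffer, which works because the column is small enough that writeTo sends everything to the channel rather than spilling extra files into the smoosher. The pattern can be distilled into a helper like the following sketch (the helper itself is illustrative, not Druid API; Serializer here is org.apache.druid.segment.serde.Serializer, and WriteOutBytes implements WritableByteChannel):

// Illustrative helper: assert that a finished Serializer writes exactly the
// number of bytes it reports, assuming the column stays in a single smoosh entry.
static void assertReportedSizeMatches(Serializer serializer, FileSmoosher smoosher, SegmentWriteOutMedium medium)
    throws IOException
{
  WriteOutBytes out = medium.makeWriteOutBytes();
  long reported = serializer.getSerializedSize();
  serializer.writeTo(out, smoosher);
  Assert.assertEquals(reported, IOUtils.toByteArray(out.asInputStream()).length);
}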
Use of org.apache.druid.java.util.common.io.smoosh.FileSmoosher in project druid by druid-io.
From the class GenericIndexedWriterTest, method writeLargeValueIntoLargeColumn:
@Test
public void writeLargeValueIntoLargeColumn() throws IOException
{
  // Regression test for https://github.com/apache/druid/issues/9027.
  final GenericIndexedWriter<String> writer = new GenericIndexedWriter<>(
      new OnHeapMemorySegmentWriteOutMedium(),
      "test",
      GenericIndexed.STRING_STRATEGY,
      100
  );

  // Test hook: lowers the cap (normally Integer.MAX_VALUE) that decides when the
  // column must be split across multiple smoosh files, so the long value below
  // forces the large-column code path.
  writer.setIntMaxForCasting(150);
  writer.open();
  writer.write("i really like writing strings");
  writer.write("i really like writing strings");
  writer.write("i really like writing strings i really like writing strings i really like writing strings");
  writer.write("i really like writing strings");

  try (FileChannel channel = FileChannel.open(temporaryFolder.newFile().toPath(), StandardOpenOption.WRITE)) {
    writer.writeTo(channel, new FileSmoosher(temporaryFolder.newFolder()));
  }
}
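With the artificially lowered cap, the long string trips the large-column path without the test having to write gigabytes of data. For contrast, here is a sketch of the ordinary small-column setup, where the default cap applies and the smoosher's multi-file machinery is never exercised (the wrapper method is illustrative; names follow the test above):

static GenericIndexedWriter<String> writeSmallStringColumn() throws IOException
{
  // Default cap applies, so these values stay on the single-file path.
  GenericIndexedWriter<String> writer = new GenericIndexedWriter<>(
      new OnHeapMemorySegmentWriteOutMedium(),
      "test",
      GenericIndexed.STRING_STRATEGY
  );
  writer.open();
  writer.write("i really like writing strings");
  return writer;
}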
Use of org.apache.druid.java.util.common.io.smoosh.FileSmoosher in project druid by druid-io.
From the class LargeColumnSupportedComplexColumnSerializerTest, method testSanity:
@Test
public void testSanity() throws IOException
{
  HyperUniquesSerdeForTest serde = new HyperUniquesSerdeForTest(Hashing.murmur3_128());
  int[] cases = {1000, 5000, 10000, 20000};
  int[] columnSizes = {
      Integer.MAX_VALUE,
      Integer.MAX_VALUE / 2,
      Integer.MAX_VALUE / 4,
      5000 * Long.BYTES,
      2500 * Long.BYTES
  };

  for (int columnSize : columnSizes) {
    for (int aCase : cases) {
      File tmpFile = temporaryFolder.newFolder();
      HyperLogLogCollector baseCollector = HyperLogLogCollector.makeLatestCollector();
      try (
          SegmentWriteOutMedium segmentWriteOutMedium = new OffHeapMemorySegmentWriteOutMedium();
          FileSmoosher v9Smoosher = new FileSmoosher(tmpFile)
      ) {
        LargeColumnSupportedComplexColumnSerializer serializer =
            LargeColumnSupportedComplexColumnSerializer.createWithColumnSize(
                segmentWriteOutMedium,
                "test",
                serde.getObjectStrategy(),
                columnSize
            );
        serializer.open();

        for (int i = 0; i < aCase; i++) {
          HyperLogLogCollector collector = HyperLogLogCollector.makeLatestCollector();
          // "fn" is the test class's HashFunction field (murmur3_128).
          byte[] hashBytes = fn.hashLong(i).asBytes();
          collector.add(hashBytes);
          baseCollector.fold(collector);
          serializer.serialize(new ObjectColumnSelector()
          {
            @Nullable
            @Override
            public Object getObject()
            {
              return collector;
            }

            @Override
            public Class classOfObject()
            {
              return HyperLogLogCollector.class;
            }

            @Override
            public void inspectRuntimeShape(RuntimeShapeInspector inspector)
            {
              // doesn't matter in tests
            }
          });
        }

        try (final SmooshedWriter channel = v9Smoosher.addWithSmooshedWriter("test", serializer.getSerializedSize())) {
          serializer.writeTo(channel, v9Smoosher);
        }
      }

      SmooshedFileMapper mapper = Smoosh.map(tmpFile);
      final ColumnBuilder builder = new ColumnBuilder()
          .setType(ValueType.COMPLEX)
          .setHasMultipleValues(false)
          .setFileMapper(mapper);
      serde.deserializeColumn(mapper.mapFile("test"), builder, null);

      ColumnHolder columnHolder = builder.build();
      ComplexColumn complexColumn = (ComplexColumn) columnHolder.getColumn();
      HyperLogLogCollector collector = HyperLogLogCollector.makeLatestCollector();
      for (int i = 0; i < aCase; i++) {
        collector.fold((HyperLogLogCollector) complexColumn.getRowValue(i));
      }
      Assert.assertEquals(baseCollector.estimateCardinality(), collector.estimateCardinality(), 0.0);
    }
  }
}
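The read side of this test is the standard recipe for rehydrating a complex column: map the smoosh directory, hand the mapped ByteBuffer to the serde, and pull a ComplexColumn out of the ColumnBuilder. Distilled into a hypothetical helper (the method is not part of Druid; the serde, directory, and entry name are those used by the test above):

// Hypothetical helper: map a smoosh directory and materialize one complex column.
static ComplexColumn readComplexColumn(File dir, String entry, ComplexMetricSerde serde) throws IOException
{
  SmooshedFileMapper mapper = Smoosh.map(dir);
  ColumnBuilder builder = new ColumnBuilder()
      .setType(ValueType.COMPLEX)
      .setHasMultipleValues(false)
      .setFileMapper(mapper);
  serde.deserializeColumn(mapper.mapFile(entry), builder, null);
  return (ComplexColumn) builder.build().getColumn();
}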
Use of org.apache.druid.java.util.common.io.smoosh.FileSmoosher in project druid by druid-io.
From the class CompressedColumnarIntsSerializerTest, method checkSerializedSizeAndData:
private void checkSerializedSizeAndData(int chunkFactor) throws Exception
{
  FileSmoosher smoosher = new FileSmoosher(temporaryFolder.newFolder());
  CompressedColumnarIntsSerializer writer = new CompressedColumnarIntsSerializer(
      "test",
      segmentWriteOutMedium,
      "test",
      chunkFactor,
      byteOrder,
      compressionStrategy
  );
  CompressedColumnarIntsSupplier supplierFromList = CompressedColumnarIntsSupplier.fromList(
      IntArrayList.wrap(vals),
      chunkFactor,
      byteOrder,
      compressionStrategy,
      segmentWriteOutMedium.getCloser()
  );
  writer.open();
  for (int val : vals) {
    writer.addValue(val);
  }
  long writtenLength = writer.getSerializedSize();
  final WriteOutBytes writeOutBytes = segmentWriteOutMedium.makeWriteOutBytes();
  writer.writeTo(writeOutBytes, smoosher);
  smoosher.close();

  Assert.assertEquals(writtenLength, supplierFromList.getSerializedSize());

  // read from ByteBuffer and check values
  CompressedColumnarIntsSupplier supplierFromByteBuffer = CompressedColumnarIntsSupplier.fromByteBuffer(
      ByteBuffer.wrap(IOUtils.toByteArray(writeOutBytes.asInputStream())),
      byteOrder
  );
  ColumnarInts columnarInts = supplierFromByteBuffer.get();
  Assert.assertEquals(vals.length, columnarInts.size());
  for (int i = 0; i < vals.length; ++i) {
    Assert.assertEquals(vals[i], columnarInts.get(i));
  }
  CloseableUtils.closeAndWrapExceptions(columnarInts);
}
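This test is the fixed-width sibling of the CompressedVSizeColumnarIntsSerializerTest above. The difference is the value layout: CompressedColumnarIntsSerializer stores full four-byte ints in each compressed chunk, while the VSize variant sizes each value from the column's maximum value. The size intuition, as back-of-the-envelope arithmetic rather than Druid API (in the spirit of VSizeColumnarInts.getNumBytesForMax):

// How many bytes per value the VSize layout needs for a given maximum value;
// the fixed-width layout always spends Integer.BYTES per value.
static int vSizeBytesPerValue(int maxValue)
{
  int bytes = 1;
  while (maxValue > (1L << (8 * bytes)) - 1) {
    bytes++; // 1 byte covers 0..255, 2 bytes cover 0..65535, and so on
  }
  return bytes;
}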