Use of com.linkedin.pinot.core.io.reader.impl.v1.VarByteChunkSingleValueReader in project pinot by linkedin.
The class ColumnIndexContainer, method getRawIndexReader.
public static SingleColumnSingleValueReader getRawIndexReader(PinotDataBuffer fwdIndexBuffer, FieldSpec.DataType dataType)
    throws IOException {
  SingleColumnSingleValueReader reader;

  // TODO: Make compression/decompression configurable.
  ChunkDecompressor decompressor = ChunkCompressorFactory.getDecompressor("snappy");

  switch (dataType) {
    case INT:
    case LONG:
    case FLOAT:
    case DOUBLE:
      // All fixed-width types share the fixed-byte chunk reader.
      reader = new FixedByteChunkSingleValueReader(fwdIndexBuffer, decompressor);
      break;

    case STRING:
      // Variable-width values require the var-byte chunk reader.
      reader = new VarByteChunkSingleValueReader(fwdIndexBuffer, decompressor);
      break;

    default:
      throw new IllegalArgumentException("Illegal data type for raw index reader: " + dataType);
  }
  return reader;
}
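For context, a caller would typically memory-map the column's forward-index file into a PinotDataBuffer and then dispatch through this factory. A minimal usage sketch follows; the file path and the choice of a STRING column are assumptions for illustration, and the buffer-loading call mirrors the round-trip test further down:

// Hedged usage sketch: the file path and column type below are assumptions, not taken from the project.
File fwdIndexFile = new File("/tmp/example-segment/stringColumn.fwd");
PinotDataBuffer fwdIndexBuffer =
    PinotDataBuffer.fromFile(fwdIndexFile, ReadMode.mmap, FileChannel.MapMode.READ_ONLY, "example");
SingleColumnSingleValueReader rawReader =
    ColumnIndexContainer.getRawIndexReader(fwdIndexBuffer, FieldSpec.DataType.STRING);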
Use of com.linkedin.pinot.core.io.reader.impl.v1.VarByteChunkSingleValueReader in project pinot by linkedin.
The class RawIndexCreatorTest, method testStringRawIndexCreator.
/**
 * Test for the string raw index creator.
 * Compares values read from the raw index against the expected values.
 *
 * @throws Exception
 */
@Test
public void testStringRawIndexCreator() throws Exception {
  PinotDataBuffer indexBuffer = getIndexBufferForColumn(STRING_COLUMN);
  ChunkDecompressor uncompressor = ChunkCompressorFactory.getDecompressor("snappy");
  VarByteChunkSingleValueReader rawIndexReader = new VarByteChunkSingleValueReader(indexBuffer, uncompressor);

  _recordReader.rewind();
  ChunkReaderContext context = rawIndexReader.createContext();
  for (int row = 0; row < NUM_ROWS; row++) {
    GenericRow expectedRow = _recordReader.next();
    Object expected = expectedRow.getValue(STRING_COLUMN);
    Object actual = rawIndexReader.getString(row, context);
    Assert.assertEquals(actual, expected);
  }
}
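The fixed-width counterpart follows the same pattern. Below is a minimal sketch of how an INT column could be verified the same way; the INT_COLUMN constant and the getInt(row, context) accessor on FixedByteChunkSingleValueReader are assumptions modeled on the string variant above, not code taken from the project:

@Test
public void testIntRawIndexCreator() throws Exception {
  // Hedged sketch: INT_COLUMN and getInt(row, context) are assumed by analogy with the string test.
  PinotDataBuffer indexBuffer = getIndexBufferForColumn(INT_COLUMN);
  ChunkDecompressor uncompressor = ChunkCompressorFactory.getDecompressor("snappy");
  FixedByteChunkSingleValueReader rawIndexReader = new FixedByteChunkSingleValueReader(indexBuffer, uncompressor);

  _recordReader.rewind();
  ChunkReaderContext context = rawIndexReader.createContext();
  for (int row = 0; row < NUM_ROWS; row++) {
    GenericRow expectedRow = _recordReader.next();
    Object expected = expectedRow.getValue(INT_COLUMN);
    int actual = rawIndexReader.getInt(row, context);
    Assert.assertEquals(actual, expected);
  }
}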
Use of com.linkedin.pinot.core.io.reader.impl.v1.VarByteChunkSingleValueReader in project pinot by linkedin.
The class VarByteChunkSingleValueReaderWriteTest, method test.
/**
 * This test writes {@link #NUM_STRINGS} strings using {@link VarByteChunkSingleValueWriter}. It then reads
 * the strings back using {@link VarByteChunkSingleValueReader} and asserts that what was written is the same
 * as what was read.
 *
 * The number of docs and docs per chunk are chosen to generate complete as well as partial chunks.
 *
 * @throws Exception
 */
@Test
public void test() throws Exception {
  String[] expected = new String[NUM_STRINGS];
  Random random = new Random();

  File outFile = new File(TEST_FILE);
  FileUtils.deleteQuietly(outFile);

  int maxStringLengthInBytes = 0;
  for (int i = 0; i < NUM_STRINGS; i++) {
    expected[i] = RandomStringUtils.random(random.nextInt(MAX_STRING_LENGTH));
    maxStringLengthInBytes = Math.max(maxStringLengthInBytes, expected[i].getBytes(UTF_8).length);
  }

  ChunkCompressor compressor = ChunkCompressorFactory.getCompressor("snappy");
  VarByteChunkSingleValueWriter writer =
      new VarByteChunkSingleValueWriter(outFile, compressor, NUM_STRINGS, NUM_DOCS_PER_CHUNK, maxStringLengthInBytes);

  for (int i = 0; i < NUM_STRINGS; i++) {
    writer.setString(i, expected[i]);
  }
  writer.close();

  PinotDataBuffer pinotDataBuffer =
      PinotDataBuffer.fromFile(outFile, ReadMode.mmap, FileChannel.MapMode.READ_ONLY, getClass().getName());
  ChunkDecompressor uncompressor = ChunkCompressorFactory.getDecompressor("snappy");
  VarByteChunkSingleValueReader reader = new VarByteChunkSingleValueReader(pinotDataBuffer, uncompressor);

  ChunkReaderContext context = reader.createContext();
  for (int i = 0; i < NUM_STRINGS; i++) {
    String actual = reader.getString(i, context);
    Assert.assertEquals(actual, expected[i]);
  }

  reader.close();
  FileUtils.deleteQuietly(outFile);
}
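The file written above could also be opened through the generic factory from ColumnIndexContainer shown at the top, rather than by constructing the reader directly. A minimal sketch, assuming the factory's STRING branch makes the downcast below safe:

// Hedged sketch: reading the file written above via the generic factory.
// The downcast matches the factory's STRING branch shown earlier; treat it as an assumption.
PinotDataBuffer buffer =
    PinotDataBuffer.fromFile(outFile, ReadMode.mmap, FileChannel.MapMode.READ_ONLY, getClass().getName());
VarByteChunkSingleValueReader factoryReader =
    (VarByteChunkSingleValueReader) ColumnIndexContainer.getRawIndexReader(buffer, FieldSpec.DataType.STRING);
ChunkReaderContext readContext = factoryReader.createContext();
String firstValue = factoryReader.getString(0, readContext);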