use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto.
the class TestResultSetLoaderMapArray method testNestedArray.
@Test
public void testNestedArray() {
TupleMetadata schema = new SchemaBuilder()
    .add("a", MinorType.INT)
    .addMapArray("m")
      .add("c", MinorType.INT)
      .addArray("d", MinorType.VARCHAR)
      .resumeSchema()
    .buildSchema();
ResultSetLoaderImpl.ResultSetOptions options = new OptionBuilder().setSchema(schema).build();
ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
RowSetLoader rootWriter = rsLoader.writer();
// Write a couple of rows with arrays within arrays.
// (And, of course, the Varchar is actually an array of
// bytes, so that's three array levels.)
rsLoader.startBatch();
rootWriter
    .addRow(10, objArray(
        objArray(110, strArray("d1.1.1", "d1.1.2")),
        objArray(120, strArray("d1.2.1", "d1.2.2"))))
    .addRow(20, objArray())
    .addRow(30, objArray(
        objArray(310, strArray("d3.1.1", "d3.2.2")),
        objArray(320, strArray()),
        objArray(330, strArray("d3.3.1", "d1.2.2"))));
// Verify the batch
RowSet actual = fixture.wrap(rsLoader.harvest());
SingleRowSet expected = fixture.rowSetBuilder(schema)
    .addRow(10, objArray(
        objArray(110, strArray("d1.1.1", "d1.1.2")),
        objArray(120, strArray("d1.2.1", "d1.2.2"))))
    .addRow(20, objArray())
    .addRow(30, objArray(
        objArray(310, strArray("d3.1.1", "d3.2.2")),
        objArray(320, strArray()),
        objArray(330, strArray("d3.3.1", "d1.2.2"))))
    .build();
new RowSetComparison(expected).verifyAndClearAll(actual);
rsLoader.close();
}
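The nesting of the objArray and strArray helpers above mirrors the schema: the outer objArray is the repeated map m, each inner objArray is one map entry holding a value for c followed by the repeated VARCHAR d. A minimal sketch of a single additional row, not part of the test; the values are illustrative only:

// One row: a = 40, and "m" holds a single map entry
// { c: 410, d: ["d4.1.1", "d4.1.2"] }.
rootWriter.addRow(40,
    objArray(                                  // the repeated map "m"
        objArray(410,                          // one map entry: c, then d
            strArray("d4.1.1", "d4.1.2"))));   // the repeated VARCHAR "d"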
use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto.
the class TestResultSetLoaderMapArray method testOverwriteRow.
/**
* Version of the {@link TestResultSetLoaderProtocol#testOverwriteRow()} test
* that uses nested columns inside an array of maps. Here we must call
* <tt>start()</tt> to reset the array back to the initial start position after
* each "discard."
*/
@Test
public void testOverwriteRow() {
TupleMetadata schema = new SchemaBuilder()
    .add("a", MinorType.INT)
    .addMapArray("m")
      .add("b", MinorType.INT)
      .add("c", MinorType.VARCHAR)
      .resumeSchema()
    .buildSchema();
ResultSetLoaderImpl.ResultSetOptions options = new OptionBuilder()
    .setSchema(schema)
    .setRowCountLimit(ValueVector.MAX_ROW_COUNT)
    .build();
ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
RowSetLoader rootWriter = rsLoader.writer();
// Can't use the shortcut to populate rows when doing overwrites.
ScalarWriter aWriter = rootWriter.scalar("a");
ArrayWriter maWriter = rootWriter.array("m");
TupleWriter mWriter = maWriter.tuple();
ScalarWriter bWriter = mWriter.scalar("b");
ScalarWriter cWriter = mWriter.scalar("c");
// Write 10,000 rows, overwriting 99% of them. This will cause vector
// overflow and data corruption if overwrite does not work, but will happily
// produce the correct result if everything works as it should.
byte[] value = new byte[512];
Arrays.fill(value, (byte) 'X');
int count = 0;
rsLoader.startBatch();
while (count < 10_000) {
rootWriter.start();
count++;
aWriter.setInt(count);
for (int i = 0; i < 10; i++) {
bWriter.setInt(count * 10 + i);
cWriter.setBytes(value, value.length);
maWriter.save();
}
if (count % 100 == 0) {
rootWriter.save();
}
}
// Verify using a reader.
RowSet result = fixture.wrap(rsLoader.harvest());
assertEquals(count / 100, result.rowCount());
RowSetReader reader = result.reader();
ArrayReader maReader = reader.array("m");
TupleReader mReader = maReader.tuple();
int rowId = 1;
while (reader.next()) {
assertEquals(rowId * 100, reader.scalar("a").getInt());
assertEquals(10, maReader.size());
for (int i = 0; i < 10; i++) {
maReader.setPosn(i);
assertEquals(rowId * 1000 + i, mReader.scalar("b").getInt());
assertTrue(Arrays.equals(value, mReader.scalar("c").getBytes()));
}
rowId++;
}
result.clear();
rsLoader.close();
}
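The discard-and-overwrite pattern above reduces to a small sketch: calling start() again without an intervening save() positions the writers back at the same row and resets the map array, so the previous contents are simply overwritten. A hedged sketch reusing the writers declared in the test; the values are illustrative:

rootWriter.start();          // begin (or re-begin) the current row
aWriter.setInt(1);
bWriter.setInt(10);
cWriter.setBytes(value, value.length);
maWriter.save();             // commit one element of the map array

// Decide to discard: do NOT call rootWriter.save();
// the next start() resets the row and the "m" array position.
rootWriter.start();
aWriter.setInt(2);           // overwrites the discarded row
bWriter.setInt(20);
cWriter.setBytes(value, value.length);
maWriter.save();
rootWriter.save();           // keep this one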
use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto.
the class TestRecordBatchSizer method testSizerRepeatedFixedWidth.
@Test
public void testSizerRepeatedFixedWidth() {
BatchSchema schema = new SchemaBuilder()
    .addArray("a", MinorType.BIGINT)
    .addArray("b", MinorType.FLOAT8)
    .build();
RowSetBuilder builder = fixture.rowSetBuilder(schema);
for (long i = 0; i < 10; i++) {
builder.addRow(new long[] { 1, 2, 3, 4, 5 }, new double[] { i * 0.1, i * 0.1, i * 0.1, i * 0.2, i * 0.3 });
}
RowSet rows = builder.build();
// Run the record batch sizer on the resulting batch.
RecordBatchSizer sizer = new RecordBatchSizer(rows.container());
assertEquals(2, sizer.columns().size());
/**
* stdDataSize:8*10, stdNetSize:8*10+4, dataSizePerEntry:5*8, netSizePerEntry:5*8+4,
* totalDataSize:5*8*10, totalNetSize:5*8*10+5*8, valueCount:10,
* elementCount:50, estElementCountPerArray:5, isVariableWidth:false
*/
verifyColumnValues(sizer.columns().get("a"), 80, 84, 40, 44, 400, 440, 10, 50, 5, false);
verifyColumnValues(sizer.columns().get("b"), 80, 84, 40, 44, 400, 440, 10, 50, 5, false);
SingleRowSet empty = fixture.rowSet(schema);
VectorAccessible accessible = empty.vectorAccessible();
UInt4Vector offsetVector;
ValueVector dataVector;
for (VectorWrapper<?> vw : accessible) {
ValueVector v = vw.getValueVector();
RecordBatchSizer.ColumnSize colSize = sizer.getColumn(v.getField().getName());
// Allocates to nearest power of two
colSize.allocateVector(v, testRowCount);
offsetVector = ((RepeatedValueVector) v).getOffsetVector();
assertEquals((Integer.highestOneBit(testRowCount) << 1), offsetVector.getValueCapacity());
dataVector = ((RepeatedValueVector) v).getDataVector();
assertEquals(Integer.highestOneBit((testRowCount * 5) << 1), dataVector.getValueCapacity());
v.clear();
// Allocates the same as value passed since it is already power of two.
// -1 is done for adjustment needed for offset vector.
colSize.allocateVector(v, testRowCountPowerTwo - 1);
offsetVector = ((RepeatedValueVector) v).getOffsetVector();
assertEquals(testRowCountPowerTwo, offsetVector.getValueCapacity());
dataVector = ((RepeatedValueVector) v).getDataVector();
assertEquals(Integer.highestOneBit((testRowCountPowerTwo - 1) * 5) << 1, dataVector.getValueCapacity());
v.clear();
// Allocate for max rows.
colSize.allocateVector(v, ValueVector.MAX_ROW_COUNT - 1);
offsetVector = ((RepeatedValueVector) v).getOffsetVector();
assertEquals(ValueVector.MAX_ROW_COUNT, offsetVector.getValueCapacity());
dataVector = ((RepeatedValueVector) v).getDataVector();
assertEquals(Integer.highestOneBit(((ValueVector.MAX_ROW_COUNT - 1) * 5) << 1), dataVector.getValueCapacity());
v.clear();
// Allocate for 0 rows. Should at least allocate space for 1 row.
colSize.allocateVector(v, 0);
offsetVector = ((RepeatedValueVector) v).getOffsetVector();
assertEquals(ValueVector.MIN_ROW_COUNT + 1, offsetVector.getValueCapacity());
dataVector = ((RepeatedValueVector) v).getDataVector();
assertEquals(ValueVector.MIN_ROW_COUNT, dataVector.getValueCapacity());
v.clear();
}
empty.clear();
rows.clear();
}
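The "nearest power of two" assertions lean on Integer.highestOneBit(n), which returns the largest power of two less than or equal to n; shifting that left by one gives the next power of two above any n that is not itself a power of two (for a power of two it doubles the value, which is why the test also exercises testRowCountPowerTwo - 1). A quick self-contained illustration; the sample counts are arbitrary, not the fixture's actual testRowCount:

// Prints pairs showing how the expected capacities in the assertions
// above are derived from Integer.highestOneBit.
public class PowerOfTwoDemo {
    public static void main(String[] args) {
        int[] samples = { 5000, 4096, 4095 };   // arbitrary example counts
        for (int n : samples) {
            int floorPow2 = Integer.highestOneBit(n);   // largest power of two <= n
            int roundedUp = floorPow2 << 1;             // next power of two above n (when n is not a power of two)
            System.out.println(n + " -> highestOneBit=" + floorPow2 + ", <<1 = " + roundedUp);
        }
    }
}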
use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto.
the class TestRecordBatchSizer method testSizerNullableFixedWidth.
@Test
public void testSizerNullableFixedWidth() {
BatchSchema schema = new SchemaBuilder()
    .addNullable("a", MinorType.BIGINT)
    .addNullable("b", MinorType.FLOAT8)
    .build();
RowSetBuilder builder = fixture.rowSetBuilder(schema);
for (long i = 0; i < 10; i++) {
builder.addRow(i, i * 0.1);
}
RowSet rows = builder.build();
// Run the record batch sizer on the resulting batch.
RecordBatchSizer sizer = new RecordBatchSizer(rows.container());
assertEquals(2, sizer.columns().size());
ColumnSize aColumn = sizer.columns().get("a");
ColumnSize bColumn = sizer.columns().get("b");
/**
* stdDataSize:8, stdNetSize:8+1, dataSizePerEntry:8, netSizePerEntry:8+1,
* totalDataSize:8*10, totalNetSize:(8+1)*10, valueCount:10,
* elementCount:10, estElementCountPerArray:1, isVariableWidth:false
*/
verifyColumnValues(aColumn, 8, 9, 8, 9, 80, 90, 10, 10, 1, false);
verifyColumnValues(bColumn, 8, 9, 8, 9, 80, 90, 10, 10, 1, false);
SingleRowSet empty = fixture.rowSet(schema);
VectorAccessible accessible = empty.vectorAccessible();
ValueVector bitVector, valueVector;
for (VectorWrapper<?> vw : accessible) {
ValueVector v = vw.getValueVector();
RecordBatchSizer.ColumnSize colSize = sizer.getColumn(v.getField().getName());
// Allocates to nearest power of two
colSize.allocateVector(v, testRowCount);
bitVector = ((NullableVector) v).getBitsVector();
assertEquals((Integer.highestOneBit(testRowCount) << 1), bitVector.getValueCapacity());
valueVector = ((NullableVector) v).getValuesVector();
assertEquals(Integer.highestOneBit(testRowCount << 1), valueVector.getValueCapacity());
v.clear();
// Allocates the same as value passed since it is already power of two.
colSize.allocateVector(v, testRowCountPowerTwo);
bitVector = ((NullableVector) v).getBitsVector();
assertEquals(testRowCountPowerTwo, bitVector.getValueCapacity());
valueVector = ((NullableVector) v).getValuesVector();
assertEquals(testRowCountPowerTwo, valueVector.getValueCapacity());
v.clear();
// Allocate for max rows.
colSize.allocateVector(v, ValueVector.MAX_ROW_COUNT);
bitVector = ((NullableVector) v).getBitsVector();
assertEquals(ValueVector.MAX_ROW_COUNT, bitVector.getValueCapacity());
valueVector = ((NullableVector) v).getValuesVector();
assertEquals(ValueVector.MAX_ROW_COUNT, valueVector.getValueCapacity());
v.clear();
// Allocate for 0 rows. Should at least allocate space for 1 row.
colSize.allocateVector(v, 0);
bitVector = ((NullableVector) v).getBitsVector();
assertEquals(ValueVector.MIN_ROW_COUNT, bitVector.getValueCapacity());
valueVector = ((NullableVector) v).getValuesVector();
assertEquals(ValueVector.MIN_ROW_COUNT, valueVector.getValueCapacity());
v.clear();
}
empty.clear();
rows.clear();
}
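The expected values passed to verifyColumnValues follow directly from the layout of a nullable fixed-width column: an 8-byte value plus a 1-byte "is set" flag per row, over 10 rows. A hedged arithmetic sketch (the variable names come from the comment above; verifyColumnValues itself is a helper of the test class and is not reproduced here):

public class NullableSizeArithmetic {
    public static void main(String[] args) {
        int rows = 10;
        int valueWidth = 8;                                   // BIGINT / FLOAT8 payload
        int bitsWidth = 1;                                    // nullability flag per value
        int dataSizePerEntry = valueWidth;                    // 8
        int netSizePerEntry = valueWidth + bitsWidth;         // 9
        int totalDataSize = dataSizePerEntry * rows;          // 80
        int totalNetSize = netSizePerEntry * rows;            // 90
        System.out.println(totalDataSize + " / " + totalNetSize);
    }
}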
use of org.apache.drill.test.rowSet.schema.SchemaBuilder in project drill by axbaretto.
the class TestRecordBatchSizer method testSizerMap.
@Test
public void testSizerMap() {
BatchSchema schema = new SchemaBuilder()
    .addMap("map")
      .add("key", MinorType.INT)
      .add("value", MinorType.VARCHAR)
      .resumeSchema()
    .build();
RowSetBuilder builder = fixture.rowSetBuilder(schema);
for (int i = 0; i < 10; i++) {
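// The (Object) cast makes the Object[] a single varargs argument (the
// map column's value) rather than spreading it across top-level columns.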
builder.addRow((Object) (new Object[] { 10, "a" }));
}
RowSet rows = builder.build();
// Run the record batch sizer on the resulting batch.
RecordBatchSizer sizer = new RecordBatchSizer(rows.container());
assertEquals(1, sizer.columns().size());
/**
* stdDataSize:50+4, stdNetSize:50+4+4, dataSizePerEntry:4+1,
* netSizePerEntry: 4+1+4,
* totalDataSize:5*10, totalNetSize:4*10+4*10+1*10,
* valueCount:10,
* elementCount:10, estElementCountPerArray:1, isVariableWidth:false
*/
verifyColumnValues(sizer.columns().get("map"), 54, 58, 5, 9, 50, 90, 10, 10, 1, false);
SingleRowSet empty = fixture.rowSet(schema);
VectorAccessible accessible = empty.vectorAccessible();
for (VectorWrapper<?> vw : accessible) {
ValueVector v = vw.getValueVector();
RecordBatchSizer.ColumnSize colSize = sizer.getColumn(v.getField().getName());
// Allocates to nearest power of two
colSize.allocateVector(v, testRowCount);
MapVector mapVector = (MapVector) v;
ValueVector keyVector = mapVector.getChild("key");
ValueVector valueVector1 = mapVector.getChild("value");
assertEquals((Integer.highestOneBit(testRowCount) << 1), keyVector.getValueCapacity());
UInt4Vector offsetVector = ((VariableWidthVector) valueVector1).getOffsetVector();
assertEquals((Integer.highestOneBit(testRowCount) << 1), offsetVector.getValueCapacity());
assertEquals(Integer.highestOneBit(testRowCount << 1) - 1, valueVector1.getValueCapacity());
// Allocates the same as value passed since it is already power of two.
colSize.allocateVector(v, testRowCountPowerTwo - 1);
mapVector = (MapVector) v;
keyVector = mapVector.getChild("key");
valueVector1 = mapVector.getChild("value");
assertEquals((Integer.highestOneBit(testRowCountPowerTwo - 1) << 1), keyVector.getValueCapacity());
offsetVector = ((VariableWidthVector) valueVector1).getOffsetVector();
assertEquals(testRowCountPowerTwo, offsetVector.getValueCapacity());
assertEquals(Integer.highestOneBit(testRowCountPowerTwo) - 1, valueVector1.getValueCapacity());
// Allocate for max rows.
colSize.allocateVector(v, ValueVector.MAX_ROW_COUNT - 1);
mapVector = (MapVector) v;
keyVector = mapVector.getChild("key");
valueVector1 = mapVector.getChild("value");
assertEquals(ValueVector.MAX_ROW_COUNT, keyVector.getValueCapacity());
offsetVector = ((VariableWidthVector) valueVector1).getOffsetVector();
assertEquals(ValueVector.MAX_ROW_COUNT, offsetVector.getValueCapacity());
assertEquals(ValueVector.MAX_ROW_COUNT - 1, valueVector1.getValueCapacity());
// Allocate for 0 rows. Should at least allocate space for 1 row.
colSize.allocateVector(v, 0);
mapVector = (MapVector) v;
keyVector = mapVector.getChild("key");
valueVector1 = mapVector.getChild("value");
assertEquals(ValueVector.MIN_ROW_COUNT, keyVector.getValueCapacity());
offsetVector = ((VariableWidthVector) valueVector1).getOffsetVector();
assertEquals(ValueVector.MIN_ROW_COUNT + 1, offsetVector.getValueCapacity());
assertEquals(ValueVector.MIN_ROW_COUNT, valueVector1.getValueCapacity());
v.clear();
}
empty.clear();
rows.clear();
}
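As with the nullable case, the map column's expected sizes can be derived by summing its children: the INT key plus the one-character VARCHAR value and its 4-byte offset entry, per row, with the standard 50-byte VARCHAR estimate for the "std" figures. A hedged arithmetic sketch mirroring the comment in testSizerMap:

public class MapSizeArithmetic {
    public static void main(String[] args) {
        int rows = 10;
        int keyWidth = 4;                 // INT "key"
        int valueDataPerRow = 1;          // the single character "a"
        int offsetWidth = 4;              // per-value offset entry for the VARCHAR
        int stdVarcharSize = 50;          // standard estimate used for VARCHAR

        int stdDataSize = keyWidth + stdVarcharSize;                   // 54
        int stdNetSize = stdDataSize + offsetWidth;                    // 58
        int dataSizePerEntry = keyWidth + valueDataPerRow;             // 5
        int netSizePerEntry = dataSizePerEntry + offsetWidth;          // 9
        int totalDataSize = dataSizePerEntry * rows;                   // 50
        int totalNetSize = netSizePerEntry * rows;                     // 90
        System.out.println(totalDataSize + " / " + totalNetSize);
    }
}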