use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by axbaretto.
the class TestResultSetLoaderMapArray method testDoubleNestedArray.
/**
* Test a doubly-nested array of maps.
*/
@Test
public void testDoubleNestedArray() {
  TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMapArray("m1")
        .add("b", MinorType.INT)
        .addMapArray("m2")
          .add("c", MinorType.INT)
          .addArray("d", MinorType.VARCHAR)
          .resumeMap()
        .resumeSchema()
      .buildSchema();
  ResultSetLoaderImpl.ResultSetOptions options = new OptionBuilder()
      .setSchema(schema)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();
  rsLoader.startBatch();

  // Writers for each level of the nested structure.
  ScalarWriter aWriter = rootWriter.scalar("a");
  ArrayWriter a1Writer = rootWriter.array("m1");
  TupleWriter m1Writer = a1Writer.tuple();
  ScalarWriter bWriter = m1Writer.scalar("b");
  ArrayWriter a2Writer = m1Writer.array("m2");
  TupleWriter m2Writer = a2Writer.tuple();
  ScalarWriter cWriter = m2Writer.scalar("c");
  ScalarWriter dWriter = m2Writer.array("d").scalar();

  // Write 5 rows: each row holds 4 m1 entries, each m1 entry holds
  // 3 m2 entries, and each m2 entry holds 2 strings in d.
  for (int i = 0; i < 5; i++) {
    rootWriter.start();
    aWriter.setInt(i);
    for (int j = 0; j < 4; j++) {
      int a1Key = i + 10 + j;
      bWriter.setInt(a1Key);
      for (int k = 0; k < 3; k++) {
        int a2Key = a1Key * 10 + k;
        cWriter.setInt(a2Key);
        for (int l = 0; l < 2; l++) {
          dWriter.setString("d-" + (a2Key * 10 + l));
        }
        a2Writer.save();
      }
      a1Writer.save();
    }
    rootWriter.save();
  }

  // Harvest the batch and verify the same key pattern using the readers.
  RowSet results = fixture.wrap(rsLoader.harvest());
  RowSetReader reader = results.reader();
  ScalarReader aReader = reader.scalar("a");
  ArrayReader a1Reader = reader.array("m1");
  TupleReader m1Reader = a1Reader.tuple();
  ScalarReader bReader = m1Reader.scalar("b");
  ArrayReader a2Reader = m1Reader.array("m2");
  TupleReader m2Reader = a2Reader.tuple();
  ScalarReader cReader = m2Reader.scalar("c");
  ScalarElementReader dReader = m2Reader.elements("d");
  for (int i = 0; i < 5; i++) {
    reader.next();
    assertEquals(i, aReader.getInt());
    for (int j = 0; j < 4; j++) {
      a1Reader.setPosn(j);
      int a1Key = i + 10 + j;
      assertEquals(a1Key, bReader.getInt());
      for (int k = 0; k < 3; k++) {
        a2Reader.setPosn(k);
        int a2Key = a1Key * 10 + k;
        assertEquals(a2Key, cReader.getInt());
        for (int l = 0; l < 2; l++) {
          assertEquals("d-" + (a2Key * 10 + l), dReader.getString(l));
        }
      }
    }
  }
  rsLoader.close();
}
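For each generated row, the structure reads as a = i, with four m1 entries keyed by b, three m2 entries per m1 entry keyed by c, and two d strings per m2 entry. The following is a rough sketch of a hypothetical helper (not part of the test) that walks the same readers generically instead of using hard-coded loop bounds; it assumes ArrayReader exposes a size() method for the entry count, mirroring ScalarElementReader.size().
// Hypothetical helper: dump the current row of the a / m1 / m2 / d structure.
// For i = 0 the row prints roughly as:
//   a=0, m1=[{b=10, m2=[{c=100, d=[d-1000, d-1001]} ...]} ...]
private void printRow(RowSetReader reader) {
  StringBuilder buf = new StringBuilder();
  buf.append("a=").append(reader.scalar("a").getInt());
  buf.append(", m1=[");
  ArrayReader a1Reader = reader.array("m1");
  TupleReader m1Reader = a1Reader.tuple();
  ArrayReader a2Reader = m1Reader.array("m2");
  TupleReader m2Reader = a2Reader.tuple();
  ScalarElementReader dReader = m2Reader.elements("d");
  for (int j = 0; j < a1Reader.size(); j++) {
    a1Reader.setPosn(j);
    buf.append("{b=").append(m1Reader.scalar("b").getInt()).append(", m2=[");
    for (int k = 0; k < a2Reader.size(); k++) {
      a2Reader.setPosn(k);
      buf.append("{c=").append(m2Reader.scalar("c").getInt()).append(", d=[");
      for (int l = 0; l < dReader.size(); l++) {
        if (l > 0) { buf.append(", "); }
        buf.append(dReader.getString(l));
      }
      buf.append("]} ");
    }
    buf.append("]} ");
  }
  buf.append("]");
  System.out.println(buf);
}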
use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by axbaretto.
the class TestOffsetVectorWriter method testSizeLimit.
/**
* Test resize monitoring. Add a listener to an offsets writer,
* capture each resize, and refuse any resize that would grow the
* vector past 8K values. The refusal triggers an overflow, which
* throws the exception we then check for.
*/
@Test
public void testSizeLimit() {
  try (UInt4Vector vector = allocVector(1000)) {
    TestIndex index = new TestIndex();
    OffsetVectorWriter writer = makeWriter(vector, index);
    writer.bindListener(new ColumnWriterListener() {

      int totalAlloc = 4096;

      @Override
      public void overflowed(ScalarWriter writer) {
        throw new IllegalStateException("overflow called");
      }

      @Override
      public boolean canExpand(ScalarWriter writer, int delta) {
        // System.out.println("Delta: " + delta);
        totalAlloc += delta;
        return totalAlloc < 16_384 * 4;
      }
    });
    writer.startWrite();
    try {
      for (int i = 0; ; i++) {
        index.index = i;
        writer.startRow();
        writer.setNextOffset(i);
        writer.saveRow();
      }
    } catch (IllegalStateException e) {
      assertTrue(e.getMessage().contains("overflow called"));
    }

    // Should have failed on 8191, which doubled vector
    // to 16K, which was rejected. Note the 8191 value,
    // because offsets are one ahead of the index.
    assertEquals(8191, index.index);
  }
}
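The listener contract exercised here reduces to two callbacks: canExpand() approves or vetoes each proposed allocation, and overflowed() is called when a write cannot proceed. Below is a minimal sketch of a reusable listener along those lines; the byte budget, starting allocation, and helper name are made up for illustration, not taken from Drill's production allocation logic.
// Sketch only: allow growth up to an assumed byte budget, then treat
// any further expansion as overflow.
private ColumnWriterListener budgetListener(final int budgetBytes) {
  return new ColumnWriterListener() {

    private int totalAlloc = 4096;   // assumed starting allocation, as in the test above

    @Override
    public boolean canExpand(ScalarWriter writer, int delta) {
      totalAlloc += delta;
      return totalAlloc <= budgetBytes;
    }

    @Override
    public void overflowed(ScalarWriter writer) {
      throw new IllegalStateException("Exceeded " + budgetBytes + " bytes");
    }
  };
}
// Usage, for example: writer.bindListener(budgetListener(64 * 1024));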
use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by axbaretto.
the class RowSetTest method testTopFixedWidthArray.
/**
* Test an array of ints (as an example fixed-width type)
* at the top level of a schema.
*/
@Test
public void testTopFixedWidthArray() {
  BatchSchema batchSchema = new SchemaBuilder()
      .add("c", MinorType.INT)
      .addArray("a", MinorType.INT)
      .build();
  ExtendableRowSet rs1 = fixture.rowSet(batchSchema);
  RowSetWriter writer = rs1.writer();
  writer.scalar(0).setInt(10);
  ScalarWriter array = writer.array(1).scalar();
  array.setInt(100);
  array.setInt(110);
  writer.save();
  writer.scalar(0).setInt(20);
  array.setInt(200);
  array.setInt(120);
  array.setInt(220);
  writer.save();
  writer.scalar(0).setInt(30);
  writer.save();
  SingleRowSet result = writer.done();
  RowSetReader reader = result.reader();
  assertTrue(reader.next());
  assertEquals(10, reader.scalar(0).getInt());
  ScalarElementReader arrayReader = reader.array(1).elements();
  assertEquals(2, arrayReader.size());
  assertEquals(100, arrayReader.getInt(0));
  assertEquals(110, arrayReader.getInt(1));
  assertTrue(reader.next());
  assertEquals(20, reader.scalar(0).getInt());
  assertEquals(3, arrayReader.size());
  assertEquals(200, arrayReader.getInt(0));
  assertEquals(120, arrayReader.getInt(1));
  assertEquals(220, arrayReader.getInt(2));
  assertTrue(reader.next());
  assertEquals(30, reader.scalar(0).getInt());
  assertEquals(0, arrayReader.size());
  assertFalse(reader.next());
  SingleRowSet rs2 = fixture.rowSetBuilder(batchSchema)
      .addRow(10, intArray(100, 110))
      .addRow(20, intArray(200, 120, 220))
      .addRow(30, null)
      .build();
  new RowSetComparison(rs1).verifyAndClearAll(rs2);
}
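The same element-at-a-time pattern works for any scalar type: each set call on the array's ScalarWriter appends one element, and save() closes out the row. Below is a minimal sketch with a VARCHAR array; the test name, column names, and values are made up for illustration.
// Sketch only: the per-element write pattern applied to a VARCHAR array.
@Test
public void testTopVarcharArraySketch() {
  BatchSchema schema = new SchemaBuilder()
      .add("id", MinorType.INT)
      .addArray("tags", MinorType.VARCHAR)
      .build();
  ExtendableRowSet rowSet = fixture.rowSet(schema);
  RowSetWriter writer = rowSet.writer();
  writer.scalar(0).setInt(1);
  ScalarWriter tagWriter = writer.array(1).scalar();
  tagWriter.setString("red");   // each set call appends one element
  tagWriter.setString("blue");
  writer.save();                // closes the row, including its array
  SingleRowSet result = writer.done();
  RowSetReader reader = result.reader();
  assertTrue(reader.next());
  assertEquals(1, reader.scalar(0).getInt());
  ScalarElementReader tags = reader.array(1).elements();
  assertEquals(2, tags.size());
  assertEquals("red", tags.getString(0));
  assertEquals("blue", tags.getString(1));
  assertFalse(reader.next());
  result.clear();
}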
use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by axbaretto.
the class RowSetTest method testScalarArrayStructure.
/**
* Test a record with a top level array. The focus here is on the
* scalar array structure.
*/
@Test
public void testScalarArrayStructure() {
  TupleMetadata schema = new SchemaBuilder()
      .addArray("a", MinorType.INT)
      .buildSchema();
  ExtendableRowSet rowSet = fixture.rowSet(schema);
  RowSetWriter writer = rowSet.writer();

  // Repeated Int
  // Verify the invariants of the "full" and "simple" access paths
  assertEquals(ObjectType.ARRAY, writer.column("a").type());
  assertSame(writer.column("a"), writer.column(0));
  assertSame(writer.array("a"), writer.array(0));
  assertSame(writer.column("a").array(), writer.array("a"));
  assertSame(writer.column(0).array(), writer.array(0));
  assertEquals(ObjectType.SCALAR, writer.column("a").array().entry().type());
  assertEquals(ObjectType.SCALAR, writer.column("a").array().entryType());
  assertSame(writer.array(0).entry().scalar(), writer.array(0).scalar());
  assertEquals(ValueType.INTEGER, writer.array(0).scalar().valueType());
  try {
    writer.column(0).scalar();
    fail();
  } catch (UnsupportedOperationException e) {
    // Expected
  }
  try {
    writer.column(0).tuple();
    fail();
  } catch (UnsupportedOperationException e) {
    // Expected
  }

  // Write some data
  ScalarWriter intWriter = writer.array("a").scalar();
  intWriter.setInt(10);
  intWriter.setInt(11);
  writer.save();
  intWriter.setInt(20);
  intWriter.setInt(21);
  intWriter.setInt(22);
  writer.save();
  intWriter.setInt(30);
  writer.save();
  intWriter.setInt(40);
  intWriter.setInt(41);
  writer.save();

  // Finish the row set and get a reader.
  SingleRowSet actual = writer.done();
  RowSetReader reader = actual.reader();

  // Verify the invariants of the "full" and "simple" access paths
  assertEquals(ObjectType.ARRAY, reader.column("a").type());
  assertSame(reader.column("a"), reader.column(0));
  assertSame(reader.array("a"), reader.array(0));
  assertSame(reader.column("a").array(), reader.array("a"));
  assertSame(reader.column(0).array(), reader.array(0));
  assertEquals(ObjectType.SCALAR, reader.column("a").array().entryType());
  assertEquals(ValueType.INTEGER, reader.array(0).elements().valueType());

  // Read and verify the rows
  ScalarElementReader intReader = reader.array(0).elements();
  assertTrue(reader.next());
  assertEquals(2, intReader.size());
  assertEquals(10, intReader.getInt(0));
  assertEquals(11, intReader.getInt(1));
  assertTrue(reader.next());
  assertEquals(3, intReader.size());
  assertEquals(20, intReader.getInt(0));
  assertEquals(21, intReader.getInt(1));
  assertEquals(22, intReader.getInt(2));
  assertTrue(reader.next());
  assertEquals(1, intReader.size());
  assertEquals(30, intReader.getInt(0));
  assertTrue(reader.next());
  assertEquals(2, intReader.size());
  assertEquals(40, intReader.getInt(0));
  assertEquals(41, intReader.getInt(1));
  assertFalse(reader.next());

  // Test the above again via the writer and reader
  // utility classes.
  SingleRowSet expected = fixture.rowSetBuilder(schema)
      .addSingleCol(intArray(10, 11))
      .addSingleCol(intArray(20, 21, 22))
      .addSingleCol(intArray(30))
      .addSingleCol(intArray(40, 41))
      .build();
  new RowSetComparison(expected).verifyAndClearAll(actual);
}
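Because every scalar element supports getObject(), a single-column scalar array can also be read without knowing the value type up front. Here is a rough sketch of such a hypothetical helper; the method name and the java.util.List / ArrayList usage (and their imports) are assumptions for illustration, not Drill API.
// Hypothetical helper: collect every row of a one-column scalar array
// into plain Java lists, using only size() and getObject().
private List<List<Object>> dumpScalarArray(RowSetReader reader) {
  List<List<Object>> rows = new ArrayList<>();
  ScalarElementReader elements = reader.array(0).elements();
  while (reader.next()) {
    List<Object> row = new ArrayList<>();
    for (int i = 0; i < elements.size(); i++) {
      row.add(elements.getObject(i));
    }
    rows.add(row);
  }
  return rows;
}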
use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by axbaretto.
the class TestFillEmpties method dofillEmptiesRepeated.
private void dofillEmptiesRepeated(MajorType majorType) {
  TupleMetadata schema = new SchemaBuilder()
      .add("a", majorType)
      .buildSchema();
  ExtendableRowSet rs = fixture.rowSet(schema);
  RowSetWriter writer = rs.writer();
  ScalarWriter colWriter = writer.array(0).scalar();
  ValueType valueType = colWriter.valueType();
  for (int i = 0; i < ROW_COUNT; i++) {
    if (i % 5 == 0) {
      // Write two values so we can exercise a bit of the array logic.
      colWriter.setObject(RowSetUtilities.testDataFromInt(valueType, majorType, i));
      colWriter.setObject(RowSetUtilities.testDataFromInt(valueType, majorType, i + 1));
    }
    writer.save();
  }
  SingleRowSet result = writer.done();
  RowSetReader reader = result.reader();
  ScalarElementReader colReader = reader.array(0).elements();
  for (int i = 0; i < ROW_COUNT; i++) {
    assertTrue(reader.next());
    if (i % 5 != 0) {
      // An unwritten array reads back the same as a zero-length array.
      assertEquals(0, colReader.size());
    } else {
      for (int j = 0; j < 2; j++) {
        Object actual = colReader.getObject(j);
        Object expected = RowSetUtilities.testDataFromInt(valueType, majorType, i + j);
        RowSetUtilities.assertEqualValues(
            majorType.toString().replace('\n', ' ') + "[" + i + "][" + j + "]",
            valueType, expected, actual);
      }
    }
  }
  result.clear();
}
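A sketch of how such a helper might be driven for a few repeated types follows. The test name and the chosen minor types are hypothetical; the MajorType is built through the standard protobuf builder with DataMode.REPEATED (imports assumed from org.apache.drill.common.types.TypeProtos).
// Hypothetical driver: run the fill-empties check for a couple of
// sample repeated types. The chosen minor types are illustrative only.
@Test
public void testFillEmptiesRepeatedSample() {
  for (MinorType type : new MinorType[] { MinorType.INT, MinorType.VARCHAR }) {
    MajorType majorType = MajorType.newBuilder()
        .setMinorType(type)
        .setMode(DataMode.REPEATED)
        .build();
    dofillEmptiesRepeated(majorType);
  }
}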