Usage example of org.apache.drill.exec.physical.resultSet.RowSetLoader in the Apache Drill project:
class TestResultSetLoaderDictArray, method testArrayValue.
@Test
public void testArrayValue() {
  // Schema: an INT column "a" and a repeated DICT "d" whose entries
  // map INT keys to repeated-VARCHAR values.
  TupleMetadata dictSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addDictArray("d", MinorType.INT)
        .repeatedValue(MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  ResultSetLoaderImpl.ResultSetOptions loaderOptions = new ResultSetOptionBuilder()
      .readerSchema(dictSchema)
      .build();
  ResultSetLoader loader = new ResultSetLoaderImpl(fixture.allocator(), loaderOptions);
  RowSetLoader writer = loader.writer();

  // Write a couple of rows, including an empty dict array for row 20.
  loader.startBatch();
  writer
      .addRow(10, objArray(
          map(110, strArray("d1.1.1", "d1.1.2"),
              111, strArray("d1.1.3", "d1.1.4"),
              112, strArray("d1.1.5", "d1.1.6")),
          map(120, strArray("d1.2.1", "d1.2.2"))))
      .addRow(20, objArray())
      .addRow(30, objArray(
          map(310, strArray("d3.1.1", "d3.2.2"),
              311, strArray("d3.1.3", "d3.2.4", "d3.1.5", "d3.1.6")),
          map(320, strArray(), 321, strArray("d3.2.2")),
          map(330, strArray("d3.3.1", "d1.2.2"))));

  // Verify the batch against an identically-built expected row set.
  RowSet actualRows = fixture.wrap(loader.harvest());
  SingleRowSet expectedRows = fixture.rowSetBuilder(dictSchema)
      .addRow(10, objArray(
          map(110, strArray("d1.1.1", "d1.1.2"),
              111, strArray("d1.1.3", "d1.1.4"),
              112, strArray("d1.1.5", "d1.1.6")),
          map(120, strArray("d1.2.1", "d1.2.2"))))
      .addRow(20, objArray())
      .addRow(30, objArray(
          map(310, strArray("d3.1.1", "d3.2.2"),
              311, strArray("d3.1.3", "d3.2.4", "d3.1.5", "d3.1.6")),
          map(320, strArray(), 321, strArray("d3.2.2")),
          map(330, strArray("d3.3.1", "d1.2.2"))))
      .build();
  RowSetUtilities.verify(expectedRows, actualRows);
  loader.close();
}
Usage example of org.apache.drill.exec.physical.resultSet.RowSetLoader in the Apache Drill project:
class TestResultSetLoaderMaps, method testNestedMapsNullable.
/**
 * Build a two-level nested map, then add nullable columns to both the
 * outer and inner maps while writing is in progress. Earlier rows must
 * be back-filled with nulls for the late-added columns.
 */
@Test
public void testNestedMapsNullable() {
  // Initial schema: a, m1{b, m2{c}} with nullable VARCHARs.
  final TupleMetadata initialSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m1")
        .addNullable("b", MinorType.VARCHAR)
        .addMap("m2")
          .addNullable("c", MinorType.VARCHAR)
          .resumeMap()
        .resumeSchema()
      .buildSchema();
  final ResultSetLoaderImpl.ResultSetOptions loaderOptions = new ResultSetOptionBuilder()
      .readerSchema(initialSchema)
      .build();
  final ResultSetLoader loader = new ResultSetLoaderImpl(fixture.allocator(), loaderOptions);
  final RowSetLoader writer = loader.writer();

  loader.startBatch();
  writer.addRow(10, mapValue("b1", mapValue("c1")));

  // Validate first batch
  RowSet actualRows = fixture.wrap(loader.harvest());
  SingleRowSet expectedRows = fixture.rowSetBuilder(initialSchema)
      .addRow(10, mapValue("b1", mapValue("c1")))
      .build();
  RowSetUtilities.verify(expectedRows, actualRows);

  // Now add columns in the second batch: one pair after the first row,
  // another pair mid-write.
  loader.startBatch();
  writer.addRow(20, mapValue("b2", mapValue("c2")));
  final TupleWriter map1Writer = writer.tuple("m1");
  map1Writer.addColumn(SchemaBuilder.columnSchema("d", MinorType.VARCHAR, DataMode.OPTIONAL));
  final TupleWriter map2Writer = map1Writer.tuple("m2");
  map2Writer.addColumn(SchemaBuilder.columnSchema("e", MinorType.VARCHAR, DataMode.OPTIONAL));
  writer.addRow(30, mapValue("b3", mapValue("c3", "e3"), "d3"));

  // And another set while the write proceeds.
  map1Writer.addColumn(SchemaBuilder.columnSchema("f", MinorType.VARCHAR, DataMode.OPTIONAL));
  map2Writer.addColumn(SchemaBuilder.columnSchema("g", MinorType.VARCHAR, DataMode.OPTIONAL));
  writer.addRow(40, mapValue("b4", mapValue("c4", "e4", "g4"), "d4", "e4"));

  // Validate second batch: earlier rows carry nulls for late columns.
  actualRows = fixture.wrap(loader.harvest());
  final TupleMetadata evolvedSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m1")
        .addNullable("b", MinorType.VARCHAR)
        .addMap("m2")
          .addNullable("c", MinorType.VARCHAR)
          .addNullable("e", MinorType.VARCHAR)
          .addNullable("g", MinorType.VARCHAR)
          .resumeMap()
        .addNullable("d", MinorType.VARCHAR)
        .addNullable("f", MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  expectedRows = fixture.rowSetBuilder(evolvedSchema)
      .addRow(20, mapValue("b2", mapValue("c2", null, null), null, null))
      .addRow(30, mapValue("b3", mapValue("c3", "e3", null), "d3", null))
      .addRow(40, mapValue("b4", mapValue("c4", "e4", "g4"), "d4", "e4"))
      .build();
  RowSetUtilities.verify(expectedRows, actualRows);
  loader.close();
}
Usage example of org.apache.drill.exec.physical.resultSet.RowSetLoader in the Apache Drill project:
class TestResultSetLoaderMaps, method testMapWithArray.
/**
 * Test a map that contains scalar arrays. The array writer itself is
 * fully tested in the accessor subsystem; this exercises the loader
 * layer's cardinality handling, including back-filling empty offsets
 * when an array column is added mid-batch.
 */
@Test
public void testMapWithArray() {
  // Schema: a, m{c: INT[], d: VARCHAR[]}.
  final TupleMetadata initialSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m")
        .addArray("c", MinorType.INT)
        .addArray("d", MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  final ResultSetLoaderImpl.ResultSetOptions loaderOptions = new ResultSetOptionBuilder()
      .readerSchema(initialSchema)
      .build();
  final ResultSetLoader loader = new ResultSetLoaderImpl(fixture.allocator(), loaderOptions);
  final RowSetLoader writer = loader.writer();

  // Write some rows, including empty arrays.
  loader.startBatch();
  writer
      .addRow(10, mapValue(intArray(110, 120, 130),
          strArray("d1.1", "d1.2", "d1.3", "d1.4")))
      .addRow(20, mapValue(intArray(210), strArray()))
      .addRow(30, mapValue(intArray(), strArray("d3.1")));

  // Validate first batch
  RowSet actualRows = fixture.wrap(loader.harvest());
  SingleRowSet expectedRows = fixture.rowSetBuilder(initialSchema)
      .addRow(10, mapValue(intArray(110, 120, 130),
          strArray("d1.1", "d1.2", "d1.3", "d1.4")))
      .addRow(20, mapValue(intArray(210), strArray()))
      .addRow(30, mapValue(intArray(), strArray("d3.1")))
      .build();
  RowSetUtilities.verify(expectedRows, actualRows);

  // Add another array column after two rows of the second batch.
  loader.startBatch();
  writer
      .addRow(40, mapValue(intArray(410, 420), strArray("d4.1", "d4.2")))
      .addRow(50, mapValue(intArray(510), strArray("d5.1")));
  final TupleWriter mWriter = writer.tuple("m");
  mWriter.addColumn(SchemaBuilder.columnSchema("e", MinorType.VARCHAR, DataMode.REPEATED));
  writer
      .addRow(60, mapValue(intArray(610, 620), strArray("d6.1", "d6.2"),
          strArray("e6.1", "e6.2")))
      .addRow(70, mapValue(intArray(710), strArray(), strArray("e7.1", "e7.2")));

  // Validate the second batch. The new array should have been back-filled
  // with empty offsets for the rows written before it existed.
  actualRows = fixture.wrap(loader.harvest());
  expectedRows = fixture.rowSetBuilder(actualRows.schema())
      .addRow(40, mapValue(intArray(410, 420), strArray("d4.1", "d4.2"), strArray()))
      .addRow(50, mapValue(intArray(510), strArray("d5.1"), strArray()))
      .addRow(60, mapValue(intArray(610, 620), strArray("d6.1", "d6.2"),
          strArray("e6.1", "e6.2")))
      .addRow(70, mapValue(intArray(710), strArray(), strArray("e7.1", "e7.2")))
      .build();
  RowSetUtilities.verify(expectedRows, actualRows);
  loader.close();
}
Usage example of org.apache.drill.exec.physical.resultSet.RowSetLoader in the Apache Drill project:
class TestResultSetLoaderMaps, method testMapEvolution.
/**
 * Create a schema with a map, then add columns to the map after
 * delivering the first batch. The new columns should appear in the
 * second-batch output, with earlier rows back-filled with defaults.
 */
@Test
public void testMapEvolution() {
  // Schema: a, m{b}.
  final TupleMetadata initialSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m")
        .add("b", MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  final ResultSetLoaderImpl.ResultSetOptions loaderOptions = new ResultSetOptionBuilder()
      .readerSchema(initialSchema)
      .build();
  final ResultSetLoader loader = new ResultSetLoaderImpl(fixture.allocator(), loaderOptions);
  assertEquals(3, loader.schemaVersion());
  final RowSetLoader writer = loader.writer();

  loader.startBatch();
  writer
      .addRow(10, mapValue("fred"))
      .addRow(20, mapValue("barney"));

  RowSet actualRows = fixture.wrap(loader.harvest());
  assertEquals(3, loader.schemaVersion());
  assertEquals(2, actualRows.rowCount());

  // Validate first batch
  SingleRowSet expectedRows = fixture.rowSetBuilder(initialSchema)
      .addRow(10, mapValue("fred"))
      .addRow(20, mapValue("barney"))
      .build();
  RowSetUtilities.verify(expectedRows, actualRows);

  // Add three columns in the second batch. One before the batch starts,
  // one before the first row, and one after the first row.
  final TupleWriter mWriter = writer.tuple("m");
  mWriter.addColumn(SchemaBuilder.columnSchema("c", MinorType.INT, DataMode.REQUIRED));
  loader.startBatch();
  mWriter.addColumn(SchemaBuilder.columnSchema("d", MinorType.BIGINT, DataMode.REQUIRED));
  writer.addRow(30, mapValue("wilma", 130, 130_000L));
  mWriter.addColumn(SchemaBuilder.columnSchema("e", MinorType.VARCHAR, DataMode.REQUIRED));
  writer.addRow(40, mapValue("betty", 140, 140_000L, "bam-bam"));

  actualRows = fixture.wrap(loader.harvest());
  assertEquals(6, loader.schemaVersion());
  assertEquals(2, actualRows.rowCount());

  // Validate second batch: row 30 gets an empty string for late column "e".
  final TupleMetadata evolvedSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m")
        .add("b", MinorType.VARCHAR)
        .add("c", MinorType.INT)
        .add("d", MinorType.BIGINT)
        .add("e", MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  expectedRows = fixture.rowSetBuilder(evolvedSchema)
      .addRow(30, mapValue("wilma", 130, 130_000L, ""))
      .addRow(40, mapValue("betty", 140, 140_000L, "bam-bam"))
      .build();
  RowSetUtilities.verify(expectedRows, actualRows);
  loader.close();
}
Usage example of org.apache.drill.exec.physical.resultSet.RowSetLoader in the Apache Drill project:
class TestResultSetLoaderOverflow, method testVectorSizeLimitWithAppend.
/**
 * Verify overflow handling when each VARCHAR value is built with
 * setBytes() plus two appendBytes() calls. The batch must fill based on
 * vector size, the overflow row must be invisible in the first harvest,
 * and the next batch must start with that overflow row intact.
 */
@Test
public void testVectorSizeLimitWithAppend() {
  TupleMetadata schema = new SchemaBuilder()
      .add("s", MinorType.VARCHAR)
      .buildSchema();
  ResultSetOptions options = new ResultSetOptionBuilder()
      .rowCountLimit(ValueVector.MAX_ROW_COUNT)
      .readerSchema(schema)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();
  rsLoader.startBatch();

  // Each row is "abc" followed by two 523-byte runs of 'X'.
  // Use an explicit charset: no-arg getBytes()/new String(byte[]) depend
  // on the platform default charset (pre-JDK 18).
  byte[] head = "abc".getBytes(java.nio.charset.StandardCharsets.UTF_8);
  byte[] tail = new byte[523];
  Arrays.fill(tail, (byte) 'X');
  String tailStr = new String(tail, java.nio.charset.StandardCharsets.UTF_8);
  String expected = new String(head, java.nio.charset.StandardCharsets.UTF_8) + tailStr + tailStr;

  int count = 0;
  ScalarWriter colWriter = rootWriter.scalar(0);
  while (!rootWriter.isFull()) {
    rootWriter.start();
    colWriter.setBytes(head, head.length);
    colWriter.appendBytes(tail, tail.length);
    colWriter.appendBytes(tail, tail.length);
    rootWriter.save();
    count++;
  }

  // Number of rows should be driven by vector size.
  // Our row count should include the overflow row
  int valueLength = head.length + 2 * tail.length;
  int expectedCount = ValueVector.MAX_BUFFER_SIZE / valueLength;
  assertEquals(expectedCount + 1, count);

  // Loader's row count should include only "visible" rows
  assertEquals(expectedCount, rootWriter.rowCount());

  // Total count should include invisible and look-ahead rows.
  assertEquals(expectedCount + 1, rsLoader.totalRowCount());

  // Result should exclude the overflow row
  {
    VectorContainer container = rsLoader.harvest();
    BatchValidator.validate(container);
    RowSet result = fixture.wrap(container);
    assertEquals(expectedCount, result.rowCount());

    // Verify that the values were, in fact, appended.
    RowSetReader reader = result.reader();
    while (reader.next()) {
      assertEquals(expected, reader.scalar(0).getString());
    }
    result.clear();
  }

  // Next batch should start with the overflow row
  rsLoader.startBatch();
  assertEquals(1, rootWriter.rowCount());
  assertEquals(expectedCount + 1, rsLoader.totalRowCount());
  {
    VectorContainer container = rsLoader.harvest();
    BatchValidator.validate(container);
    RowSet result = fixture.wrap(container);
    assertEquals(1, result.rowCount());
    RowSetReader reader = result.reader();
    while (reader.next()) {
      assertEquals(expected, reader.scalar(0).getString());
    }
    result.clear();
  }
  rsLoader.close();
}
Aggregations