use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by apache.
the class TestResultSetLoaderMaps method testMapOverflowWithNewColumn.
/**
 * Test the case in which a new column is added during the overflow row. Unlike
 * the top-level schema case, internally we must create a copy of the map and
 * move vectors across only when the result is to include the schema version
 * of the target column. For overflow, the new column is not visible in the
 * first batch; it first appears in the second batch, which begins with the
 * overflow row during which the column was added.
 */
@Test
public void testMapOverflowWithNewColumn() {
  final TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m")
        .add("b", MinorType.INT)
        .add("c", MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
      .readerSchema(schema)
      .rowCountLimit(ValueVector.MAX_ROW_COUNT)
      .build();
  final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  assertEquals(4, rsLoader.schemaVersion());
  final RowSetLoader rootWriter = rsLoader.writer();

  // Can't use the shortcut to populate rows when doing a schema
  // change.
  final ScalarWriter aWriter = rootWriter.scalar("a");
  final TupleWriter mWriter = rootWriter.tuple("m");
  final ScalarWriter bWriter = mWriter.scalar("b");
  final ScalarWriter cWriter = mWriter.scalar("c");
  final byte[] value = new byte[512];
  Arrays.fill(value, (byte) 'X');
  int count = 0;
  rsLoader.startBatch();
  while (!rootWriter.isFull()) {
    rootWriter.start();
    aWriter.setInt(count);
    bWriter.setInt(count * 10);
    cWriter.setBytes(value, value.length);
    if (rootWriter.isFull()) {
      // Overflow just occurred. Add another column.
      mWriter.addColumn(SchemaBuilder.columnSchema("d", MinorType.INT, DataMode.OPTIONAL));
      mWriter.scalar("d").setInt(count * 100);
    }
    rootWriter.save();
    count++;
  }

  // Result set should include the original columns, but not d.
  RowSet result = fixture.wrap(rsLoader.harvest());
  assertEquals(4, rsLoader.schemaVersion());
  assertTrue(schema.isEquivalent(result.schema()));
  final BatchSchema expectedSchema = new BatchSchema(SelectionVectorMode.NONE, schema.toFieldList());
  assertTrue(expectedSchema.isEquivalent(result.batchSchema()));

  // Use a reader to validate row-by-row. Too large to create an expected
  // result set.
  RowSetReader reader = result.reader();
  TupleReader mapReader = reader.tuple("m");
  int rowId = 0;
  while (reader.next()) {
    assertEquals(rowId, reader.scalar("a").getInt());
    assertEquals(rowId * 10, mapReader.scalar("b").getInt());
    assertTrue(Arrays.equals(value, mapReader.scalar("c").getBytes()));
    rowId++;
  }
  result.clear();

  // Next batch should start with the overflow row.
  rsLoader.startBatch();
  assertEquals(1, rootWriter.rowCount());
  result = fixture.wrap(rsLoader.harvest());
  assertEquals(1, result.rowCount());
  reader = result.reader();
  mapReader = reader.tuple("m");
  while (reader.next()) {
    assertEquals(rowId, reader.scalar("a").getInt());
    assertEquals(rowId * 10, mapReader.scalar("b").getInt());
    assertTrue(Arrays.equals(value, mapReader.scalar("c").getBytes()));
    assertEquals(rowId * 100, mapReader.scalar("d").getInt());
  }
  result.clear();
  rsLoader.close();
}
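
The idiom above generalizes to any TupleWriter, root or nested: check isFull() a second time after writing the row's values, and add the column only once overflow has been detected, so the new column rides along with the looked-ahead row. A condensed sketch of just that pattern, assuming a loader configured over the same (a, m(b, c)) schema as the test; the variable names here are illustrative:

// Sketch of the overflow-aware column-add idiom. Assumes `loader`
// is a ResultSetLoader built as in the test above.
byte[] value = new byte[512];
Arrays.fill(value, (byte) 'X');
RowSetLoader writer = loader.writer();
TupleWriter m = writer.tuple("m");
loader.startBatch();
int i = 0;
while (!writer.isFull()) {
  writer.start();
  writer.scalar("a").setInt(i);
  m.scalar("b").setInt(i * 10);
  m.scalar("c").setBytes(value, value.length);  // eventually overflows
  if (writer.isFull()) {
    // Overflow just occurred: this row has been moved to the
    // look-ahead batch, so a column added now is omitted from the
    // harvested batch but appears in the next one.
    m.addColumn(SchemaBuilder.columnSchema("d", MinorType.INT, DataMode.OPTIONAL));
    m.scalar("d").setInt(i * 100);
  }
  writer.save();
  i++;
}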
use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by axbaretto.
the class TestResultSetLoaderMaps method testMapEvolution.
/**
 * Create a schema with a map, then add columns to the map
 * after delivering the first batch. The new columns should appear
 * in the second-batch output.
*/
@Test
public void testMapEvolution() {
  TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m")
        .add("b", MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  ResultSetLoaderImpl.ResultSetOptions options = new OptionBuilder()
      .setSchema(schema)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  assertEquals(3, rsLoader.schemaVersion());
  RowSetLoader rootWriter = rsLoader.writer();
  rsLoader.startBatch();
  rootWriter
      .addRow(10, objArray("fred"))
      .addRow(20, objArray("barney"));
  RowSet actual = fixture.wrap(rsLoader.harvest());
  assertEquals(3, rsLoader.schemaVersion());
  assertEquals(2, actual.rowCount());

  // Validate first batch
  SingleRowSet expected = fixture.rowSetBuilder(schema)
      .addRow(10, objArray("fred"))
      .addRow(20, objArray("barney"))
      .build();
  new RowSetComparison(expected).verifyAndClearAll(actual);

  // Add three columns in the second batch. One before
  // the batch starts, one before the first row, and one after
  // the first row.
  TupleWriter mapWriter = rootWriter.tuple("m");
  mapWriter.addColumn(SchemaBuilder.columnSchema("c", MinorType.INT, DataMode.REQUIRED));
  rsLoader.startBatch();
  mapWriter.addColumn(SchemaBuilder.columnSchema("d", MinorType.BIGINT, DataMode.REQUIRED));
  rootWriter.addRow(30, objArray("wilma", 130, 130_000L));
  mapWriter.addColumn(SchemaBuilder.columnSchema("e", MinorType.VARCHAR, DataMode.REQUIRED));
  rootWriter.addRow(40, objArray("betty", 140, 140_000L, "bam-bam"));
  actual = fixture.wrap(rsLoader.harvest());
  assertEquals(6, rsLoader.schemaVersion());
  assertEquals(2, actual.rowCount());

  // Validate second batch
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m")
        .add("b", MinorType.VARCHAR)
        .add("c", MinorType.INT)
        .add("d", MinorType.BIGINT)
        .add("e", MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  expected = fixture.rowSetBuilder(expectedSchema)
      .addRow(30, objArray("wilma", 130, 130_000L, ""))
      .addRow(40, objArray("betty", 140, 140_000L, "bam-bam"))
      .build();
  new RowSetComparison(expected).verifyAndClearAll(actual);
  rsLoader.close();
}
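
Note that in the expected second batch, row 30 (written before column e existed) is back-filled with an empty VARCHAR rather than null, because e was added as a REQUIRED column. A consumer can detect this kind of evolution by comparing schemaVersion() across harvests; a minimal sketch under that assumption, where writeRows and processBatch are hypothetical helpers:

// Hypothetical consumer loop: rebind cached readers whenever the
// loader reports a new schema version after a harvest.
int lastVersion = -1;
while (haveInput) {                    // stand-in loop condition
  rsLoader.startBatch();
  writeRows(rsLoader.writer());        // hypothetical writer helper
  RowSet batch = fixture.wrap(rsLoader.harvest());
  if (rsLoader.schemaVersion() != lastVersion) {
    lastVersion = rsLoader.schemaVersion();
    // Columns c, d, and e above first surface here; rebuild any
    // cached column readers against batch.schema().
  }
  processBatch(batch);                 // hypothetical consumer
  batch.clear();
}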
use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by axbaretto.
the class TestResultSetLoaderMaps method testBasics.
@Test
public void testBasics() {
  TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m")
        .add("c", MinorType.INT)
        .add("d", MinorType.VARCHAR)
        .resumeSchema()
      .add("e", MinorType.VARCHAR)
      .buildSchema();
  ResultSetLoaderImpl.ResultSetOptions options = new OptionBuilder()
      .setSchema(schema)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();

  // Verify structure and schema
  assertEquals(5, rsLoader.schemaVersion());
  TupleMetadata actualSchema = rootWriter.schema();
  assertEquals(3, actualSchema.size());
  assertTrue(actualSchema.metadata(1).isMap());
  assertEquals(2, actualSchema.metadata("m").mapSchema().size());
  assertEquals(2, actualSchema.column("m").getChildren().size());
  rsLoader.startBatch();

  // Write a row the way that clients will do.
  ScalarWriter aWriter = rootWriter.scalar("a");
  TupleWriter mWriter = rootWriter.tuple("m");
  ScalarWriter cWriter = mWriter.scalar("c");
  ScalarWriter dWriter = mWriter.scalar("d");
  ScalarWriter eWriter = rootWriter.scalar("e");
  rootWriter.start();
  aWriter.setInt(10);
  cWriter.setInt(110);
  dWriter.setString("fred");
  eWriter.setString("pebbles");
  rootWriter.save();
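
  // Adding a column whose name duplicates an existing map member ("c")
  // must be rejected with an IllegalArgumentException.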
  try {
    mWriter.addColumn(SchemaBuilder.columnSchema("c", MinorType.INT, DataMode.OPTIONAL));
    fail();
  } catch (IllegalArgumentException e) {
    // Expected
  }

  // Write another using the test-time conveniences
  rootWriter.addRow(20, objArray(210, "barney"), "bam-bam");

  // Harvest the batch
  RowSet actual = fixture.wrap(rsLoader.harvest());
  assertEquals(5, rsLoader.schemaVersion());
  assertEquals(2, actual.rowCount());

  // Validate data
  SingleRowSet expected = fixture.rowSetBuilder(schema)
      .addRow(10, objArray(110, "fred"), "pebbles")
      .addRow(20, objArray(210, "barney"), "bam-bam")
      .build();
  new RowSetComparison(expected).verifyAndClearAll(actual);
  rsLoader.close();
}
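
A note on the test shorthand used above: a map value is supplied positionally as an Object[] (via objArray), with members in the map's schema order. A sketch of the equivalence, using the second row of this test:

// Shortcut form: the nested objArray supplies the map's (c, d)
// members in schema order.
rootWriter.addRow(20, objArray(210, "barney"), "bam-bam");

// Equivalent explicit-writer form:
rootWriter.start();
rootWriter.scalar("a").setInt(20);
TupleWriter m = rootWriter.tuple("m");
m.scalar("c").setInt(210);
m.scalar("d").setString("barney");
rootWriter.scalar("e").setString("bam-bam");
rootWriter.save();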
use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by axbaretto.
the class TestResultSetLoaderMaps method testNestedMapsNullable.
/**
 * Create nested maps, then add columns to each map on the fly,
 * this time with nullable types.
*/
@Test
public void testNestedMapsNullable() {
  TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m1")
        .addNullable("b", MinorType.VARCHAR)
        .addMap("m2")
          .addNullable("c", MinorType.VARCHAR)
          .resumeMap()
        .resumeSchema()
      .buildSchema();
  ResultSetLoaderImpl.ResultSetOptions options = new OptionBuilder()
      .setSchema(schema)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();
  rsLoader.startBatch();
  rootWriter.addRow(10, objArray("b1", objArray("c1")));

  // Validate first batch
  RowSet actual = fixture.wrap(rsLoader.harvest());
  SingleRowSet expected = fixture.rowSetBuilder(schema)
      .addRow(10, objArray("b1", objArray("c1")))
      .build();
  // actual.print();
  // expected.print();
  new RowSetComparison(expected).verifyAndClearAll(actual);

  // Now add columns in the second batch.
  rsLoader.startBatch();
  rootWriter.addRow(20, objArray("b2", objArray("c2")));
  TupleWriter m1Writer = rootWriter.tuple("m1");
  m1Writer.addColumn(SchemaBuilder.columnSchema("d", MinorType.VARCHAR, DataMode.OPTIONAL));
  TupleWriter m2Writer = m1Writer.tuple("m2");
  m2Writer.addColumn(SchemaBuilder.columnSchema("e", MinorType.VARCHAR, DataMode.OPTIONAL));
  rootWriter.addRow(30, objArray("b3", objArray("c3", "e3"), "d3"));

  // And another set while the write proceeds.
  m1Writer.addColumn(SchemaBuilder.columnSchema("f", MinorType.VARCHAR, DataMode.OPTIONAL));
  m2Writer.addColumn(SchemaBuilder.columnSchema("g", MinorType.VARCHAR, DataMode.OPTIONAL));
  rootWriter.addRow(40, objArray("b4", objArray("c4", "e4", "g4"), "d4", "e4"));

  // Validate second batch
  actual = fixture.wrap(rsLoader.harvest());
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m1")
        .addNullable("b", MinorType.VARCHAR)
        .addMap("m2")
          .addNullable("c", MinorType.VARCHAR)
          .addNullable("e", MinorType.VARCHAR)
          .addNullable("g", MinorType.VARCHAR)
          .resumeMap()
        .addNullable("d", MinorType.VARCHAR)
        .addNullable("f", MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  expected = fixture.rowSetBuilder(expectedSchema)
      .addRow(20, objArray("b2", objArray("c2", null, null), null, null))
      .addRow(30, objArray("b3", objArray("c3", "e3", null), "d3", null))
      .addRow(40, objArray("b4", objArray("c4", "e4", "g4"), "d4", "e4"))
      .build();
  new RowSetComparison(expected).verifyAndClearAll(actual);
  rsLoader.close();
}
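
Because newly added nullable columns are back-filled with nulls for rows written before the addition, a reader should probe with isNull() before extracting values. A minimal sketch, assuming `batch` is a harvested RowSet carrying the evolved schema above:

// Sketch: probe back-filled nullable map members before reading.
RowSetReader rdr = batch.reader();
TupleReader m1 = rdr.tuple("m1");
TupleReader m2 = m1.tuple("m2");
while (rdr.next()) {
  // Rows written before column "d" existed report isNull() == true.
  String d = m1.scalar("d").isNull() ? null : m1.scalar("d").getString();
  String g = m2.scalar("g").isNull() ? null : m2.scalar("g").getString();
}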
use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by axbaretto.
the class TestResultSetLoaderMaps method testMapOverflowWithNewColumn.
/**
 * Test the case in which a new column is added during the overflow row. Unlike
 * the top-level schema case, internally we must create a copy of the map and
 * move vectors across only when the result is to include the schema version
 * of the target column. For overflow, the new column is not visible in the
 * first batch; it first appears in the second batch, which begins with the
 * overflow row during which the column was added.
 */
@Test
public void testMapOverflowWithNewColumn() {
  TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addMap("m")
        .add("b", MinorType.INT)
        .add("c", MinorType.VARCHAR)
        .resumeSchema()
      .buildSchema();
  ResultSetLoaderImpl.ResultSetOptions options = new OptionBuilder()
      .setSchema(schema)
      .setRowCountLimit(ValueVector.MAX_ROW_COUNT)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  assertEquals(4, rsLoader.schemaVersion());
  RowSetLoader rootWriter = rsLoader.writer();

  // Can't use the shortcut to populate rows when doing a schema
  // change.
  ScalarWriter aWriter = rootWriter.scalar("a");
  TupleWriter mWriter = rootWriter.tuple("m");
  ScalarWriter bWriter = mWriter.scalar("b");
  ScalarWriter cWriter = mWriter.scalar("c");
  byte[] value = new byte[512];
  Arrays.fill(value, (byte) 'X');
  int count = 0;
  rsLoader.startBatch();
  while (!rootWriter.isFull()) {
    rootWriter.start();
    aWriter.setInt(count);
    bWriter.setInt(count * 10);
    cWriter.setBytes(value, value.length);
    if (rootWriter.isFull()) {
      // Overflow just occurred. Add another column.
      mWriter.addColumn(SchemaBuilder.columnSchema("d", MinorType.INT, DataMode.OPTIONAL));
      mWriter.scalar("d").setInt(count * 100);
    }
    rootWriter.save();
    count++;
  }

  // Result set should include the original columns, but not d.
  RowSet result = fixture.wrap(rsLoader.harvest());
  assertEquals(4, rsLoader.schemaVersion());
  assertTrue(schema.isEquivalent(result.schema()));
  BatchSchema expectedSchema = new BatchSchema(SelectionVectorMode.NONE, schema.toFieldList());
  assertTrue(expectedSchema.isEquivalent(result.batchSchema()));

  // Use a reader to validate row-by-row. Too large to create an expected
  // result set.
  RowSetReader reader = result.reader();
  TupleReader mapReader = reader.tuple("m");
  int rowId = 0;
  while (reader.next()) {
    assertEquals(rowId, reader.scalar("a").getInt());
    assertEquals(rowId * 10, mapReader.scalar("b").getInt());
    assertTrue(Arrays.equals(value, mapReader.scalar("c").getBytes()));
    rowId++;
  }
  result.clear();

  // Next batch should start with the overflow row.
  rsLoader.startBatch();
  assertEquals(1, rootWriter.rowCount());
  result = fixture.wrap(rsLoader.harvest());
  assertEquals(1, result.rowCount());
  reader = result.reader();
  mapReader = reader.tuple("m");
  while (reader.next()) {
    assertEquals(rowId, reader.scalar("a").getInt());
    assertEquals(rowId * 10, mapReader.scalar("b").getInt());
    assertTrue(Arrays.equals(value, mapReader.scalar("c").getBytes()));
    assertEquals(rowId * 100, mapReader.scalar("d").getInt());
  }
  result.clear();
  rsLoader.close();
}
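
Aside from the dropped final keywords, this copy differs from the apache snapshot above only in the options API: the older axbaretto fork builds options via OptionBuilder.setSchema() and setRowCountLimit(), while the apache version uses ResultSetOptionBuilder.readerSchema() and rowCountLimit().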