Usage example of org.apache.drill.exec.physical.rowSet.RowSetLoader in the project drill by axbaretto:
class TestResultSetLoaderProjection, method doProjectionTest.
private void doProjectionTest(ResultSetLoader rsLoader) {
  RowSetLoader writer = rsLoader.writer();

  // The writer exposes every column, projected or not.
  TupleMetadata schema = writer.schema();
  assertEquals(4, schema.size());
  String[] expectedNames = {"a", "b", "c", "d"};
  for (int i = 0; i < expectedNames.length; i++) {
    assertEquals(expectedNames[i], schema.column(i).getName());
  }
  // Name lookup ignores case; unknown names return -1.
  assertEquals(0, schema.index("A"));
  assertEquals(3, schema.index("d"));
  assertEquals(-1, schema.index("e"));

  // Column metadata reports whether each column is projected.
  assertFalse(schema.metadata("a").isProjected());
  assertTrue(schema.metadata("b").isProjected());
  assertTrue(schema.metadata("c").isProjected());
  assertFalse(schema.metadata("d").isProjected());

  // Write a couple of rows into all four columns.
  rsLoader.startBatch();
  for (int row = 1; row < 3; row++) {
    writer.start();
    writer.scalar(0).setInt(row * 5);
    writer.scalar(1).setInt(row);
    writer.scalar(2).setInt(row * 10);
    writer.scalar(3).setInt(row * 20);
    writer.save();
  }

  // The harvested batch must contain only the projected columns,
  // in the order the loader defined them.
  BatchSchema expectedSchema = new SchemaBuilder()
      .add("b", MinorType.INT)
      .add("c", MinorType.INT)
      .build();
  SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
      .addRow(1, 10)
      .addRow(2, 20)
      .build();
  RowSet actual = fixture.wrap(rsLoader.harvest());
  new RowSetComparison(expected).verifyAndClearAll(actual);
  rsLoader.close();
}
Usage example of org.apache.drill.exec.physical.rowSet.RowSetLoader in the project drill by axbaretto:
class TestResultSetLoaderProtocol, method testBasics.
// Exercises the full ResultSetLoader lifecycle protocol: initial state,
// schema definition before and after rows, batch start/harvest cycles,
// illegal state transitions, and close.
@Test
public void testBasics() {
ResultSetLoaderImpl rsLoaderImpl = new ResultSetLoaderImpl(fixture.allocator());
ResultSetLoader rsLoader = rsLoaderImpl;
// Fresh loader: no schema, no batches, no rows; defaults in place.
assertEquals(0, rsLoader.schemaVersion());
assertEquals(ResultSetLoader.DEFAULT_ROW_COUNT, rsLoader.targetRowCount());
assertEquals(ValueVector.MAX_BUFFER_SIZE, rsLoader.targetVectorSize());
assertEquals(0, rsLoader.writer().rowCount());
assertEquals(0, rsLoader.batchCount());
assertEquals(0, rsLoader.totalRowCount());
// Harvest before any batch has been started is illegal.
try {
rsLoader.harvest();
fail();
} catch (IllegalStateException e) {
// Expected
}
// Can define schema before starting the first batch.
RowSetLoader rootWriter = rsLoader.writer();
TupleMetadata schema = rootWriter.schema();
assertEquals(0, schema.size());
MaterializedField fieldA = SchemaBuilder.columnSchema("a", MinorType.INT, DataMode.REQUIRED);
rootWriter.addColumn(fieldA);
assertEquals(1, schema.size());
assertTrue(fieldA.isEquivalent(schema.column(0)));
assertSame(schema.metadata(0), schema.metadata("a"));
// Row operations are illegal before the batch is started.
try {
rootWriter.start();
fail();
} catch (IllegalStateException e) {
// Expected
}
try {
rootWriter.save();
fail();
} catch (IllegalStateException e) {
// Expected
}
// Because writing is an inner loop; no checks are
// done to ensure that writing occurs only in the proper
// state. So, can't test setInt() in the wrong state.
rsLoader.startBatch();
// Starting a batch while one is already active is illegal.
try {
rsLoader.startBatch();
fail();
} catch (IllegalStateException e) {
// Expected
}
assertFalse(rootWriter.isFull());
// First row: the row is not counted until save().
rootWriter.start();
rootWriter.scalar(0).setInt(100);
assertEquals(0, rootWriter.rowCount());
assertEquals(0, rsLoader.batchCount());
rootWriter.save();
assertEquals(1, rootWriter.rowCount());
assertEquals(1, rsLoader.batchCount());
assertEquals(1, rsLoader.totalRowCount());
// Can add a field after first row, prior rows are
// "back-filled".
MaterializedField fieldB = SchemaBuilder.columnSchema("b", MinorType.INT, DataMode.OPTIONAL);
rootWriter.addColumn(fieldB);
assertEquals(2, schema.size());
assertTrue(fieldB.isEquivalent(schema.column(1)));
assertSame(schema.metadata(1), schema.metadata("b"));
rootWriter.start();
rootWriter.scalar(0).setInt(200);
rootWriter.scalar(1).setInt(210);
rootWriter.save();
assertEquals(2, rootWriter.rowCount());
assertEquals(1, rsLoader.batchCount());
assertEquals(2, rsLoader.totalRowCount());
// Harvest the first batch. Version number is the number
// of columns added.
assertFalse(rootWriter.isFull());
RowSet result = fixture.wrap(rsLoader.harvest());
assertEquals(2, rsLoader.schemaVersion());
assertEquals(0, rootWriter.rowCount());
assertEquals(1, rsLoader.batchCount());
assertEquals(2, rsLoader.totalRowCount());
// Row 1's column "b" was back-filled with null.
SingleRowSet expected = fixture.rowSetBuilder(result.batchSchema()).addRow(100, null).addRow(200, 210).build();
new RowSetComparison(expected).verifyAndClearAll(result);
// Between batches (after harvest, before the next startBatch),
// row operations and a second harvest are illegal.
try {
rootWriter.start();
fail();
} catch (IllegalStateException e) {
// Expected
}
try {
rsLoader.harvest();
fail();
} catch (IllegalStateException e) {
// Expected
}
try {
rootWriter.save();
fail();
} catch (IllegalStateException e) {
// Expected
}
// Create a second batch
rsLoader.startBatch();
assertEquals(0, rootWriter.rowCount());
assertEquals(1, rsLoader.batchCount());
assertEquals(2, rsLoader.totalRowCount());
rootWriter.start();
rootWriter.scalar(0).setInt(300);
rootWriter.scalar(1).setInt(310);
rootWriter.save();
assertEquals(1, rootWriter.rowCount());
assertEquals(2, rsLoader.batchCount());
assertEquals(3, rsLoader.totalRowCount());
rootWriter.start();
rootWriter.scalar(0).setInt(400);
rootWriter.scalar(1).setInt(410);
rootWriter.save();
// Harvest. Schema has not changed, so the version stays at 2.
result = fixture.wrap(rsLoader.harvest());
assertEquals(2, rsLoader.schemaVersion());
assertEquals(0, rootWriter.rowCount());
assertEquals(2, rsLoader.batchCount());
assertEquals(4, rsLoader.totalRowCount());
expected = fixture.rowSetBuilder(result.batchSchema()).addRow(300, 310).addRow(400, 410).build();
new RowSetComparison(expected).verifyAndClearAll(result);
// Next batch. Schema has changed: column "c" is added mid-row,
// bumping the schema version to 3.
rsLoader.startBatch();
rootWriter.start();
rootWriter.scalar(0).setInt(500);
rootWriter.scalar(1).setInt(510);
rootWriter.addColumn(SchemaBuilder.columnSchema("c", MinorType.INT, DataMode.OPTIONAL));
rootWriter.scalar(2).setInt(520);
rootWriter.save();
rootWriter.start();
rootWriter.scalar(0).setInt(600);
rootWriter.scalar(1).setInt(610);
rootWriter.scalar(2).setInt(620);
rootWriter.save();
result = fixture.wrap(rsLoader.harvest());
assertEquals(3, rsLoader.schemaVersion());
expected = fixture.rowSetBuilder(result.batchSchema()).addRow(500, 510, 520).addRow(600, 610, 620).build();
new RowSetComparison(expected).verifyAndClearAll(result);
rsLoader.close();
// After close, every lifecycle operation is illegal.
try {
rootWriter.start();
fail();
} catch (IllegalStateException e) {
// Expected
}
try {
rsLoader.writer();
fail();
} catch (IllegalStateException e) {
// Expected
}
try {
rsLoader.startBatch();
fail();
} catch (IllegalStateException e) {
// Expected
}
try {
rsLoader.harvest();
fail();
} catch (IllegalStateException e) {
// Expected
}
try {
rootWriter.save();
fail();
} catch (IllegalStateException e) {
// Expected
}
// Benign to close twice
rsLoader.close();
}
Usage example of org.apache.drill.exec.physical.rowSet.RowSetLoader in the project drill by axbaretto:
class TestResultSetLoaderTorture, method doTortureTest.
private void doTortureTest() {
  // Deeply nested schema: a map containing a map array, which itself
  // contains a map, with nullable scalars and a scalar array inside.
  TupleMetadata schema = new SchemaBuilder()
      .add("n0", MinorType.INT)
      .addMap("m1")
        .addNullable("n1", MinorType.INT)
        .addMapArray("m2")
          .addNullable("n2", MinorType.INT)
          .addNullable("s2", MinorType.VARCHAR)
          .addMap("m3")
            .addNullable("n3", MinorType.INT)
            .addArray("s3", MinorType.VARCHAR)
          .resumeMap()
        .resumeMap()
      .resumeSchema()
      .buildSchema();
  ResultSetLoaderImpl.ResultSetOptions options = new OptionBuilder()
      .setRowCountLimit(ValueVector.MAX_ROW_COUNT)
      .setSchema(schema)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();
  TestSetup setup = new TestSetup();
  BatchWriter batchWriter = new BatchWriter(setup, rootWriter);
  ReadState readState = new ReadState();

  int rowsSoFar = 0;
  for (int batch = 0; batch < 10; batch++) {
    rsLoader.startBatch();
    batchWriter.writeBatch();

    // Now the hard part: verify the batch just written. Each batch is
    // expected to overflow, so the harvested batch holds only the rows
    // the writer reports as saved.
    // Debug aid: result.print();
    RowSet result = fixture.wrap(rsLoader.harvest());
    int savedRows = batchWriter.rowCount();
    assertEquals(savedRows, result.rowCount());
    rowsSoFar += savedRows;
    assertEquals(rowsSoFar, rsLoader.totalRowCount());
    assertEquals(batch + 1, rsLoader.batchCount());

    // Check every value against the expected sequence.
    new BatchReader(setup, result.reader(), readState).verify();
    result.clear();
  }

  // One final batch holding just the overflow row carried over from
  // the last loop iteration.
  rsLoader.startBatch();
  // Debug aids when things go amiss: there are methods to visualize
  // string buffers and offset vectors — the two peskiest vectors to
  // get right. For example:
  // VectorPrinter.printStrings((VarCharVector) ((NullableVarCharVector) ((AbstractScalarWriter) batchWriter.s2Writer).vector()).getValuesVector(), 0, 8);
  RowSet result = fixture.wrap(rsLoader.harvest());
  // result.print();
  rowsSoFar++;
  assertEquals(rowsSoFar, rsLoader.totalRowCount());
  new BatchReader(setup, result.reader(), readState).verify();
  result.clear();

  rsLoader.close();
}
Usage example of org.apache.drill.exec.physical.rowSet.RowSetLoader in the project drill by axbaretto:
class TestResultSetSchemaChange, method testSchemaChangeFirstBatch.
/**
 * Verify schema evolution within the first batch: write some rows,
 * then add new columns of varying modes, as a JSON reader might.
 * (Schema changes before the first record are trivial and tested
 * elsewhere.)
 */
@Test
public void testSchemaChangeFirstBatch() {
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator());
  RowSetLoader rootWriter = rsLoader.writer();
  rootWriter.addColumn(SchemaBuilder.columnSchema("a", MinorType.VARCHAR, DataMode.REQUIRED));

  rsLoader.startBatch();
  int rowId = 0;

  // Stage 1: a single required Varchar column.
  for (int rep = 0; rep < 2; rep++) {
    rootWriter.start();
    rowId++;
    rootWriter.scalar(0).setString("a_" + rowId);
    rootWriter.save();
  }

  // Stage 2: add a nullable int.
  rootWriter.addColumn(SchemaBuilder.columnSchema("b", MinorType.INT, DataMode.OPTIONAL));
  for (int rep = 0; rep < 2; rep++) {
    rootWriter.start();
    rowId++;
    rootWriter.scalar(0).setString("a_" + rowId);
    rootWriter.scalar(1).setInt(rowId);
    rootWriter.save();
  }

  // Stage 3: add a nullable Varchar. Variable-width, so offset
  // vectors must be back-filled.
  rootWriter.addColumn(SchemaBuilder.columnSchema("c", MinorType.VARCHAR, DataMode.OPTIONAL));
  for (int rep = 0; rep < 2; rep++) {
    rootWriter.start();
    rowId++;
    rootWriter.scalar(0).setString("a_" + rowId);
    rootWriter.scalar(1).setInt(rowId);
    rootWriter.scalar(2).setString("c_" + rowId);
    rootWriter.save();
  }

  // Stage 4: a required Varchar (prior rows back-filled with empty
  // strings) and a required int (back-filled with zeros). May only
  // occasionally be useful, but must work to prevent vector
  // corruption if some reader goes this route.
  rootWriter.addColumn(SchemaBuilder.columnSchema("d", MinorType.VARCHAR, DataMode.REQUIRED));
  rootWriter.addColumn(SchemaBuilder.columnSchema("e", MinorType.INT, DataMode.REQUIRED));
  for (int rep = 0; rep < 2; rep++) {
    rootWriter.start();
    rowId++;
    rootWriter.scalar(0).setString("a_" + rowId);
    rootWriter.scalar(1).setInt(rowId);
    rootWriter.scalar(2).setString("c_" + rowId);
    rootWriter.scalar(3).setString("d_" + rowId);
    rootWriter.scalar(4).setInt(rowId * 10);
    rootWriter.save();
  }

  // Stage 5: a repeated Varchar — now two offset vectors need
  // back-filling.
  rootWriter.addColumn(SchemaBuilder.columnSchema("f", MinorType.VARCHAR, DataMode.REPEATED));
  for (int rep = 0; rep < 2; rep++) {
    rootWriter.start();
    rowId++;
    rootWriter.scalar(0).setString("a_" + rowId);
    rootWriter.scalar(1).setInt(rowId);
    rootWriter.scalar(2).setString("c_" + rowId);
    rootWriter.scalar(3).setString("d_" + rowId);
    rootWriter.scalar(4).setInt(rowId * 10);
    ScalarWriter arrayWriter = rootWriter.column(5).array().scalar();
    arrayWriter.setString("f_" + rowId + "-1");
    arrayWriter.setString("f_" + rowId + "-2");
    rootWriter.save();
  }

  // Harvest and compare against the fully-evolved schema.
  RowSet actual = fixture.wrap(rsLoader.harvest());
  BatchSchema expectedSchema = new SchemaBuilder()
      .add("a", MinorType.VARCHAR)
      .addNullable("b", MinorType.INT)
      .addNullable("c", MinorType.VARCHAR)
      .add("d", MinorType.VARCHAR)
      .add("e", MinorType.INT)
      .addArray("f", MinorType.VARCHAR)
      .build();
  SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
      .addRow("a_1", null, null, "", 0, strArray())
      .addRow("a_2", null, null, "", 0, strArray())
      .addRow("a_3", 3, null, "", 0, strArray())
      .addRow("a_4", 4, null, "", 0, strArray())
      .addRow("a_5", 5, "c_5", "", 0, strArray())
      .addRow("a_6", 6, "c_6", "", 0, strArray())
      .addRow("a_7", 7, "c_7", "d_7", 70, strArray())
      .addRow("a_8", 8, "c_8", "d_8", 80, strArray())
      .addRow("a_9", 9, "c_9", "d_9", 90, strArray("f_9-1", "f_9-2"))
      .addRow("a_10", 10, "c_10", "d_10", 100, strArray("f_10-1", "f_10-2"))
      .build();
  new RowSetComparison(expected).verifyAndClearAll(actual);
  rsLoader.close();
}
Usage example of org.apache.drill.exec.physical.rowSet.RowSetLoader in the project drill by axbaretto:
class TestResultSetSchemaChange, method testSchemaChangeWithOverflow.
/**
 * Test a schema change on the row that overflows. A column added
 * after overflow appears as a schema change in the following batch.
 * This is fine: we are essentially time-shifting, pretending the
 * overflow row was written in the next batch (which, in fact, it
 * is — that is what overflow means).
 */
@Test
public void testSchemaChangeWithOverflow() {
  ResultSetOptions options = new OptionBuilder()
      .setRowCountLimit(ValueVector.MAX_ROW_COUNT)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();
  rootWriter.addColumn(SchemaBuilder.columnSchema("a", MinorType.VARCHAR, DataMode.REQUIRED));

  rsLoader.startBatch();
  byte[] cell = new byte[512];
  Arrays.fill(cell, (byte) 'X');
  int writtenRows = 0;
  while (!rootWriter.isFull()) {
    rootWriter.start();
    rootWriter.scalar(0).setBytes(cell, cell.length);
    if (rootWriter.isFull()) {
      // Just hit overflow: add columns while sitting on the
      // overflow row.
      rootWriter.addColumn(SchemaBuilder.columnSchema("b", MinorType.INT, DataMode.OPTIONAL));
      rootWriter.scalar(1).setInt(writtenRows);
      // A Varchar ensures its offset fiddling is done properly.
      rootWriter.addColumn(SchemaBuilder.columnSchema("c", MinorType.VARCHAR, DataMode.OPTIONAL));
      rootWriter.scalar(2).setString("c-" + writtenRows);
      // Even a required column is allowed at this point. (Not
      // intuitively obvious that this should work; prior rows are
      // back-filled with zeros.)
      rootWriter.addColumn(SchemaBuilder.columnSchema("d", MinorType.INT, DataMode.REQUIRED));
    }
    rootWriter.save();
    writtenRows++;
  }

  // The harvested first batch must show only the original column.
  BatchSchema expectedSchema = new SchemaBuilder()
      .add("a", MinorType.VARCHAR)
      .build();
  RowSet result = fixture.wrap(rsLoader.harvest());
  assertTrue(result.batchSchema().isEquivalent(expectedSchema));
  assertEquals(writtenRows - 1, result.rowCount());
  result.clear();
  assertEquals(1, rsLoader.schemaVersion());

  // Double check: a required column can still be added after starting
  // the next batch (no longer in the overflow state).
  rsLoader.startBatch();
  rootWriter.addColumn(SchemaBuilder.columnSchema("e", MinorType.INT, DataMode.REQUIRED));

  // The next batch starts with the overflow row, including the
  // columns added at the end of the previous batch, after overflow.
  result = fixture.wrap(rsLoader.harvest());
  assertEquals(5, rsLoader.schemaVersion());
  assertEquals(1, result.rowCount());
  expectedSchema = new SchemaBuilder(expectedSchema)
      .addNullable("b", MinorType.INT)
      .addNullable("c", MinorType.VARCHAR)
      .add("d", MinorType.INT)
      .add("e", MinorType.INT)
      .build();
  assertTrue(result.batchSchema().isEquivalent(expectedSchema));
  RowSetReader reader = result.reader();
  reader.next();
  assertEquals(writtenRows - 1, reader.scalar(1).getInt());
  assertEquals("c-" + (writtenRows - 1), reader.scalar(2).getString());
  assertEquals(0, reader.scalar("d").getInt());
  assertEquals(0, reader.scalar("e").getInt());
  result.clear();
  rsLoader.close();
}
Aggregations