Use of org.apache.drill.test.rowSet.RowSetComparison in project Drill by Apache.
The class TestScanOrchestratorLateSchema, method testLateSchemaWildcard.
/**
 * Test SELECT * from a late-schema table of (a, b): the schema is
 * discovered by the reader as it reads, not declared up front.
 */
@Test
public void testLateSchemaWildcard() {
  ScanOrchestratorBuilder builder = new MockScanBuilder();

  // SELECT * ...
  builder.projection(RowSetTestUtils.projectAll());
  ScanSchemaOrchestrator orchestrator = new ScanSchemaOrchestrator(fixture.allocator(), builder);

  // ... FROM table
  ReaderSchemaOrchestrator reader = orchestrator.startReader();

  // Create the table loader
  ResultSetLoader loader = reader.makeTableLoader(null);

  // Late schema: no batch provided up front.
  assertFalse(reader.hasSchema());

  // Start a batch and discover the schema: (a, b)
  reader.startBatch();
  RowSetLoader writer = loader.writer();
  writer.addColumn(SchemaBuilder.columnSchema("a", MinorType.INT, DataMode.REQUIRED));
  writer.addColumn(SchemaBuilder.columnSchema("b", MinorType.VARCHAR, DataMode.REQUIRED));

  // Create a batch of data using the discovered schema
  writer.addRow(1, "fred").addRow(2, "wilma");
  reader.endBatch();

  // Verify
  TupleMetadata tableSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .add("b", MinorType.VARCHAR)
      .buildSchema();
  SingleRowSet expected = fixture.rowSetBuilder(tableSchema)
      .addRow(1, "fred")
      .addRow(2, "wilma")
      .build();
  new RowSetComparison(expected).verifyAndClearAll(fixture.wrap(orchestrator.output()));
  orchestrator.close();
}
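Every example on this page ends with the same verify-and-clear idiom. Drill's test framework also offers a one-line shorthand for it; a minimal sketch of how this test's final comparison could be written, assuming RowSetUtilities from the same org.apache.drill.test.rowSet package is available. The explicit RowSetComparison form above is what the test actually uses.

  // Shorthand equivalent of:
  //   new RowSetComparison(expected).verifyAndClearAll(fixture.wrap(orchestrator.output()));
  // verify() compares the two row sets, then frees the buffers of both.
  RowSetUtilities.verify(expected, fixture.wrap(orchestrator.output()));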
Use of org.apache.drill.test.rowSet.RowSetComparison in project Drill by Apache.
The class TestConstantColumnLoader, method testConstantColumnLoader.
/**
 * Test the constant column loader using two VARCHAR columns, one
 * required and one nullable. The loader writes each column's fixed
 * value into every row of the generated batch.
 */
@Test
public void testConstantColumnLoader() {
  final MajorType aType = MajorType.newBuilder()
      .setMinorType(MinorType.VARCHAR)
      .setMode(DataMode.REQUIRED)
      .build();
  final MajorType bType = MajorType.newBuilder()
      .setMinorType(MinorType.VARCHAR)
      .setMode(DataMode.OPTIONAL)
      .build();
  final List<ConstantColumnSpec> defns = new ArrayList<>();
  defns.add(new DummyColumn("a", aType, "a-value"));
  defns.add(new DummyColumn("b", bType, "b-value"));
  final ResultVectorCacheImpl cache = new ResultVectorCacheImpl(fixture.allocator());
  final ConstantColumnLoader staticLoader = new ConstantColumnLoader(cache, defns);

  // Create a batch of two rows. (Capture the loaded container once;
  // loading a second batch just to verify it would be redundant.)
  final VectorContainer batch = staticLoader.load(2);

  // Verify
  final TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a", aType)
      .add("b", bType)
      .buildSchema();
  final SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
      .addRow("a-value", "b-value")
      .addRow("a-value", "b-value")
      .build();
  new RowSetComparison(expected).verifyAndClearAll(fixture.wrap(batch));
  staticLoader.close();
}
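DummyColumn is a helper defined inside TestConstantColumnLoader and not shown on this page. A hypothetical reconstruction follows, assuming ConstantColumnSpec exposes the column name, materialized schema, and string value; the real helper may differ in detail.

  // Hypothetical sketch of the test's DummyColumn helper.
  private static class DummyColumn implements ConstantColumnLoader.ConstantColumnSpec {
    private final String name;
    private final MaterializedField schema;
    private final String value;

    public DummyColumn(String name, MajorType type, String value) {
      this.name = name;
      this.schema = MaterializedField.create(name, type);
      this.value = value;
    }

    @Override
    public String name() { return name; }

    @Override
    public MaterializedField schema() { return schema; }

    @Override
    public String value() { return value; }
  }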
Use of org.apache.drill.test.rowSet.RowSetComparison in project Drill by Apache.
The class TestScanOperExecLateSchema, method testLateSchemaLifecycle.
/**
 * Most basic test of a reader that discovers its schema as it goes along.
 * The purpose is to validate the core life-cycle steps before trying
 * more complex variations.
 */
@Test
public void testLateSchemaLifecycle() {
  // Create a mock reader that returns two batches:
  // one schema-only, another with data.
  MockLateSchemaReader reader = new MockLateSchemaReader();
  reader.batchLimit = 2;
  reader.returnDataOnFirst = false;

  // Create the scan operator
  ScanFixture scanFixture = simpleFixture(reader);
  ScanOperatorExec scan = scanFixture.scanOp;

  // Standard startup
  assertFalse(reader.openCalled);

  // First batch: build schema. The reader does not help: it returns an
  // empty first batch.
  assertTrue(scan.buildSchema());
  assertTrue(reader.openCalled);
  assertEquals(1, reader.batchCount);
  assertEquals(0, scan.batchAccessor().rowCount());

  // Create the expected result.
  SingleRowSet expected = makeExpected(20);
  RowSetComparison verifier = new RowSetComparison(expected);
  assertEquals(expected.batchSchema(), scan.batchAccessor().schema());

  // Next call: return with data.
  assertTrue(scan.next());
  verifier.verifyAndClearAll(fixture.wrap(scan.batchAccessor().container()));

  // EOF
  assertFalse(scan.next());
  assertTrue(reader.closeCalled);
  assertEquals(0, scan.batchAccessor().rowCount());
  scanFixture.close();
}
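makeExpected(int) is another helper from the test class that does not appear on this page. A plausible sketch, assuming the mock reader produces a two-column (a INT, b VARCHAR) batch whose integer values are shifted by the given offset; the real helper may differ.

  // Hypothetical reconstruction of the test's makeExpected helper.
  private SingleRowSet makeExpected() {
    return makeExpected(0);
  }

  private SingleRowSet makeExpected(int offset) {
    TupleMetadata expectedSchema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .add("b", MinorType.VARCHAR)
        .buildSchema();
    return fixture.rowSetBuilder(expectedSchema)
        .addRow(offset + 10, "fred")
        .addRow(offset + 20, "wilma")
        .build();
  }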
Use of org.apache.drill.test.rowSet.RowSetComparison in project Drill by Apache.
The class TestScanOperExecLateSchema, method testLateSchemaDataOnFirst.
@Test
public void testLateSchemaDataOnFirst() {
  // Create a mock reader that returns a single batch
  // containing both schema and data.
  MockLateSchemaReader reader = new MockLateSchemaReader();
  reader.batchLimit = 1;
  reader.returnDataOnFirst = true;

  // Create the scan operator
  ScanFixture scanFixture = simpleFixture(reader);
  ScanOperatorExec scan = scanFixture.scanOp;

  // Standard startup
  assertFalse(reader.openCalled);

  // First batch: build schema. The reader returns data along with the
  // schema; the scan operator holds that batch and presents an empty,
  // schema-only batch downstream.
  assertTrue(scan.buildSchema());
  assertTrue(reader.openCalled);
  assertEquals(1, reader.batchCount);
  assertEquals(0, scan.batchAccessor().rowCount());
  SingleRowSet expected = makeExpected();
  RowSetComparison verifier = new RowSetComparison(expected);
  assertEquals(expected.batchSchema(), scan.batchAccessor().schema());

  // Next call: return the held data.
  assertTrue(scan.next());
  verifier.verifyAndClearAll(fixture.wrap(scan.batchAccessor().container()));

  // EOF
  assertFalse(scan.next());
  assertTrue(reader.closeCalled);
  assertEquals(0, scan.batchAccessor().rowCount());
  scanFixture.close();
}
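The two scan-operator tests above differ only in how MockLateSchemaReader behaves on its first next() call, controlled by the returnDataOnFirst flag. A hypothetical sketch of that logic, assuming the mock holds a tableLoader obtained when the scan framework opened it; the real mock may differ.

  // Hypothetical sketch of the mock reader's batch logic.
  @Override
  public boolean next() {
    batchCount++;
    if (batchCount > batchLimit) {
      return false;                     // EOF
    }
    RowSetLoader writer = tableLoader.writer();
    if (batchCount == 1) {
      // Late schema: define the columns only on the first call.
      writer.addColumn(SchemaBuilder.columnSchema("a", MinorType.INT, DataMode.REQUIRED));
      writer.addColumn(SchemaBuilder.columnSchema("b", MinorType.VARCHAR, DataMode.REQUIRED));
      if (!returnDataOnFirst) {
        return true;                    // Schema-only first batch
      }
    }
    writer.addRow(10, "fred").addRow(20, "wilma");
    return true;
  }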
Use of org.apache.drill.test.rowSet.RowSetComparison in project Drill by Apache.
The class TestScanBatchWriters, method sanityTest.
@Test
public void sanityTest() throws Exception {
  Scan scanConfig = new AbstractSubScan("bob") {
    @Override
    public String getOperatorType() {
      return "";
    }
  };
  OperatorContext opContext = fixture.newOperatorContext(scanConfig);

  // Setup: normally done by ScanBatch
  VectorContainer container = new VectorContainer(fixture.allocator());
  OutputMutator output = new ScanBatch.Mutator(opContext, fixture.allocator(), container);
  DrillBuf buffer = opContext.getManagedBuffer();
  try (VectorContainerWriter writer = new VectorContainerWriter(output)) {
    // Per-batch
    writer.allocate();
    writer.reset();
    BaseWriter.MapWriter map = writer.rootAsMap();

    // Write one record: (10, "Fred", [100, 110, 120])
    map.integer("a").writeInt(10);
    byte[] bytes = "Fred".getBytes("UTF-8");
    buffer.setBytes(0, bytes, 0, bytes.length);
    map.varChar("b").writeVarChar(0, bytes.length, buffer);
    try (ListWriter list = map.list("c")) {
      list.startList();
      list.integer().writeInt(100);
      list.integer().writeInt(110);
      list.integer().writeInt(120);
      list.endList();

      // Write another record: (20, "Wilma", [])
      writer.setPosition(1);
      map.integer("a").writeInt(20);
      bytes = "Wilma".getBytes("UTF-8");
      buffer.setBytes(0, bytes, 0, bytes.length);
      map.varChar("b").writeVarChar(0, bytes.length, buffer);
      writer.setValueCount(2);

      // Wrap-up: normally done by ScanBatch
      container.setRecordCount(2);
      container.buildSchema(SelectionVectorMode.NONE);
      RowSet rowSet = fixture.wrap(container);

      // Expected result
      TupleMetadata schema = new SchemaBuilder()
          .addNullable("a", MinorType.INT)
          .addNullable("b", MinorType.VARCHAR)
          .addArray("c", MinorType.INT)
          .buildSchema();
      RowSet expected = fixture.rowSetBuilder(schema)
          .addRow(10, "Fred", new int[] { 100, 110, 120 })
          .addRow(20, "Wilma", null)
          .build();
      new RowSetComparison(expected).verifyAndClearAll(rowSet);
    }
  } finally {
    opContext.close();
  }
}
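Note that verifyAndClearAll both performs the comparison and releases the buffers of the expected and actual row sets, which is why none of these tests clear them explicitly. A test that builds a row set without running a comparison should release it itself; a minimal sketch, assuming the fixture and schema from the test above.

  RowSet expected = fixture.rowSetBuilder(schema)
      .addRow(10, "Fred", new int[] { 100, 110, 120 })
      .build();
  try {
    // ... assertions that inspect but do not consume the row set ...
  } finally {
    expected.clear();  // release the underlying vectors to avoid allocator leaks
  }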