Use of org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator in project drill by apache.
From class TestColumnsArray, method buildScan:
private ScanSchemaOrchestrator buildScan(boolean requireColumns, List<SchemaPath> cols) {

  // Set up the columns array manager
  ColumnsArrayManager colsManager = new ColumnsArrayManager(requireColumns);

  // Configure the scan orchestrator with the columns-array parser and
  // resolver, plus the requested projection list.
  ScanOrchestratorBuilder builder = new ColumnsScanBuilder();
  builder.addParser(colsManager.projectionParser());
  builder.addResolver(colsManager.resolver());
  builder.projection(cols);
  return new ScanSchemaOrchestrator(fixture.allocator(), builder);
}
Use of org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator in project drill by apache.
From class TestColumnsArray, method testMissingColumnsColumn:
/**
 * Test attempting to use the columns array with an early schema whose
 * column types are not compatible with a VARCHAR array.
 */
@Test
public void testMissingColumnsColumn() {
  ScanSchemaOrchestrator scanner = buildScan(true,
      RowSetTestUtils.projectList(ColumnsScanFramework.COLUMNS_COL));

  // The table schema offers a plain VARCHAR column "a", not the
  // array column that the columns framework requires.
  TupleMetadata tableSchema = new SchemaBuilder()
      .add("a", MinorType.VARCHAR)
      .buildSchema();

  try {
    ReaderSchemaOrchestrator reader = scanner.startReader();
    reader.makeTableLoader(tableSchema);
    reader.defineSchema();
    fail();
  } catch (IllegalStateException e) {
    // Expected: the schema is incompatible with the columns array
  }
  scanner.close();
}
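For contrast, here is a minimal sketch of the compatible case, reusing the buildScan() helper above. It assumes the columns-array framework expects the column named by ColumnsScanFramework.COLUMNS_COL to be a repeated (array) VARCHAR; with that shape, defineSchema() should succeed rather than throw.

// Sketch (assumption): the columns-array contract is satisfied by a
// repeated VARCHAR column named by ColumnsScanFramework.COLUMNS_COL.
TupleMetadata columnsSchema = new SchemaBuilder()
    .addArray(ColumnsScanFramework.COLUMNS_COL, MinorType.VARCHAR)
    .buildSchema();

ScanSchemaOrchestrator scanner = buildScan(true,
    RowSetTestUtils.projectList(ColumnsScanFramework.COLUMNS_COL));
ReaderSchemaOrchestrator reader = scanner.startReader();
reader.makeTableLoader(columnsSchema);
reader.defineSchema(); // No IllegalStateException expected here
scanner.close();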
Use of org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator in project drill by apache.
From class TestScanOrchestratorEarlySchema, method testTypeSmoothing:
/**
 * Test the ability of the scan orchestrator to "smooth" out schema changes
 * by reusing the type from a previous reader, if known. That is,
 * given three readers:<br>
 * (a, b)<br>
 * (a)<br>
 * (a, b)<br>
 * Then the type of column b should be preserved for the second reader,
 * which does not include b. This works because b is nullable: b's type
 * from the first reader is used for the empty column, rather than the
 * usual nullable INT.
 * <p>
 * Detailed testing of type matching for "missing" columns is done
 * in {@link #testNullColumnLoader()}.
 * <p>
 * As a side effect, this also verifies that two identical table schemas
 * (here separated by a different table) result in no schema change.
 */
@Test
public void testTypeSmoothing() {
  ScanOrchestratorBuilder builder = new MockScanBuilder();

  // SELECT a, b ...
  builder.projection(RowSetTestUtils.projectList("a", "b"));
  ScanSchemaOrchestrator scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);

  // File schema (a, b)
  TupleMetadata twoColSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .addNullable("b", MinorType.VARCHAR, 10)
      .buildSchema();

  SchemaTracker tracker = new SchemaTracker();
  int schemaVersion;
  {
    // ... FROM table 1
    ReaderSchemaOrchestrator reader = scanner.startReader();
    ResultSetLoader loader = reader.makeTableLoader(twoColSchema);

    // Projection of (a, b) to (a, b)
    reader.startBatch();
    loader.writer()
        .addRow(10, "fred")
        .addRow(20, "wilma");
    reader.endBatch();

    tracker.trackSchema(scanner.output());
    schemaVersion = tracker.schemaVersion();

    SingleRowSet expected = fixture.rowSetBuilder(twoColSchema)
        .addRow(10, "fred")
        .addRow(20, "wilma")
        .build();
    RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
  }
  {
    // ... FROM table 2
    ReaderSchemaOrchestrator reader = scanner.startReader();

    // File schema (a)
    TupleMetadata oneColSchema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .buildSchema();

    // Projection of (a) to (a, b), reusing b's type from table 1.
    ResultSetLoader loader = reader.makeTableLoader(oneColSchema);

    reader.startBatch();
    loader.writer()
        .addRow(30)
        .addRow(40);
    reader.endBatch();

    // The output schema is unchanged, so the version should not bump.
    tracker.trackSchema(scanner.output());
    assertEquals(schemaVersion, tracker.schemaVersion());

    SingleRowSet expected = fixture.rowSetBuilder(twoColSchema)
        .addRow(30, null)
        .addRow(40, null)
        .build();
    RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
  }
  {
    // ... FROM table 3
    ReaderSchemaOrchestrator reader = scanner.startReader();

    // Projection of (a, b) to (a, b), reusing b's type yet again.
    ResultSetLoader loader = reader.makeTableLoader(twoColSchema);

    reader.startBatch();
    loader.writer()
        .addRow(50, "dino")
        .addRow(60, "barney");
    reader.endBatch();

    tracker.trackSchema(scanner.output());
    assertEquals(schemaVersion, tracker.schemaVersion());

    SingleRowSet expected = fixture.rowSetBuilder(twoColSchema)
        .addRow(50, "dino")
        .addRow(60, "barney")
        .build();
    RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
  }
  scanner.close();
}
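A sketch of the nullable requirement noted in the Javadoc above. The assumption: the orchestrator can reuse a prior column type for a missing column only when that type can hold nulls; a required column could not be filled for a reader that lacks it, so the missing column would instead fall back to the usual nullable INT.

// Smoothing can reuse b's type here because b is nullable.
TupleMetadata smoothable = new SchemaBuilder()
    .add("a", MinorType.INT)
    .addNullable("b", MinorType.VARCHAR, 10)
    .buildSchema();

// Sketch (assumption): with a required b, rows from a reader that
// lacks b would need null values, so b's type cannot be reused.
TupleMetadata notSmoothable = new SchemaBuilder()
    .add("a", MinorType.INT)
    .add("b", MinorType.VARCHAR, 10)
    .buildSchema();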
Use of org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator in project drill by apache.
From class TestScanOrchestratorEarlySchema, method testModeSmoothing:
@Test
public void testModeSmoothing() {
  ScanOrchestratorBuilder builder = new MockScanBuilder();
  builder.enableSchemaSmoothing(true);
  builder.projection(RowSetTestUtils.projectList("a"));
  ScanSchemaOrchestrator scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);

  // Most general schema: nullable, with precision.
  TupleMetadata schema1 = new SchemaBuilder()
      .addNullable("a", MinorType.VARCHAR, 10)
      .buildSchema();

  SchemaTracker tracker = new SchemaTracker();
  int schemaVersion;
  {
    // Table 1: most permissive type.
    ReaderSchemaOrchestrator reader = scanner.startReader();
    ResultSetLoader loader = reader.makeTableLoader(schema1);

    // Create a batch
    reader.startBatch();
    loader.writer()
        .addRow("fred")
        .addRow("wilma");
    reader.endBatch();

    tracker.trackSchema(scanner.output());
    schemaVersion = tracker.schemaVersion();

    // Verify
    SingleRowSet expected = fixture.rowSetBuilder(schema1)
        .addRow("fred")
        .addRow("wilma")
        .build();
    RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
    scanner.closeReader();
  }
  {
    // Table 2: required mode; smoothing keeps the nullable version.
    TupleMetadata schema2 = new SchemaBuilder()
        .add("a", MinorType.VARCHAR, 10)
        .buildSchema();
    ReaderSchemaOrchestrator reader = scanner.startReader();
    ResultSetLoader loader = reader.makeTableLoader(schema2);

    // Create a batch
    reader.startBatch();
    loader.writer()
        .addRow("barney")
        .addRow("betty");
    reader.endBatch();

    // Verify, using the persistent schema.
    tracker.trackSchema(scanner.output());
    assertEquals(schemaVersion, tracker.schemaVersion());
    SingleRowSet expected = fixture.rowSetBuilder(schema1)
        .addRow("barney")
        .addRow("betty")
        .build();
    RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
    scanner.closeReader();
  }
  {
    // Table 3: narrower precision; smoothing keeps the wider version.
    TupleMetadata schema3 = new SchemaBuilder()
        .add("a", MinorType.VARCHAR, 5)
        .buildSchema();
    ReaderSchemaOrchestrator reader = scanner.startReader();
    ResultSetLoader loader = reader.makeTableLoader(schema3);

    // Create a batch
    reader.startBatch();
    loader.writer()
        .addRow("bam-bam")
        .addRow("pebbles");
    reader.endBatch();

    // Verify, using the persistent schema.
    tracker.trackSchema(scanner.output());
    assertEquals(schemaVersion, tracker.schemaVersion());
    SingleRowSet expected = fixture.rowSetBuilder(schema1)
        .addRow("bam-bam")
        .addRow("pebbles")
        .build();
    RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
    scanner.closeReader();
  }
  scanner.close();
}
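The three tables above exercise what amounts to a "most general type" rule: a required column can be presented as nullable, and a narrower precision fits within a wider one, so all three readers map onto schema1 and the schema version never changes. A side-by-side sketch of the three shapes, using only the builder calls from the test above:

// Most general: nullable VARCHAR with precision 10 (schema1 above).
TupleMetadata general = new SchemaBuilder()
    .addNullable("a", MinorType.VARCHAR, 10)
    .buildSchema();

// Required mode: coercible to the nullable version above.
TupleMetadata required = new SchemaBuilder()
    .add("a", MinorType.VARCHAR, 10)
    .buildSchema();

// Narrower precision: values fit within the wider precision 10.
TupleMetadata narrow = new SchemaBuilder()
    .add("a", MinorType.VARCHAR, 5)
    .buildSchema();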
Use of org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator in project drill by apache.
From class TestScanOrchestratorEarlySchema, method testEarlySchemaWildcard:
/**
* Test SELECT * from an early-schema table of (a, b)
*/
@Test
public void testEarlySchemaWildcard() {
  ScanOrchestratorBuilder builder = new MockScanBuilder();

  // SELECT * ...
  builder.projection(RowSetTestUtils.projectAll());
  ScanSchemaOrchestrator scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);

  // ... FROM table
  ReaderSchemaOrchestrator reader = scanner.startReader();

  // File schema (a, b)
  TupleMetadata tableSchema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .add("b", MinorType.VARCHAR)
      .buildSchema();

  // Create the table loader
  ResultSetLoader loader = reader.makeTableLoader(tableSchema);

  // Simulate a first reader in a scan that can provide an
  // empty batch to define the schema.
  {
    reader.defineSchema();
    SingleRowSet expected = fixture.rowSetBuilder(tableSchema).build();
    assertNotNull(scanner.output());
    RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
  }

  // Create a batch of data.
  reader.startBatch();
  loader.writer()
      .addRow(1, "fred")
      .addRow(2, "wilma");
  reader.endBatch();

  // Verify
  {
    SingleRowSet expected = fixture.rowSetBuilder(tableSchema)
        .addRow(1, "fred")
        .addRow(2, "wilma")
        .build();
    RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
  }

  // Second batch.
  reader.startBatch();
  loader.writer()
      .addRow(3, "barney")
      .addRow(4, "betty");
  reader.endBatch();

  // Verify
  {
    SingleRowSet expected = fixture.rowSetBuilder(tableSchema)
        .addRow(3, "barney")
        .addRow(4, "betty")
        .build();
    RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
  }

  // Explicit reader close. (The other tests close the reader
  // implicitly when the scanner closes.)
  scanner.closeReader();
  scanner.close();
}
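For comparison with the wildcard, a minimal sketch of an explicit projection over the same early-schema table, using only API calls that appear in the examples above: project just column a, so the output batch should contain a alone even though the loader also receives b.

// Sketch: SELECT a ... FROM an early-schema (a, b) table.
ScanOrchestratorBuilder builder = new MockScanBuilder();
builder.projection(RowSetTestUtils.projectList("a"));
ScanSchemaOrchestrator scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);

ReaderSchemaOrchestrator reader = scanner.startReader();
TupleMetadata tableSchema = new SchemaBuilder()
    .add("a", MinorType.INT)
    .add("b", MinorType.VARCHAR)
    .buildSchema();
ResultSetLoader loader = reader.makeTableLoader(tableSchema);

reader.startBatch();
loader.writer().addRow(1, "fred").addRow(2, "wilma");
reader.endBatch();

// Expected (assumption): the output schema is (a) only; the unprojected
// column b is written by the reader but dropped from the output batch.
scanner.close();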