Use of org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder in project Drill by Apache.
The class TestScanOrchestratorLateSchema, method testLateSchemaWildcard.
/**
* Test SELECT * from a late-schema table of (a, b).
* A contrasting early-schema sketch follows this test.
*/
@Test
public void testLateSchemaWildcard() {
ScanOrchestratorBuilder builder = new MockScanBuilder();
// SELECT * ...
builder.projection(RowSetTestUtils.projectAll());
ScanSchemaOrchestrator orchestrator = new ScanSchemaOrchestrator(fixture.allocator(), builder);
// ... FROM table
ReaderSchemaOrchestrator reader = orchestrator.startReader();
// Create the table loader
ResultSetLoader loader = reader.makeTableLoader(null);
// Late schema: no schema provided up front.
assertFalse(reader.hasSchema());
// Start a batch and discover a schema: (a, b)
reader.startBatch();
RowSetLoader writer = loader.writer();
writer.addColumn(SchemaBuilder.columnSchema("a", MinorType.INT, DataMode.REQUIRED));
writer.addColumn(SchemaBuilder.columnSchema("b", MinorType.VARCHAR, DataMode.REQUIRED));
// Create a batch of data using the discovered schema
writer.addRow(1, "fred").addRow(2, "wilma");
reader.endBatch();
// Verify
TupleMetadata tableSchema = new SchemaBuilder().add("a", MinorType.INT).add("b", MinorType.VARCHAR).buildSchema();
SingleRowSet expected = fixture.rowSetBuilder(tableSchema).addRow(1, "fred").addRow(2, "wilma").build();
new RowSetComparison(expected).verifyAndClearAll(fixture.wrap(orchestrator.output()));
orchestrator.close();
}
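For contrast, a minimal early-schema sketch (an assumption, not part of TestScanOrchestratorLateSchema): the same rows written against the same reader fixture, but with the schema passed to makeTableLoader() up front instead of being discovered while writing.
TupleMetadata tableSchema = new SchemaBuilder().add("a", MinorType.INT).add("b", MinorType.VARCHAR).buildSchema();
// Early schema: the loader knows the columns before any rows are written.
ResultSetLoader loader = reader.makeTableLoader(tableSchema);
reader.startBatch();
loader.writer().addRow(1, "fred").addRow(2, "wilma");
reader.endBatch();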
Use of org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder in project Drill by Apache.
The class TestScanOrchestratorImplicitColumns, method testMixture.
/**
* Test SELECT dir0, b, suffix, c FROM table(a, b)
* Full combination of metadata, table, and null columns.
* The sketch after this test shows how dir0 and suffix relate to the file path.
*/
@Test
public void testMixture() {
ScanOrchestratorBuilder builder = new MockScanBuilder();
File file = dirTestWatcher.copyResourceToRoot(Paths.get("multilevel", "csv", "1994", "Q1", "orders_94_q1.csv"), Paths.get("x", "y", "z.csv"));
Path filePath = new Path(file.toURI().getPath());
ImplicitColumnManager metadataManager = new ImplicitColumnManager(fixture.getOptionManager(), standardOptions(filePath));
builder.withImplicitColumns(metadataManager);
// SELECT dir0, b, suffix, c ...
builder.projection(RowSetTestUtils.projectList("dir0", "b", "suffix", "c"));
ScanSchemaOrchestrator scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);
// ... FROM file
metadataManager.startFile(filePath);
ReaderSchemaOrchestrator reader = scanner.startReader();
// file schema (a, b)
TupleMetadata tableSchema = new SchemaBuilder().add("a", MinorType.INT).add("b", MinorType.VARCHAR).buildSchema();
// Create the table loader
ResultSetLoader loader = reader.makeTableLoader(tableSchema);
TupleMetadata expectedSchema = new SchemaBuilder().addNullable("dir0", MinorType.VARCHAR).add("b", MinorType.VARCHAR).add("suffix", MinorType.VARCHAR).addNullable("c", MinorType.INT).buildSchema();
// Create a batch of data.
reader.startBatch();
loader.writer().addRow(1, "fred").addRow(2, "wilma");
reader.endBatch();
// Verify
SingleRowSet expected = fixture.rowSetBuilder(expectedSchema).addRow("x", "fred", "csv", null).addRow("x", "wilma", "csv", null).build();
RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
scanner.close();
}
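The projection above mixes an implicit partition column (dir0), a table column (b), a file metadata column (suffix), and a column absent from the table (c), which is materialized as all-null nullable INT. Purely as an illustration (this is plain JDK code, not Drill's ImplicitColumnManager, and the scan root is hypothetical), the dir0 and suffix values in the expected batch relate to the copied file x/y/z.csv as follows:
// Fully qualified to avoid clashing with org.apache.hadoop.fs.Path used above.
java.nio.file.Path root = java.nio.file.Paths.get("/scan/root"); // hypothetical scan root
java.nio.file.Path rel = root.relativize(root.resolve(java.nio.file.Paths.get("x", "y", "z.csv"))); // x/y/z.csv
String dir0 = rel.getName(0).toString(); // "x": first directory level below the root
String fileName = rel.getFileName().toString(); // "z.csv"
String suffix = fileName.substring(fileName.lastIndexOf('.') + 1); // "csv"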
Use of org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder in project Drill by Apache.
The class TestColumnsArray, method buildScanner.
private MockScanner buildScanner(List<SchemaPath> projList) {
MockScanner mock = new MockScanner();
// Set up the file metadata manager
Path filePath = new Path("hdfs:///w/x/y/z.csv");
ImplicitColumnManager metadataManager = new ImplicitColumnManager(fixture.getOptionManager(), standardOptions(filePath));
// ...and the columns array manager
ColumnsArrayManager colsManager = new ColumnsArrayManager(false);
// Configure the schema orchestrator
ScanOrchestratorBuilder builder = new MockScanBuilder();
builder.withImplicitColumns(metadataManager);
builder.addParser(colsManager.projectionParser());
builder.addResolver(colsManager.resolver());
// SELECT <proj list> ...
builder.projection(projList);
mock.scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);
// FROM z.csv
metadataManager.startFile(filePath);
mock.reader = mock.scanner.startReader();
// Table schema (columns: VARCHAR[])
TupleMetadata tableSchema = new SchemaBuilder().addArray(ColumnsScanFramework.COLUMNS_COL, MinorType.VARCHAR).buildSchema();
mock.loader = mock.reader.makeTableLoader(tableSchema);
// First empty batch
mock.reader.defineSchema();
return mock;
}
Use of org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder in project Drill by Apache.
The class TestColumnsArray, method buildScan.
private ScanSchemaOrchestrator buildScan(boolean requireColumns, List<SchemaPath> cols) {
// Set up the columns array manager
ColumnsArrayManager colsManager = new ColumnsArrayManager(requireColumns);
// Configure the schema orchestrator
ScanOrchestratorBuilder builder = new ColumnsScanBuilder();
builder.addParser(colsManager.projectionParser());
builder.addResolver(colsManager.resolver());
builder.projection(cols);
return new ScanSchemaOrchestrator(fixture.allocator(), builder);
}
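A hedged usage sketch for buildScan(): the projection list, row values, and verification step are assumptions rather than code from TestColumnsArray, but the calls follow the same pattern as buildScanner() above. Each row writes one repeated-VARCHAR value into the columns column.
ScanSchemaOrchestrator scanner = buildScan(true, RowSetTestUtils.projectList(ColumnsScanFramework.COLUMNS_COL));
ReaderSchemaOrchestrator reader = scanner.startReader();
TupleMetadata tableSchema = new SchemaBuilder().addArray(ColumnsScanFramework.COLUMNS_COL, MinorType.VARCHAR).buildSchema();
ResultSetLoader loader = reader.makeTableLoader(tableSchema);
reader.startBatch();
// One row: the whole line becomes a single VARCHAR array in `columns`.
loader.writer().addRow(new Object[] { new String[] { "fred", "wilma" } });
reader.endBatch();
// Verify scanner.output() as in the examples above, then release resources.
scanner.close();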
Use of org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder in project Drill by Apache.
The class TestSchemaSmoothing, method testWildcardSmoothing.
/**
* A SELECT * query uses the schema of the table as the output schema.
* This is trivial when the scanner has one table. But, if two or more
* tables occur, then things get interesting. The first table sets the
* schema. The second table then has:
* <ul>
* <li>The same schema, trivial case.</li>
* <li>A subset of the first table. The type of the "missing" column
* from the first table is used for a null column in the second table.</li>
* <li>A superset or disjoint set of the first schema. This triggers a hard schema
* change.</li>
* </ul>
* <p>
* It is an open question whether previous columns should be preserved on
* a hard reset. For now, the code implements, and this test verifies, that a
* hard reset clears the "memory" of prior schemas.
*/
@Test
public void testWildcardSmoothing() {
ScanOrchestratorBuilder builder = new MockScanBuilder();
builder.enableSchemaSmoothing(true);
builder.projection(RowSetTestUtils.projectAll());
final ScanSchemaOrchestrator projector = new ScanSchemaOrchestrator(fixture.allocator(), builder);
final TupleMetadata firstSchema = new SchemaBuilder().add("a", MinorType.INT).addNullable("b", MinorType.VARCHAR, 10).addNullable("c", MinorType.BIGINT).buildSchema();
final TupleMetadata subsetSchema = new SchemaBuilder().addNullable("b", MinorType.VARCHAR, 10).add("a", MinorType.INT).buildSchema();
final TupleMetadata disjointSchema = new SchemaBuilder().add("a", MinorType.INT).addNullable("b", MinorType.VARCHAR, 10).add("d", MinorType.VARCHAR).buildSchema();
final SchemaTracker tracker = new SchemaTracker();
int schemaVersion;
{
// First table, establishes the baseline
// ... FROM table 1
final ReaderSchemaOrchestrator reader = projector.startReader();
final ResultSetLoader loader = reader.makeTableLoader(firstSchema);
reader.startBatch();
loader.writer().addRow(10, "fred", 110L).addRow(20, "wilma", 110L);
reader.endBatch();
tracker.trackSchema(projector.output());
schemaVersion = tracker.schemaVersion();
final SingleRowSet expected = fixture.rowSetBuilder(firstSchema).addRow(10, "fred", 110L).addRow(20, "wilma", 110L).build();
new RowSetComparison(expected).verifyAndClearAll(fixture.wrap(projector.output()));
}
{
// Second table, same schema, the trivial case
// ... FROM table 2
final ReaderSchemaOrchestrator reader = projector.startReader();
final ResultSetLoader loader = reader.makeTableLoader(firstSchema);
reader.startBatch();
loader.writer().addRow(70, "pebbles", 770L).addRow(80, "hoppy", 880L);
reader.endBatch();
tracker.trackSchema(projector.output());
assertEquals(schemaVersion, tracker.schemaVersion());
final SingleRowSet expected = fixture.rowSetBuilder(firstSchema).addRow(70, "pebbles", 770L).addRow(80, "hoppy", 880L).build();
new RowSetComparison(expected).verifyAndClearAll(fixture.wrap(projector.output()));
}
{
// Third table: subset schema of first two
// ... FROM table 3
final ReaderSchemaOrchestrator reader = projector.startReader();
final ResultSetLoader loader = reader.makeTableLoader(subsetSchema);
reader.startBatch();
loader.writer().addRow("bambam", 30).addRow("betty", 40);
reader.endBatch();
tracker.trackSchema(projector.output());
assertEquals(schemaVersion, tracker.schemaVersion());
final SingleRowSet expected = fixture.rowSetBuilder(firstSchema).addRow(30, "bambam", null).addRow(40, "betty", null).build();
new RowSetComparison(expected).verifyAndClearAll(fixture.wrap(projector.output()));
}
{
// Fourth table: disjoint schema, causes a schema reset
// ... FROM table 4
final ReaderSchemaOrchestrator reader = projector.startReader();
final ResultSetLoader loader = reader.makeTableLoader(disjointSchema);
reader.startBatch();
loader.writer().addRow(50, "dino", "supporting").addRow(60, "barney", "main");
reader.endBatch();
tracker.trackSchema(projector.output());
assertNotEquals(schemaVersion, tracker.schemaVersion());
final SingleRowSet expected = fixture.rowSetBuilder(disjointSchema).addRow(50, "dino", "supporting").addRow(60, "barney", "main").build();
new RowSetComparison(expected).verifyAndClearAll(fixture.wrap(projector.output()));
}
projector.close();
}