Use of org.apache.drill.exec.physical.impl.scan.columns.ColumnsArrayManager in the Apache Drill project: class TestColumnsArray, method buildScanner.
private MockScanner buildScanner(List<SchemaPath> projList) {
  MockScanner mock = new MockScanner();

  // Set up the file metadata manager
  Path filePath = new Path("hdfs:///w/x/y/z.csv");
  ImplicitColumnManager metadataManager = new ImplicitColumnManager(
      fixture.getOptionManager(), standardOptions(filePath));

  // ...and the columns array manager
  ColumnsArrayManager colsManager = new ColumnsArrayManager(false);

  // Configure the schema orchestrator
  ScanOrchestratorBuilder builder = new MockScanBuilder();
  builder.withImplicitColumns(metadataManager);
  builder.addParser(colsManager.projectionParser());
  builder.addResolver(colsManager.resolver());

  // SELECT <proj list> ...
  builder.projection(projList);
  mock.scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);

  // FROM z.csv
  metadataManager.startFile(filePath);
  mock.reader = mock.scanner.startReader();

  // Table schema (columns: VARCHAR[])
  TupleMetadata tableSchema = new SchemaBuilder()
      .addArray(ColumnsScanFramework.COLUMNS_COL, MinorType.VARCHAR)
      .buildSchema();
  mock.loader = mock.reader.makeTableLoader(tableSchema);

  // First empty batch
  mock.reader.defineSchema();
  return mock;
}
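A minimal sketch of how a test might invoke buildScanner. This is an assumption-laden illustration, not code from this file: RowSetTestUtils.projectList is assumed to be Drill's test helper that turns column names into a List<SchemaPath>, and the MockScanner field names follow the assignments above.

// Hypothetical caller: project only the columns[] array.
MockScanner mock = buildScanner(
    RowSetTestUtils.projectList(ColumnsScanFramework.COLUMNS_COL));

// The mock now holds the three cooperating objects a test would exercise:
//   mock.scanner - the ScanSchemaOrchestrator driving the scan
//   mock.reader  - the per-reader orchestrator started for z.csv
//   mock.loader  - the result set loader bound to the columns[] table schema

// Release buffers once the test is done.
mock.scanner.close();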
Use of org.apache.drill.exec.physical.impl.scan.columns.ColumnsArrayManager in the Apache Drill project: class TestColumnsArray, method buildScan.
private ScanSchemaOrchestrator buildScan(boolean requireColumns, List<SchemaPath> cols) {

  // Set up the columns array manager
  ColumnsArrayManager colsManager = new ColumnsArrayManager(requireColumns);

  // Configure the schema orchestrator
  ScanOrchestratorBuilder builder = new ColumnsScanBuilder();
  builder.addParser(colsManager.projectionParser());
  builder.addResolver(colsManager.resolver());
  builder.projection(cols);
  return new ScanSchemaOrchestrator(fixture.allocator(), builder);
}
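A hedged sketch of a negative test built on buildScan: with requireColumns set to true, a reader whose table schema omits the columns[] array should be rejected. RowSetTestUtils.projectList is assumed to be the Drill test helper described above, and both the concrete exception type and the exact point where the failure surfaces (here assumed to be makeTableLoader) are assumptions, not taken from this file.

// Require the columns[] array, then offer a table schema that lacks it.
ScanSchemaOrchestrator scanner = buildScan(true,
    RowSetTestUtils.projectList(ColumnsScanFramework.COLUMNS_COL));

// Table schema with an ordinary VARCHAR column and no columns[] array.
TupleMetadata tableSchema = new SchemaBuilder()
    .add("a", MinorType.VARCHAR)
    .buildSchema();

try {
  scanner.startReader().makeTableLoader(tableSchema);
  fail("expected the scan to reject a table schema without a columns[] array");
} catch (Exception e) {
  // Expected; the concrete exception type depends on the framework's validation.
} finally {
  scanner.close();
}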