
Example 1 with ScanOrchestratorBuilder

Use of org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder in project drill by apache.

From the class TestScanOrchestratorImplicitColumns, method testSelectNone.

/**
 * Test SELECT c FROM table(a, b).
 * The result set will contain one null column (c) per record,
 * but no file data.
 */
@Test
public void testSelectNone() {
    ScanOrchestratorBuilder builder = new MockScanBuilder();
    File file = dirTestWatcher.copyResourceToRoot(Paths.get("multilevel", "csv", "1994", "Q1", "orders_94_q1.csv"), Paths.get("x", "y", "z.csv"));
    Path filePath = new Path(file.toURI().getPath());
    ImplicitColumnManager metadataManager = new ImplicitColumnManager(fixture.getOptionManager(), standardOptions(filePath));
    builder.withImplicitColumns(metadataManager);
    // SELECT c ...
    builder.projection(RowSetTestUtils.projectList("c"));
    ScanSchemaOrchestrator scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);
    // ... FROM file
    metadataManager.startFile(filePath);
    ReaderSchemaOrchestrator reader = scanner.startReader();
    // file schema (a, b)
    TupleMetadata tableSchema = new SchemaBuilder().add("a", MinorType.INT).add("b", MinorType.VARCHAR).buildSchema();
    // Create the table loader
    ResultSetLoader loader = reader.makeTableLoader(tableSchema);
    TupleMetadata expectedSchema = new SchemaBuilder().addNullable("c", MinorType.INT).buildSchema();
    // Create a batch of data.
    reader.startBatch();
    loader.writer().addRow(1, "fred").addRow(2, "wilma");
    reader.endBatch();
    // Verify
    SingleRowSet expected = fixture.rowSetBuilder(expectedSchema).addSingleCol(null).addSingleCol(null).build();
    RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
    scanner.close();
}
Also used : Path(org.apache.hadoop.fs.Path) SchemaPath(org.apache.drill.common.expression.SchemaPath) ImplicitColumnManager(org.apache.drill.exec.physical.impl.scan.file.ImplicitColumnManager) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) ScanOrchestratorBuilder(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) MockScanBuilder(org.apache.drill.exec.physical.impl.scan.ScanTestUtils.MockScanBuilder) File(java.io.File) ScanSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator) ReaderSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ReaderSchemaOrchestrator) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
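
All examples on this page follow the same lifecycle, condensed in the sketch below. It is an outline of the pattern only, assuming the fixture field provided by SubOperatorTest and the helper classes listed in the "Also used" line above; it is not production scan code.

// A minimal sketch of the scan lifecycle these examples exercise.
// Assumes the fixture from SubOperatorTest and the test helpers above.
@Test
public void lifecycleSketch() {
    // 1. Declare the projection (the SELECT list) and build the orchestrator.
    ScanOrchestratorBuilder builder = new MockScanBuilder();
    builder.projection(RowSetTestUtils.projectList("a", "b"));
    ScanSchemaOrchestrator scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);
    // 2. Start a reader and declare the schema this reader discovered.
    ReaderSchemaOrchestrator reader = scanner.startReader();
    TupleMetadata tableSchema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .add("b", MinorType.VARCHAR)
        .buildSchema();
    ResultSetLoader loader = reader.makeTableLoader(tableSchema);
    // 3. Write one or more batches through the loader.
    reader.startBatch();
    loader.writer().addRow(1, "fred");
    reader.endBatch();
    // 4. Downstream consumes scanner.output(); then release resources.
    assertNotNull(scanner.output());
    scanner.closeReader();
    scanner.close();
}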

Example 2 with ScanOrchestratorBuilder

Use of org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder in project drill by apache.

From the class TestScanOrchestratorImplicitColumns, method testEarlySchemaSelectAllAndMetadata.

/**
 * Test SELECT a, b, dir0, suffix FROM table(a, b),
 * where dir0 and suffix are implicit file metadata columns.
 */
@Test
public void testEarlySchemaSelectAllAndMetadata() {
    // Columns the reader cannot supply will be created as nullable VARCHAR
    MajorType nullType = MajorType.newBuilder().setMinorType(MinorType.VARCHAR).setMode(DataMode.OPTIONAL).build();
    ScanOrchestratorBuilder builder = new MockScanBuilder();
    builder.nullType(nullType);
    File file = dirTestWatcher.copyResourceToRoot(Paths.get("multilevel", "csv", "1994", "Q1", "orders_94_q1.csv"), Paths.get("x", "y", "z.csv"));
    Path filePath = new Path(file.toURI().getPath());
    ImplicitColumnManager metadataManager = new ImplicitColumnManager(fixture.getOptionManager(), standardOptions(filePath));
    builder.withImplicitColumns(metadataManager);
    // SELECT a, b, dir0, suffix ...
    builder.projection(RowSetTestUtils.projectList("a", "b", "dir0", "suffix"));
    ScanSchemaOrchestrator scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);
    // ... FROM file
    metadataManager.startFile(filePath);
    ReaderSchemaOrchestrator reader = scanner.startReader();
    // file schema (a, b)
    TupleMetadata tableSchema = new SchemaBuilder().add("a", MinorType.INT).add("b", MinorType.VARCHAR).buildSchema();
    // Create the table loader
    ResultSetLoader loader = reader.makeTableLoader(tableSchema);
    // Verify empty batch.
    reader.defineSchema();
    TupleMetadata expectedSchema = new SchemaBuilder().add("a", MinorType.INT).add("b", MinorType.VARCHAR).addNullable("dir0", MinorType.VARCHAR).add("suffix", MinorType.VARCHAR).buildSchema();
    {
        SingleRowSet expected = fixture.rowSetBuilder(expectedSchema).build();
        assertNotNull(scanner.output());
        RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
    }
    // Create a batch of data.
    reader.startBatch();
    loader.writer().addRow(1, "fred").addRow(2, "wilma");
    reader.endBatch();
    // Verify
    {
        SingleRowSet expected = fixture.rowSetBuilder(expectedSchema).addRow(1, "fred", "x", "csv").addRow(2, "wilma", "x", "csv").build();
        RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
    }
    scanner.close();
}
Also used : Path(org.apache.hadoop.fs.Path) SchemaPath(org.apache.drill.common.expression.SchemaPath) ImplicitColumnManager(org.apache.drill.exec.physical.impl.scan.file.ImplicitColumnManager) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) MajorType(org.apache.drill.common.types.TypeProtos.MajorType) ScanOrchestratorBuilder(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) MockScanBuilder(org.apache.drill.exec.physical.impl.scan.ScanTestUtils.MockScanBuilder) File(java.io.File) ScanSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator) ReaderSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ReaderSchemaOrchestrator) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
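
The difference from Example 1 is the nullType override. In Example 1, the unmatched column c materialized as nullable INT; setting nullType changes that default for the whole scan. A minimal sketch, using only the calls shown in the test above:

// Sketch: set the type used for projected columns no reader supplies.
// Without this override a missing column materializes as OPTIONAL INT
// (see the expected schema in Example 1).
MajorType nullType = MajorType.newBuilder()
    .setMinorType(MinorType.VARCHAR)
    .setMode(DataMode.OPTIONAL)
    .build();
ScanOrchestratorBuilder builder = new MockScanBuilder();
builder.nullType(nullType);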

Example 3 with ScanOrchestratorBuilder

Use of org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder in project drill by apache.

From the class TestScanOrchestratorImplicitColumns, method testWildcardWithMetadata.

/**
 * Resolve a wildcard (SELECT *) projection together with the full
 * set of implicit file metadata and partition columns.
 */
@Test
public void testWildcardWithMetadata() throws IOException {
    File file = dirTestWatcher.copyResourceToRoot(Paths.get("multilevel", "csv", "1994", "Q1", "orders_94_q1.csv"), Paths.get("x", "y", "z.csv"));
    Path filePath = new Path(file.toURI().getPath());
    DrillFileSystem fileSystem = new DrillFileSystem(new Configuration());
    ImplicitColumnManager metadataManager = new ImplicitColumnManager(fixture.getOptionManager(), standardOptions(filePath), fileSystem);
    ScanOrchestratorBuilder builder = new MockScanBuilder();
    builder.withImplicitColumns(metadataManager);
    // SELECT *, plus every implicit file column (fqn, file path, file name,
    // suffix, last-modified time, project-metadata) and partition columns dir0, dir1 ...
    builder.projection(RowSetTestUtils.projectList(SchemaPath.DYNAMIC_STAR, ScanTestUtils.FULLY_QUALIFIED_NAME_COL, ScanTestUtils.FILE_PATH_COL, ScanTestUtils.FILE_NAME_COL, ScanTestUtils.SUFFIX_COL, ScanTestUtils.LAST_MODIFIED_TIME_COL, ScanTestUtils.PROJECT_METADATA_COL, ScanTestUtils.partitionColName(0), ScanTestUtils.partitionColName(1)));
    ScanSchemaOrchestrator scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);
    // ... FROM file
    metadataManager.startFile(filePath);
    ReaderSchemaOrchestrator reader = scanner.startReader();
    // file schema (a, b)
    TupleMetadata tableSchema = new SchemaBuilder().add("a", MinorType.INT).add("b", MinorType.VARCHAR).buildSchema();
    ResultSetLoader loader = reader.makeTableLoader(tableSchema);
    // Create a batch of data.
    reader.startBatch();
    loader.writer().addRow(1, "fred").addRow(2, "wilma");
    reader.endBatch();
    // Verify
    TupleMetadata expectedSchema = ScanTestUtils.expandImplicit(tableSchema, metadataManager, 2);
    String fqn = ImplicitFileColumns.FQN.getValue(filePath);
    String filePathValue = ImplicitFileColumns.FILEPATH.getValue(filePath);
    String fileName = ImplicitFileColumns.FILENAME.getValue(filePath);
    String suffix = ImplicitFileColumns.SUFFIX.getValue(filePath);
    String lastModifiedTime = ColumnExplorer.getImplicitColumnValue(ImplicitInternalFileColumns.LAST_MODIFIED_TIME, filePath, fileSystem);
    String projectMetadata = ColumnExplorer.getImplicitColumnValue(ImplicitInternalFileColumns.USE_METADATA, filePath, fileSystem);
    SingleRowSet expected = fixture.rowSetBuilder(expectedSchema).addRow(1, "fred", fqn, filePathValue, fileName, suffix, lastModifiedTime, projectMetadata, "x", "y").addRow(2, "wilma", fqn, filePathValue, fileName, suffix, lastModifiedTime, projectMetadata, "x", "y").build();
    RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
    scanner.close();
}
Also used : Path(org.apache.hadoop.fs.Path) SchemaPath(org.apache.drill.common.expression.SchemaPath) ImplicitColumnManager(org.apache.drill.exec.physical.impl.scan.file.ImplicitColumnManager) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) Configuration(org.apache.hadoop.conf.Configuration) ScanOrchestratorBuilder(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder) ReaderSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ReaderSchemaOrchestrator) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) DrillFileSystem(org.apache.drill.exec.store.dfs.DrillFileSystem) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) MockScanBuilder(org.apache.drill.exec.physical.impl.scan.ScanTestUtils.MockScanBuilder) File(java.io.File) ScanSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
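
The expected values in this test come from the same helpers Drill uses to populate implicit columns. A brief sketch, reusing only the calls shown above, with filePath and fileSystem as constructed in the test; the value comments are inferred from the copied path x/y/z.csv:

// Sketch: resolving implicit column values for one file.
String fqn = ImplicitFileColumns.FQN.getValue(filePath);        // full path including the file name
String dir = ImplicitFileColumns.FILEPATH.getValue(filePath);   // presumably the directory portion
String name = ImplicitFileColumns.FILENAME.getValue(filePath);  // "z.csv"
String suffix = ImplicitFileColumns.SUFFIX.getValue(filePath);  // "csv"
// Internal columns need the file system to stat the file:
String lmt = ColumnExplorer.getImplicitColumnValue(
    ImplicitInternalFileColumns.LAST_MODIFIED_TIME, filePath, fileSystem);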

Example 4 with ScanOrchestratorBuilder

Use of org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder in project drill by apache.

From the class TestScanOrchestratorImplicitColumns, method testMetadataMulti.

/**
 * Verify that metadata columns track each distinct file.
 * <br>
 * SELECT dir0, dir1, filename, b FROM (a.csv, b.csv)
 */
@Test
public void testMetadataMulti() {
    ScanOrchestratorBuilder builder = new MockScanBuilder();
    File file = dirTestWatcher.copyResourceToRoot(Paths.get("multilevel", "csv", "1994", "Q1", "orders_94_q1.csv"), Paths.get("x", "y", "a.csv"));
    Path filePathA = new Path(file.toURI().getPath());
    File file2 = dirTestWatcher.copyResourceToRoot(Paths.get("multilevel", "csv", "1994", "Q2", "orders_94_q2.csv"), Paths.get("x", "b.csv"));
    Path filePathB = new Path(file2.toURI().getPath());
    ImplicitColumnManager metadataManager = new ImplicitColumnManager(fixture.getOptionManager(), standardOptions(Lists.newArrayList(filePathA, filePathB)));
    builder.withImplicitColumns(metadataManager);
    // SELECT dir0, dir1, filename, b ...
    builder.projection(RowSetTestUtils.projectList(ScanTestUtils.partitionColName(0), ScanTestUtils.partitionColName(1), ScanTestUtils.FILE_NAME_COL, "b"));
    ScanSchemaOrchestrator scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);
    // file schema (a, b)
    TupleMetadata tableSchema = new SchemaBuilder().add("a", MinorType.INT).addNullable("b", MinorType.VARCHAR, 10).buildSchema();
    TupleMetadata expectedSchema = new SchemaBuilder().addNullable(ScanTestUtils.partitionColName(0), MinorType.VARCHAR).addNullable(ScanTestUtils.partitionColName(1), MinorType.VARCHAR).add(ScanTestUtils.FILE_NAME_COL, MinorType.VARCHAR).addNullable("b", MinorType.VARCHAR, 10).buildSchema();
    SchemaTracker tracker = new SchemaTracker();
    int schemaVersion;
    {
        // ... FROM file a.csv
        metadataManager.startFile(filePathA);
        ReaderSchemaOrchestrator reader = scanner.startReader();
        ResultSetLoader loader = reader.makeTableLoader(tableSchema);
        reader.startBatch();
        loader.writer().addRow(10, "fred").addRow(20, "wilma");
        reader.endBatch();
        tracker.trackSchema(scanner.output());
        schemaVersion = tracker.schemaVersion();
        SingleRowSet expected = fixture.rowSetBuilder(expectedSchema).addRow("x", "y", "a.csv", "fred").addRow("x", "y", "a.csv", "wilma").build();
        RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
        // Do an explicit close (as in real code) to avoid an implicit
        // close, which would blow away the current file info.
        scanner.closeReader();
    }
    {
        // ... FROM file b.csv
        metadataManager.startFile(filePathB);
        ReaderSchemaOrchestrator reader = scanner.startReader();
        ResultSetLoader loader = reader.makeTableLoader(tableSchema);
        reader.startBatch();
        loader.writer().addRow(30, "bambam").addRow(40, "betty");
        reader.endBatch();
        tracker.trackSchema(scanner.output());
        assertEquals(schemaVersion, tracker.schemaVersion());
        SingleRowSet expected = fixture.rowSetBuilder(expectedSchema).addRow("x", null, "b.csv", "bambam").addRow("x", null, "b.csv", "betty").build();
        RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
        scanner.closeReader();
    }
    scanner.close();
}
Also used : Path(org.apache.hadoop.fs.Path) SchemaPath(org.apache.drill.common.expression.SchemaPath) ImplicitColumnManager(org.apache.drill.exec.physical.impl.scan.file.ImplicitColumnManager) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ScanOrchestratorBuilder(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder) SchemaTracker(org.apache.drill.exec.physical.impl.protocol.SchemaTracker) ReaderSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ReaderSchemaOrchestrator) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) MockScanBuilder(org.apache.drill.exec.physical.impl.scan.ScanTestUtils.MockScanBuilder) File(java.io.File) ScanSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
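
Note that the schema stays stable across both files even though a.csv sits two directory levels deep and b.csv only one: the partition depth is computed from all files given to the ImplicitColumnManager up front, and b.csv simply reports null for dir1. A sketch of the SchemaTracker pattern the test uses to prove this, under the same fixture assumptions:

// Sketch: detect downstream schema changes across readers.
SchemaTracker tracker = new SchemaTracker();
tracker.trackSchema(scanner.output());      // after the first reader's batch
int schemaVersion = tracker.schemaVersion();
// ... read the second file through a new reader ...
tracker.trackSchema(scanner.output());
// An unchanged version means downstream operators see no schema change.
assertEquals(schemaVersion, tracker.schemaVersion());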

Example 5 with ScanOrchestratorBuilder

Use of org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder in project drill by apache.

From the class TestScanOrchestratorEarlySchema, method testTypeSmoothingExplicit.

/**
 * The projection mechanism provides "type smoothing": null
 * columns prefer the type of previously-seen non-null columns.
 *
 * <code><pre>
 * SELECT a, b, c ...
 *
 * Table 1: (a: BIGINT, b: VARCHAR, c: INT[])
 * Table 2: (a: BIGINT, c: INT[])
 * Table 3: (a: BIGINT, b: VARCHAR)
 * Table 4: (b: VARCHAR, c: INT[])
 * </pre></code>
 * Tables 1-3 all resolve to
 * <tt>(a: BIGINT, b: VARCHAR, c: INT[])</tt>. Table 4 drops the
 * non-nullable column a, which cannot be null-filled, so a nullable
 * BIGINT is substituted and the schema version changes.
 */
@Test
public void testTypeSmoothingExplicit() {
    ScanOrchestratorBuilder builder = new MockScanBuilder();
    TupleMetadata table1Schema = new SchemaBuilder().add("A", MinorType.BIGINT).addNullable("B", MinorType.VARCHAR).addArray("C", MinorType.INT).buildSchema();
    BatchSchema resultSchema = new BatchSchema(SelectionVectorMode.NONE, table1Schema.toFieldList());
    SchemaTracker tracker = new SchemaTracker();
    // SELECT a, b, c ...
    builder.projection(RowSetTestUtils.projectList("a", "b", "c"));
    ScanSchemaOrchestrator scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);
    int schemaVersion;
    {
        // ... FROM table1(a, b, c)
        ReaderSchemaOrchestrator reader = scanner.startReader();
        reader.makeTableLoader(table1Schema);
        reader.defineSchema();
        VectorContainer output = scanner.output();
        tracker.trackSchema(output);
        schemaVersion = tracker.schemaVersion();
        assertTrue(resultSchema.isEquivalent(output.getSchema()));
        scanner.closeReader();
    }
    {
        // ... FROM table2(a, c)
        //
        // B is dropped. But it is nullable, so the vector cache
        // can supply the proper type to ensure continuity.
        TupleMetadata table2Schema = new SchemaBuilder().add("A", MinorType.BIGINT).addArray("C", MinorType.INT).buildSchema();
        ReaderSchemaOrchestrator reader = scanner.startReader();
        reader.makeTableLoader(table2Schema);
        reader.defineSchema();
        VectorContainer output = scanner.output();
        tracker.trackSchema(output);
        assertEquals(schemaVersion, tracker.schemaVersion());
        assertTrue(resultSchema.isEquivalent(output.getSchema()));
        scanner.closeReader();
    }
    {
        // ... FROM table3(a, b)
        //
        // C is dropped. But it is an array, which uses zero-length
        // arrays to indicate null, so the vector cache can fill in the type.
        TupleMetadata table3Schema = new SchemaBuilder().add("A", MinorType.BIGINT).addNullable("B", MinorType.VARCHAR).buildSchema();
        ReaderSchemaOrchestrator reader = scanner.startReader();
        reader.makeTableLoader(table3Schema);
        reader.defineSchema();
        VectorContainer output = scanner.output();
        tracker.trackSchema(output);
        assertEquals(schemaVersion, tracker.schemaVersion());
        assertTrue(resultSchema.isEquivalent(output.getSchema()));
        scanner.closeReader();
    }
    {
        // ... FROM table4(b, c)
        //
        // Column a is dropped. It was a non-nullable BIGINT, which
        // cannot become a null column, so a nullable BIGINT is
        // substituted, resulting in a schema change.
        TupleMetadata table4Schema = new SchemaBuilder().addNullable("B", MinorType.VARCHAR).addArray("C", MinorType.INT).buildSchema();
        ReaderSchemaOrchestrator reader = scanner.startReader();
        reader.makeTableLoader(table4Schema);
        reader.defineSchema();
        VectorContainer output = scanner.output();
        tracker.trackSchema(output);
        assertEquals(MinorType.BIGINT, output.getSchema().getColumn(0).getType().getMinorType());
        assertEquals(DataMode.OPTIONAL, output.getSchema().getColumn(0).getType().getMode());
        assertTrue(schemaVersion < tracker.schemaVersion());
        scanner.closeReader();
    }
    scanner.close();
}
Also used : BatchSchema(org.apache.drill.exec.record.BatchSchema) ScanOrchestratorBuilder(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) BatchSchemaBuilder(org.apache.drill.exec.record.BatchSchemaBuilder) SchemaTracker(org.apache.drill.exec.physical.impl.protocol.SchemaTracker) MockScanBuilder(org.apache.drill.exec.physical.impl.scan.ScanTestUtils.MockScanBuilder) ScanSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator) ReaderSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ReaderSchemaOrchestrator) VectorContainer(org.apache.drill.exec.record.VectorContainer) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
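
The rule this test verifies can be stated compactly: a previously-seen column that is missing from the current table keeps its prior type only when absence is representable, by nulls for OPTIONAL columns or by empty arrays for REPEATED ones. The sketch below restates that rule for illustration; it is not Drill's actual implementation:

// Illustrative only: the type-smoothing rule, not Drill's code.
// MajorType and DataMode are from org.apache.drill.common.types.TypeProtos.
static MajorType smoothedType(MajorType prior) {
    if (prior.getMode() == DataMode.OPTIONAL           // nullable: fill with nulls
            || prior.getMode() == DataMode.REPEATED) { // array: fill with empty arrays
        return prior;                                  // type carries over; schema stable
    }
    // A REQUIRED column cannot hold nulls; substitute the OPTIONAL variant,
    // which bumps the schema version.
    return prior.toBuilder().setMode(DataMode.OPTIONAL).build();
}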

Aggregations

ScanOrchestratorBuilder (org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder): 23 usages
MockScanBuilder (org.apache.drill.exec.physical.impl.scan.ScanTestUtils.MockScanBuilder): 22 usages
ScanSchemaOrchestrator (org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator): 22 usages
SchemaBuilder (org.apache.drill.exec.record.metadata.SchemaBuilder): 22 usages
TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata): 22 usages
SubOperatorTest (org.apache.drill.test.SubOperatorTest): 21 usages
Test (org.junit.Test): 21 usages
ReaderSchemaOrchestrator (org.apache.drill.exec.physical.impl.scan.project.ReaderSchemaOrchestrator): 20 usages
SingleRowSet (org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet): 20 usages
ResultSetLoader (org.apache.drill.exec.physical.resultSet.ResultSetLoader): 18 usages
BatchSchemaBuilder (org.apache.drill.exec.record.BatchSchemaBuilder): 13 usages
SchemaPath (org.apache.drill.common.expression.SchemaPath): 6 usages
SchemaTracker (org.apache.drill.exec.physical.impl.protocol.SchemaTracker): 6 usages
ImplicitColumnManager (org.apache.drill.exec.physical.impl.scan.file.ImplicitColumnManager): 6 usages
Path (org.apache.hadoop.fs.Path): 6 usages
File (java.io.File): 5 usages
RowSetComparison (org.apache.drill.test.rowSet.RowSetComparison): 3 usages
MajorType (org.apache.drill.common.types.TypeProtos.MajorType): 2 usages
ColumnsArrayManager (org.apache.drill.exec.physical.impl.scan.columns.ColumnsArrayManager): 2 usages
RowSetLoader (org.apache.drill.exec.physical.resultSet.RowSetLoader): 2 usages