
Example 21 with ScanOrchestratorBuilder

use of org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder in project drill by apache.

The class TestScanOrchestratorEarlySchema, method testTypeSmoothing.

/**
 * Test the ability of the scan orchestrator to "smooth" out schema changes
 * by reusing the type from a previous reader, if known. That is,
 * given three readers:<br>
 * (a, b)<br>
 * (a)<br>
 * (a, b)<br>
 * the type of column b should be preserved for the second reader, which
 * does not include b. This works because b is nullable: b's type from the
 * first reader is used for the missing column, rather than the usual
 * nullable INT. (See the short sketch after this example.)
 * <p>
 * Detailed testing of type matching for "missing" columns is done
 * in {@link #testNullColumnLoader()}.
 * <p>
 * As a side effect, this verifies that two identical tables (in this case,
 * separated by a different table) result in no schema change.
 */
@Test
public void testTypeSmoothing() {
    ScanOrchestratorBuilder builder = new MockScanBuilder();
    // SELECT a, b ...
    builder.projection(RowSetTestUtils.projectList("a", "b"));
    ScanSchemaOrchestrator scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);
    // file schema (a, b)
    TupleMetadata twoColSchema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addNullable("b", MinorType.VARCHAR, 10)
        .buildSchema();
    SchemaTracker tracker = new SchemaTracker();
    int schemaVersion;
    {
        // ... FROM table 1
        ReaderSchemaOrchestrator reader = scanner.startReader();
        ResultSetLoader loader = reader.makeTableLoader(twoColSchema);
        // Projection of (a, b) to (a, b)
        reader.startBatch();
        loader.writer().addRow(10, "fred").addRow(20, "wilma");
        reader.endBatch();
        tracker.trackSchema(scanner.output());
        schemaVersion = tracker.schemaVersion();
        SingleRowSet expected = fixture.rowSetBuilder(twoColSchema).addRow(10, "fred").addRow(20, "wilma").build();
        RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
    }
    {
        // ... FROM table 2
        ReaderSchemaOrchestrator reader = scanner.startReader();
        // File schema (a)
        TupleMetadata oneColSchema = new SchemaBuilder()
            .add("a", MinorType.INT)
            .buildSchema();
        // Projection of (a) to (a, b), reusing b from above.
        ResultSetLoader loader = reader.makeTableLoader(oneColSchema);
        reader.startBatch();
        loader.writer().addRow(30).addRow(40);
        reader.endBatch();
        tracker.trackSchema(scanner.output());
        assertEquals(schemaVersion, tracker.schemaVersion());
        SingleRowSet expected = fixture.rowSetBuilder(twoColSchema).addRow(30, null).addRow(40, null).build();
        RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
    }
    {
        // ... FROM table 3
        ReaderSchemaOrchestrator reader = scanner.startReader();
        // Projection of (a, b), to (a, b), reusing b yet again
        ResultSetLoader loader = reader.makeTableLoader(twoColSchema);
        reader.startBatch();
        loader.writer().addRow(50, "dino").addRow(60, "barney");
        reader.endBatch();
        tracker.trackSchema(scanner.output());
        assertEquals(schemaVersion, tracker.schemaVersion());
        SingleRowSet expected = fixture.rowSetBuilder(twoColSchema).addRow(50, "dino").addRow(60, "barney").build();
        RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
    }
    scanner.close();
}
Also used : SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) ScanOrchestratorBuilder(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) BatchSchemaBuilder(org.apache.drill.exec.record.BatchSchemaBuilder) SchemaTracker(org.apache.drill.exec.physical.impl.protocol.SchemaTracker) MockScanBuilder(org.apache.drill.exec.physical.impl.scan.ScanTestUtils.MockScanBuilder) ScanSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator) ReaderSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ReaderSchemaOrchestrator) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
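As a point of reference for the javadoc above, here is a minimal sketch (not part of the Drill test itself) contrasting the second reader's output schema when type smoothing applies against the default treatment of a missing projected column. Only the SchemaBuilder calls already shown in the example are used; the schema names are illustrative.

// Hypothetical illustration: the shape of the output schema for reader 2,
// which supplies only column "a" while the query projects (a, b).
TupleMetadata smoothedSchema = new SchemaBuilder()
    .add("a", MinorType.INT)
    // "b" keeps the nullable VARCHAR(10) type remembered from reader 1
    .addNullable("b", MinorType.VARCHAR, 10)
    .buildSchema();

// Without a remembered type, a missing projected column would normally be
// filled as the usual nullable INT instead.
TupleMetadata fallbackSchema = new SchemaBuilder()
    .add("a", MinorType.INT)
    .addNullable("b", MinorType.INT)
    .buildSchema();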

Example 22 with ScanOrchestratorBuilder

use of org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder in project drill by apache.

The class TestScanOrchestratorEarlySchema, method testModeSmoothing. This test exercises "mode" smoothing: later readers that present a required (non-nullable) or narrower-precision version of a column are projected to the most permissive form seen so far, here nullable VARCHAR of precision 10, so the schema version reported by SchemaTracker stays constant across readers. A short sketch of the same check appears after this example.

@Test
public void testModeSmoothing() {
    ScanOrchestratorBuilder builder = new MockScanBuilder();
    builder.enableSchemaSmoothing(true);
    builder.projection(RowSetTestUtils.projectList("a"));
    ScanSchemaOrchestrator scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);
    // Most general schema: nullable, with precision.
    TupleMetadata schema1 = new SchemaBuilder()
        .addNullable("a", MinorType.VARCHAR, 10)
        .buildSchema();
    SchemaTracker tracker = new SchemaTracker();
    int schemaVersion;
    {
        // Table 1: most permissive type
        ReaderSchemaOrchestrator reader = scanner.startReader();
        ResultSetLoader loader = reader.makeTableLoader(schema1);
        // Create a batch
        reader.startBatch();
        loader.writer().addRow("fred").addRow("wilma");
        reader.endBatch();
        tracker.trackSchema(scanner.output());
        schemaVersion = tracker.schemaVersion();
        // Verify
        SingleRowSet expected = fixture.rowSetBuilder(schema1).addRow("fred").addRow("wilma").build();
        RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
        scanner.closeReader();
    }
    {
        // Table 2: required, use nullable
        // Required version.
        TupleMetadata schema2 = new SchemaBuilder()
            .add("a", MinorType.VARCHAR, 10)
            .buildSchema();
        ReaderSchemaOrchestrator reader = scanner.startReader();
        ResultSetLoader loader = reader.makeTableLoader(schema2);
        // Create a batch
        reader.startBatch();
        loader.writer().addRow("barney").addRow("betty");
        reader.endBatch();
        // Verify, using persistent schema
        tracker.trackSchema(scanner.output());
        assertEquals(schemaVersion, tracker.schemaVersion());
        SingleRowSet expected = fixture.rowSetBuilder(schema1).addRow("barney").addRow("betty").build();
        RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
        scanner.closeReader();
    }
    {
        // Table 3: narrower precision, use wider
        // Required version with narrower precision.
        TupleMetadata schema3 = new SchemaBuilder()
            .add("a", MinorType.VARCHAR, 5)
            .buildSchema();
        ReaderSchemaOrchestrator reader = scanner.startReader();
        ResultSetLoader loader = reader.makeTableLoader(schema3);
        // Create a batch
        reader.startBatch();
        loader.writer().addRow("bam-bam").addRow("pebbles");
        reader.endBatch();
        // Verify, using persistent schema
        tracker.trackSchema(scanner.output());
        assertEquals(schemaVersion, tracker.schemaVersion());
        SingleRowSet expected = fixture.rowSetBuilder(schema1).addRow("bam-bam").addRow("pebbles").build();
        RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
        scanner.closeReader();
    }
    scanner.close();
}
Also used : SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) ScanOrchestratorBuilder(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) BatchSchemaBuilder(org.apache.drill.exec.record.BatchSchemaBuilder) SchemaTracker(org.apache.drill.exec.physical.impl.protocol.SchemaTracker) MockScanBuilder(org.apache.drill.exec.physical.impl.scan.ScanTestUtils.MockScanBuilder) ScanSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator) ReaderSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ReaderSchemaOrchestrator) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
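A minimal sketch, using only the SchemaTracker calls shown above, of how a caller might confirm that smoothing held across readers; the assertEquals checks in the test express the same idea. Variable names are illustrative.

// Track the schema of the first output batch.
SchemaTracker tracker = new SchemaTracker();
tracker.trackSchema(scanner.output());
int firstVersion = tracker.schemaVersion();

// ... after each later reader's batch ...
tracker.trackSchema(scanner.output());
if (tracker.schemaVersion() != firstVersion) {
    // The output schema changed between readers; downstream operators
    // would observe a schema change.
}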

Example 23 with ScanOrchestratorBuilder

use of org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder in project drill by apache.

The class TestScanOrchestratorEarlySchema, method testEarlySchemaWildcard.

/**
 * Test SELECT * from an early-schema table of (a, b).
 * (A short sketch contrasting wildcard and explicit projection follows
 * this example.)
 */
@Test
public void testEarlySchemaWildcard() {
    ScanOrchestratorBuilder builder = new MockScanBuilder();
    // SELECT * ...
    builder.projection(RowSetTestUtils.projectAll());
    ScanSchemaOrchestrator scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);
    // ... FROM table
    ReaderSchemaOrchestrator reader = scanner.startReader();
    // file schema (a, b)
    TupleMetadata tableSchema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .add("b", MinorType.VARCHAR)
        .buildSchema();
    // Create the table loader
    ResultSetLoader loader = reader.makeTableLoader(tableSchema);
    // Simulate a first reader in a scan that can provide an
    // empty batch to define schema.
    {
        reader.defineSchema();
        SingleRowSet expected = fixture.rowSetBuilder(tableSchema).build();
        assertNotNull(scanner.output());
        RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
    }
    // Create a batch of data.
    reader.startBatch();
    loader.writer().addRow(1, "fred").addRow(2, "wilma");
    reader.endBatch();
    // Verify
    {
        SingleRowSet expected = fixture.rowSetBuilder(tableSchema).addRow(1, "fred").addRow(2, "wilma").build();
        RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
    }
    // Second batch.
    reader.startBatch();
    loader.writer().addRow(3, "barney").addRow(4, "betty");
    reader.endBatch();
    // Verify
    {
        SingleRowSet expected = fixture.rowSetBuilder(tableSchema).addRow(3, "barney").addRow(4, "betty").build();
        RowSetUtilities.verify(expected, fixture.wrap(scanner.output()));
    }
    // Explicit reader close. (All other tests are lazy, they
    // use an implicit close.)
    scanner.closeReader();
    scanner.close();
}
Also used : SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) ScanOrchestratorBuilder(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) BatchSchemaBuilder(org.apache.drill.exec.record.BatchSchemaBuilder) MockScanBuilder(org.apache.drill.exec.physical.impl.scan.ScanTestUtils.MockScanBuilder) ScanSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator) ReaderSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ReaderSchemaOrchestrator) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
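For comparison with the explicit-projection examples earlier in this listing, a small sketch of the two projection styles used throughout these tests. Only builder calls that appear in the examples are used; the variable names are illustrative.

// Wildcard projection (SELECT *): output columns follow whatever table
// schema the reader declares, here (a, b).
ScanOrchestratorBuilder wildcardBuilder = new MockScanBuilder();
wildcardBuilder.projection(RowSetTestUtils.projectAll());

// Explicit projection (SELECT a, b): projected columns are fixed up front;
// a column the reader does not supply is filled with nulls.
ScanOrchestratorBuilder explicitBuilder = new MockScanBuilder();
explicitBuilder.projection(RowSetTestUtils.projectList("a", "b"));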

Aggregations

ScanOrchestratorBuilder (org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder): 23 uses
MockScanBuilder (org.apache.drill.exec.physical.impl.scan.ScanTestUtils.MockScanBuilder): 22 uses
ScanSchemaOrchestrator (org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator): 22 uses
SchemaBuilder (org.apache.drill.exec.record.metadata.SchemaBuilder): 22 uses
TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata): 22 uses
SubOperatorTest (org.apache.drill.test.SubOperatorTest): 21 uses
Test (org.junit.Test): 21 uses
ReaderSchemaOrchestrator (org.apache.drill.exec.physical.impl.scan.project.ReaderSchemaOrchestrator): 20 uses
SingleRowSet (org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet): 20 uses
ResultSetLoader (org.apache.drill.exec.physical.resultSet.ResultSetLoader): 18 uses
BatchSchemaBuilder (org.apache.drill.exec.record.BatchSchemaBuilder): 13 uses
SchemaPath (org.apache.drill.common.expression.SchemaPath): 6 uses
SchemaTracker (org.apache.drill.exec.physical.impl.protocol.SchemaTracker): 6 uses
ImplicitColumnManager (org.apache.drill.exec.physical.impl.scan.file.ImplicitColumnManager): 6 uses
Path (org.apache.hadoop.fs.Path): 6 uses
File (java.io.File): 5 uses
RowSetComparison (org.apache.drill.test.rowSet.RowSetComparison): 3 uses
MajorType (org.apache.drill.common.types.TypeProtos.MajorType): 2 uses
ColumnsArrayManager (org.apache.drill.exec.physical.impl.scan.columns.ColumnsArrayManager): 2 uses
RowSetLoader (org.apache.drill.exec.physical.resultSet.RowSetLoader): 2 uses
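Taken together, the examples above share a single lifecycle. The following sketch strings the calls from the listings into one skeleton; row values and the table schema are placeholders, and error handling is omitted.

// Configure the scan: projection plus any options such as schema smoothing.
ScanOrchestratorBuilder builder = new MockScanBuilder();
builder.projection(RowSetTestUtils.projectList("a", "b"));   // or projectAll()
ScanSchemaOrchestrator scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);

// One ReaderSchemaOrchestrator per reader.
ReaderSchemaOrchestrator reader = scanner.startReader();
TupleMetadata tableSchema = new SchemaBuilder()
    .add("a", MinorType.INT)
    .add("b", MinorType.VARCHAR)
    .buildSchema();
ResultSetLoader loader = reader.makeTableLoader(tableSchema);

// One or more batches per reader.
reader.startBatch();
loader.writer().addRow(1, "fred");
reader.endBatch();
// scanner.output() now holds the projected batch for downstream consumption.

scanner.closeReader();
scanner.close();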