Search in sources :

Example 6 with SchemaBuilder

use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.

The class TestScanOrchestratorEarlySchema, method testTypeSmoothingExplicit.

/**
 * Verifies "type smoothing" in the projection mechanism: when a
 * projected column is missing from a reader's schema, the null
 * column created for it prefers the type of the previously-seen
 * non-null column (supplied via the vector cache), keeping the
 * scan's output schema stable across readers where possible.
 *
 * <code><pre>
 * SELECT a, b, c ...
 *
 * Table 1: (a: BIGINT, b: VARCHAR, c: INT[])
 * Table 2: (a: BIGINT, c: INT[])    -- b missing
 * Table 3: (a: BIGINT, b: VARCHAR)  -- c missing
 * Table 4: (b: VARCHAR, c: INT[])   -- a missing
 * </pre></code>
 * Tables 2 and 3 should preserve the table 1 output schema (and
 * schema version). Table 4 drops the non-nullable BIGINT column
 * "a", which cannot be a null column as-is, so a nullable BIGINT
 * is substituted and the schema version must advance.
 */
@Test
public void testTypeSmoothingExplicit() {
    ScanOrchestratorBuilder builder = new MockScanBuilder();
    TupleMetadata table1Schema = new SchemaBuilder().add("A", MinorType.BIGINT).addNullable("B", MinorType.VARCHAR).addArray("C", MinorType.INT).buildSchema();
    BatchSchema resultSchema = new BatchSchema(SelectionVectorMode.NONE, table1Schema.toFieldList());
    SchemaTracker tracker = new SchemaTracker();
    // SELECT a, b, c ...
    builder.projection(RowSetTestUtils.projectList("a", "b", "c"));
    ScanSchemaOrchestrator scanner = new ScanSchemaOrchestrator(fixture.allocator(), builder);
    int schemaVersion;
    {
        // ... FROM table1(a, b, c)
        ReaderSchemaOrchestrator reader = scanner.startReader();
        reader.makeTableLoader(table1Schema);
        reader.defineSchema();
        VectorContainer output = scanner.output();
        tracker.trackSchema(output);
        schemaVersion = tracker.schemaVersion();
        assertTrue(resultSchema.isEquivalent(output.getSchema()));
        scanner.closeReader();
    }
    {
        // ... FROM table2(a, c)
        // 
        // B is dropped. But, it is nullable, so the vector cache
        // can supply the proper type to ensure continuity.
        TupleMetadata table2Schema = new SchemaBuilder().add("A", MinorType.BIGINT).addArray("C", MinorType.INT).buildSchema();
        ReaderSchemaOrchestrator reader = scanner.startReader();
        reader.makeTableLoader(table2Schema);
        reader.defineSchema();
        VectorContainer output = scanner.output();
        tracker.trackSchema(output);
        // Same schema version: output schema unchanged from table 1.
        assertEquals(schemaVersion, tracker.schemaVersion());
        assertTrue(resultSchema.isEquivalent(output.getSchema()));
        scanner.closeReader();
    }
    {
        // ... FROM table3(a, b)
        // 
        // C is dropped. But, it is an array, which uses zero-elements
        // to indicate null, so the vector cache can fill in the type.
        TupleMetadata table3Schema = new SchemaBuilder().add("A", MinorType.BIGINT).addNullable("B", MinorType.VARCHAR).buildSchema();
        ReaderSchemaOrchestrator reader = scanner.startReader();
        reader.makeTableLoader(table3Schema);
        reader.defineSchema();
        VectorContainer output = scanner.output();
        tracker.trackSchema(output);
        // Still the same schema version.
        assertEquals(schemaVersion, tracker.schemaVersion());
        assertTrue(resultSchema.isEquivalent(output.getSchema()));
        scanner.closeReader();
    }
    {
        // ... FROM table4(b, c)
        // 
        // This version drops the non-nullable BIGINT column A, which
        // can't become a null column as-is, so nullable BIGINT is
        // substituted, resulting in a schema change.
        TupleMetadata table2Schema = new SchemaBuilder().addNullable("B", MinorType.VARCHAR).addArray("C", MinorType.INT).buildSchema();
        ReaderSchemaOrchestrator reader = scanner.startReader();
        reader.makeTableLoader(table2Schema);
        reader.defineSchema();
        VectorContainer output = scanner.output();
        tracker.trackSchema(output);
        // Column 0 ("a") is now a nullable BIGINT null column.
        assertEquals(MinorType.BIGINT, output.getSchema().getColumn(0).getType().getMinorType());
        assertEquals(DataMode.OPTIONAL, output.getSchema().getColumn(0).getType().getMode());
        // The substitution forces a new schema version.
        assertTrue(schemaVersion < tracker.schemaVersion());
        scanner.closeReader();
    }
    scanner.close();
}
Also used : BatchSchema(org.apache.drill.exec.record.BatchSchema) ScanOrchestratorBuilder(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) BatchSchemaBuilder(org.apache.drill.exec.record.BatchSchemaBuilder) SchemaTracker(org.apache.drill.exec.physical.impl.protocol.SchemaTracker) MockScanBuilder(org.apache.drill.exec.physical.impl.scan.ScanTestUtils.MockScanBuilder) ScanSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator) ReaderSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ReaderSchemaOrchestrator) VectorContainer(org.apache.drill.exec.record.VectorContainer) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)

Example 7 with SchemaBuilder

use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.

The class TestScanOrchestratorEarlySchema, method testEarlySchemaSelectAll.

/**
 * Test SELECT a, b FROM table(a, b): every projected column
 * exists in the table schema, in projection order, so the output
 * matches the table schema and data exactly.
 */
@Test
public void testEarlySchemaSelectAll() {
    ScanOrchestratorBuilder scanBuilder = new MockScanBuilder();
    // Explicit projection list: SELECT a, b ...
    scanBuilder.projection(RowSetTestUtils.projectList("a", "b"));
    ScanSchemaOrchestrator orchestrator = new ScanSchemaOrchestrator(fixture.allocator(), scanBuilder);
    // Start the reader: ... FROM table
    ReaderSchemaOrchestrator readerProxy = orchestrator.startReader();
    // Early schema: the reader declares (a, b) up front.
    TupleMetadata fileSchema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .add("b", MinorType.VARCHAR)
        .buildSchema();
    // Create the table loader for that schema.
    ResultSetLoader tableLoader = readerProxy.makeTableLoader(fileSchema);
    // Don't bother with an empty batch here or in other tests;
    // this simulates the second reader in a scan.
    // Load a single batch of data.
    readerProxy.startBatch();
    tableLoader.writer().addRow(1, "fred").addRow(2, "wilma");
    readerProxy.endBatch();
    // Output should carry the table schema and rows unchanged.
    SingleRowSet expectedRows = fixture.rowSetBuilder(fileSchema)
        .addRow(1, "fred")
        .addRow(2, "wilma")
        .build();
    RowSetUtilities.verify(expectedRows, fixture.wrap(orchestrator.output()));
    orchestrator.close();
}
Also used : SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) ScanOrchestratorBuilder(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) BatchSchemaBuilder(org.apache.drill.exec.record.BatchSchemaBuilder) MockScanBuilder(org.apache.drill.exec.physical.impl.scan.ScanTestUtils.MockScanBuilder) ScanSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator) ReaderSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ReaderSchemaOrchestrator) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)

Example 8 with SchemaBuilder

use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.

The class TestScanOrchestratorEarlySchema, method testEarlySchemaSelectNone.

/**
 * Test an empty SELECT list against table(a, b), as occurs for
 * queries like SELECT COUNT(*): no table column is projected,
 * only the row count matters.
 */
@Test
public void testEarlySchemaSelectNone() {
    ScanOrchestratorBuilder scanBuilder = new MockScanBuilder();
    // Empty projection: SELECT ... (like SELECT COUNT(*) ...)
    scanBuilder.projection(RowSetTestUtils.projectList());
    ScanSchemaOrchestrator orchestrator = new ScanSchemaOrchestrator(fixture.allocator(), scanBuilder);
    // Start the reader: ... FROM table
    ReaderSchemaOrchestrator readerProxy = orchestrator.startReader();
    // The reader still declares its full file schema (a, b).
    TupleMetadata fileSchema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .add("b", MinorType.VARCHAR)
        .buildSchema();
    // Create the table loader.
    ResultSetLoader tableLoader = readerProxy.makeTableLoader(fileSchema);
    // Neither column should be marked as projected in the loader.
    assertTrue(tableLoader.isProjectionEmpty());
    assertFalse(tableLoader.writer().column("a").isProjected());
    assertFalse(tableLoader.writer().column("b").isProjected());
    // Expected output batch has no columns at all.
    BatchSchema expectedSchema = new BatchSchemaBuilder().withSchemaBuilder(new SchemaBuilder()).build();
    // Write rows; values are discarded, only the count survives.
    readerProxy.startBatch();
    tableLoader.writer().addRow(1, "fred").addRow(2, "wilma");
    readerProxy.endBatch();
    // Verify
    {
        // Two rows, no data.
        SingleRowSet expectedRows = fixture.rowSetBuilder(expectedSchema).addRow().addRow().build();
        RowSetUtilities.verify(expectedRows, fixture.wrap(orchestrator.output()));
    }
    // Fast path: bulk-skip rows when nothing is projected.
    readerProxy.startBatch();
    tableLoader.skipRows(10);
    readerProxy.endBatch();
    // Verify
    {
        VectorContainer output = orchestrator.output();
        assertEquals(10, output.getRecordCount());
        output.zeroVectors();
    }
    orchestrator.close();
}
Also used : SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) BatchSchema(org.apache.drill.exec.record.BatchSchema) ScanOrchestratorBuilder(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) BatchSchemaBuilder(org.apache.drill.exec.record.BatchSchemaBuilder) BatchSchemaBuilder(org.apache.drill.exec.record.BatchSchemaBuilder) MockScanBuilder(org.apache.drill.exec.physical.impl.scan.ScanTestUtils.MockScanBuilder) ScanSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator) ReaderSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ReaderSchemaOrchestrator) VectorContainer(org.apache.drill.exec.record.VectorContainer) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)

Example 9 with SchemaBuilder

use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.

The class TestScanOrchestratorEarlySchema, method testEarlySchemaSelectExtra.

/**
 * Test SELECT a, b, c FROM table(a, b): the projected column "c"
 * does not exist in the table, so it appears in the output as a
 * nullable INT column filled with nulls.
 */
@Test
public void testEarlySchemaSelectExtra() {
    ScanOrchestratorBuilder scanBuilder = new MockScanBuilder();
    // Explicit projection list: SELECT a, b, c ...
    scanBuilder.projection(RowSetTestUtils.projectList("a", "b", "c"));
    ScanSchemaOrchestrator orchestrator = new ScanSchemaOrchestrator(fixture.allocator(), scanBuilder);
    // Start the reader: ... FROM table
    ReaderSchemaOrchestrator readerProxy = orchestrator.startReader();
    // Early schema: the reader provides only (a, b).
    TupleMetadata fileSchema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .add("b", MinorType.VARCHAR)
        .buildSchema();
    // Create the table loader.
    ResultSetLoader tableLoader = readerProxy.makeTableLoader(fileSchema);
    // Expected output adds "c" as a nullable INT (the default null-column type).
    TupleMetadata expectedSchema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .add("b", MinorType.VARCHAR)
        .addNullable("c", MinorType.INT)
        .buildSchema();
    // Load a single batch of data.
    readerProxy.startBatch();
    tableLoader.writer().addRow(1, "fred").addRow(2, "wilma");
    readerProxy.endBatch();
    // Output rows carry a null in the synthesized "c" column.
    SingleRowSet expectedRows = fixture.rowSetBuilder(expectedSchema)
        .addRow(1, "fred", null)
        .addRow(2, "wilma", null)
        .build();
    RowSetUtilities.verify(expectedRows, fixture.wrap(orchestrator.output()));
    orchestrator.close();
}
Also used : SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) ScanOrchestratorBuilder(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) BatchSchemaBuilder(org.apache.drill.exec.record.BatchSchemaBuilder) MockScanBuilder(org.apache.drill.exec.physical.impl.scan.ScanTestUtils.MockScanBuilder) ScanSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator) ReaderSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ReaderSchemaOrchestrator) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)

Example 10 with SchemaBuilder

use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.

The class TestScanOrchestratorEarlySchema, method testEmptySchema.

/**
 * Test SELECT * from an early-schema table whose schema is ()
 * (that is, a schema that consists of zero columns).
 */
@Test
public void testEmptySchema() {
    ScanOrchestratorBuilder scanBuilder = new MockScanBuilder();
    // Wildcard projection: SELECT * ...
    scanBuilder.projection(RowSetTestUtils.projectAll());
    ScanSchemaOrchestrator orchestrator = new ScanSchemaOrchestrator(fixture.allocator(), scanBuilder);
    // Start the reader: ... FROM table
    ReaderSchemaOrchestrator readerProxy = orchestrator.startReader();
    // Empty file schema: ().
    TupleMetadata fileSchema = new SchemaBuilder().buildSchema();
    // Create the table loader; the returned loader is not needed here.
    readerProxy.makeTableLoader(fileSchema);
    // With zero columns it makes no sense to write any rows;
    // run an empty batch.
    readerProxy.startBatch();
    readerProxy.endBatch();
    // Output should be an empty (zero-column, zero-row) batch.
    {
        SingleRowSet expectedRows = fixture.rowSetBuilder(fileSchema).build();
        RowSetUtilities.verify(expectedRows, fixture.wrap(orchestrator.output()));
    }
    orchestrator.close();
}
Also used : SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ScanOrchestratorBuilder(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator.ScanOrchestratorBuilder) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) BatchSchemaBuilder(org.apache.drill.exec.record.BatchSchemaBuilder) MockScanBuilder(org.apache.drill.exec.physical.impl.scan.ScanTestUtils.MockScanBuilder) ScanSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ScanSchemaOrchestrator) ReaderSchemaOrchestrator(org.apache.drill.exec.physical.impl.scan.project.ReaderSchemaOrchestrator) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)

Aggregations

SchemaBuilder (org.apache.drill.exec.record.metadata.SchemaBuilder)1095 Test (org.junit.Test)1020 TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata)1008 RowSet (org.apache.drill.exec.physical.rowSet.RowSet)588 SubOperatorTest (org.apache.drill.test.SubOperatorTest)407 RowSetBuilder (org.apache.drill.exec.physical.rowSet.RowSetBuilder)288 SingleRowSet (org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet)263 ClusterTest (org.apache.drill.test.ClusterTest)245 EvfTest (org.apache.drill.categories.EvfTest)203 RowSetComparison (org.apache.drill.test.rowSet.RowSetComparison)188 JsonTest (org.apache.drill.categories.JsonTest)110 ResultSetLoader (org.apache.drill.exec.physical.resultSet.ResultSetLoader)108 DirectRowSet (org.apache.drill.exec.physical.rowSet.DirectRowSet)108 RowSetLoader (org.apache.drill.exec.physical.resultSet.RowSetLoader)85 BatchSchemaBuilder (org.apache.drill.exec.record.BatchSchemaBuilder)83 ScalarReader (org.apache.drill.exec.vector.accessor.ScalarReader)68 UserException (org.apache.drill.common.exceptions.UserException)62 BatchSchema (org.apache.drill.exec.record.BatchSchema)62 VectorContainer (org.apache.drill.exec.record.VectorContainer)58 BaseTest (org.apache.drill.test.BaseTest)57