Search in sources :

Example 21 with SchemaBuilder

use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.

Source: class TestDirectConverter, method testDecimalOverflow.

/**
 * Verify that converting a VARCHAR value into a VARDECIMAL(4, 2) column
 * fails with a {@code UserException} when the value exceeds the declared
 * precision, rather than silently truncating.
 */
@Test
public void testDecimalOverflow() {
    // Output column "dec" is DECIMAL(4, 2): at most 4 total digits, 2 after the point.
    TupleMetadata outputSchema = new SchemaBuilder().add("id", MinorType.INT).add("dec", MinorType.VARDECIMAL, 4, 2).buildSchema();
    // Input provides the decimal as a VARCHAR, forcing a string-to-decimal conversion.
    TupleMetadata inputSchema = new SchemaBuilder().add("id", MinorType.INT).add("dec", MinorType.VARCHAR).buildSchema();
    ConversionTestFixture testFixture = new ConversionTestFixture(fixture.allocator(), outputSchema);
    testFixture.createConvertersFor(inputSchema);
    try {
        // "1234567.89" has 9 significant digits, far beyond precision 4: must overflow.
        testFixture.addRow(1, "1234567.89");
        fail("Expected UserException: \"1234567.89\" overflows DECIMAL(4, 2)");
    } catch (UserException e) {
    // Expected: converter rejects values that exceed the declared precision.
    }
    // Release vector memory even though the row was rejected.
    testFixture.build().clear();
}
Also used : TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) UserException(org.apache.drill.common.exceptions.UserException) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)

Example 22 with SchemaBuilder

use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.

Source: class TestOperatorRecordBatch, method testBatchAccessor.

/**
 * The record batch abstraction has a bunch of methods to work with a vector container.
 * Rather than simply exposing the container itself, the batch instead exposes various
 * container operations. Probably an artifact of its history. In any event, make
 * sure those methods are passed through to the container accessor.
 */
@Test
public void testBatchAccessor() {
    // Two-column schema: (a INT, b VARCHAR), two rows of data.
    SchemaBuilder schemaBuilder = new SchemaBuilder().add("a", MinorType.INT).add("b", MinorType.VARCHAR);
    BatchSchema schema = new BatchSchemaBuilder().withSchemaBuilder(schemaBuilder).build();
    SingleRowSet rs = fixture.rowSetBuilder(schema).addRow(10, "fred").addRow(20, "wilma").build();
    MockOperatorExec opExec = new MockOperatorExec(rs.container());
    opExec.nextCalls = 1;
    try (OperatorRecordBatch opBatch = makeOpBatch(opExec)) {
        assertEquals(IterOutcome.OK_NEW_SCHEMA, opBatch.next());
        assertEquals(schema, opBatch.getSchema());
        assertEquals(2, opBatch.getRecordCount());
        assertSame(rs.container(), opBatch.getOutgoingContainer());
        // Iteration must visit vectors in column order: "a" then "b".
        Iterator<VectorWrapper<?>> iter = opBatch.iterator();
        assertEquals("a", iter.next().getValueVector().getField().getName());
        assertEquals("b", iter.next().getValueVector().getField().getName());
        // Not a full test of the schema path; just make sure that the
        // pass-through to the Vector Container works.
        SchemaPath path = SchemaPath.create(NamePart.newBuilder().setName("a").build());
        TypedFieldId id = opBatch.getValueVectorId(path);
        assertEquals(MinorType.INT, id.getFinalType().getMinorType());
        assertEquals(1, id.getFieldIds().length);
        assertEquals(0, id.getFieldIds()[0]);
        path = SchemaPath.create(NamePart.newBuilder().setName("b").build());
        id = opBatch.getValueVectorId(path);
        assertEquals(MinorType.VARCHAR, id.getFinalType().getMinorType());
        assertEquals(1, id.getFieldIds().length);
        assertEquals(1, id.getFieldIds()[0]);
        // Sanity check of getValueAccessorById()
        VectorWrapper<?> w = opBatch.getValueAccessorById(IntVector.class, 0);
        assertNotNull(w);
        assertEquals("a", w.getValueVector().getField().getName());
        w = opBatch.getValueAccessorById(VarCharVector.class, 1);
        assertNotNull(w);
        assertEquals("b", w.getValueVector().getField().getName());
        // This batch carries no selection vector; the accessors must refuse.
        try {
            opBatch.getSelectionVector2();
            fail("Expected UnsupportedOperationException from getSelectionVector2()");
        } catch (UnsupportedOperationException e) {
        // Expected
        }
        try {
            opBatch.getSelectionVector4();
            fail("Expected UnsupportedOperationException from getSelectionVector4()");
        } catch (UnsupportedOperationException e) {
        // Expected
        }
    } catch (Exception e) {
        // Wrap rather than fail(e.getMessage()): preserves the full stack trace
        // and avoids a null message hiding the real cause.
        throw new AssertionError("Unexpected exception in testBatchAccessor", e);
    }
    assertTrue(opExec.closeCalled);
}
Also used : SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) VectorWrapper(org.apache.drill.exec.record.VectorWrapper) BatchSchemaBuilder(org.apache.drill.exec.record.BatchSchemaBuilder) VarCharVector(org.apache.drill.exec.vector.VarCharVector) UserException(org.apache.drill.common.exceptions.UserException) BatchSchema(org.apache.drill.exec.record.BatchSchema) SchemaPath(org.apache.drill.common.expression.SchemaPath) TypedFieldId(org.apache.drill.exec.record.TypedFieldId) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) BatchSchemaBuilder(org.apache.drill.exec.record.BatchSchemaBuilder) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)

Example 23 with SchemaBuilder

use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.

Source: class TestOperatorRecordBatch, method testSchemaChange.

/**
 * Verifies schema-version tracking in the batch accessor: the version bumps
 * only on a real schema change. Re-sending the same vectors (even wrapped in
 * a different container) keeps the version; replacing, adding, or removing a
 * vector increments it by one; repeating an identical schema does not.
 * NOTE(review): assertions here depend on the exact order of addBatch calls —
 * each step builds on the container state left by the previous one.
 */
@Test
public void testSchemaChange() {
    TupleMetadata schema = new SchemaBuilder().add("a", MinorType.INT).add("b", MinorType.VARCHAR).buildSchema();
    SingleRowSet rs = fixture.rowSetBuilder(schema).addRow(10, "fred").addRow(20, "wilma").build();
    VectorContainer container = rs.container();
    MockOperatorExec opExec = new MockOperatorExec(container);
    int schemaVersion = opExec.batchAccessor().schemaVersion();
    // Be tidy: start at 1.
    assertEquals(1, schemaVersion);
    // Changing data does not trigger schema change
    container.zeroVectors();
    opExec.batchAccessor.addBatch(container);
    assertEquals(schemaVersion, opExec.batchAccessor().schemaVersion());
    // Different container, same vectors, does not trigger a change
    VectorContainer c2 = new VectorContainer(fixture.allocator());
    for (VectorWrapper<?> vw : container) {
        c2.add(vw.getValueVector());
    }
    c2.buildSchema(SelectionVectorMode.NONE);
    opExec.batchAccessor.addBatch(c2);
    assertEquals(schemaVersion, opExec.batchAccessor().schemaVersion());
    // Switching back to the original container is likewise not a change.
    opExec.batchAccessor.addBatch(container);
    assertEquals(schemaVersion, opExec.batchAccessor().schemaVersion());
    // Replacing a vector with another of the same type does trigger
    // a change.
    VectorContainer c3 = new VectorContainer(fixture.allocator());
    c3.add(container.getValueVector(0).getValueVector());
    // Fresh vector for column "b": same field metadata, different instance.
    c3.add(TypeHelper.getNewVector(container.getValueVector(1).getValueVector().getField(), fixture.allocator(), null));
    c3.buildSchema(SelectionVectorMode.NONE);
    opExec.batchAccessor.addBatch(c3);
    assertEquals(schemaVersion + 1, opExec.batchAccessor().schemaVersion());
    schemaVersion = opExec.batchAccessor().schemaVersion();
    // No change if same schema again
    opExec.batchAccessor.addBatch(c3);
    assertEquals(schemaVersion, opExec.batchAccessor().schemaVersion());
    // Adding a vector triggers a change
    MaterializedField c = SchemaBuilder.columnSchema("c", MinorType.INT, DataMode.OPTIONAL);
    c3.add(TypeHelper.getNewVector(c, fixture.allocator(), null));
    c3.buildSchema(SelectionVectorMode.NONE);
    opExec.batchAccessor.addBatch(c3);
    assertEquals(schemaVersion + 1, opExec.batchAccessor().schemaVersion());
    schemaVersion = opExec.batchAccessor().schemaVersion();
    // No change if same schema again
    opExec.batchAccessor.addBatch(c3);
    assertEquals(schemaVersion, opExec.batchAccessor().schemaVersion());
    // Removing a vector triggers a change
    c3.remove(c3.getValueVector(2).getValueVector());
    c3.buildSchema(SelectionVectorMode.NONE);
    assertEquals(2, c3.getNumberOfColumns());
    opExec.batchAccessor.addBatch(c3);
    assertEquals(schemaVersion + 1, opExec.batchAccessor().schemaVersion());
    schemaVersion = opExec.batchAccessor().schemaVersion();
    // Clean up
    opExec.close();
    c2.clear();
    c3.clear();
}
Also used : SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) BatchSchemaBuilder(org.apache.drill.exec.record.BatchSchemaBuilder) MaterializedField(org.apache.drill.exec.record.MaterializedField) VectorContainer(org.apache.drill.exec.record.VectorContainer) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)

Example 24 with SchemaBuilder

use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.

Source: class TestColumnsArrayFramework, method testColumnsIndexProjection.

/**
 * Verify projection of specific indexes of the `columns` array, such as
 * `columns`[1]: the negotiator must report which indexes are projected.
 */
@Test
public void testColumnsIndexProjection() {
    // Mock reader exposing a single repeated-VARCHAR `columns` column;
    // returns one schema-only batch and one data batch.
    DummyColumnsReader colsReader = new DummyColumnsReader(new SchemaBuilder().addArray(ColumnsScanFramework.COLUMNS_COL, MinorType.VARCHAR).buildSchema());
    // Build a scan operator projecting only indexes 1 and 3 of `columns`.
    ColumnsScanFixtureBuilder fixtureBuilder = new ColumnsScanFixtureBuilder();
    SchemaPath index1 = SchemaPath.parseFromString(ColumnsScanFramework.COLUMNS_COL + "[1]");
    SchemaPath index3 = SchemaPath.parseFromString(ColumnsScanFramework.COLUMNS_COL + "[3]");
    fixtureBuilder.setProjection(Lists.newArrayList(index1, index3));
    fixtureBuilder.addReader(colsReader);
    fixtureBuilder.builder.requireColumnsArray(true);
    ScanFixture scanFixture = fixtureBuilder.build();
    ScanOperatorExec scanOp = scanFixture.scanOp;
    // Start the one and only reader and inspect the negotiated
    // columns-array projection metadata.
    assertTrue(scanOp.buildSchema());
    assertNotNull(colsReader.negotiator);
    assertTrue(colsReader.negotiator.columnsArrayProjected());
    boolean[] projected = colsReader.negotiator.projectedIndexes();
    assertNotNull(projected);
    // Highest requested index is 3, so the flag array spans indexes 0..3,
    // with only the two requested entries set.
    assertEquals(4, projected.length);
    assertFalse(projected[0]);
    assertTrue(projected[1]);
    assertFalse(projected[2]);
    assertTrue(projected[3]);
    scanFixture.close();
}
Also used : ScanFixture(org.apache.drill.exec.physical.impl.scan.ScanTestUtils.ScanFixture) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)

Example 25 with SchemaBuilder

use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.

Source: class TestFileScanFramework, method testMetadataColumns.

/**
 * Sanity check that a pair of implicit file-metadata columns ("filename",
 * "suffix") can be projected alongside all table columns, in table order.
 * Exhaustive implicit-column coverage lives in lower-level component tests.
 */
@Test
public void testMetadataColumns() {
    MockEarlySchemaReader mockReader = new MockEarlySchemaReader();
    mockReader.batchLimit = 1;
    // Project both table columns plus the two implicit columns.
    FileScanFixtureBuilder fixtureBuilder = new FileScanFixtureBuilder();
    fixtureBuilder.setProjection("a", "b", "filename", "suffix");
    fixtureBuilder.addReader(mockReader);
    ScanFixture scanFixture = fixtureBuilder.build();
    ScanOperatorExec scanOp = scanFixture.scanOp;
    // Expected output: table columns followed by the implicit columns.
    SchemaBuilder schemaBuilder = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addNullable("b", MinorType.VARCHAR, 10)
        .add("filename", MinorType.VARCHAR)
        .add("suffix", MinorType.VARCHAR);
    BatchSchema expectedSchema = new BatchSchemaBuilder()
        .withSchemaBuilder(schemaBuilder)
        .build();
    SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
        .addRow(10, "fred", MOCK_FILE_NAME, MOCK_SUFFIX)
        .addRow(20, "wilma", MOCK_FILE_NAME, MOCK_SUFFIX)
        .build();
    // The schema-only batch must already carry the implicit columns.
    assertTrue(scanOp.buildSchema());
    assertEquals(expectedSchema, scanOp.batchAccessor().schema());
    scanOp.batchAccessor().release();
    // The single data batch must include the implicit column values.
    assertTrue(scanOp.next());
    RowSetUtilities.verify(expected, fixture.wrap(scanOp.batchAccessor().container()));
    // Reader is exhausted after its one batch.
    assertFalse(scanOp.next());
    assertEquals(0, scanOp.batchAccessor().rowCount());
    scanFixture.close();
}
Also used : ScanFixture(org.apache.drill.exec.physical.impl.scan.ScanTestUtils.ScanFixture) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) BatchSchema(org.apache.drill.exec.record.BatchSchema) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) BatchSchemaBuilder(org.apache.drill.exec.record.BatchSchemaBuilder) BatchSchemaBuilder(org.apache.drill.exec.record.BatchSchemaBuilder) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)

Aggregations

SchemaBuilder (org.apache.drill.exec.record.metadata.SchemaBuilder)1095 Test (org.junit.Test)1020 TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata)1008 RowSet (org.apache.drill.exec.physical.rowSet.RowSet)588 SubOperatorTest (org.apache.drill.test.SubOperatorTest)407 RowSetBuilder (org.apache.drill.exec.physical.rowSet.RowSetBuilder)288 SingleRowSet (org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet)263 ClusterTest (org.apache.drill.test.ClusterTest)245 EvfTest (org.apache.drill.categories.EvfTest)203 RowSetComparison (org.apache.drill.test.rowSet.RowSetComparison)188 JsonTest (org.apache.drill.categories.JsonTest)110 ResultSetLoader (org.apache.drill.exec.physical.resultSet.ResultSetLoader)108 DirectRowSet (org.apache.drill.exec.physical.rowSet.DirectRowSet)108 RowSetLoader (org.apache.drill.exec.physical.resultSet.RowSetLoader)85 BatchSchemaBuilder (org.apache.drill.exec.record.BatchSchemaBuilder)83 ScalarReader (org.apache.drill.exec.vector.accessor.ScalarReader)68 UserException (org.apache.drill.common.exceptions.UserException)62 BatchSchema (org.apache.drill.exec.record.BatchSchema)62 VectorContainer (org.apache.drill.exec.record.VectorContainer)58 BaseTest (org.apache.drill.test.BaseTest)57