
Example 81 with ScalarWriter

use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by apache.

the class TestResultSetLoaderRepeatedList method do2DTest.

private void do2DTest(TupleMetadata schema, ResultSetLoader rsLoader) {
    final RowSetLoader writer = rsLoader.writer();
    // Sanity check of writer structure
    assertEquals(2, writer.size());
    final ObjectWriter listObj = writer.column("list2");
    assertEquals(ObjectType.ARRAY, listObj.type());
    final ArrayWriter listWriter = listObj.array();
    assertEquals(ObjectType.ARRAY, listWriter.entryType());
    final ArrayWriter innerWriter = listWriter.array();
    assertEquals(ObjectType.SCALAR, innerWriter.entryType());
    final ScalarWriter strWriter = innerWriter.scalar();
    assertEquals(ValueType.STRING, strWriter.valueType());
    // Sanity test of schema
    final TupleMetadata rowSchema = writer.tupleSchema();
    assertEquals(2, rowSchema.size());
    final ColumnMetadata listSchema = rowSchema.metadata(1);
    assertEquals(MinorType.LIST, listSchema.type());
    assertEquals(DataMode.REPEATED, listSchema.mode());
    assertTrue(listSchema instanceof RepeatedListColumnMetadata);
    assertEquals(StructureType.MULTI_ARRAY, listSchema.structureType());
    assertNotNull(listSchema.childSchema());
    final ColumnMetadata elementSchema = listSchema.childSchema();
    assertEquals(listSchema.name(), elementSchema.name());
    assertEquals(MinorType.VARCHAR, elementSchema.type());
    assertEquals(DataMode.REPEATED, elementSchema.mode());
    // Write values
    rsLoader.startBatch();
    writer
        .addRow(1, objArray(strArray("a", "b"), strArray("c", "d")))
        .addRow(2, objArray(strArray("e"), strArray(), strArray("f", "g", "h")))
        .addRow(3, objArray())
        .addRow(4, objArray(strArray(), strArray("i"), strArray()));
    // Verify the values.
    // (Relies on the row set level repeated list tests having passed.)
    final RowSet expected = fixture.rowSetBuilder(schema)
        .addRow(1, objArray(strArray("a", "b"), strArray("c", "d")))
        .addRow(2, objArray(strArray("e"), strArray(), strArray("f", "g", "h")))
        .addRow(3, objArray())
        .addRow(4, objArray(strArray(), strArray("i"), strArray()))
        .build();
    RowSetUtilities.verify(expected, fixture.wrap(rsLoader.harvest()));
}
Also used : ColumnMetadata(org.apache.drill.exec.record.metadata.ColumnMetadata) RepeatedListColumnMetadata(org.apache.drill.exec.record.metadata.RepeatedListColumnMetadata) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) ObjectWriter(org.apache.drill.exec.vector.accessor.ObjectWriter) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) ArrayWriter(org.apache.drill.exec.vector.accessor.ArrayWriter) ScalarWriter(org.apache.drill.exec.vector.accessor.ScalarWriter)
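A note on the helpers used above: objArray() and strArray() are likely statically imported from the row-set test utilities (RowSetUtilities is already imported for verify()). Conceptually they are just varargs wrappers that let addRow() accept nested array values. The sketch below shows what such helpers plausibly look like; it is an illustration of the idea, not the project's exact source.

// Hypothetical sketch of the varargs helpers assumed above (not the project's
// exact source). strArray() packages its arguments as a String[], one inner
// array of the repeated list; objArray() packages values, including other
// arrays, as an Object[], here the outer list. This is why
// addRow(1, objArray(strArray("a", "b"), strArray("c", "d"))) can describe a
// nested structure in a single call.
public static String[] strArray(String... values) {
    return values;
}

public static Object[] objArray(Object... values) {
    return values;
}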

Example 82 with ScalarWriter

use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by apache.

the class TestResultSetLoaderUnions method testSimpleList.

/**
 * Test for the case of a list defined to contain exactly one type.
 * Relies on the row set tests to verify that the single type model
 * works for lists. Here we test that the ResultSetLoader puts the
 * pieces together correctly.
 */
@Test
public void testSimpleList() {
    // Schema with a list declared with one type, not expandable
    final TupleMetadata schema = new SchemaBuilder()
        .add("id", MinorType.INT)
        .addList("list")
          .addType(MinorType.VARCHAR)
          .resumeSchema()
        .buildSchema();
    schema.metadata("list").variantSchema().becomeSimple();
    final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder().readerSchema(schema).build();
    final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    final RowSetLoader writer = rsLoader.writer();
    // Sanity check: should be an array of Varchar because we said the
    // type within the list is not expandable.
    final ArrayWriter arrWriter = writer.array("list");
    assertEquals(ObjectType.SCALAR, arrWriter.entryType());
    final ScalarWriter strWriter = arrWriter.scalar();
    assertEquals(ValueType.STRING, strWriter.valueType());
    // Can write a batch as if this were a repeated Varchar, except
    // that any value can also be null.
    rsLoader.startBatch();
    writer
        .addRow(1, strArray("fred", "barney"))
        .addRow(2, null)
        .addRow(3, strArray("wilma", "betty", "pebbles"));
    // Verify
    final SingleRowSet expected = fixture.rowSetBuilder(schema)
        .addRow(1, strArray("fred", "barney"))
        .addRow(2, null)
        .addRow(3, strArray("wilma", "betty", "pebbles"))
        .build();
    RowSetUtilities.verify(expected, fixture.wrap(rsLoader.harvest()));
}
Also used : SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) ArrayWriter(org.apache.drill.exec.vector.accessor.ArrayWriter) ScalarWriter(org.apache.drill.exec.vector.accessor.ScalarWriter) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
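For readers who want to see the list written without the addRow() shorthand, the sketch below writes the first row of the test above one column at a time, inside an already-started batch. It is a rough equivalent assembled only from writer calls that appear elsewhere in these examples (start()/save(), scalar(), array().scalar()), not a verbatim excerpt from the test.

// Rough per-column equivalent of writer.addRow(1, strArray("fred", "barney")),
// assuming the same rsLoader/writer as in testSimpleList above.
writer.start();
writer.scalar("id").setInt(1);
ScalarWriter listItem = writer.array("list").scalar();
listItem.setString("fred");      // each setString() appends one list element
listItem.setString("barney");
writer.save();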

Example 83 with ScalarWriter

use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by apache.

the class TestResultSetLoaderUnions method testSimpleListDynamic.

/**
 * Test a simple list created dynamically at load time.
 * The list must include a single type member.
 */
@Test
public void testSimpleListDynamic() {
    final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator());
    final RowSetLoader writer = rsLoader.writer();
    // Can write a batch as if this were a repeated Varchar, except
    // that any value can also be null.
    rsLoader.startBatch();
    writer.addColumn(MaterializedField.create("id", Types.required(MinorType.INT)));
    final ColumnMetadata colSchema = MetadataUtils.newVariant("list", DataMode.REPEATED);
    colSchema.variantSchema().addType(MinorType.VARCHAR);
    colSchema.variantSchema().becomeSimple();
    writer.addColumn(colSchema);
    // Sanity check: should be an array of Varchar because we said the
    // type within the list is not expandable.
    final ArrayWriter arrWriter = writer.array("list");
    assertEquals(ObjectType.SCALAR, arrWriter.entryType());
    final ScalarWriter strWriter = arrWriter.scalar();
    assertEquals(ValueType.STRING, strWriter.valueType());
    writer
        .addRow(1, strArray("fred", "barney"))
        .addRow(2, null)
        .addRow(3, strArray("wilma", "betty", "pebbles"));
    // Verify
    final TupleMetadata schema = new SchemaBuilder()
        .add("id", MinorType.INT)
        .addList("list")
          .addType(MinorType.VARCHAR)
          .resumeSchema()
        .buildSchema();
    final SingleRowSet expected = fixture.rowSetBuilder(schema)
        .addRow(1, strArray("fred", "barney"))
        .addRow(2, null)
        .addRow(3, strArray("wilma", "betty", "pebbles"))
        .build();
    RowSetUtilities.verify(expected, fixture.wrap(rsLoader.harvest()));
}
Also used : ColumnMetadata(org.apache.drill.exec.record.metadata.ColumnMetadata) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) ArrayWriter(org.apache.drill.exec.vector.accessor.ArrayWriter) ScalarWriter(org.apache.drill.exec.vector.accessor.ScalarWriter) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
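The dynamic column above is built by hand with MetadataUtils.newVariant(). A plausible alternative, mirroring testSimpleList earlier in this page, is to build the same single-type list column with SchemaBuilder and hand its metadata to addColumn(). The sketch below is an assumption about equivalence, not part of the original test.

// Hedged alternative: declare the single-type list via SchemaBuilder, then add
// it to the writer at load time. becomeSimple() marks the variant as fixed to
// one type, as in the tests above.
TupleMetadata listSchema = new SchemaBuilder()
    .addList("list")
      .addType(MinorType.VARCHAR)
      .resumeSchema()
    .buildSchema();
listSchema.metadata("list").variantSchema().becomeSimple();
writer.addColumn(listSchema.metadata("list"));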

Example 84 with ScalarWriter

use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by apache.

the class TestResultSetSchemaChange method testSchemaChangeFirstBatch.

/**
 * Test the case where the schema changes in the first batch.
 * Schema changes before the first record are trivial and tested
 * elsewhere. Here we write some records, then add new columns, as a
 * JSON reader might do.
 */
@Test
public void testSchemaChangeFirstBatch() {
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator());
    RowSetLoader rootWriter = rsLoader.writer();
    rootWriter.addColumn(SchemaBuilder.columnSchema("a", MinorType.VARCHAR, DataMode.REQUIRED));
    // Create initial rows
    rsLoader.startBatch();
    int rowCount = 0;
    for (int i = 0; i < 2; i++) {
        rootWriter.start();
        rowCount++;
        rootWriter.scalar(0).setString("a_" + rowCount);
        rootWriter.save();
    }
    // Add a second column: nullable.
    rootWriter.addColumn(SchemaBuilder.columnSchema("b", MinorType.INT, DataMode.OPTIONAL));
    for (int i = 0; i < 2; i++) {
        rootWriter.start();
        rowCount++;
        rootWriter.scalar(0).setString("a_" + rowCount);
        rootWriter.scalar(1).setInt(rowCount);
        rootWriter.save();
    }
    // Add a third column. Use variable-width so that offset
    // vectors must be back-filled.
    rootWriter.addColumn(SchemaBuilder.columnSchema("c", MinorType.VARCHAR, DataMode.OPTIONAL));
    for (int i = 0; i < 2; i++) {
        rootWriter.start();
        rowCount++;
        rootWriter.scalar(0).setString("a_" + rowCount);
        rootWriter.scalar(1).setInt(rowCount);
        rootWriter.scalar(2).setString("c_" + rowCount);
        rootWriter.save();
    }
    // Fourth: Required Varchar. Previous rows are back-filled with empty strings.
    // And a required int. Back-filled with zeros.
    // This pattern may occasionally be useful; in any case it must work, to prevent
    // vector corruption if a reader decides to go this route.
    rootWriter.addColumn(SchemaBuilder.columnSchema("d", MinorType.VARCHAR, DataMode.REQUIRED));
    rootWriter.addColumn(SchemaBuilder.columnSchema("e", MinorType.INT, DataMode.REQUIRED));
    for (int i = 0; i < 2; i++) {
        rootWriter.start();
        rowCount++;
        rootWriter.scalar(0).setString("a_" + rowCount);
        rootWriter.scalar(1).setInt(rowCount);
        rootWriter.scalar(2).setString("c_" + rowCount);
        rootWriter.scalar(3).setString("d_" + rowCount);
        rootWriter.scalar(4).setInt(rowCount * 10);
        rootWriter.save();
    }
    // Add an array. Now two offset vectors must be back-filled.
    rootWriter.addColumn(SchemaBuilder.columnSchema("f", MinorType.VARCHAR, DataMode.REPEATED));
    for (int i = 0; i < 2; i++) {
        rootWriter.start();
        rowCount++;
        rootWriter.scalar(0).setString("a_" + rowCount);
        rootWriter.scalar(1).setInt(rowCount);
        rootWriter.scalar(2).setString("c_" + rowCount);
        rootWriter.scalar(3).setString("d_" + rowCount);
        rootWriter.scalar(4).setInt(rowCount * 10);
        ScalarWriter arrayWriter = rootWriter.column(5).array().scalar();
        arrayWriter.setString("f_" + rowCount + "-1");
        arrayWriter.setString("f_" + rowCount + "-2");
        rootWriter.save();
    }
    // Harvest the batch and verify.
    RowSet actual = fixture.wrap(rsLoader.harvest());
    TupleMetadata expectedSchema = new SchemaBuilder()
        .add("a", MinorType.VARCHAR)
        .addNullable("b", MinorType.INT)
        .addNullable("c", MinorType.VARCHAR)
        .add("d", MinorType.VARCHAR)
        .add("e", MinorType.INT)
        .addArray("f", MinorType.VARCHAR)
        .buildSchema();
    SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
        .addRow("a_1", null, null, "", 0, strArray())
        .addRow("a_2", null, null, "", 0, strArray())
        .addRow("a_3", 3, null, "", 0, strArray())
        .addRow("a_4", 4, null, "", 0, strArray())
        .addRow("a_5", 5, "c_5", "", 0, strArray())
        .addRow("a_6", 6, "c_6", "", 0, strArray())
        .addRow("a_7", 7, "c_7", "d_7", 70, strArray())
        .addRow("a_8", 8, "c_8", "d_8", 80, strArray())
        .addRow("a_9", 9, "c_9", "d_9", 90, strArray("f_9-1", "f_9-2"))
        .addRow("a_10", 10, "c_10", "d_10", 100, strArray("f_10-1", "f_10-2"))
        .build();
    RowSetUtilities.verify(expected, actual);
    rsLoader.close();
}
Also used : SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) BatchSchemaBuilder(org.apache.drill.exec.record.BatchSchemaBuilder) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) ScalarWriter(org.apache.drill.exec.vector.accessor.ScalarWriter) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
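As a follow-up, the back-filled defaults can also be spot-checked by reading the harvested batch directly. The sketch below uses the RowSetReader API shown in the next example (next(), scalar(), getString()/getInt()); it assumes the RowSet named actual from the test above, run before RowSetUtilities.verify() releases the batch, and is not part of the original test.

// Hedged sketch: inspect back-filled defaults in the first row, which was
// written before columns "d" and "e" existed.
RowSetReader reader = actual.reader();
assertTrue(reader.next());
assertEquals("a_1", reader.scalar("a").getString());
assertEquals("", reader.scalar("d").getString());   // required VARCHAR back-filled with ""
assertEquals(0, reader.scalar("e").getInt());       // required INT back-filled with 0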

Example 85 with ScalarWriter

use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by apache.

the class TestMapAccessors method testDoubleNestedArray.

/**
 * Test a doubly-nested array of maps.
 */
@Test
public void testDoubleNestedArray() {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMapArray("m1")
          .add("b", MinorType.INT)
          .addMapArray("m2")
            .add("c", MinorType.INT)
            .addArray("d", MinorType.VARCHAR)
            .resumeMap()
          .resumeSchema()
        .buildSchema();
    RowSetBuilder builder = fixture.rowSetBuilder(schema);
    RowSetWriter rootWriter = builder.writer();
    ScalarWriter aWriter = rootWriter.scalar("a");
    ArrayWriter a1Writer = rootWriter.array("m1");
    TupleWriter m1Writer = a1Writer.tuple();
    ScalarWriter bWriter = m1Writer.scalar("b");
    ArrayWriter a2Writer = m1Writer.array("m2");
    TupleWriter m2Writer = a2Writer.tuple();
    ScalarWriter cWriter = m2Writer.scalar("c");
    ScalarWriter dWriter = m2Writer.array("d").scalar();
    for (int i = 0; i < 5; i++) {
        aWriter.setInt(i);
        for (int j = 0; j < 4; j++) {
            int a1Key = i + 10 + j;
            bWriter.setInt(a1Key);
            for (int k = 0; k < 3; k++) {
                int a2Key = a1Key * 10 + k;
                cWriter.setInt(a2Key);
                for (int l = 0; l < 2; l++) {
                    dWriter.setString("d-" + (a2Key * 10 + l));
                }
                a2Writer.save();
            }
            a1Writer.save();
        }
        rootWriter.save();
    }
    RowSet results = builder.build();
    RowSetReader reader = results.reader();
    ScalarReader aReader = reader.scalar("a");
    ArrayReader a1Reader = reader.array("m1");
    TupleReader m1Reader = a1Reader.tuple();
    ScalarReader bReader = m1Reader.scalar("b");
    ArrayReader a2Reader = m1Reader.array("m2");
    TupleReader m2Reader = a2Reader.tuple();
    ScalarReader cReader = m2Reader.scalar("c");
    ArrayReader dArray = m2Reader.array("d");
    ScalarReader dReader = dArray.scalar();
    for (int i = 0; i < 5; i++) {
        assertTrue(reader.next());
        assertEquals(i, aReader.getInt());
        for (int j = 0; j < 4; j++) {
            assertTrue(a1Reader.next());
            int a1Key = i + 10 + j;
            assertEquals(a1Key, bReader.getInt());
            for (int k = 0; k < 3; k++) {
                assertTrue(a2Reader.next());
                int a2Key = a1Key * 10 + k;
                assertEquals(a2Key, cReader.getInt());
                for (int l = 0; l < 2; l++) {
                    assertTrue(dArray.next());
                    assertEquals("d-" + (a2Key * 10 + l), dReader.getString());
                }
            }
        }
    }
    results.clear();
}
Also used : TupleReader(org.apache.drill.exec.vector.accessor.TupleReader) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ScalarReader(org.apache.drill.exec.vector.accessor.ScalarReader) ArrayReader(org.apache.drill.exec.vector.accessor.ArrayReader) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) ArrayWriter(org.apache.drill.exec.vector.accessor.ArrayWriter) ScalarWriter(org.apache.drill.exec.vector.accessor.ScalarWriter) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
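The key discipline in the code above is where save() is called: each ArrayWriter's save() closes one entry of that array (one m2 map per a2Writer.save(), one m1 map per a1Writer.save()), rootWriter.save() closes the row, and the scalar array d needs no per-element save because each setString() appends an element. The reduced sketch below applies the same pattern to a single level of nesting; the schema and column names are hypothetical, not taken from the test.

// Hypothetical one-level version of the same write pattern.
TupleMetadata schema = new SchemaBuilder()
    .add("a", MinorType.INT)
    .addMapArray("m")
      .add("b", MinorType.INT)
      .resumeSchema()
    .buildSchema();
RowSetBuilder builder = fixture.rowSetBuilder(schema);
RowSetWriter rootWriter = builder.writer();
ArrayWriter mapArray = rootWriter.array("m");
TupleWriter mapWriter = mapArray.tuple();

rootWriter.scalar("a").setInt(1);
mapWriter.scalar("b").setInt(10);
mapArray.save();                 // close the first map entry
mapWriter.scalar("b").setInt(11);
mapArray.save();                 // close the second map entry
rootWriter.save();               // close the row
RowSet rows = builder.build();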

Aggregations

ScalarWriter (org.apache.drill.exec.vector.accessor.ScalarWriter) 120
TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata) 69
SubOperatorTest (org.apache.drill.test.SubOperatorTest) 68
Test (org.junit.Test) 68
SchemaBuilder (org.apache.drill.exec.record.metadata.SchemaBuilder) 51
SingleRowSet (org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) 44
ScalarReader (org.apache.drill.exec.vector.accessor.ScalarReader) 31
ArrayWriter (org.apache.drill.exec.vector.accessor.ArrayWriter) 26
RowSetLoader (org.apache.drill.exec.physical.resultSet.RowSetLoader) 25
ResultSetLoader (org.apache.drill.exec.physical.resultSet.ResultSetLoader) 24
TupleWriter (org.apache.drill.exec.vector.accessor.TupleWriter) 23
ArrayReader (org.apache.drill.exec.vector.accessor.ArrayReader) 22
RowSet (org.apache.drill.exec.physical.rowSet.RowSet) 21
ExtendableRowSet (org.apache.drill.exec.physical.rowSet.RowSet.ExtendableRowSet) 19
SchemaBuilder (org.apache.drill.test.rowSet.schema.SchemaBuilder) 18
ColumnMetadata (org.apache.drill.exec.record.metadata.ColumnMetadata) 17
TupleReader (org.apache.drill.exec.vector.accessor.TupleReader) 17
SingleRowSet (org.apache.drill.test.rowSet.RowSet.SingleRowSet) 14
RowSetReader (org.apache.drill.test.rowSet.RowSetReader) 14
ResultSetLoader (org.apache.drill.exec.physical.rowSet.ResultSetLoader) 13