
Example 86 with ScalarWriter

Use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by apache.

From the class TestMapAccessors, method testBasics.

@Test
public void testBasics() {
    final TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMap("m")
          .add("c", MinorType.INT)
          .add("d", MinorType.VARCHAR)
          .resumeSchema()
        .add("e", MinorType.VARCHAR)
        .buildSchema();
    RowSetBuilder builder = fixture.rowSetBuilder(schema);
    RowSetWriter rootWriter = builder.writer();
    // Verify structure and schema
    final TupleMetadata actualSchema = rootWriter.tupleSchema();
    assertEquals(3, actualSchema.size());
    assertTrue(actualSchema.metadata(1).isMap());
    assertEquals(2, actualSchema.metadata("m").tupleSchema().size());
    assertEquals(2, actualSchema.column("m").getChildren().size());
    // Write a row the way that client code does.
    final ScalarWriter aWriter = rootWriter.scalar("a");
    final TupleWriter mWriter = rootWriter.tuple("m");
    final ScalarWriter cWriter = mWriter.scalar("c");
    final ScalarWriter dWriter = mWriter.scalar("d");
    final ScalarWriter eWriter = rootWriter.scalar("e");
    aWriter.setInt(10);
    cWriter.setInt(110);
    dWriter.setString("fred");
    eWriter.setString("pebbles");
    rootWriter.save();
    // Write another row using the test-time convenience methods.
    rootWriter.addRow(20, mapValue(210, "barney"), "bam-bam");
    RowSet result = builder.build();
    assertEquals(2, result.rowCount());
    // Validate internal structure.
    VectorContainer container = result.container();
    assertEquals(3, container.getNumberOfColumns());
    ValueVector v = container.getValueVector(1).getValueVector();
    assertTrue(v instanceof MapVector);
    MapVector mv = (MapVector) v;
    assertEquals(2, mv.getAccessor().getValueCount());
    // Validate data. Do so using the readers to avoid verifying
    // using the very mechanisms we want to test.
    RowSetReader rootReader = result.reader();
    final ScalarReader aReader = rootReader.scalar("a");
    final TupleReader mReader = rootReader.tuple("m");
    final ScalarReader cReader = mReader.scalar("c");
    final ScalarReader dReader = mReader.scalar("d");
    final ScalarReader eReader = rootReader.scalar("e");
    rootReader.next();
    assertEquals(10, aReader.getInt());
    assertEquals(110, cReader.getInt());
    assertEquals("fred", dReader.getString());
    assertEquals("pebbles", eReader.getString());
    rootReader.next();
    assertEquals(20, aReader.getInt());
    assertEquals(210, cReader.getInt());
    assertEquals("barney", dReader.getString());
    assertEquals("bam-bam", eReader.getString());
    // Verify using the convenience methods.
    final SingleRowSet expected = fixture.rowSetBuilder(schema)
        .addRow(10, mapValue(110, "fred"), "pebbles")
        .addRow(20, mapValue(210, "barney"), "bam-bam")
        .build();
    new RowSetComparison(expected).verify(result);
    // Test that the row set rebuilds its internal structure from
    // a vector container.
    RowSet wrapped = fixture.wrap(result.container());
    RowSetUtilities.verify(expected, wrapped);
}
Also used : TupleReader(org.apache.drill.exec.vector.accessor.TupleReader) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) VectorContainer(org.apache.drill.exec.record.VectorContainer) ValueVector(org.apache.drill.exec.vector.ValueVector) ScalarReader(org.apache.drill.exec.vector.accessor.ScalarReader) RowSetComparison(org.apache.drill.test.rowSet.RowSetComparison) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) ScalarWriter(org.apache.drill.exec.vector.accessor.ScalarWriter) RepeatedMapVector(org.apache.drill.exec.vector.complex.RepeatedMapVector) MapVector(org.apache.drill.exec.vector.complex.MapVector) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
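These methods are shown out of context: they rely on an inherited test fixture and on static helpers such as mapValue (and, in later examples, objArray and strArray) for building expected values. A minimal sketch of the class skeleton such a test is assumed to sit in; the helper import locations vary by Drill version and are an assumption here, not something shown on this page:

import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.test.SubOperatorTest;

// Sketch only: the static imports for mapValue(), objArray() and strArray()
// are assumed to come from Drill's row-set test utilities; check your Drill
// version for the exact package before copying.
public class TestMapAccessors extends SubOperatorTest {
    // The inherited "fixture" provides the allocator and the rowSetBuilder()
    // factory used throughout these examples.
    // ... test methods such as testBasics() above go here ...
}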

Example 87 with ScalarWriter

Use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by apache.

From the class TestRepeatedListAccessors, method testSchema2DWriterReader.

@Test
public void testSchema2DWriterReader() {
    final TupleMetadata schema = new SchemaBuilder()
        .add("id", MinorType.INT)
        .addRepeatedList("list2")
          .addArray(MinorType.VARCHAR)
          .resumeSchema()
        .buildSchema();
    final DirectRowSet rowSet = DirectRowSet.fromSchema(fixture.allocator(), schema);
    SingleRowSet result;
    {
        final RowSetWriter writer = rowSet.writer();
        assertEquals(2, writer.size());
        final ObjectWriter listObj = writer.column("list2");
        assertEquals(ObjectType.ARRAY, listObj.type());
        final ArrayWriter listWriter = listObj.array();
        assertEquals(ObjectType.ARRAY, listWriter.entryType());
        final ArrayWriter innerWriter = listWriter.array();
        assertEquals(ObjectType.SCALAR, innerWriter.entryType());
        final ScalarWriter strWriter = innerWriter.scalar();
        // Write one row using writers explicitly.
        // 
        // (1, [["a, "b"], ["c", "d"]])
        // 
        // Note that each scalar write automatically appends a new element to the inner array.
        writer.scalar("id").setInt(1);
        strWriter.setString("a");
        strWriter.setString("b");
        listWriter.save();
        strWriter.setString("c");
        strWriter.setString("d");
        listWriter.save();
        writer.save();
        // Write more rows using the convenience methods.
        // 
        // (2, [["e"], [], ["f", "g", "h"]])
        // (3, [])
        // (4, [[], ["i"], []])
        writer
            .addRow(2, objArray(strArray("e"), strArray(), strArray("f", "g", "h")))
            .addRow(3, objArray())
            .addRow(4, objArray(strArray(), strArray("i"), strArray()));
        result = writer.done();
    }
    // Verify one row using the individual readers.
    {
        final RowSetReader reader = result.reader();
        assertEquals(2, reader.columnCount());
        final ObjectReader listObj = reader.column("list2");
        assertEquals(ObjectType.ARRAY, listObj.type());
        final ArrayReader listReader = listObj.array();
        assertEquals(ObjectType.ARRAY, listReader.entryType());
        final ArrayReader innerReader = listReader.array();
        assertEquals(ObjectType.SCALAR, innerReader.entryType());
        final ScalarReader strReader = innerReader.scalar();
        // Read back the first row, which was written explicitly above:
        // 
        // (1, [["a", "b"], ["c", "d"]])
        assertTrue(reader.next());
        assertEquals(2, listReader.size());
        assertTrue(listReader.next());
        assertEquals(2, innerReader.size());
        assertTrue(innerReader.next());
        assertEquals("a", strReader.getString());
        assertTrue(innerReader.next());
        assertEquals("b", strReader.getString());
        assertFalse(innerReader.next());
        assertTrue(listReader.next());
        assertEquals(2, innerReader.size());
        assertTrue(innerReader.next());
        assertEquals("c", strReader.getString());
        assertTrue(innerReader.next());
        assertEquals("d", strReader.getString());
        assertFalse(innerReader.next());
        assertFalse(listReader.next());
    }
    // Verify both rows by building another row set and comparing.
    final RowSet expected = fixture.rowSetBuilder(schema)
        .addRow(1, objArray(strArray("a", "b"), strArray("c", "d")))
        .addRow(2, objArray(strArray("e"), strArray(), strArray("f", "g", "h")))
        .addRow(3, objArray())
        .addRow(4, objArray(strArray(), strArray("i"), strArray()))
        .build();
    new RowSetComparison(expected).verify(result);
    // Test that the row set rebuilds its internal structure from
    // a vector container.
    RowSet wrapped = fixture.wrap(result.container());
    RowSetUtilities.verify(expected, wrapped);
}
Also used : SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ObjectWriter(org.apache.drill.exec.vector.accessor.ObjectWriter) ScalarReader(org.apache.drill.exec.vector.accessor.ScalarReader) ArrayReader(org.apache.drill.exec.vector.accessor.ArrayReader) RowSetComparison(org.apache.drill.test.rowSet.RowSetComparison) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) BatchSchemaBuilder(org.apache.drill.exec.record.BatchSchemaBuilder) ObjectReader(org.apache.drill.exec.vector.accessor.ObjectReader) ArrayWriter(org.apache.drill.exec.vector.accessor.ArrayWriter) ScalarWriter(org.apache.drill.exec.vector.accessor.ScalarWriter) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
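The read side of the 2-D case always nests the same way: an outer ArrayReader per row, an inner ArrayReader per entry, and a ScalarReader per element. As a hedged sketch (the helper name and the println are illustrative, not part of the test above), a generic traversal of the "list2" column using only the reader calls demonstrated in the example:

// Hypothetical helper: prints every VARCHAR element of the 2-D "list2"
// column, row by row. Uses only the accessor calls shown above.
private void printList2(RowSetReader reader) {
    final ArrayReader outer = reader.column("list2").array();
    final ArrayReader inner = outer.array();   // entry reader for the inner arrays
    final ScalarReader value = inner.scalar(); // entry reader for the inner strings
    while (reader.next()) {                    // each row
        while (outer.next()) {                 // each inner array in the row
            while (inner.next()) {             // each string element
                System.out.println(value.getString());
            }
        }
    }
}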

Example 88 with ScalarWriter

Use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by apache.

From the class TestScalarAccessors, method testBitResize.

/**
 * The bit reader/writer are special and use the BitVector directly.
 * Ensure that resize works in this special case.
 */
@Test
public void testBitResize() {
    TupleMetadata schema = new SchemaBuilder().add("col", MinorType.BIT).buildSchema();
    RowSetBuilder rsb = new RowSetBuilder(fixture.allocator(), schema, 100);
    ScalarWriter bitWriter = rsb.writer().scalar(0);
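    // The builder above is created with a small initial capacity (100); writing
    // ValueVector.MAX_ROW_COUNT values forces the underlying BitVector to resize.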
    for (int i = 0; i < ValueVector.MAX_ROW_COUNT; i++) {
        bitWriter.setBoolean((i % 5) == 0);
        rsb.writer().save();
    }
    SingleRowSet rs = rsb.build();
    RowSetReader reader = rs.reader();
    ScalarReader bitReader = reader.scalar(0);
    for (int i = 0; i < ValueVector.MAX_ROW_COUNT; i++) {
        reader.next();
        assertEquals((i % 5) == 0, bitReader.getBoolean());
    }
    rs.clear();
}
Also used : ScalarReader(org.apache.drill.exec.vector.accessor.ScalarReader) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) ScalarWriter(org.apache.drill.exec.vector.accessor.ScalarWriter) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)

Example 89 with ScalarWriter

Use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by apache.

From the class TestScalarAccessors, method doTestAppend.

private void doTestAppend(TupleMetadata schema) {
    DirectRowSet rs = DirectRowSet.fromSchema(fixture.allocator(), schema);
    RowSetWriter writer = rs.writer(100);
    ScalarWriter colWriter = writer.scalar("col");
    byte[] first = "abc".getBytes();
    byte[] second = "12345".getBytes();
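    // Each row below sets an initial value, then appends a second one, so the
    // stored cell is the concatenation (for example "abc" + "12345" = "abc12345").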
    colWriter.setBytes(first, first.length);
    colWriter.appendBytes(second, second.length);
    writer.save();
    colWriter.setBytes(second, second.length);
    colWriter.appendBytes(first, first.length);
    writer.save();
    colWriter.setBytes(first, first.length);
    colWriter.appendBytes(second, second.length);
    writer.save();
    RowSet actual = writer.done();
    RowSet expected = new RowSetBuilder(fixture.allocator(), schema)
        .addSingleCol("abc12345")
        .addSingleCol("12345abc")
        .addSingleCol("abc12345")
        .build();
    RowSetUtilities.verify(expected, actual);
}
Also used : SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ScalarWriter(org.apache.drill.exec.vector.accessor.ScalarWriter)
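doTestAppend leaves the column definition to its caller, which is not shown here. As a hedged sketch, a hypothetical caller (the test name and the choice of MinorType.VARCHAR are assumptions) might look like:

// Hypothetical caller: exercises setBytes()/appendBytes() on a VARCHAR column
// named "col", matching what doTestAppend() expects.
@Test
public void testAppendVarchar() {
    TupleMetadata schema = new SchemaBuilder()
        .add("col", MinorType.VARCHAR)
        .buildSchema();
    doTestAppend(schema);
}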

Example 90 with ScalarWriter

Use of org.apache.drill.exec.vector.accessor.ScalarWriter in project drill by apache.

From the class TestFillEmpties, method testBitFillEmpties.

/**
 * The bit vector is special: it packs 8 values per byte, so the
 * fill-empties logic must handle entire bytes rather than individual bits.
 */
@Test
public void testBitFillEmpties() {
    TupleMetadata schema = new SchemaBuilder().add("a", MinorType.BIT).buildSchema();
    ExtendableRowSet rs = fixture.rowSet(schema);
    RowSetWriter writer = rs.writer();
    ScalarWriter colWriter = writer.scalar(0);
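    // Write a 1 only on every 43rd row; the writer must back-fill the
    // skipped rows ("fill empties") with 0.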
    for (int i = 0; i < ROW_COUNT; i++) {
        if (i % 43 == 0) {
            colWriter.setInt(1);
        }
        writer.save();
    }
    SingleRowSet result = writer.done();
    RowSetReader reader = result.reader();
    ScalarReader colReader = reader.scalar(0);
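    // The rows that were never written must read back as 0.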
    for (int i = 0; i < ROW_COUNT; i++) {
        assertTrue(reader.next());
        assertEquals(i % 43 == 0 ? 1 : 0, colReader.getInt());
    }
    result.clear();
}
Also used : ScalarReader(org.apache.drill.exec.vector.accessor.ScalarReader) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) ScalarWriter(org.apache.drill.exec.vector.accessor.ScalarWriter) ExtendableRowSet(org.apache.drill.exec.physical.rowSet.RowSet.ExtendableRowSet) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)

Aggregations

ScalarWriter (org.apache.drill.exec.vector.accessor.ScalarWriter): 120
TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata): 69
SubOperatorTest (org.apache.drill.test.SubOperatorTest): 68
Test (org.junit.Test): 68
SchemaBuilder (org.apache.drill.exec.record.metadata.SchemaBuilder): 51
SingleRowSet (org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet): 44
ScalarReader (org.apache.drill.exec.vector.accessor.ScalarReader): 31
ArrayWriter (org.apache.drill.exec.vector.accessor.ArrayWriter): 26
RowSetLoader (org.apache.drill.exec.physical.resultSet.RowSetLoader): 25
ResultSetLoader (org.apache.drill.exec.physical.resultSet.ResultSetLoader): 24
TupleWriter (org.apache.drill.exec.vector.accessor.TupleWriter): 23
ArrayReader (org.apache.drill.exec.vector.accessor.ArrayReader): 22
RowSet (org.apache.drill.exec.physical.rowSet.RowSet): 21
ExtendableRowSet (org.apache.drill.exec.physical.rowSet.RowSet.ExtendableRowSet): 19
SchemaBuilder (org.apache.drill.test.rowSet.schema.SchemaBuilder): 18
ColumnMetadata (org.apache.drill.exec.record.metadata.ColumnMetadata): 17
TupleReader (org.apache.drill.exec.vector.accessor.TupleReader): 17
SingleRowSet (org.apache.drill.test.rowSet.RowSet.SingleRowSet): 14
RowSetReader (org.apache.drill.test.rowSet.RowSetReader): 14
ResultSetLoader (org.apache.drill.exec.physical.rowSet.ResultSetLoader): 13