Example 21 with TupleWriter

use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by apache.

the class HDF5BatchReader method getAndMapCompoundData.

/**
 * Processes the MAP data type which can be found in HDF5 files.
 * It automatically flattens anything with more than two dimensions.
 *
 * @param path the HDF5 path to the compound data
 * @param reader the HDF5 reader for the data file
 * @param rowWriter the RowSetLoader to which the data is written
 */
private void getAndMapCompoundData(String path, HdfFile reader, RowSetLoader rowWriter) {
    final String COMPOUND_DATA_FIELD_NAME = "compound_data";
    List<CompoundDataMember> data = ((CompoundDataType) reader.getDatasetByPath(path).getDataType()).getMembers();
    int index;
    // Add map to schema
    SchemaBuilder innerSchema = new SchemaBuilder();
    MapBuilder mapBuilder = innerSchema.addMap(COMPOUND_DATA_FIELD_NAME);
    // Loop to build schema
    for (CompoundDataMember dataMember : data) {
        String dataType = dataMember.getDataType().getJavaType().getName();
        String fieldName = dataMember.getName();
        switch(dataType) {
            case "byte":
                mapBuilder.add(fieldName, MinorType.TINYINT, DataMode.REPEATED);
                break;
            case "short":
                mapBuilder.add(fieldName, MinorType.SMALLINT, DataMode.REPEATED);
                break;
            case "int":
                mapBuilder.add(fieldName, MinorType.INT, DataMode.REPEATED);
                break;
            case "double":
                mapBuilder.add(fieldName, MinorType.FLOAT8, DataMode.REPEATED);
                break;
            case "float":
                mapBuilder.add(fieldName, MinorType.FLOAT4, DataMode.REPEATED);
                break;
            case "long":
                mapBuilder.add(fieldName, MinorType.BIGINT, DataMode.REPEATED);
                break;
            case "boolean":
                mapBuilder.add(fieldName, MinorType.BIT, DataMode.REPEATED);
                break;
            case "java.lang.String":
                mapBuilder.add(fieldName, MinorType.VARCHAR, DataMode.REPEATED);
                break;
            default:
                logger.warn("Drill cannot process data type {} in compound fields.", dataType);
                break;
        }
    }
    TupleMetadata finalInnerSchema = mapBuilder.resumeSchema().buildSchema();
    index = rowWriter.tupleSchema().index(COMPOUND_DATA_FIELD_NAME);
    if (index == -1) {
        index = rowWriter.addColumn(finalInnerSchema.column(COMPOUND_DATA_FIELD_NAME));
    }
    TupleWriter mapWriter = rowWriter.column(index).tuple();
    for (CompoundDataMember dataMember : data) {
        String dataType = dataMember.getDataType().getJavaType().getName();
        String fieldName = dataMember.getName();
        int[] dataLength = reader.getDatasetByPath(path).getDimensions();
        Object rawData = ((LinkedHashMap<String, ?>) reader.getDatasetByPath(path).getData()).get(fieldName);
        ArrayWriter innerWriter = mapWriter.array(fieldName);
        for (int i = 0; i < dataLength[0]; i++) {
            switch(dataType) {
                case "byte":
                    innerWriter.scalar().setInt(((byte[]) rawData)[i]);
                    break;
                case "short":
                    innerWriter.scalar().setInt(((short[]) rawData)[i]);
                    break;
                case "int":
                    innerWriter.scalar().setInt(((int[]) rawData)[i]);
                    break;
                case "double":
                    innerWriter.scalar().setDouble(((double[]) rawData)[i]);
                    break;
                case "float":
                    innerWriter.scalar().setFloat(((float[]) rawData)[i]);
                    break;
                case "long":
                    innerWriter.scalar().setLong(((long[]) rawData)[i]);
                    break;
                case "boolean":
                    innerWriter.scalar().setBoolean(((boolean[]) rawData)[i]);
                    break;
                case "java.lang.String":
                    if ((((String[]) rawData)[i]) != null) {
                        innerWriter.scalar().setString(((String[]) rawData)[i]);
                    } else {
                        innerWriter.scalar().setNull();
                    }
                    break;
                default:
                    logger.warn("Drill cannot process data type {} in compound fields.", dataType);
                    break;
            }
        }
    }
}
Also used : CompoundDataType(io.jhdf.object.datatype.CompoundDataType) CompoundDataMember(io.jhdf.object.datatype.CompoundDataType.CompoundDataMember) LinkedHashMap(java.util.LinkedHashMap) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) MapBuilder(org.apache.drill.exec.record.metadata.MapBuilder) ArrayWriter(org.apache.drill.exec.vector.accessor.ArrayWriter)
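
The pattern above separates schema definition from writing: a MapBuilder describes the compound column, the column is added to the row writer only if it is not already present, and a TupleWriter then addresses the map's members by name. A minimal sketch of that pattern, reduced to a single REPEATED INT member ("counts" is an illustrative name, not taken from the reader above), assuming rowWriter is a RowSetLoader positioned inside an open batch:

SchemaBuilder innerSchema = new SchemaBuilder();
MapBuilder mapBuilder = innerSchema.addMap("compound_data");
mapBuilder.add("counts", MinorType.INT, DataMode.REPEATED);
TupleMetadata mapSchema = mapBuilder.resumeSchema().buildSchema();

// Add the map column to the batch only if it is not already there.
int index = rowWriter.tupleSchema().index("compound_data");
if (index == -1) {
    index = rowWriter.addColumn(mapSchema.column("compound_data"));
}

// Write repeated values into the map member for the current row.
TupleWriter mapWriter = rowWriter.column(index).tuple();
ArrayWriter counts = mapWriter.array("counts");
for (int value : new int[] { 1, 2, 3 }) {
    counts.scalar().setInt(value);
}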

Example 22 with TupleWriter

use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by apache.

the class TestVariantAccessors method testUnionWithMap.

/**
 * Test a variant (AKA "union vector") at the top level which
 * includes a map.
 */
@Test
public void testUnionWithMap() {
    final TupleMetadata schema = new SchemaBuilder()
        .addUnion("u")
          .addType(MinorType.VARCHAR)
          .addMap()
            .addNullable("a", MinorType.INT)
            .addNullable("b", MinorType.VARCHAR)
            .resumeUnion()
          .resumeSchema()
        .buildSchema();
    SingleRowSet result;
    // Write values
    {
        final ExtendableRowSet rs = fixture.rowSet(schema);
        final RowSetWriter writer = rs.writer();
        // Sanity check of writer structure
        final ObjectWriter wo = writer.column(0);
        assertEquals(ObjectType.VARIANT, wo.type());
        final VariantWriter vw = wo.variant();
        assertTrue(vw.hasType(MinorType.VARCHAR));
        final ObjectWriter strObj = vw.member(MinorType.VARCHAR);
        final ScalarWriter strWriter = strObj.scalar();
        assertSame(strWriter, vw.scalar(MinorType.VARCHAR));
        assertTrue(vw.hasType(MinorType.MAP));
        final ObjectWriter mapObj = vw.member(MinorType.MAP);
        final TupleWriter mWriter = mapObj.tuple();
        assertSame(mWriter, vw.tuple());
        final ScalarWriter aWriter = mWriter.scalar("a");
        final ScalarWriter bWriter = mWriter.scalar("b");
        // First row: string "first"
        vw.setType(MinorType.VARCHAR);
        strWriter.setString("first");
        writer.save();
        // Second row: a map
        vw.setType(MinorType.MAP);
        aWriter.setInt(20);
        bWriter.setString("fred");
        writer.save();
        // Third row: null
        vw.setNull();
        writer.save();
        // Fourth row: map with a null string
        vw.setType(MinorType.MAP);
        aWriter.setInt(40);
        bWriter.setNull();
        writer.save();
        // Fifth row: string "last"
        vw.setType(MinorType.VARCHAR);
        strWriter.setString("last");
        writer.save();
        result = writer.done();
        assertEquals(5, result.rowCount());
    }
    // Read the values.
    {
        final RowSetReader reader = result.reader();
        // Sanity check of structure
        final ObjectReader ro = reader.column(0);
        assertEquals(ObjectType.VARIANT, ro.type());
        final VariantReader vr = ro.variant();
        assertTrue(vr.hasType(MinorType.VARCHAR));
        final ObjectReader strObj = vr.member(MinorType.VARCHAR);
        final ScalarReader strReader = strObj.scalar();
        assertSame(strReader, vr.scalar(MinorType.VARCHAR));
        assertTrue(vr.hasType(MinorType.MAP));
        final ObjectReader mapObj = vr.member(MinorType.MAP);
        final TupleReader mReader = mapObj.tuple();
        assertSame(mReader, vr.tuple());
        final ScalarReader aReader = mReader.scalar("a");
        final ScalarReader bReader = mReader.scalar("b");
        // First row: string "first"
        assertTrue(reader.next());
        assertFalse(vr.isNull());
        assertEquals(MinorType.VARCHAR, vr.dataType());
        assertFalse(strReader.isNull());
        assertTrue(mReader.isNull());
        assertEquals("first", strReader.getString());
        // Second row: a map
        assertTrue(reader.next());
        assertFalse(vr.isNull());
        assertEquals(MinorType.MAP, vr.dataType());
        assertTrue(strReader.isNull());
        assertFalse(mReader.isNull());
        assertFalse(aReader.isNull());
        assertEquals(20, aReader.getInt());
        assertFalse(bReader.isNull());
        assertEquals("fred", bReader.getString());
        // Third row: null
        assertTrue(reader.next());
        assertTrue(vr.isNull());
        assertTrue(strReader.isNull());
        assertTrue(mReader.isNull());
        assertTrue(aReader.isNull());
        assertTrue(bReader.isNull());
        // Fourth row: map with a null string
        assertTrue(reader.next());
        assertEquals(MinorType.MAP, vr.dataType());
        assertEquals(40, aReader.getInt());
        assertTrue(bReader.isNull());
        // Fifth row: string "last"
        assertTrue(reader.next());
        assertEquals(MinorType.VARCHAR, vr.dataType());
        assertEquals("last", strReader.getString());
        assertFalse(reader.next());
    }
    result.clear();
}
Also used : TupleReader(org.apache.drill.exec.vector.accessor.TupleReader) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) VariantWriter(org.apache.drill.exec.vector.accessor.VariantWriter) ObjectWriter(org.apache.drill.exec.vector.accessor.ObjectWriter) ScalarReader(org.apache.drill.exec.vector.accessor.ScalarReader) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) ObjectReader(org.apache.drill.exec.vector.accessor.ObjectReader) ScalarWriter(org.apache.drill.exec.vector.accessor.ScalarWriter) ExtendableRowSet(org.apache.drill.exec.physical.rowSet.RowSet.ExtendableRowSet) VariantReader(org.apache.drill.exec.vector.accessor.VariantReader) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
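
The write protocol for a union is worth isolating: setType() selects which member vector holds the current row's value, and the value itself goes through that member's writer before the row is saved. A condensed sketch of the three cases exercised above, assuming writer is the same RowSetWriter over the single union column "u":

VariantWriter vw = writer.column(0).variant();
ScalarWriter strWriter = vw.member(MinorType.VARCHAR).scalar();
TupleWriter mapWriter = vw.member(MinorType.MAP).tuple();

// Row holds a VARCHAR.
vw.setType(MinorType.VARCHAR);
strWriter.setString("first");
writer.save();

// Row holds a map; write through the map's member writers.
vw.setType(MinorType.MAP);
mapWriter.scalar("a").setInt(20);
mapWriter.scalar("b").setString("fred");
writer.save();

// Row is null: the union itself carries the null.
vw.setNull();
writer.save();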

Example 23 with TupleWriter

use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by apache.

the class TestRowSet method example.

/**
 * The code below is not a test. Rather, it is a simple example of
 * how to write a batch of data using writers, then read it using
 * readers.
 */
@Test
public void example() {
    // Step 1: Define a schema. In a real app, this
    // will be provided by a reader, by an incoming batch,
    // etc.
    final TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.VARCHAR)
        .addArray("b", MinorType.INT)
        .addMap("c")
          .add("c1", MinorType.INT)
          .add("c2", MinorType.VARCHAR)
          .resumeSchema()
        .buildSchema();
    // Step 2: Create a batch. Done here because this is
    // a batch-oriented test. Done automatically in the
    // result set loader.
    final DirectRowSet drs = DirectRowSet.fromSchema(fixture.allocator(), schema);
    // Step 3: Create the writer.
    final RowSetWriter writer = drs.writer();
    // Step 4: Populate data. Here we do it the way an app would:
    // using the individual accessors. See tests above for the many
    // ways this can be done depending on the need of the app.
    // 
    // Write two rows:
    // ("fred", [10, 11], {12, "wilma"})
    // ("barney", [20, 21], {22, "betty"})
    // 
    // This example uses Java strings for Varchar. Real code might
    // use byte arrays.
    writer.scalar("a").setString("fred");
    final ArrayWriter bWriter = writer.array("b");
    bWriter.scalar().setInt(10);
    bWriter.scalar().setInt(11);
    final TupleWriter cWriter = writer.tuple("c");
    cWriter.scalar("c1").setInt(12);
    cWriter.scalar("c2").setString("wilma");
    writer.save();
    writer.scalar("a").setString("barney");
    bWriter.scalar().setInt(20);
    bWriter.scalar().setInt(21);
    cWriter.scalar("c1").setInt(22);
    cWriter.scalar("c2").setString("betty");
    writer.save();
    // Step 5: "Harvest" the batch. Done differently in the
    // result set loader.
    final SingleRowSet rowSet = writer.done();
    // Step 6: Create a reader.
    final RowSetReader reader = rowSet.reader();
    while (reader.next()) {
        final StringBuilder sb = new StringBuilder();
        sb.append(print(reader.scalar("a").getString()));
        final ArrayReader bReader = reader.array("b");
        while (bReader.next()) {
            sb.append(print(bReader.scalar().getInt()));
        }
        final TupleReader cReader = reader.tuple("c");
        sb.append(print(cReader.scalar("c1").getInt()));
        sb.append(print(cReader.scalar("c2").getString()));
        logger.debug(sb.toString());
    }
    // Step 7: Free memory.
    rowSet.clear();
}
Also used : ArrayReader(org.apache.drill.exec.vector.accessor.ArrayReader) TupleReader(org.apache.drill.exec.vector.accessor.TupleReader) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) ArrayWriter(org.apache.drill.exec.vector.accessor.ArrayWriter) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
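
For comparison, Step 4 above can be written more compactly with the addRow() shorthand that later examples use (see Example 24). This sketch assumes the same schema and writer; intArray() and mapValue() are the RowSetUtilities test helpers, shown here as an assumption since this particular test does not use them:

writer
    .addRow("fred", intArray(10, 11), mapValue(12, "wilma"))
    .addRow("barney", intArray(20, 21), mapValue(22, "betty"));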

Example 24 with TupleWriter

use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by apache.

the class TestMapAccessors method testBasicRepeatedMap.

@Test
public void testBasicRepeatedMap() {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMapArray("m")
          .add("c", MinorType.INT)
          .add("d", MinorType.VARCHAR)
          .resumeSchema()
        .buildSchema();
    RowSetBuilder builder = fixture.rowSetBuilder(schema);
    RowSetWriter rootWriter = builder.writer();
    // Verify structure and schema
    TupleMetadata actualSchema = rootWriter.tupleSchema();
    assertEquals(2, actualSchema.size());
    assertTrue(actualSchema.metadata(1).isArray());
    assertTrue(actualSchema.metadata(1).isMap());
    assertEquals(2, actualSchema.metadata("m").tupleSchema().size());
    assertEquals(2, actualSchema.column("m").getChildren().size());
    TupleWriter mapWriter = rootWriter.array("m").tuple();
    assertSame(actualSchema.metadata("m").tupleSchema(), mapWriter.schema().tupleSchema());
    assertSame(mapWriter.tupleSchema(), mapWriter.schema().tupleSchema());
    assertSame(mapWriter.tupleSchema().metadata(0), mapWriter.scalar(0).schema());
    assertSame(mapWriter.tupleSchema().metadata(1), mapWriter.scalar(1).schema());
    // Write a couple of rows with arrays.
    rootWriter
        .addRow(10, mapArray(mapValue(110, "d1.1"), mapValue(120, "d2.2")))
        .addRow(20, mapArray())
        .addRow(30, mapArray(mapValue(310, "d3.1"), mapValue(320, "d3.2"), mapValue(330, "d3.3")));
    // Verify the first batch
    RowSet actual = builder.build();
    RepeatedMapVector mapVector = (RepeatedMapVector) actual.container().getValueVector(1).getValueVector();
    MaterializedField mapField = mapVector.getField();
    assertEquals(2, mapField.getChildren().size());
    Iterator<MaterializedField> iter = mapField.getChildren().iterator();
    assertTrue(mapWriter.scalar(0).schema().schema().isEquivalent(iter.next()));
    assertTrue(mapWriter.scalar(1).schema().schema().isEquivalent(iter.next()));
    SingleRowSet expected = fixture.rowSetBuilder(schema)
        .addRow(10, mapArray(mapValue(110, "d1.1"), mapValue(120, "d2.2")))
        .addRow(20, mapArray())
        .addRow(30, mapArray(mapValue(310, "d3.1"), mapValue(320, "d3.2"), mapValue(330, "d3.3")))
        .build();
    new RowSetComparison(expected).verify(actual);
    // Test that the row set rebuilds its internal structure from
    // a vector container.
    RowSet wrapped = fixture.wrap(actual.container());
    RowSetUtilities.verify(expected, wrapped);
}
Also used : SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) RowSetComparison(org.apache.drill.test.rowSet.RowSetComparison) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) RepeatedMapVector(org.apache.drill.exec.vector.complex.RepeatedMapVector) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) MaterializedField(org.apache.drill.exec.record.MaterializedField) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
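
The addRow() calls above hide the per-element protocol for a repeated map: each map in the array is written through the shared TupleWriter, and ArrayWriter.save() closes one element before the next is written. A sketch of the first row written explicitly, assuming the same rootWriter and schema as the test:

ArrayWriter mapArrayWriter = rootWriter.array("m");
TupleWriter mw = mapArrayWriter.tuple();

rootWriter.scalar("a").setInt(10);
mw.scalar("c").setInt(110);
mw.scalar("d").setString("d1.1");
// Close the first map element of the array.
mapArrayWriter.save();
mw.scalar("c").setInt(120);
mw.scalar("d").setString("d2.2");
// Close the second map element.
mapArrayWriter.save();
// Close the row.
rootWriter.save();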

Example 25 with TupleWriter

use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by apache.

the class TestRowSet method testDictStructureMapValue.

@Test
public void testDictStructureMapValue() {
    final String dictName = "d";
    final int bScale = 1;
    final TupleMetadata schema = new SchemaBuilder()
        .add("id", MinorType.INT)
        .addDict(dictName, MinorType.INT)
          .mapValue()
            .add("a", MinorType.INT)
            .add("b", MinorType.VARDECIMAL, 8, bScale)
            .resumeDict()
          .resumeSchema()
        .buildSchema();
    final ExtendableRowSet rowSet = fixture.rowSet(schema);
    final RowSetWriter writer = rowSet.writer();
    // Dict with Map value
    assertEquals(ObjectType.ARRAY, writer.column(dictName).type());
    final ScalarWriter idWriter = writer.scalar(0);
    final DictWriter dictWriter = writer.column(1).dict();
    assertEquals(ValueType.INTEGER, dictWriter.keyType());
    assertEquals(ObjectType.TUPLE, dictWriter.valueType());
    final ScalarWriter keyWriter = dictWriter.keyWriter();
    final TupleWriter valueWriter = dictWriter.valueWriter().tuple();
    assertEquals(ValueType.INTEGER, keyWriter.valueType());
    ScalarWriter aWriter = valueWriter.scalar("a");
    ScalarWriter bWriter = valueWriter.scalar("b");
    assertEquals(ValueType.INTEGER, aWriter.valueType());
    assertEquals(ValueType.DECIMAL, bWriter.valueType());
    // Write data
    idWriter.setInt(1);
    keyWriter.setInt(11);
    aWriter.setInt(10);
    bWriter.setDecimal(BigDecimal.valueOf(1));
    // advance to next entry position
    dictWriter.save();
    keyWriter.setInt(12);
    aWriter.setInt(11);
    bWriter.setDecimal(BigDecimal.valueOf(2));
    dictWriter.save();
    writer.save();
    idWriter.setInt(2);
    keyWriter.setInt(21);
    aWriter.setInt(20);
    bWriter.setDecimal(BigDecimal.valueOf(3));
    dictWriter.save();
    writer.save();
    idWriter.setInt(3);
    keyWriter.setInt(31);
    aWriter.setInt(30);
    bWriter.setDecimal(BigDecimal.valueOf(4));
    dictWriter.save();
    keyWriter.setInt(32);
    aWriter.setInt(31);
    bWriter.setDecimal(BigDecimal.valueOf(5));
    dictWriter.save();
    keyWriter.setInt(33);
    aWriter.setInt(32);
    bWriter.setDecimal(BigDecimal.valueOf(6));
    dictWriter.save();
    writer.save();
    // Finish the row set and get a reader.
    final SingleRowSet actual = writer.done();
    final RowSetReader reader = actual.reader();
    // Verify reader structure
    assertEquals(ObjectType.ARRAY, reader.column(dictName).type());
    final DictReader dictReader = reader.dict(1);
    assertEquals(ObjectType.ARRAY, dictReader.type());
    assertEquals(ValueType.INTEGER, dictReader.keyColumnType());
    assertEquals(ObjectType.TUPLE, dictReader.valueColumnType());
    final KeyAccessor keyAccessor = dictReader.keyAccessor();
    final TupleReader valueReader = dictReader.valueReader().tuple();
    // Row 1: get value reader with its position set to entry corresponding to a key
    assertTrue(reader.next());
    // dict itself is not null
    assertFalse(dictReader.isNull());
    assertTrue(keyAccessor.find(12));
    assertEquals(11, valueReader.scalar("a").getInt());
    assertEquals(BigDecimal.valueOf(2.0), valueReader.scalar("b").getDecimal());
    // MapReader#getObject() returns a List containing the values for each column,
    // rather than a mapping of column name to its value, hence a List is expected
    // for the dict's value.
    Map<Object, Object> map = map(
        11, Arrays.asList(10, BigDecimal.valueOf(1.0)),
        12, Arrays.asList(11, BigDecimal.valueOf(2.0)));
    assertEquals(map, dictReader.getObject());
    // Row 2
    assertTrue(reader.next());
    assertFalse(keyAccessor.find(222));
    assertTrue(keyAccessor.find(21));
    assertEquals(Arrays.asList(20, BigDecimal.valueOf(3.0)), valueReader.getObject());
    map = map(21, Arrays.asList(20, BigDecimal.valueOf(3.0)));
    assertEquals(map, dictReader.getObject());
    // Row 3
    assertTrue(reader.next());
    assertTrue(keyAccessor.find(32));
    assertFalse(valueReader.isNull());
    assertEquals(31, valueReader.scalar("a").getInt());
    assertEquals(BigDecimal.valueOf(5.0), valueReader.scalar("b").getDecimal());
    assertTrue(keyAccessor.find(31));
    assertEquals(30, valueReader.scalar("a").getInt());
    assertEquals(BigDecimal.valueOf(4.0), valueReader.scalar("b").getDecimal());
    assertFalse(keyAccessor.find(404));
    map = map(
        31, Arrays.asList(30, BigDecimal.valueOf(4.0)),
        32, Arrays.asList(31, BigDecimal.valueOf(5.0)),
        33, Arrays.asList(32, BigDecimal.valueOf(6.0)));
    assertEquals(map, dictReader.getObject());
    assertFalse(reader.next());
    // Verify that the dict accessor's value count was set.
    final DictVector dictVector = (DictVector) actual.container().getValueVector(1).getValueVector();
    assertEquals(3, dictVector.getAccessor().getValueCount());
    final SingleRowSet expected = fixture.rowSetBuilder(schema)
        .addRow(1, map(
            11, objArray(10, BigDecimal.valueOf(1.0)),
            12, objArray(11, BigDecimal.valueOf(2.0))))
        .addRow(2, map(21, objArray(20, BigDecimal.valueOf(3.0))))
        .addRow(3, map(
            31, objArray(30, BigDecimal.valueOf(4.0)),
            32, objArray(31, BigDecimal.valueOf(5.0)),
            33, objArray(32, BigDecimal.valueOf(6.0))))
        .build();
    RowSetUtilities.verify(expected, actual);
}
Also used : DictVector(org.apache.drill.exec.vector.complex.DictVector) RepeatedDictVector(org.apache.drill.exec.vector.complex.RepeatedDictVector) DictWriter(org.apache.drill.exec.vector.accessor.DictWriter) TupleReader(org.apache.drill.exec.vector.accessor.TupleReader) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) KeyAccessor(org.apache.drill.exec.vector.accessor.KeyAccessor) DictReader(org.apache.drill.exec.vector.accessor.DictReader) ScalarWriter(org.apache.drill.exec.vector.accessor.ScalarWriter) ExtendableRowSet(org.apache.drill.exec.physical.rowSet.RowSet.ExtendableRowSet) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
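
The read-side idiom in this test is the key lookup: KeyAccessor.find() positions the dict's value reader on the entry for the given key and returns false when the key is absent, so reads can be guarded without exceptions. A minimal sketch, assuming the keyAccessor and valueReader from the test above with the reader positioned on the second row:

if (keyAccessor.find(21)) {
    // The value reader is now positioned on the entry for key 21.
    int a = valueReader.scalar("a").getInt();             // 20
    BigDecimal b = valueReader.scalar("b").getDecimal();  // 3.0
}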

Aggregations

TupleWriter (org.apache.drill.exec.vector.accessor.TupleWriter) 59
TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata) 52
SubOperatorTest (org.apache.drill.test.SubOperatorTest) 50
Test (org.junit.Test) 50
SchemaBuilder (org.apache.drill.exec.record.metadata.SchemaBuilder) 35
SingleRowSet (org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) 33
RowSetLoader (org.apache.drill.exec.physical.resultSet.RowSetLoader) 26
ResultSetLoader (org.apache.drill.exec.physical.resultSet.ResultSetLoader) 25
ScalarWriter (org.apache.drill.exec.vector.accessor.ScalarWriter) 23
TupleReader (org.apache.drill.exec.vector.accessor.TupleReader) 20
RowSet (org.apache.drill.exec.physical.rowSet.RowSet) 19
ArrayWriter (org.apache.drill.exec.vector.accessor.ArrayWriter) 18
SchemaBuilder (org.apache.drill.test.rowSet.schema.SchemaBuilder) 16
SingleRowSet (org.apache.drill.test.rowSet.RowSet.SingleRowSet) 15
ResultSetLoader (org.apache.drill.exec.physical.rowSet.ResultSetLoader) 14
RowSetLoader (org.apache.drill.exec.physical.rowSet.RowSetLoader) 14
RowSet (org.apache.drill.test.rowSet.RowSet) 13
ArrayReader (org.apache.drill.exec.vector.accessor.ArrayReader) 12
RowSetComparison (org.apache.drill.test.rowSet.RowSetComparison) 12
ScalarReader (org.apache.drill.exec.vector.accessor.ScalarReader) 10