
Example 41 with TupleWriter

Use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by apache.

From class TestResultSetLoaderMapArray, method testOverwriteRow.

/**
 * Version of the {@link TestResultSetLoaderProtocol#testOverwriteRow()} test
 * that uses nested columns inside an array of maps. Here we must call
 * {@code start()} to reset the array back to the initial start position after
 * each "discard."
 */
@Test
public void testOverwriteRow() {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMapArray("m")
            .add("b", MinorType.INT)
            .add("c", MinorType.VARCHAR)
            .resumeSchema()
        .buildSchema();
    ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
        .readerSchema(schema)
        .rowCountLimit(ValueVector.MAX_ROW_COUNT)
        .build();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    RowSetLoader rootWriter = rsLoader.writer();
    // Can't use the shortcut to populate rows when doing overwrites.
    ScalarWriter aWriter = rootWriter.scalar("a");
    ArrayWriter maWriter = rootWriter.array("m");
    TupleWriter mWriter = maWriter.tuple();
    ScalarWriter bWriter = mWriter.scalar("b");
    ScalarWriter cWriter = mWriter.scalar("c");
    // Write 10,000 rows, overwriting 99% of them. This will cause vector
    // overflow and data corruption if overwrite does not work; but will happily
    // produce the correct result if everything works as it should.
    byte[] value = new byte[512];
    Arrays.fill(value, (byte) 'X');
    int count = 0;
    rsLoader.startBatch();
    while (count < 10_000) {
        rootWriter.start();
        count++;
        aWriter.setInt(count);
        for (int i = 0; i < 10; i++) {
            bWriter.setInt(count * 10 + i);
            cWriter.setBytes(value, value.length);
            maWriter.save();
        }
        if (count % 100 == 0) {
            rootWriter.save();
        }
    }
    // Verify using a reader.
    RowSet result = fixture.wrap(rsLoader.harvest());
    assertEquals(count / 100, result.rowCount());
    RowSetReader reader = result.reader();
    ArrayReader maReader = reader.array("m");
    TupleReader mReader = maReader.tuple();
    int rowId = 1;
    while (reader.next()) {
        assertEquals(rowId * 100, reader.scalar("a").getInt());
        assertEquals(10, maReader.size());
        for (int i = 0; i < 10; i++) {
            assertTrue(maReader.next());
            assertEquals(rowId * 1000 + i, mReader.scalar("b").getInt());
            assertTrue(Arrays.equals(value, mReader.scalar("c").getBytes()));
        }
        rowId++;
    }
    result.clear();
    rsLoader.close();
}
Also used: TupleReader(org.apache.drill.exec.vector.accessor.TupleReader) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) ArrayReader(org.apache.drill.exec.vector.accessor.ArrayReader) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) ArrayWriter(org.apache.drill.exec.vector.accessor.ArrayWriter) RowSetReader(org.apache.drill.exec.physical.rowSet.RowSetReader) ScalarWriter(org.apache.drill.exec.vector.accessor.ScalarWriter) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
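The keep-or-discard protocol the test exercises can be shown in isolation. The sketch below is not part of the Drill test suite; it assumes the impl classes ResultSetLoaderImpl and ResultSetOptionBuilder live in org.apache.drill.exec.physical.resultSet.impl (as in recent Drill versions) and that the caller supplies a BufferAllocator.

import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.physical.resultSet.ResultSetLoader;
import org.apache.drill.exec.physical.resultSet.RowSetLoader;
import org.apache.drill.exec.physical.resultSet.impl.ResultSetLoaderImpl;
import org.apache.drill.exec.physical.resultSet.impl.ResultSetOptionBuilder;
import org.apache.drill.exec.record.metadata.SchemaBuilder;
import org.apache.drill.exec.record.metadata.TupleMetadata;

public class DiscardSketch {
    // Write ten rows, keeping only the even-numbered ones. A row is discarded
    // simply by calling start() again without an intervening save().
    public static void keepOrDiscard(BufferAllocator allocator) {
        TupleMetadata schema = new SchemaBuilder()
            .add("a", MinorType.INT)
            .buildSchema();
        ResultSetLoader rsLoader = new ResultSetLoaderImpl(allocator,
            new ResultSetOptionBuilder().readerSchema(schema).build());
        RowSetLoader rootWriter = rsLoader.writer();
        rsLoader.startBatch();
        for (int i = 0; i < 10; i++) {
            rootWriter.start();                 // position writers at a row
            rootWriter.scalar("a").setInt(i);   // (re)write the pending row
            if (i % 2 == 0) {
                rootWriter.save();              // commit: row becomes permanent
            }
            // No save(): the next start() overwrites this row and, for nested
            // arrays, resets the array back to its start position.
        }
        rsLoader.harvest().clear();             // batch holds rows 0, 2, 4, 6, 8
        rsLoader.close();
    }
}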

Example 42 with TupleWriter

Use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by apache.

From class TestResultSetLoaderMaps, method testNestedMapsNullable.

/**
 * Create nested maps. Then, add columns to each map
 * on the fly. This time, with nullable types.
 */
@Test
public void testNestedMapsNullable() {
    final TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMap("m1")
            .addNullable("b", MinorType.VARCHAR)
            .addMap("m2")
                .addNullable("c", MinorType.VARCHAR)
                .resumeMap()
            .resumeSchema()
        .buildSchema();
    final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
        .readerSchema(schema)
        .build();
    final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    final RowSetLoader rootWriter = rsLoader.writer();
    rsLoader.startBatch();
    rootWriter.addRow(10, mapValue("b1", mapValue("c1")));
    // Validate first batch
    RowSet actual = fixture.wrap(rsLoader.harvest());
    SingleRowSet expected = fixture.rowSetBuilder(schema).addRow(10, mapValue("b1", mapValue("c1"))).build();
    RowSetUtilities.verify(expected, actual);
    // Now add columns in the second batch.
    rsLoader.startBatch();
    rootWriter.addRow(20, mapValue("b2", mapValue("c2")));
    final TupleWriter m1Writer = rootWriter.tuple("m1");
    m1Writer.addColumn(SchemaBuilder.columnSchema("d", MinorType.VARCHAR, DataMode.OPTIONAL));
    final TupleWriter m2Writer = m1Writer.tuple("m2");
    m2Writer.addColumn(SchemaBuilder.columnSchema("e", MinorType.VARCHAR, DataMode.OPTIONAL));
    rootWriter.addRow(30, mapValue("b3", mapValue("c3", "e3"), "d3"));
    // And another set while the write proceeds.
    m1Writer.addColumn(SchemaBuilder.columnSchema("f", MinorType.VARCHAR, DataMode.OPTIONAL));
    m2Writer.addColumn(SchemaBuilder.columnSchema("g", MinorType.VARCHAR, DataMode.OPTIONAL));
    rootWriter.addRow(40, mapValue("b4", mapValue("c4", "e4", "g4"), "d4", "e4"));
    // Validate second batch
    actual = fixture.wrap(rsLoader.harvest());
    final TupleMetadata expectedSchema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMap("m1")
            .addNullable("b", MinorType.VARCHAR)
            .addMap("m2")
                .addNullable("c", MinorType.VARCHAR)
                .addNullable("e", MinorType.VARCHAR)
                .addNullable("g", MinorType.VARCHAR)
                .resumeMap()
            .addNullable("d", MinorType.VARCHAR)
            .addNullable("f", MinorType.VARCHAR)
            .resumeSchema()
        .buildSchema();
    expected = fixture.rowSetBuilder(expectedSchema)
        .addRow(20, mapValue("b2", mapValue("c2", null, null), null, null))
        .addRow(30, mapValue("b3", mapValue("c3", "e3", null), "d3", null))
        .addRow(40, mapValue("b4", mapValue("c4", "e4", "g4"), "d4", "e4"))
        .build();
    RowSetUtilities.verify(expected, actual);
    rsLoader.close();
}
Also used: SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
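The same column-addition call works at any map depth because each nested TupleWriter is a live handle. A condensed sketch under the same package assumptions as the sketch after Example 41; it also assumes ScalarWriter.setString() is available for VARCHAR columns.

import org.apache.drill.common.types.TypeProtos.DataMode;
import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.physical.resultSet.ResultSetLoader;
import org.apache.drill.exec.physical.resultSet.RowSetLoader;
import org.apache.drill.exec.physical.resultSet.impl.ResultSetLoaderImpl;
import org.apache.drill.exec.physical.resultSet.impl.ResultSetOptionBuilder;
import org.apache.drill.exec.record.metadata.SchemaBuilder;
import org.apache.drill.exec.record.metadata.TupleMetadata;
import org.apache.drill.exec.vector.accessor.TupleWriter;

public class NullableEvolutionSketch {
    // Add a nullable column to a map between rows; earlier rows read back
    // as null for the new column because its vector is back-filled.
    public static void evolve(BufferAllocator allocator) {
        TupleMetadata schema = new SchemaBuilder()
            .addMap("m")
                .addNullable("b", MinorType.VARCHAR)
                .resumeSchema()
            .buildSchema();
        ResultSetLoader rsLoader = new ResultSetLoaderImpl(allocator,
            new ResultSetOptionBuilder().readerSchema(schema).build());
        RowSetLoader rootWriter = rsLoader.writer();
        TupleWriter mWriter = rootWriter.tuple("m");
        rsLoader.startBatch();
        rootWriter.start();
        mWriter.scalar("b").setString("b1");
        rootWriter.save();
        // Row 0 is already written; "c" is populated only from row 1 onward.
        mWriter.addColumn(SchemaBuilder.columnSchema("c", MinorType.VARCHAR, DataMode.OPTIONAL));
        rootWriter.start();
        mWriter.scalar("b").setString("b2");
        mWriter.scalar("c").setString("c2");
        rootWriter.save();
        rsLoader.harvest().clear();   // row 0 reads back as ("b1", null)
        rsLoader.close();
    }
}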

Example 43 with TupleWriter

Use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by apache.

From class TestResultSetLoaderMaps, method testMapWithArray.

/**
 * Test a map that contains a scalar array. No reason to suspect that this
 * will have problem as the array writer is fully tested in the accessor
 * subsystem. Still, need to test the cardinality methods of the loader
 * layer.
 */
@Test
public void testMapWithArray() {
    final TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMap("m")
            .addArray("c", MinorType.INT)
            .addArray("d", MinorType.VARCHAR)
            .resumeSchema()
        .buildSchema();
    final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
        .readerSchema(schema)
        .build();
    final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    final RowSetLoader rootWriter = rsLoader.writer();
    // Write some rows
    rsLoader.startBatch();
    rootWriter
        .addRow(10, mapValue(intArray(110, 120, 130), strArray("d1.1", "d1.2", "d1.3", "d1.4")))
        .addRow(20, mapValue(intArray(210), strArray()))
        .addRow(30, mapValue(intArray(), strArray("d3.1")));
    // Validate first batch
    RowSet actual = fixture.wrap(rsLoader.harvest());
    SingleRowSet expected = fixture.rowSetBuilder(schema)
        .addRow(10, mapValue(intArray(110, 120, 130), strArray("d1.1", "d1.2", "d1.3", "d1.4")))
        .addRow(20, mapValue(intArray(210), strArray()))
        .addRow(30, mapValue(intArray(), strArray("d3.1")))
        .build();
    RowSetUtilities.verify(expected, actual);
    // Add another array after the first row in the second batch.
    rsLoader.startBatch();
    rootWriter
        .addRow(40, mapValue(intArray(410, 420), strArray("d4.1", "d4.2")))
        .addRow(50, mapValue(intArray(510), strArray("d5.1")));
    final TupleWriter mapWriter = rootWriter.tuple("m");
    mapWriter.addColumn(SchemaBuilder.columnSchema("e", MinorType.VARCHAR, DataMode.REPEATED));
    rootWriter
        .addRow(60, mapValue(intArray(610, 620), strArray("d6.1", "d6.2"), strArray("e6.1", "e6.2")))
        .addRow(70, mapValue(intArray(710), strArray(), strArray("e7.1", "e7.2")));
    // Validate the second batch. The new array should have been back-filled
    // with empty offsets for the missing rows.
    actual = fixture.wrap(rsLoader.harvest());
    expected = fixture.rowSetBuilder(actual.schema())
        .addRow(40, mapValue(intArray(410, 420), strArray("d4.1", "d4.2"), strArray()))
        .addRow(50, mapValue(intArray(510), strArray("d5.1"), strArray()))
        .addRow(60, mapValue(intArray(610, 620), strArray("d6.1", "d6.2"), strArray("e6.1", "e6.2")))
        .addRow(70, mapValue(intArray(710), strArray(), strArray("e7.1", "e7.2")))
        .build();
    RowSetUtilities.verify(expected, actual);
    rsLoader.close();
}
Also used: SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
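For a REPEATED column the back-fill is an empty array rather than null. A sketch of just that behavior, under the same package assumptions as the earlier sketches; it additionally assumes that the ScalarWriter returned by ArrayWriter.scalar() appends one array element per set call, the scalar-array counterpart of the save() protocol the map-array example uses.

import org.apache.drill.common.types.TypeProtos.DataMode;
import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.physical.resultSet.ResultSetLoader;
import org.apache.drill.exec.physical.resultSet.RowSetLoader;
import org.apache.drill.exec.physical.resultSet.impl.ResultSetLoaderImpl;
import org.apache.drill.exec.physical.resultSet.impl.ResultSetOptionBuilder;
import org.apache.drill.exec.record.metadata.SchemaBuilder;
import org.apache.drill.exec.record.metadata.TupleMetadata;
import org.apache.drill.exec.vector.accessor.TupleWriter;

public class RepeatedBackfillSketch {
    public static void backfill(BufferAllocator allocator) {
        TupleMetadata schema = new SchemaBuilder()
            .addMap("m")
                .addArray("c", MinorType.INT)
                .resumeSchema()
            .buildSchema();
        ResultSetLoader rsLoader = new ResultSetLoaderImpl(allocator,
            new ResultSetOptionBuilder().readerSchema(schema).build());
        RowSetLoader rootWriter = rsLoader.writer();
        TupleWriter mWriter = rootWriter.tuple("m");
        rsLoader.startBatch();
        rootWriter.start();
        mWriter.array("c").scalar().setInt(110);   // one-element array, row 0
        rootWriter.save();
        // New repeated column: row 0's entry becomes an empty offset span.
        mWriter.addColumn(SchemaBuilder.columnSchema("e", MinorType.VARCHAR, DataMode.REPEATED));
        rootWriter.start();
        mWriter.array("c").scalar().setInt(210);
        mWriter.array("e").scalar().setString("e1");
        rootWriter.save();
        rsLoader.harvest().clear();   // row 0 reads back with e == []
        rsLoader.close();
    }
}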

Example 44 with TupleWriter

Use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by apache.

From class TestResultSetLoaderMaps, method testMapEvolution.

/**
 * Create schema with a map, then add columns to the map
 * after delivering the first batch. The new columns should appear
 * in the second-batch output.
 */
@Test
public void testMapEvolution() {
    final TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMap("m")
            .add("b", MinorType.VARCHAR)
            .resumeSchema()
        .buildSchema();
    final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
        .readerSchema(schema)
        .build();
    final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    assertEquals(3, rsLoader.schemaVersion());
    final RowSetLoader rootWriter = rsLoader.writer();
    rsLoader.startBatch();
    rootWriter.addRow(10, mapValue("fred")).addRow(20, mapValue("barney"));
    RowSet actual = fixture.wrap(rsLoader.harvest());
    assertEquals(3, rsLoader.schemaVersion());
    assertEquals(2, actual.rowCount());
    // Validate first batch
    SingleRowSet expected = fixture.rowSetBuilder(schema).addRow(10, mapValue("fred")).addRow(20, mapValue("barney")).build();
    RowSetUtilities.verify(expected, actual);
    // Add three columns in the second batch. One before
    // the batch starts, one before the first row, and one after
    // the first row.
    final TupleWriter mapWriter = rootWriter.tuple("m");
    mapWriter.addColumn(SchemaBuilder.columnSchema("c", MinorType.INT, DataMode.REQUIRED));
    rsLoader.startBatch();
    mapWriter.addColumn(SchemaBuilder.columnSchema("d", MinorType.BIGINT, DataMode.REQUIRED));
    rootWriter.addRow(30, mapValue("wilma", 130, 130_000L));
    mapWriter.addColumn(SchemaBuilder.columnSchema("e", MinorType.VARCHAR, DataMode.REQUIRED));
    rootWriter.addRow(40, mapValue("betty", 140, 140_000L, "bam-bam"));
    actual = fixture.wrap(rsLoader.harvest());
    assertEquals(6, rsLoader.schemaVersion());
    assertEquals(2, actual.rowCount());
    // Validate the second batch
    final TupleMetadata expectedSchema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMap("m")
            .add("b", MinorType.VARCHAR)
            .add("c", MinorType.INT)
            .add("d", MinorType.BIGINT)
            .add("e", MinorType.VARCHAR)
            .resumeSchema()
        .buildSchema();
    expected = fixture.rowSetBuilder(expectedSchema)
        .addRow(30, mapValue("wilma", 130, 130_000L, ""))
        .addRow(40, mapValue("betty", 140, 140_000L, "bam-bam"))
        .build();
    RowSetUtilities.verify(expected, actual);
    rsLoader.close();
}
Also used: SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
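Example 44's assertions also pin down the schema-version accounting: the initial three columns (a, m, m.b) give version 3, and the version reaches 6 after three addColumn() calls, that is, one bump per added column. A minimal check of that rule, as a sketch under the same package assumptions as the earlier ones:

import org.apache.drill.common.types.TypeProtos.DataMode;
import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.physical.resultSet.ResultSetLoader;
import org.apache.drill.exec.physical.resultSet.impl.ResultSetLoaderImpl;
import org.apache.drill.exec.physical.resultSet.impl.ResultSetOptionBuilder;
import org.apache.drill.exec.record.metadata.SchemaBuilder;
import org.apache.drill.exec.record.metadata.TupleMetadata;

public class SchemaVersionSketch {
    public static void versions(BufferAllocator allocator) {
        TupleMetadata schema = new SchemaBuilder()
            .add("a", MinorType.INT)
            .addMap("m")
                .add("b", MinorType.VARCHAR)
                .resumeSchema()
            .buildSchema();
        ResultSetLoader rsLoader = new ResultSetLoaderImpl(allocator,
            new ResultSetOptionBuilder().readerSchema(schema).build());
        int before = rsLoader.schemaVersion();   // 3: a, m, and m.b each count once
        rsLoader.writer().tuple("m")
            .addColumn(SchemaBuilder.columnSchema("c", MinorType.INT, DataMode.REQUIRED));
        int after = rsLoader.schemaVersion();    // before + 1: one bump per column
        System.out.println(before + " -> " + after);
        rsLoader.close();
    }
}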

Example 45 with TupleWriter

Use of org.apache.drill.exec.vector.accessor.TupleWriter in project drill by apache.

From class TestResultSetLoaderDicts, method testMapValueNullableFields.

/**
 * Create dict with map value. Then, add columns to the map
 * on the fly. Use nullable, variable-width columns since
 * those require the most processing and are most likely to
 * fail if anything is out of place.
 */
@Test
public void testMapValueNullableFields() {
    final TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addDict("d", MinorType.VARCHAR)
            .mapValue()
                .addNullable("b", MinorType.VARCHAR)
                .resumeDict()
            .resumeSchema()
        .buildSchema();
    final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
        .readerSchema(schema)
        .build();
    final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    assertEquals(5, rsLoader.schemaVersion());
    final RowSetLoader rootWriter = rsLoader.writer();
    rsLoader.startBatch();
    rootWriter.addRow(10, map("a", mapValue("c1"), "b", mapValue("c2")));
    // Validate first batch
    RowSet actual = fixture.wrap(rsLoader.harvest());
    assertEquals(5, rsLoader.schemaVersion());
    SingleRowSet expected = fixture.rowSetBuilder(schema).addRow(10, map("a", mapValue("c1"), "b", mapValue("c2"))).build();
    RowSetUtilities.verify(expected, actual);
    // Now add columns in the second batch.
    rsLoader.startBatch();
    rootWriter.addRow(20, map("a2", mapValue("c11"), "b2", mapValue("c12"), "c2", mapValue("c13")));
    final DictWriter dictWriter = rootWriter.dict("d");
    final TupleWriter nestedMapWriter = dictWriter.valueWriter().tuple();
    nestedMapWriter.addColumn(SchemaBuilder.columnSchema("c", MinorType.VARCHAR, DataMode.OPTIONAL));
    rootWriter.addRow(30, map("a3", mapValue("c21", "d21")));
    // And another set while the write proceeds.
    nestedMapWriter.addColumn(SchemaBuilder.columnSchema("d", MinorType.VARCHAR, DataMode.OPTIONAL));
    rootWriter.addRow(40, map("a4", mapValue("c31", "d31", "e31"), "b4", mapValue("c32", "d32", "e32")));
    // Validate second batch
    actual = fixture.wrap(rsLoader.harvest());
    assertEquals(7, rsLoader.schemaVersion());
    final TupleMetadata expectedSchema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addDict("d", MinorType.VARCHAR)
            .mapValue()
                .addNullable("b", MinorType.VARCHAR)
                .addNullable("c", MinorType.VARCHAR)
                .addNullable("d", MinorType.VARCHAR)
                .resumeDict()
            .resumeSchema()
        .buildSchema();
    expected = fixture.rowSetBuilder(expectedSchema)
        .addRow(20, map("a2", mapValue("c11", null, null), "b2", mapValue("c12", null, null), "c2", mapValue("c13", null, null)))
        .addRow(30, map("a3", mapValue("c21", "d21", null)))
        .addRow(40, map("a4", mapValue("c31", "d31", "e31"), "b4", mapValue("c32", "d32", "e32")))
        .build();
    RowSetUtilities.verify(expected, actual);
    rsLoader.close();
}
Also used: DictWriter(org.apache.drill.exec.vector.accessor.DictWriter) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
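The only dict-specific step in Example 45 is the navigation from the DictWriter to the map writer for its value; from there, column addition is identical to the plain-map cases above. A sketch of just that navigation, under the same package assumptions as the earlier sketches:

import org.apache.drill.common.types.TypeProtos.DataMode;
import org.apache.drill.common.types.TypeProtos.MinorType;
import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.physical.resultSet.ResultSetLoader;
import org.apache.drill.exec.physical.resultSet.RowSetLoader;
import org.apache.drill.exec.physical.resultSet.impl.ResultSetLoaderImpl;
import org.apache.drill.exec.physical.resultSet.impl.ResultSetOptionBuilder;
import org.apache.drill.exec.record.metadata.SchemaBuilder;
import org.apache.drill.exec.record.metadata.TupleMetadata;
import org.apache.drill.exec.vector.accessor.DictWriter;
import org.apache.drill.exec.vector.accessor.TupleWriter;

public class DictValueSketch {
    public static void navigate(BufferAllocator allocator) {
        TupleMetadata schema = new SchemaBuilder()
            .addDict("d", MinorType.VARCHAR)
                .mapValue()
                    .addNullable("b", MinorType.VARCHAR)
                    .resumeDict()
                .resumeSchema()
            .buildSchema();
        ResultSetLoader rsLoader = new ResultSetLoaderImpl(allocator,
            new ResultSetOptionBuilder().readerSchema(schema).build());
        RowSetLoader rootWriter = rsLoader.writer();
        // dict -> value object -> tuple: the map writer for the dict's values.
        DictWriter dictWriter = rootWriter.dict("d");
        TupleWriter valueWriter = dictWriter.valueWriter().tuple();
        // Evolve the dict's map value exactly as with a top-level map.
        valueWriter.addColumn(SchemaBuilder.columnSchema("c", MinorType.VARCHAR, DataMode.OPTIONAL));
        rsLoader.close();
    }
}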

Aggregations

TupleWriter (org.apache.drill.exec.vector.accessor.TupleWriter): 59
TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata): 52
SubOperatorTest (org.apache.drill.test.SubOperatorTest): 50
Test (org.junit.Test): 50
SchemaBuilder (org.apache.drill.exec.record.metadata.SchemaBuilder): 35
SingleRowSet (org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet): 33
RowSetLoader (org.apache.drill.exec.physical.resultSet.RowSetLoader): 26
ResultSetLoader (org.apache.drill.exec.physical.resultSet.ResultSetLoader): 25
ScalarWriter (org.apache.drill.exec.vector.accessor.ScalarWriter): 23
TupleReader (org.apache.drill.exec.vector.accessor.TupleReader): 20
RowSet (org.apache.drill.exec.physical.rowSet.RowSet): 19
ArrayWriter (org.apache.drill.exec.vector.accessor.ArrayWriter): 18
SchemaBuilder (org.apache.drill.test.rowSet.schema.SchemaBuilder): 16
SingleRowSet (org.apache.drill.test.rowSet.RowSet.SingleRowSet): 15
ResultSetLoader (org.apache.drill.exec.physical.rowSet.ResultSetLoader): 14
RowSetLoader (org.apache.drill.exec.physical.rowSet.RowSetLoader): 14
RowSet (org.apache.drill.test.rowSet.RowSet): 13
ArrayReader (org.apache.drill.exec.vector.accessor.ArrayReader): 12
RowSetComparison (org.apache.drill.test.rowSet.RowSetComparison): 12
ScalarReader (org.apache.drill.exec.vector.accessor.ScalarReader): 10