Example 26 with RowSetLoader

Use of org.apache.drill.exec.physical.resultSet.RowSetLoader in project drill by apache.

From class TestResultSetLoaderMaps, method testNestedMapsRequired.

/**
 * Create nested maps. Then, add columns to each map
 * on the fly. Use required, variable-width columns since
 * those require the most processing and are most likely to
 * fail if anything is out of place.
 */
@Test
public void testNestedMapsRequired() {
    final TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMap("m1")
            .add("b", MinorType.VARCHAR)
            .addMap("m2")
                .add("c", MinorType.VARCHAR)
            .resumeMap()
        .resumeSchema()
        .buildSchema();
    final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder().readerSchema(schema).build();
    final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    assertEquals(5, rsLoader.schemaVersion());
    final RowSetLoader rootWriter = rsLoader.writer();
    rsLoader.startBatch();
    rootWriter.addRow(10, mapValue("b1", mapValue("c1")));
    // Validate first batch
    RowSet actual = fixture.wrap(rsLoader.harvest());
    assertEquals(5, rsLoader.schemaVersion());
    SingleRowSet expected = fixture.rowSetBuilder(schema).addRow(10, mapValue("b1", mapValue("c1"))).build();
    RowSetUtilities.verify(expected, actual);
    // Now add columns in the second batch.
    rsLoader.startBatch();
    rootWriter.addRow(20, mapValue("b2", mapValue("c2")));
    final TupleWriter m1Writer = rootWriter.tuple("m1");
    m1Writer.addColumn(SchemaBuilder.columnSchema("d", MinorType.VARCHAR, DataMode.REQUIRED));
    final TupleWriter m2Writer = m1Writer.tuple("m2");
    m2Writer.addColumn(SchemaBuilder.columnSchema("e", MinorType.VARCHAR, DataMode.REQUIRED));
    rootWriter.addRow(30, mapValue("b3", mapValue("c3", "e3"), "d3"));
    // And another set while the write proceeds.
    m1Writer.addColumn(SchemaBuilder.columnSchema("f", MinorType.VARCHAR, DataMode.REQUIRED));
    m2Writer.addColumn(SchemaBuilder.columnSchema("g", MinorType.VARCHAR, DataMode.REQUIRED));
    rootWriter.addRow(40, mapValue("b4", mapValue("c4", "e4", "g4"), "d4", "e4"));
    // Validate second batch
    actual = fixture.wrap(rsLoader.harvest());
    assertEquals(9, rsLoader.schemaVersion());
    final TupleMetadata expectedSchema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMap("m1")
            .add("b", MinorType.VARCHAR)
            .addMap("m2")
                .add("c", MinorType.VARCHAR)
                .add("e", MinorType.VARCHAR)
                .add("g", MinorType.VARCHAR)
            .resumeMap()
            .add("d", MinorType.VARCHAR)
            .add("f", MinorType.VARCHAR)
        .resumeSchema()
        .buildSchema();
    expected = fixture.rowSetBuilder(expectedSchema)
        .addRow(20, mapValue("b2", mapValue("c2", "", ""), "", ""))
        .addRow(30, mapValue("b3", mapValue("c3", "e3", ""), "d3", ""))
        .addRow(40, mapValue("b4", mapValue("c4", "e4", "g4"), "d4", "e4"))
        .build();
    RowSetUtilities.verify(expected, actual);
    rsLoader.close();
}
Also used: SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
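
The mapValue() calls used throughout these examples come from a statically imported test helper that is not shown in the import list above. A minimal sketch of it, assuming Drill's RowSetUtilities convention that a map value is passed to addRow() as an Object[] of member values:

    public static Object[] mapValue(Object... members) {
        // A map's value is simply the ordered array of its member column
        // values; addRow() unpacks it into the map's column writers.
        return members;
    }

So mapValue("b1", mapValue("c1")) above builds the nested structure {b: "b1", m2: {c: "c1"}}.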

Example 27 with RowSetLoader

Use of org.apache.drill.exec.physical.resultSet.RowSetLoader in project drill by apache.

From class TestResultSetLoaderMaps, method testBasics.

@Test
public void testBasics() {
    final TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMap("m")
            .add("c", MinorType.INT)
            .add("d", MinorType.VARCHAR)
        .resumeSchema()
        .add("e", MinorType.VARCHAR)
        .buildSchema();
    final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder().readerSchema(schema).build();
    final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    assertFalse(rsLoader.isProjectionEmpty());
    final RowSetLoader rootWriter = rsLoader.writer();
    // Verify structure and schema
    assertEquals(5, rsLoader.schemaVersion());
    final TupleMetadata actualSchema = rootWriter.tupleSchema();
    assertEquals(3, actualSchema.size());
    assertTrue(actualSchema.metadata(1).isMap());
    assertEquals(2, actualSchema.metadata("m").tupleSchema().size());
    assertEquals(2, actualSchema.column("m").getChildren().size());
    rsLoader.startBatch();
    // Write a row the way that clients will do.
    final ScalarWriter aWriter = rootWriter.scalar("a");
    final TupleWriter mWriter = rootWriter.tuple("m");
    final ScalarWriter cWriter = mWriter.scalar("c");
    final ScalarWriter dWriter = mWriter.scalar("d");
    final ScalarWriter eWriter = rootWriter.scalar("e");
    rootWriter.start();
    aWriter.setInt(10);
    cWriter.setInt(110);
    dWriter.setString("fred");
    eWriter.setString("pebbles");
    rootWriter.save();
    // Try adding a duplicate column.
    try {
        mWriter.addColumn(SchemaBuilder.columnSchema("c", MinorType.INT, DataMode.OPTIONAL));
        fail();
    } catch (final UserException e) {
        // Expected
    }
    // Write another using the test-time conveniences
    rootWriter.addRow(20, mapValue(210, "barney"), "bam-bam");
    // Harvest the batch
    final RowSet actual = fixture.wrap(rsLoader.harvest());
    assertEquals(5, rsLoader.schemaVersion());
    assertEquals(2, actual.rowCount());
    final MapVector mapVector = (MapVector) actual.container().getValueVector(1).getValueVector();
    assertEquals(2, mapVector.getAccessor().getValueCount());
    // Validate data
    final SingleRowSet expected = fixture.rowSetBuilder(schema)
        .addRow(10, mapValue(110, "fred"), "pebbles")
        .addRow(20, mapValue(210, "barney"), "bam-bam")
        .build();
    RowSetUtilities.verify(expected, actual);
    rsLoader.close();
}
Also used: SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) UserException(org.apache.drill.common.exceptions.UserException) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) ScalarWriter(org.apache.drill.exec.vector.accessor.ScalarWriter) MapVector(org.apache.drill.exec.vector.complex.MapVector) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
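
testBasics() shows the per-row protocol in full: start() opens a row, each ScalarWriter sets one column, and save() commits the row. A minimal sketch of the batch loop a real reader would run; source.hasNextRow(), source.loadRow(), and sendDownstream() are hypothetical stand-ins for the reader's own logic:

    final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    final RowSetLoader rootWriter = rsLoader.writer();
    while (source.hasNextRow()) {
        rsLoader.startBatch();
        while (!rootWriter.isFull() && source.hasNextRow()) {
            rootWriter.start();
            source.loadRow(rootWriter);     // set each column via its writer
            rootWriter.save();
        }
        sendDownstream(rsLoader.harvest()); // harvest() returns a VectorContainer
    }
    rsLoader.close();

The addRow() call in the test is a convenience that wraps the same start()/set/save() sequence.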

Example 28 with RowSetLoader

Use of org.apache.drill.exec.physical.resultSet.RowSetLoader in project drill by apache.

From class TestResultSetLoaderMaps, method testOverwriteRow.

/**
 * Version of the {@link TestResultSetLoaderProtocol#testOverwriteRow()} test
 * that uses nested columns.
 */
@Test
public void testOverwriteRow() {
    final TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMap("m")
            .add("b", MinorType.INT)
            .add("c", MinorType.VARCHAR)
        .resumeSchema()
        .buildSchema();
    final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder().readerSchema(schema).rowCountLimit(ValueVector.MAX_ROW_COUNT).build();
    final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    final RowSetLoader rootWriter = rsLoader.writer();
    // Can't use the shortcut to populate rows when doing overwrites.
    final ScalarWriter aWriter = rootWriter.scalar("a");
    final TupleWriter mWriter = rootWriter.tuple("m");
    final ScalarWriter bWriter = mWriter.scalar("b");
    final ScalarWriter cWriter = mWriter.scalar("c");
    // Write 100,000 rows, overwriting 99% of them. This will cause vector
    // overflow and data corruption if overwrite does not work, but will
    // produce the correct result if everything works as it should.
    final byte[] value = new byte[512];
    Arrays.fill(value, (byte) 'X');
    int count = 0;
    rsLoader.startBatch();
    while (count < 100_000) {
        rootWriter.start();
        count++;
        aWriter.setInt(count);
        bWriter.setInt(count * 10);
        cWriter.setBytes(value, value.length);
        if (count % 100 == 0) {
            rootWriter.save();
        }
    }
    // Verify using a reader.
    final RowSet result = fixture.wrap(rsLoader.harvest());
    assertEquals(count / 100, result.rowCount());
    final RowSetReader reader = result.reader();
    final TupleReader mReader = reader.tuple("m");
    int rowId = 1;
    while (reader.next()) {
        assertEquals(rowId * 100, reader.scalar("a").getInt());
        assertEquals(rowId * 1000, mReader.scalar("b").getInt());
        assertTrue(Arrays.equals(value, mReader.scalar("c").getBytes()));
        rowId++;
    }
    result.clear();
    rsLoader.close();
}
Also used: TupleReader(org.apache.drill.exec.vector.accessor.TupleReader) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) RowSetReader(org.apache.drill.exec.physical.rowSet.RowSetReader) ScalarWriter(org.apache.drill.exec.vector.accessor.ScalarWriter) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
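
The overwrite trick works because of the writer contract: a row joins the batch only when save() is called, and calling start() again without save() rewinds the writers onto the same row position. That makes a row filter cheap to express; a minimal sketch, where keep() is a hypothetical predicate:

    rootWriter.start();
    aWriter.setInt(value);
    bWriter.setInt(value * 10);
    if (keep(value)) {
        rootWriter.save();  // commit the row
    }
    // If save() is skipped, the next start() reuses (overwrites) this row slot.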

Example 29 with RowSetLoader

Use of org.apache.drill.exec.physical.resultSet.RowSetLoader in project drill by apache.

From class TestResultSetLoaderMaps, method testMapWithOverflow.

/**
 * Create a schema with a map, then trigger an overflow on one of the columns
 * in the map. Proper overflow handling should occur regardless of nesting
 * depth.
 */
@Test
public void testMapWithOverflow() {
    final TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMap("m1")
            .add("b", MinorType.INT)
            .addMap("m2")
                .add("c", MinorType.INT)     // Before overflow, written
                .add("d", MinorType.VARCHAR)
                .add("e", MinorType.INT)     // After overflow, not yet written
            .resumeMap()
        .resumeSchema()
        .buildSchema();
    final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder().readerSchema(schema).rowCountLimit(ValueVector.MAX_ROW_COUNT).build();
    final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    final RowSetLoader rootWriter = rsLoader.writer();
    final byte[] value = new byte[512];
    Arrays.fill(value, (byte) 'X');
    int count = 0;
    rsLoader.startBatch();
    while (!rootWriter.isFull()) {
        rootWriter.addRow(count, mapValue(count * 10, mapValue(count * 100, value, count * 1000)));
        count++;
    }
    // Our row count should include the overflow row
    final int expectedCount = ValueVector.MAX_BUFFER_SIZE / value.length;
    assertEquals(expectedCount + 1, count);
    // Loader's row count should include only "visible" rows
    assertEquals(expectedCount, rootWriter.rowCount());
    // Total count should include invisible and look-ahead rows.
    assertEquals(expectedCount + 1, rsLoader.totalRowCount());
    // Result should exclude the overflow row
    RowSet result = fixture.wrap(rsLoader.harvest());
    assertEquals(expectedCount, result.rowCount());
    // Ensure the map vectors' internal value counts are set correctly.
    final MapVector m1Vector = (MapVector) result.container().getValueVector(1).getValueVector();
    assertEquals(expectedCount, m1Vector.getAccessor().getValueCount());
    final MapVector m2Vector = (MapVector) m1Vector.getChildByOrdinal(1);
    assertEquals(expectedCount, m2Vector.getAccessor().getValueCount());
    result.clear();
    // Next batch should start with the overflow row
    rsLoader.startBatch();
    assertEquals(1, rootWriter.rowCount());
    assertEquals(expectedCount + 1, rsLoader.totalRowCount());
    result = fixture.wrap(rsLoader.harvest());
    assertEquals(1, result.rowCount());
    result.clear();
    rsLoader.close();
}
Also used: ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) MapVector(org.apache.drill.exec.vector.complex.MapVector) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
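
The expected count is simple arithmetic, assuming stock Drill limits (an inference, not stated in the test): each vector is capped at ValueVector.MAX_BUFFER_SIZE, 16 MiB in stock Drill, so 512-byte values fill the VARCHAR vector for column d after 16,777,216 / 512 = 32,768 rows, and the row that triggers the overflow becomes the look-ahead row:

    // Back-of-the-envelope for expectedCount, assuming a 16 MiB vector cap:
    final int visibleRows = ValueVector.MAX_BUFFER_SIZE / value.length; // 32,768
    // One more row was written; it is invisible in this batch and is
    // re-homed by the next startBatch(), hence totalRowCount() == visibleRows + 1.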

Example 30 with RowSetLoader

Use of org.apache.drill.exec.physical.resultSet.RowSetLoader in project drill by apache.

From class TestResultSetLoaderMaps, method testMapAddition.

/**
 * Test adding a map to a loader after writing the first row.
 */
@Test
public void testMapAddition() {
    final TupleMetadata schema = new SchemaBuilder().add("a", MinorType.INT).buildSchema();
    final ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder().readerSchema(schema).build();
    final ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    assertEquals(1, rsLoader.schemaVersion());
    final RowSetLoader rootWriter = rsLoader.writer();
    // Start without the map. Add a map after the first row.
    rsLoader.startBatch();
    rootWriter.addRow(10);
    final int mapIndex = rootWriter.addColumn(SchemaBuilder.columnSchema("m", MinorType.MAP, DataMode.REQUIRED));
    final TupleWriter mapWriter = rootWriter.tuple(mapIndex);
    // Add a column to the map with the same name as the top-level column.
    // Verifies that the namespaces are independent.
    final int colIndex = mapWriter.addColumn(SchemaBuilder.columnSchema("a", MinorType.VARCHAR, DataMode.REQUIRED));
    assertEquals(0, colIndex);
    // Ensure metadata was added
    assertEquals(1, mapWriter.tupleSchema().size());
    assertSame(mapWriter.tupleSchema(), mapWriter.schema().tupleSchema());
    assertSame(mapWriter.tupleSchema().metadata(colIndex), mapWriter.scalar(colIndex).schema());
    rootWriter.addRow(20, mapValue("fred")).addRow(30, mapValue("barney"));
    final RowSet actual = fixture.wrap(rsLoader.harvest());
    assertEquals(3, rsLoader.schemaVersion());
    assertEquals(3, actual.rowCount());
    final MapVector mapVector = (MapVector) actual.container().getValueVector(1).getValueVector();
    final MaterializedField mapField = mapVector.getField();
    assertEquals(1, mapField.getChildren().size());
    assertTrue(mapWriter.scalar(colIndex).schema().schema().isEquivalent(mapField.getChildren().iterator().next()));
    // Validate first batch
    final TupleMetadata expectedSchema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMap("m")
            .add("a", MinorType.VARCHAR)
        .resumeSchema()
        .buildSchema();
    final SingleRowSet expected = fixture.rowSetBuilder(expectedSchema)
        .addRow(10, mapValue(""))
        .addRow(20, mapValue("fred"))
        .addRow(30, mapValue("barney"))
        .build();
    RowSetUtilities.verify(expected, actual);
    rsLoader.close();
}
Also used: SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) MaterializedField(org.apache.drill.exec.record.MaterializedField) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) MapVector(org.apache.drill.exec.vector.complex.MapVector) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
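
The colIndex assertion is worth dwelling on: addColumn() returns an index that is positional within its own tuple, so the map's first column gets index 0 even though a column named a already exists at the root; each TupleWriter is an independent namespace. A minimal sketch of the dynamic-column idiom, with the column name and value purely illustrative:

    // Add a column discovered at runtime, then write to it immediately.
    final int idx = mapWriter.addColumn(
        SchemaBuilder.columnSchema("newCol", MinorType.VARCHAR, DataMode.REQUIRED));
    mapWriter.scalar(idx).setString("new value");

Rows written before the column existed are back-filled with empty values on harvest, which is why the first expected row above is mapValue("").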

Aggregations

RowSetLoader (org.apache.drill.exec.physical.resultSet.RowSetLoader) 98
ResultSetLoader (org.apache.drill.exec.physical.resultSet.ResultSetLoader) 90
Test (org.junit.Test) 86
SubOperatorTest (org.apache.drill.test.SubOperatorTest) 85
SchemaBuilder (org.apache.drill.exec.record.metadata.SchemaBuilder) 82
TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata) 82
SingleRowSet (org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) 66
RowSet (org.apache.drill.exec.physical.rowSet.RowSet) 63
ScalarWriter (org.apache.drill.exec.vector.accessor.ScalarWriter) 25
TupleWriter (org.apache.drill.exec.vector.accessor.TupleWriter) 25
ResultSetOptions (org.apache.drill.exec.physical.resultSet.impl.ResultSetLoaderImpl.ResultSetOptions) 23
RowSetReader (org.apache.drill.exec.physical.rowSet.RowSetReader) 17
ArrayWriter (org.apache.drill.exec.vector.accessor.ArrayWriter) 16
VectorContainer (org.apache.drill.exec.record.VectorContainer) 15
SchemaPath (org.apache.drill.common.expression.SchemaPath) 12
DictWriter (org.apache.drill.exec.vector.accessor.DictWriter) 11
EvfTest (org.apache.drill.categories.EvfTest) 10
MaterializedField (org.apache.drill.exec.record.MaterializedField) 9
ColumnMetadata (org.apache.drill.exec.record.metadata.ColumnMetadata) 6
ArrayReader (org.apache.drill.exec.vector.accessor.ArrayReader) 5