Example 36 with RowSet

Use of org.apache.drill.exec.physical.rowSet.RowSet in project drill by apache.

The class TestResultSetLoaderMapArray, method testDoubleNestedArray.

/**
 * Test a doubly-nested array of maps.
 */
@Test
public void testDoubleNestedArray() {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMapArray("m1")
            .add("b", MinorType.INT)
            .addMapArray("m2")
                .add("c", MinorType.INT)
                .addArray("d", MinorType.VARCHAR)
                .resumeMap()
            .resumeSchema()
        .buildSchema();
    ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder().readerSchema(schema).build();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    RowSetLoader rootWriter = rsLoader.writer();
    rsLoader.startBatch();
    ScalarWriter aWriter = rootWriter.scalar("a");
    ArrayWriter a1Writer = rootWriter.array("m1");
    TupleWriter m1Writer = a1Writer.tuple();
    ScalarWriter bWriter = m1Writer.scalar("b");
    ArrayWriter a2Writer = m1Writer.array("m2");
    TupleWriter m2Writer = a2Writer.tuple();
    ScalarWriter cWriter = m2Writer.scalar("c");
    ScalarWriter dWriter = m2Writer.array("d").scalar();
    for (int i = 0; i < 5; i++) {
        rootWriter.start();
        aWriter.setInt(i);
        for (int j = 0; j < 4; j++) {
            int a1Key = i + 10 + j;
            bWriter.setInt(a1Key);
            for (int k = 0; k < 3; k++) {
                int a2Key = a1Key * 10 + k;
                cWriter.setInt(a2Key);
                for (int l = 0; l < 2; l++) {
                    dWriter.setString("d-" + (a2Key * 10 + l));
                }
                a2Writer.save();
            }
            a1Writer.save();
        }
        rootWriter.save();
    }
    RowSet results = fixture.wrap(rsLoader.harvest());
    RowSetReader reader = results.reader();
    ScalarReader aReader = reader.scalar("a");
    ArrayReader a1Reader = reader.array("m1");
    TupleReader m1Reader = a1Reader.tuple();
    ScalarReader bReader = m1Reader.scalar("b");
    ArrayReader a2Reader = m1Reader.array("m2");
    TupleReader m2Reader = a2Reader.tuple();
    ScalarReader cReader = m2Reader.scalar("c");
    ArrayReader dArray = m2Reader.array("d");
    ScalarReader dReader = dArray.scalar();
    for (int i = 0; i < 5; i++) {
        assertTrue(reader.next());
        assertEquals(i, aReader.getInt());
        for (int j = 0; j < 4; j++) {
            assertTrue(a1Reader.next());
            int a1Key = i + 10 + j;
            assertEquals(a1Key, bReader.getInt());
            for (int k = 0; k < 3; k++) {
                assertTrue(a2Reader.next());
                int a2Key = a1Key * 10 + k;
                assertEquals(a2Key, cReader.getInt());
                for (int l = 0; l < 2; l++) {
                    assertTrue(dArray.next());
                    assertEquals("d-" + (a2Key * 10 + l), dReader.getString());
                }
            }
        }
    }
    rsLoader.close();
}
Also used : TupleReader(org.apache.drill.exec.vector.accessor.TupleReader) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) ScalarReader(org.apache.drill.exec.vector.accessor.ScalarReader) ArrayReader(org.apache.drill.exec.vector.accessor.ArrayReader) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) ArrayWriter(org.apache.drill.exec.vector.accessor.ArrayWriter) RowSetReader(org.apache.drill.exec.physical.rowSet.RowSetReader) ScalarWriter(org.apache.drill.exec.vector.accessor.ScalarWriter) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
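
The essential pattern in the test above is that each repeated-map level has its own ArrayWriter, and save() must be called on that writer once per array element, from the innermost level outward, before the enclosing row is closed with rootWriter.save(). Below is a minimal sketch reduced to a single level of nesting; it assumes the same test environment as the examples on this page (a class extending SubOperatorTest that supplies fixture, plus the SchemaBuilder, ResultSetLoaderImpl, and writer imports listed under "Also used"). The method and column names are illustrative only.

@Test
public void sketchSingleLevelMapArray() {
    TupleMetadata schema = new SchemaBuilder()
        .add("id", MinorType.INT)
        .addMapArray("m")
            .add("v", MinorType.INT)
            .resumeSchema()
        .buildSchema();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(),
        new ResultSetOptionBuilder().readerSchema(schema).build());
    RowSetLoader rootWriter = rsLoader.writer();
    rsLoader.startBatch();
    ArrayWriter mapArray = rootWriter.array("m");
    TupleWriter mapWriter = mapArray.tuple();
    for (int i = 0; i < 2; i++) {
        rootWriter.start();
        rootWriter.scalar("id").setInt(i);
        for (int j = 0; j < 3; j++) {
            mapWriter.scalar("v").setInt(i * 10 + j);
            // Close one element of the repeated map.
            mapArray.save();
        }
        // Close the row itself.
        rootWriter.save();
    }
    RowSet result = fixture.wrap(rsLoader.harvest());
    result.clear();
    rsLoader.close();
}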

Example 37 with RowSet

Use of org.apache.drill.exec.physical.rowSet.RowSet in project drill by apache.

The class TestResultSetLoaderOmittedValues, method testSkipRows.

/**
 * Test that omitting the call to save() effectively discards
 * the row. Note that the vectors still contain values in the
 * discarded position; just the various pointers are unset. If
 * the batch ends before the discarded values are overwritten, the
 * discarded values just exist at the end of the vector. Since vectors
 * start with garbage contents, the discarded values are simply a different
 * kind of garbage. But if the client writes a new row, then the new
 * row overwrites the discarded row. This works because we only change
 * the tail part of a vector, never the internals.
 */
@Test
public void testSkipRows() {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addNullable("b", MinorType.VARCHAR)
        .buildSchema();
    ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
        .rowCountLimit(ValueVector.MAX_ROW_COUNT)
        .readerSchema(schema)
        .build();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    RowSetLoader rootWriter = rsLoader.writer();
    rsLoader.startBatch();
    int rowNumber = 0;
    for (int i = 0; i < 14; i++) {
        rootWriter.start();
        rowNumber++;
        rootWriter.scalar(0).setInt(rowNumber);
        if (i % 3 == 0) {
            rootWriter.scalar(1).setNull();
        } else {
            rootWriter.scalar(1).setString("b-" + rowNumber);
        }
        if (i % 2 == 0) {
            rootWriter.save();
        }
    }
    RowSet result = fixture.wrap(rsLoader.harvest());
    // result.print();
    SingleRowSet expected = fixture.rowSetBuilder(result.batchSchema())
        .addRow(1, null)
        .addRow(3, "b-3")
        .addRow(5, "b-5")
        .addRow(7, null)
        .addRow(9, "b-9")
        .addRow(11, "b-11")
        .addRow(13, null)
        .build();
    // expected.print();
    RowSetUtilities.verify(expected, result);
    rsLoader.close();
}
Also used : SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
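
Distilled from the test above, the skip-row idiom is simply to call start() for every candidate row but save() only for the rows to keep. Below is a minimal sketch, assuming the same SubOperatorTest fixture and imports as the examples above; the single column and the keep-every-other-row filter are illustrative only.

@Test
public void sketchSkipRows() {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .buildSchema();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(),
        new ResultSetOptionBuilder().readerSchema(schema).build());
    RowSetLoader rootWriter = rsLoader.writer();
    rsLoader.startBatch();
    for (int i = 0; i < 10; i++) {
        // Position the writers on a fresh row.
        rootWriter.start();
        rootWriter.scalar(0).setInt(i);
        // Keep only even rows; rows never saved are discarded.
        if (i % 2 == 0) {
            rootWriter.save();
        }
    }
    RowSet result = fixture.wrap(rsLoader.harvest());
    // Only the five saved rows appear in the harvested batch.
    assertEquals(5, result.rowCount());
    result.clear();
    rsLoader.close();
}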

Example 38 with RowSet

Use of org.apache.drill.exec.physical.rowSet.RowSet in project drill by apache.

The class TestResultSetLoaderOmittedValues, method testOmittedValuesAtEndWithOverflow.

/**
 * Test "holes" at the end of a batch when the batch overflows. The completed
 * batch must be finalized correctly, and the new batch initialized correctly,
 * for the missing values.
 */
@Test
public void testOmittedValuesAtEndWithOverflow() {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .add("b", MinorType.VARCHAR)
        .addNullable("c", MinorType.VARCHAR)
        .addNullable("d", MinorType.VARCHAR)
        .buildSchema();
    ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
        .rowCountLimit(ValueVector.MAX_ROW_COUNT)
        .readerSchema(schema)
        .build();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    RowSetLoader rootWriter = rsLoader.writer();
    // Fill the batch. Column d has some values. Column c is worst case: no values.
    rsLoader.startBatch();
    byte[] value = new byte[533];
    Arrays.fill(value, (byte) 'X');
    int rowNumber = 0;
    while (!rootWriter.isFull()) {
        rootWriter.start();
        rowNumber++;
        rootWriter.scalar(0).setInt(rowNumber);
        rootWriter.scalar(1).setBytes(value, value.length);
        if (rowNumber < 10_000) {
            rootWriter.scalar(3).setString("d-" + rowNumber);
        }
        rootWriter.save();
        assertEquals(rowNumber, rsLoader.totalRowCount());
    }
    // Harvest and verify
    RowSet result = fixture.wrap(rsLoader.harvest());
    assertEquals(rowNumber - 1, result.rowCount());
    RowSetReader reader = result.reader();
    int rowIndex = 0;
    while (reader.next()) {
        int expectedRowNumber = 1 + rowIndex;
        assertEquals(expectedRowNumber, reader.scalar(0).getInt());
        assertTrue(reader.scalar(2).isNull());
        if (expectedRowNumber < 10_000) {
            assertEquals("d-" + expectedRowNumber, reader.scalar(3).getString());
        } else {
            assertTrue(reader.scalar(3).isNull());
        }
        rowIndex++;
    }
    // Start count for this batch is one less than current
    // count, because of the overflow row.
    int startRowNumber = rowNumber;
    // Write a few more rows to the next batch
    rsLoader.startBatch();
    for (int i = 0; i < 10; i++) {
        rootWriter.start();
        rowNumber++;
        rootWriter.scalar(0).setInt(rowNumber);
        rootWriter.scalar(1).setBytes(value, value.length);
        if (i > 5) {
            rootWriter.scalar(3).setString("d-" + rowNumber);
        }
        rootWriter.save();
        assertEquals(rowNumber, rsLoader.totalRowCount());
    }
    // Verify that holes were preserved.
    result = fixture.wrap(rsLoader.harvest());
    assertEquals(rowNumber, rsLoader.totalRowCount());
    assertEquals(rowNumber - startRowNumber + 1, result.rowCount());
    // result.print();
    reader = result.reader();
    rowIndex = 0;
    while (reader.next()) {
        int expectedRowNumber = startRowNumber + rowIndex;
        assertEquals(expectedRowNumber, reader.scalar(0).getInt());
        assertTrue(reader.scalar(2).isNull());
        if (rowIndex > 6) {
            assertEquals("d-" + expectedRowNumber, reader.scalar(3).getString());
        } else {
            assertTrue("Row " + rowIndex + " col d should be null", reader.scalar(3).isNull());
        }
        rowIndex++;
    }
    assertEquals(11, rowIndex);
    rsLoader.close();
}
Also used : ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) RowSetReader(org.apache.drill.exec.physical.rowSet.RowSetReader) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
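
The test above relies on the standard fill-until-overflow cycle: write rows until isFull() reports that the batch has filled, harvest the completed batch (the row that triggered overflow is withheld), then start a new batch that begins with the withheld row. Below is a minimal sketch of just that cycle, assuming the same fixture and imports as above; the method name, two-column schema, and 512-byte payload are illustrative only.

@Test
public void sketchOverflowCycle() {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .add("b", MinorType.VARCHAR)
        .buildSchema();
    ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
        .rowCountLimit(ValueVector.MAX_ROW_COUNT)
        .readerSchema(schema)
        .build();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    RowSetLoader rootWriter = rsLoader.writer();
    byte[] value = new byte[512];
    Arrays.fill(value, (byte) 'X');

    rsLoader.startBatch();
    int rowNumber = 0;
    while (!rootWriter.isFull()) {
        rootWriter.start();
        rowNumber++;
        rootWriter.scalar(0).setInt(rowNumber);
        rootWriter.scalar(1).setBytes(value, value.length);
        rootWriter.save();
    }
    // The row that triggered overflow is withheld from this batch ...
    RowSet first = fixture.wrap(rsLoader.harvest());
    assertEquals(rowNumber - 1, first.rowCount());
    first.clear();

    // ... and becomes the first row of the next batch.
    int overflowRowNumber = rowNumber;
    rsLoader.startBatch();
    for (int i = 0; i < 3; i++) {
        rootWriter.start();
        rowNumber++;
        rootWriter.scalar(0).setInt(rowNumber);
        rootWriter.scalar(1).setBytes(value, value.length);
        rootWriter.save();
    }
    RowSet second = fixture.wrap(rsLoader.harvest());
    // Three new rows plus the carried-over overflow row.
    assertEquals(rowNumber - overflowRowNumber + 1, second.rowCount());
    second.clear();
    rsLoader.close();
}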

Example 39 with RowSet

Use of org.apache.drill.exec.physical.rowSet.RowSet in project drill by apache.

The class TestResultSetLoaderOmittedValues, method testSkipOverflowRow.

/**
 * Test that discarding a row works even if that row happens to be an
 * overflow row.
 */
@Test
public void testSkipOverflowRow() {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addNullable("b", MinorType.VARCHAR)
        .buildSchema();
    ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
        .rowCountLimit(ValueVector.MAX_ROW_COUNT)
        .readerSchema(schema)
        .build();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    RowSetLoader rootWriter = rsLoader.writer();
    rsLoader.startBatch();
    byte[] value = new byte[512];
    Arrays.fill(value, (byte) 'X');
    int count = 0;
    while (!rootWriter.isFull()) {
        rootWriter.start();
        rootWriter.scalar(0).setInt(count);
        rootWriter.scalar(1).setBytes(value, value.length);
        if (!rootWriter.isFull()) {
            rootWriter.save();
        }
        count++;
    }
    // Discard the results.
    rsLoader.harvest().zeroVectors();
    // Harvest the next batch. It will be empty because the overflow row
    // was discarded.
    rsLoader.startBatch();
    RowSet result = fixture.wrap(rsLoader.harvest());
    assertEquals(0, result.rowCount());
    result.clear();
    rsLoader.close();
}
Also used : ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
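
A compressed view of the tail of the test above, for reference: because the overflow row was started but never saved, it does not carry into the next batch, and harvest().zeroVectors() releases a completed batch without wrapping it in a RowSet. This fragment assumes rsLoader, rootWriter, and the fill loop exactly as in the test.

    // ... fill loop as above, with save() skipped for the overflow row ...
    // Release the completed batch's buffers without building a RowSet.
    rsLoader.harvest().zeroVectors();
    // The unsaved overflow row was discarded, so the next batch starts empty.
    rsLoader.startBatch();
    RowSet result = fixture.wrap(rsLoader.harvest());
    assertEquals(0, result.rowCount());
    result.clear();
    rsLoader.close();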

Example 40 with RowSet

Use of org.apache.drill.exec.physical.rowSet.RowSet in project drill by apache.

The class TestResultSetLoaderDictArray, method testScalarValue.

@Test
public void testScalarValue() {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addDictArray("d", MinorType.VARCHAR)
            .value(MinorType.INT)
            .resumeSchema()
        .buildSchema();
    ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder().readerSchema(schema).build();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    RowSetLoader rootWriter = rsLoader.writer();
    // Write a couple of rows
    rsLoader.startBatch();
    rootWriter
        .addRow(10, objArray(
            map("a", 1, "b", 2, "d", 4),
            map("a", 2, "c", 3, "d", 1, "e", 4)))
        .addRow(20, objArray())
        .addRow(30, objArray(
            map("a", 2, "c", 4, "d", 5, "e", 6, "f", 11),
            map("a", 1, "d", 6, "c", 3),
            map("b", 2, "a", 3)));
    // Verify the batch
    RowSet actual = fixture.wrap(rsLoader.harvest());
    SingleRowSet expected = fixture.rowSetBuilder(schema)
        .addRow(10, objArray(
            map("a", 1, "b", 2, "d", 4),
            map("a", 2, "c", 3, "d", 1, "e", 4)))
        .addRow(20, objArray())
        .addRow(30, objArray(
            map("a", 2, "c", 4, "d", 5, "e", 6, "f", 11),
            map("a", 1, "d", 6, "c", 3),
            map("b", 2, "a", 3)))
        .build();
    RowSetUtilities.verify(expected, actual);
    rsLoader.close();
}
Also used : SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
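
For DICT array columns the declarative addRow() form shown above is usually the most convenient way both to load and to verify data. Below is a minimal round-trip sketch, assuming the same fixture and imports as the examples above, including the map() and objArray() static helpers the test itself uses; the method name, column names, and values are illustrative only.

@Test
public void sketchDictArrayRoundTrip() {
    TupleMetadata schema = new SchemaBuilder()
        .add("id", MinorType.INT)
        .addDictArray("d", MinorType.VARCHAR)
            .value(MinorType.INT)
            .resumeSchema()
        .buildSchema();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(),
        new ResultSetOptionBuilder().readerSchema(schema).build());
    RowSetLoader rootWriter = rsLoader.writer();
    rsLoader.startBatch();
    rootWriter
        .addRow(1, objArray(map("x", 10, "y", 20)))
        // An empty object array yields an empty dict array for the row.
        .addRow(2, objArray());
    RowSet actual = fixture.wrap(rsLoader.harvest());
    SingleRowSet expected = fixture.rowSetBuilder(schema)
        .addRow(1, objArray(map("x", 10, "y", 20)))
        .addRow(2, objArray())
        .build();
    RowSetUtilities.verify(expected, actual);
    rsLoader.close();
}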

Aggregations

RowSet (org.apache.drill.exec.physical.rowSet.RowSet) 725
Test (org.junit.Test) 690
TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata) 583
SchemaBuilder (org.apache.drill.exec.record.metadata.SchemaBuilder) 574
RowSetBuilder (org.apache.drill.exec.physical.rowSet.RowSetBuilder) 297
ClusterTest (org.apache.drill.test.ClusterTest) 253
RowSetComparison (org.apache.drill.test.rowSet.RowSetComparison) 233
DirectRowSet (org.apache.drill.exec.physical.rowSet.DirectRowSet) 137
SubOperatorTest (org.apache.drill.test.SubOperatorTest) 128
JsonTest (org.apache.drill.categories.JsonTest) 112
EvfTest (org.apache.drill.categories.EvfTest) 107
SingleRowSet (org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) 97
RowSetLoader (org.apache.drill.exec.physical.resultSet.RowSetLoader) 63
ResultSetLoader (org.apache.drill.exec.physical.resultSet.ResultSetLoader) 61
QueryBuilder (org.apache.drill.test.QueryBuilder) 61
MockRecordBatch (org.apache.drill.exec.physical.impl.MockRecordBatch) 60
OperatorTest (org.apache.drill.categories.OperatorTest) 53
VectorContainer (org.apache.drill.exec.record.VectorContainer) 31
RowBatchReader (org.apache.drill.exec.physical.impl.scan.RowBatchReader) 28
QuerySummary (org.apache.drill.test.QueryBuilder.QuerySummary) 27