
Example 81 with SchemaBuilder

Use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.

The class TestResultSetLoaderMapArray, method testBasics.

@Test
public void testBasics() {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMapArray("m")
            .add("c", MinorType.INT)
            .add("d", MinorType.VARCHAR)
            .resumeSchema()
        .buildSchema();
    ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder().readerSchema(schema).build();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    RowSetLoader rootWriter = rsLoader.writer();
    // Verify structure and schema
    TupleMetadata actualSchema = rootWriter.tupleSchema();
    assertEquals(2, actualSchema.size());
    assertTrue(actualSchema.metadata(1).isArray());
    assertTrue(actualSchema.metadata(1).isMap());
    assertEquals(2, actualSchema.metadata("m").tupleSchema().size());
    assertEquals(2, actualSchema.column("m").getChildren().size());
    TupleWriter mapWriter = rootWriter.array("m").tuple();
    assertSame(actualSchema.metadata("m").tupleSchema(), mapWriter.schema().tupleSchema());
    assertSame(mapWriter.tupleSchema(), mapWriter.schema().tupleSchema());
    assertSame(mapWriter.tupleSchema().metadata(0), mapWriter.scalar(0).schema());
    assertSame(mapWriter.tupleSchema().metadata(1), mapWriter.scalar(1).schema());
    // Write a couple of rows with arrays.
    rsLoader.startBatch();
    rootWriter
        .addRow(10, mapArray(
            mapValue(110, "d1.1"),
            mapValue(120, "d2.2")))
        .addRow(20, mapArray())
        .addRow(30, mapArray(
            mapValue(310, "d3.1"),
            mapValue(320, "d3.2"),
            mapValue(330, "d3.3")));
    // Verify the first batch
    RowSet actual = fixture.wrap(rsLoader.harvest());
    RepeatedMapVector mapVector = (RepeatedMapVector) actual.container().getValueVector(1).getValueVector();
    MaterializedField mapField = mapVector.getField();
    assertEquals(2, mapField.getChildren().size());
    Iterator<MaterializedField> iter = mapField.getChildren().iterator();
    assertTrue(mapWriter.scalar(0).schema().schema().isEquivalent(iter.next()));
    assertTrue(mapWriter.scalar(1).schema().schema().isEquivalent(iter.next()));
    SingleRowSet expected = fixture.rowSetBuilder(schema)
        .addRow(10, mapArray(
            mapValue(110, "d1.1"),
            mapValue(120, "d2.2")))
        .addRow(20, mapArray())
        .addRow(30, mapArray(
            mapValue(310, "d3.1"),
            mapValue(320, "d3.2"),
            mapValue(330, "d3.3")))
        .build();
    RowSetUtilities.verify(expected, actual);
    // In the second batch, create a row, then add a map member.
    // Should be back-filled to empty for the first row.
    rsLoader.startBatch();
    rootWriter.addRow(40, mapArray(mapValue(410, "d4.1"), mapValue(420, "d4.2")));
    mapWriter.addColumn(SchemaBuilder.columnSchema("e", MinorType.VARCHAR, DataMode.OPTIONAL));
    rootWriter
        .addRow(50, mapArray(
            mapValue(510, "d5.1", "e5.1"),
            mapValue(520, "d5.2", null)))
        .addRow(60, mapArray(
            mapValue(610, "d6.1", "e6.1"),
            mapValue(620, "d6.2", null),
            mapValue(630, "d6.3", "e6.3")));
    // Verify the second batch
    actual = fixture.wrap(rsLoader.harvest());
    mapVector = (RepeatedMapVector) actual.container().getValueVector(1).getValueVector();
    mapField = mapVector.getField();
    assertEquals(3, mapField.getChildren().size());
    TupleMetadata expectedSchema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMapArray("m")
            .add("c", MinorType.INT)
            .add("d", MinorType.VARCHAR)
            .addNullable("e", MinorType.VARCHAR)
            .resumeSchema()
        .buildSchema();
    expected = fixture.rowSetBuilder(expectedSchema)
        .addRow(40, mapArray(
            mapValue(410, "d4.1", null),
            mapValue(420, "d4.2", null)))
        .addRow(50, mapArray(
            mapValue(510, "d5.1", "e5.1"),
            mapValue(520, "d5.2", null)))
        .addRow(60, mapArray(
            mapValue(610, "d6.1", "e6.1"),
            mapValue(620, "d6.2", null),
            mapValue(630, "d6.3", "e6.3")))
        .build();
    RowSetUtilities.verify(expected, actual);
    rsLoader.close();
}
Also used: SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) RepeatedMapVector(org.apache.drill.exec.vector.complex.RepeatedMapVector) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) MaterializedField(org.apache.drill.exec.record.MaterializedField) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
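The core pattern in the example above is the SchemaBuilder fluent chain: addMapArray() opens a nested builder for the repeated map's member columns, resumeSchema() returns control to the top-level builder, and buildSchema() produces the TupleMetadata handed to the loader; the later mapWriter.addColumn() call shows that a map array's schema can also grow mid-batch, with earlier rows back-filled. Below is a minimal sketch of just those two steps. It is not part of the Drill sources; it assumes the same SubOperatorTest fixture and the statically imported mapArray()/mapValue() helpers used by the example, and the column names are illustrative.

@Test
public void testMapArraySchemaSketch() {
    // One top-level INT plus a repeated map "m" with two member columns.
    TupleMetadata schema = new SchemaBuilder()
        .add("id", MinorType.INT)
        .addMapArray("m")
            .add("qty", MinorType.INT)
            .add("name", MinorType.VARCHAR)
            .resumeSchema()
        .buildSchema();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(),
        new ResultSetOptionBuilder().readerSchema(schema).build());
    RowSetLoader rootWriter = rsLoader.writer();
    rsLoader.startBatch();
    // Each row carries an array of (qty, name) map values.
    rootWriter.addRow(1, mapArray(mapValue(10, "a"), mapValue(20, "b")));
    // Grow the map's schema mid-batch; the row above is back-filled with nulls.
    rootWriter.array("m").tuple().addColumn(
        SchemaBuilder.columnSchema("note", MinorType.VARCHAR, DataMode.OPTIONAL));
    rootWriter.addRow(2, mapArray(mapValue(30, "c", "n1")));
    // Release the harvested batch (verification omitted for brevity).
    fixture.wrap(rsLoader.harvest()).clear();
    rsLoader.close();
}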

Example 82 with SchemaBuilder

Use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.

The class TestResultSetLoaderMapArray, method testOmittedValues.

/**
 * Check that the "fill-empties" logic descends down into
 * a repeated map.
 */
@Test
public void testOmittedValues() {
    TupleMetadata schema = new SchemaBuilder()
        .add("id", MinorType.INT)
        .addMapArray("m")
            .addNullable("a", MinorType.INT)
            .addNullable("b", MinorType.VARCHAR)
            .resumeSchema()
        .buildSchema();
    ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
        .readerSchema(schema)
        .rowCountLimit(ValueVector.MAX_ROW_COUNT)
        .build();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    RowSetLoader rootWriter = rsLoader.writer();
    int mapSkip = 5;
    int entrySkip = 3;
    int rowCount = 1000;
    int entryCount = 10;
    rsLoader.startBatch();
    ArrayWriter maWriter = rootWriter.array("m");
    TupleWriter mWriter = maWriter.tuple();
    for (int i = 0; i < rowCount; i++) {
        rootWriter.start();
        rootWriter.scalar(0).setInt(i);
        if (i % mapSkip != 0) {
            for (int j = 0; j < entryCount; j++) {
                if (j % entrySkip != 0) {
                    mWriter.scalar(0).setInt(i * entryCount + j);
                    mWriter.scalar(1).setString("b-" + i + "." + j);
                }
                maWriter.save();
            }
        }
        rootWriter.save();
    }
    RowSet result = fixture.wrap(rsLoader.harvest());
    assertEquals(rowCount, result.rowCount());
    RowSetReader reader = result.reader();
    ArrayReader maReader = reader.array("m");
    TupleReader mReader = maReader.tuple();
    for (int i = 0; i < rowCount; i++) {
        assertTrue(reader.next());
        assertEquals(i, reader.scalar(0).getInt());
        if (i % mapSkip == 0) {
            assertEquals(0, maReader.size());
            continue;
        }
        assertEquals(entryCount, maReader.size());
        for (int j = 0; j < entryCount; j++) {
            assertTrue(maReader.next());
            if (j % entrySkip == 0) {
                assertTrue(mReader.scalar(0).isNull());
                assertTrue(mReader.scalar(1).isNull());
            } else {
                assertFalse(mReader.scalar(0).isNull());
                assertFalse(mReader.scalar(1).isNull());
                assertEquals(i * entryCount + j, mReader.scalar(0).getInt());
                assertEquals("b-" + i + "." + j, mReader.scalar(1).getString());
            }
        }
    }
    result.clear();
    rsLoader.close();
}
Also used: TupleReader(org.apache.drill.exec.vector.accessor.TupleReader) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) ArrayReader(org.apache.drill.exec.vector.accessor.ArrayReader) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) ArrayWriter(org.apache.drill.exec.vector.accessor.ArrayWriter) RowSetReader(org.apache.drill.exec.physical.rowSet.RowSetReader) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
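The "fill-empties" behavior exercised above reduces to two cases: calling maWriter.save() without setting a nullable member back-fills that member with null, and calling rootWriter.save() without appending any map entries leaves that row's repeated map empty. The sketch below compresses both cases into two rows; it is not part of the Drill sources and assumes the same SubOperatorTest fixture as the example, with illustrative values.

@Test
public void testFillEmptiesSketch() {
    TupleMetadata schema = new SchemaBuilder()
        .add("id", MinorType.INT)
        .addMapArray("m")
            .addNullable("a", MinorType.INT)
            .resumeSchema()
        .buildSchema();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(),
        new ResultSetOptionBuilder().readerSchema(schema).build());
    RowSetLoader rootWriter = rsLoader.writer();
    ArrayWriter maWriter = rootWriter.array("m");
    TupleWriter mWriter = maWriter.tuple();
    rsLoader.startBatch();
    // Row 0: two map entries; the second is saved without setting "a".
    rootWriter.start();
    rootWriter.scalar(0).setInt(0);
    mWriter.scalar(0).setInt(100);
    maWriter.save();
    // Nothing written for this entry, so "a" is back-filled with null.
    maWriter.save();
    rootWriter.save();
    // Row 1: no map entries at all; the repeated map stays empty.
    rootWriter.start();
    rootWriter.scalar(0).setInt(1);
    rootWriter.save();
    RowSet result = fixture.wrap(rsLoader.harvest());
    RowSetReader reader = result.reader();
    ArrayReader maReader = reader.array("m");
    TupleReader mReader = maReader.tuple();
    assertTrue(reader.next());
    assertEquals(2, maReader.size());
    assertTrue(maReader.next());
    assertFalse(mReader.scalar(0).isNull());
    assertTrue(maReader.next());
    assertTrue(mReader.scalar(0).isNull());
    assertTrue(reader.next());
    assertEquals(0, maReader.size());
    result.clear();
    rsLoader.close();
}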

Example 83 with SchemaBuilder

Use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.

The class TestResultSetLoaderMapArray, method testNestedArray.

@Test
public void testNestedArray() {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMapArray("m")
            .add("c", MinorType.INT)
            .addArray("d", MinorType.VARCHAR)
            .resumeSchema()
        .buildSchema();
    ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder().readerSchema(schema).build();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    RowSetLoader rootWriter = rsLoader.writer();
    // Write a couple of rows with arrays within arrays.
    // (And, of course, the Varchar is actually an array of
    // bytes, so that's three array levels.)
    rsLoader.startBatch();
    rootWriter
        .addRow(10, mapArray(
            mapValue(110, strArray("d1.1.1", "d1.1.2")),
            mapValue(120, strArray("d1.2.1", "d1.2.2"))))
        .addRow(20, mapArray())
        .addRow(30, mapArray(
            mapValue(310, strArray("d3.1.1", "d3.2.2")),
            mapValue(320, strArray()),
            mapValue(330, strArray("d3.3.1", "d1.2.2"))));
    // Verify the batch
    RowSet actual = fixture.wrap(rsLoader.harvest());
    SingleRowSet expected = fixture.rowSetBuilder(schema)
        .addRow(10, mapArray(
            mapValue(110, strArray("d1.1.1", "d1.1.2")),
            mapValue(120, strArray("d1.2.1", "d1.2.2"))))
        .addRow(20, mapArray())
        .addRow(30, mapArray(
            mapValue(310, strArray("d3.1.1", "d3.2.2")),
            mapValue(320, strArray()),
            mapValue(330, strArray("d3.3.1", "d1.2.2"))))
        .build();
    RowSetUtilities.verify(expected, actual);
    rsLoader.close();
}
Also used: SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
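Example 83 layers a repeated VARCHAR inside the repeated map, which the builder expresses with addArray() between addMapArray() and resumeSchema(); the strArray() helper then supplies the inner array values row by row. The following is a minimal sketch of that shape only, not part of the Drill sources, assuming the same fixture and statically imported helpers as above, with illustrative column names and values.

@Test
public void testNestedArraySketch() {
    // Repeated map "m": each entry holds an INT plus a repeated VARCHAR.
    TupleMetadata schema = new SchemaBuilder()
        .add("id", MinorType.INT)
        .addMapArray("m")
            .add("n", MinorType.INT)
            .addArray("tags", MinorType.VARCHAR)
            .resumeSchema()
        .buildSchema();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(),
        new ResultSetOptionBuilder().readerSchema(schema).build());
    RowSetLoader rootWriter = rsLoader.writer();
    rsLoader.startBatch();
    // Each map value pairs an INT with an inner string array (possibly empty).
    rootWriter.addRow(1, mapArray(
        mapValue(10, strArray("x", "y")),
        mapValue(20, strArray())));
    SingleRowSet expected = fixture.rowSetBuilder(schema)
        .addRow(1, mapArray(
            mapValue(10, strArray("x", "y")),
            mapValue(20, strArray())))
        .build();
    RowSetUtilities.verify(expected, fixture.wrap(rsLoader.harvest()));
    rsLoader.close();
}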

Example 84 with SchemaBuilder

Use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.

The class TestResultSetLoaderMapArray, method testDoubleNestedArray.

/**
 * Test a doubly-nested array of maps.
 */
@Test
public void testDoubleNestedArray() {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMapArray("m1")
            .add("b", MinorType.INT)
            .addMapArray("m2")
                .add("c", MinorType.INT)
                .addArray("d", MinorType.VARCHAR)
                .resumeMap()
            .resumeSchema()
        .buildSchema();
    ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder().readerSchema(schema).build();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    RowSetLoader rootWriter = rsLoader.writer();
    rsLoader.startBatch();
    ScalarWriter aWriter = rootWriter.scalar("a");
    ArrayWriter a1Writer = rootWriter.array("m1");
    TupleWriter m1Writer = a1Writer.tuple();
    ScalarWriter bWriter = m1Writer.scalar("b");
    ArrayWriter a2Writer = m1Writer.array("m2");
    TupleWriter m2Writer = a2Writer.tuple();
    ScalarWriter cWriter = m2Writer.scalar("c");
    ScalarWriter dWriter = m2Writer.array("d").scalar();
    for (int i = 0; i < 5; i++) {
        rootWriter.start();
        aWriter.setInt(i);
        for (int j = 0; j < 4; j++) {
            int a1Key = i + 10 + j;
            bWriter.setInt(a1Key);
            for (int k = 0; k < 3; k++) {
                int a2Key = a1Key * 10 + k;
                cWriter.setInt(a2Key);
                for (int l = 0; l < 2; l++) {
                    dWriter.setString("d-" + (a2Key * 10 + l));
                }
                a2Writer.save();
            }
            a1Writer.save();
        }
        rootWriter.save();
    }
    RowSet results = fixture.wrap(rsLoader.harvest());
    RowSetReader reader = results.reader();
    ScalarReader aReader = reader.scalar("a");
    ArrayReader a1Reader = reader.array("m1");
    TupleReader m1Reader = a1Reader.tuple();
    ScalarReader bReader = m1Reader.scalar("b");
    ArrayReader a2Reader = m1Reader.array("m2");
    TupleReader m2Reader = a2Reader.tuple();
    ScalarReader cReader = m2Reader.scalar("c");
    ArrayReader dArray = m2Reader.array("d");
    ScalarReader dReader = dArray.scalar();
    for (int i = 0; i < 5; i++) {
        assertTrue(reader.next());
        assertEquals(i, aReader.getInt());
        for (int j = 0; j < 4; j++) {
            assertTrue(a1Reader.next());
            int a1Key = i + 10 + j;
            assertEquals(a1Key, bReader.getInt());
            for (int k = 0; k < 3; k++) {
                assertTrue(a2Reader.next());
                int a2Key = a1Key * 10 + k;
                assertEquals(a2Key, cReader.getInt());
                for (int l = 0; l < 2; l++) {
                    assertTrue(dArray.next());
                    assertEquals("d-" + (a2Key * 10 + l), dReader.getString());
                }
            }
        }
    }
    rsLoader.close();
}
Also used: TupleReader(org.apache.drill.exec.vector.accessor.TupleReader) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) ScalarReader(org.apache.drill.exec.vector.accessor.ScalarReader) ArrayReader(org.apache.drill.exec.vector.accessor.ArrayReader) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleWriter(org.apache.drill.exec.vector.accessor.TupleWriter) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) ArrayWriter(org.apache.drill.exec.vector.accessor.ArrayWriter) RowSetReader(org.apache.drill.exec.physical.rowSet.RowSetReader) ScalarWriter(org.apache.drill.exec.vector.accessor.ScalarWriter) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
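The doubly-nested case follows one symmetric rule on each side: every addMapArray() level is reached through array(name).tuple(), and save() on the write side (or next() on the read side) advances the innermost level first. The sketch below strips Example 84 down to a single entry per level; it is not part of the Drill sources and assumes the same SubOperatorTest fixture as above.

@Test
public void testDoubleNestedSketch() {
    // m1 is a repeated map whose member m2 is itself a repeated map.
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMapArray("m1")
            .add("b", MinorType.INT)
            .addMapArray("m2")
                .add("c", MinorType.INT)
                .resumeMap()
            .resumeSchema()
        .buildSchema();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(),
        new ResultSetOptionBuilder().readerSchema(schema).build());
    RowSetLoader rootWriter = rsLoader.writer();
    ArrayWriter a1Writer = rootWriter.array("m1");
    TupleWriter m1Writer = a1Writer.tuple();
    ArrayWriter a2Writer = m1Writer.array("m2");
    TupleWriter m2Writer = a2Writer.tuple();
    rsLoader.startBatch();
    // One row, one m1 entry, one m2 entry; save() runs innermost-first.
    rootWriter.start();
    rootWriter.scalar("a").setInt(1);
    m1Writer.scalar("b").setInt(11);
    m2Writer.scalar("c").setInt(111);
    a2Writer.save();
    a1Writer.save();
    rootWriter.save();
    // Read it back, descending with next() at each level.
    RowSet result = fixture.wrap(rsLoader.harvest());
    RowSetReader reader = result.reader();
    ArrayReader a1Reader = reader.array("m1");
    TupleReader m1Reader = a1Reader.tuple();
    ArrayReader a2Reader = m1Reader.array("m2");
    TupleReader m2Reader = a2Reader.tuple();
    assertTrue(reader.next());
    assertTrue(a1Reader.next());
    assertEquals(11, m1Reader.scalar("b").getInt());
    assertTrue(a2Reader.next());
    assertEquals(111, m2Reader.scalar("c").getInt());
    result.clear();
    rsLoader.close();
}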

Example 85 with SchemaBuilder

Use of org.apache.drill.exec.record.metadata.SchemaBuilder in project drill by apache.

The class TestResultSetLoaderOmittedValues, method testSkipRows.

/**
 * Test that omitting the call to save() effectively discards
 * the row. Note that the vectors still contain values in the
 * discarded position; just the various pointers are unset. If
 * the batch ends before the discarded values are overwritten, the
 * discarded values just exist at the end of the vector. Since vectors
 * start with garbage contents, the discarded values are simply a different
 * kind of garbage. But, if the client writes a new row, then the new
 * row overwrites the discarded row. This works because we only change
 * the tail part of a vector; never the internals.
 */
@Test
public void testSkipRows() {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addNullable("b", MinorType.VARCHAR)
        .buildSchema();
    ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
        .rowCountLimit(ValueVector.MAX_ROW_COUNT)
        .readerSchema(schema)
        .build();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    RowSetLoader rootWriter = rsLoader.writer();
    rsLoader.startBatch();
    int rowNumber = 0;
    for (int i = 0; i < 14; i++) {
        rootWriter.start();
        rowNumber++;
        rootWriter.scalar(0).setInt(rowNumber);
        if (i % 3 == 0) {
            rootWriter.scalar(1).setNull();
        } else {
            rootWriter.scalar(1).setString("b-" + rowNumber);
        }
        if (i % 2 == 0) {
            rootWriter.save();
        }
    }
    RowSet result = fixture.wrap(rsLoader.harvest());
    // result.print();
    SingleRowSet expected = fixture.rowSetBuilder(result.batchSchema())
        .addRow(1, null)
        .addRow(3, "b-3")
        .addRow(5, "b-5")
        .addRow(7, null)
        .addRow(9, "b-9")
        .addRow(11, "b-11")
        .addRow(13, null)
        .build();
    // expected.print();
    RowSetUtilities.verify(expected, result);
    rsLoader.close();
}
Also used: SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
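As the comment on Example 85 explains, a row only becomes part of the batch when save() follows start(); values written to a started-but-unsaved row are either overwritten by the next row or left as inert garbage past the batch's row count. Below is a two-row sketch of that contract, not part of the Drill sources, assuming the same SubOperatorTest fixture as above.

@Test
public void testSkipRowSketch() {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .buildSchema();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(),
        new ResultSetOptionBuilder().readerSchema(schema).build());
    RowSetLoader rootWriter = rsLoader.writer();
    rsLoader.startBatch();
    // Started and written, but never saved: this row is discarded.
    rootWriter.start();
    rootWriter.scalar(0).setInt(999);
    // The next start() reuses the same position; this row is kept.
    rootWriter.start();
    rootWriter.scalar(0).setInt(1);
    rootWriter.save();
    RowSet result = fixture.wrap(rsLoader.harvest());
    assertEquals(1, result.rowCount());
    RowSetReader reader = result.reader();
    assertTrue(reader.next());
    assertEquals(1, reader.scalar(0).getInt());
    result.clear();
    rsLoader.close();
}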

Aggregations

SchemaBuilder (org.apache.drill.exec.record.metadata.SchemaBuilder): 1095
Test (org.junit.Test): 1020
TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata): 1008
RowSet (org.apache.drill.exec.physical.rowSet.RowSet): 588
SubOperatorTest (org.apache.drill.test.SubOperatorTest): 407
RowSetBuilder (org.apache.drill.exec.physical.rowSet.RowSetBuilder): 288
SingleRowSet (org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet): 263
ClusterTest (org.apache.drill.test.ClusterTest): 245
EvfTest (org.apache.drill.categories.EvfTest): 203
RowSetComparison (org.apache.drill.test.rowSet.RowSetComparison): 188
JsonTest (org.apache.drill.categories.JsonTest): 110
ResultSetLoader (org.apache.drill.exec.physical.resultSet.ResultSetLoader): 108
DirectRowSet (org.apache.drill.exec.physical.rowSet.DirectRowSet): 108
RowSetLoader (org.apache.drill.exec.physical.resultSet.RowSetLoader): 85
BatchSchemaBuilder (org.apache.drill.exec.record.BatchSchemaBuilder): 83
ScalarReader (org.apache.drill.exec.vector.accessor.ScalarReader): 68
UserException (org.apache.drill.common.exceptions.UserException): 62
BatchSchema (org.apache.drill.exec.record.BatchSchema): 62
VectorContainer (org.apache.drill.exec.record.VectorContainer): 58
BaseTest (org.apache.drill.test.BaseTest): 57