
Example 1 with TupleMetadata

Use of org.apache.drill.exec.record.metadata.TupleMetadata in project drill by axbaretto.

From the class TestResultSetLoaderMapArray, the method testBasics:

@Test
public void testBasics() {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMapArray("m")
            .add("c", MinorType.INT)
            .add("d", MinorType.VARCHAR)
            .resumeSchema()
        .buildSchema();
    ResultSetLoaderImpl.ResultSetOptions options = new OptionBuilder().setSchema(schema).build();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    RowSetLoader rootWriter = rsLoader.writer();
    // Verify structure and schema
    TupleMetadata actualSchema = rootWriter.schema();
    assertEquals(2, actualSchema.size());
    assertTrue(actualSchema.metadata(1).isArray());
    assertTrue(actualSchema.metadata(1).isMap());
    assertEquals(2, actualSchema.metadata("m").mapSchema().size());
    assertEquals(2, actualSchema.column("m").getChildren().size());
    // Write a couple of rows with arrays.
    rsLoader.startBatch();
    rootWriter
        .addRow(10, objArray(objArray(110, "d1.1"), objArray(120, "d2.2")))
        .addRow(20, objArray())
        .addRow(30, objArray(objArray(310, "d3.1"), objArray(320, "d3.2"), objArray(330, "d3.3")));
    // Verify the first batch
    RowSet actual = fixture.wrap(rsLoader.harvest());
    SingleRowSet expected = fixture.rowSetBuilder(schema)
        .addRow(10, objArray(objArray(110, "d1.1"), objArray(120, "d2.2")))
        .addRow(20, objArray())
        .addRow(30, objArray(objArray(310, "d3.1"), objArray(320, "d3.2"), objArray(330, "d3.3")))
        .build();
    new RowSetComparison(expected).verifyAndClearAll(actual);
    // In the second, create a row, then add a map member.
    // Should be back-filled to empty for the first row.
    rsLoader.startBatch();
    rootWriter.addRow(40, objArray(objArray(410, "d4.1"), objArray(420, "d4.2")));
    TupleWriter mapWriter = rootWriter.array("m").tuple();
    mapWriter.addColumn(SchemaBuilder.columnSchema("e", MinorType.VARCHAR, DataMode.OPTIONAL));
    rootWriter
        .addRow(50, objArray(objArray(510, "d5.1", "e5.1"), objArray(520, "d5.2", null)))
        .addRow(60, objArray(objArray(610, "d6.1", "e6.1"), objArray(620, "d6.2", null), objArray(630, "d6.3", "e6.3")));
    // Verify the second batch
    actual = fixture.wrap(rsLoader.harvest());
    TupleMetadata expectedSchema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMapArray("m")
            .add("c", MinorType.INT)
            .add("d", MinorType.VARCHAR)
            .addNullable("e", MinorType.VARCHAR)
            .resumeSchema()
        .buildSchema();
    expected = fixture.rowSetBuilder(expectedSchema)
        .addRow(40, objArray(objArray(410, "d4.1", null), objArray(420, "d4.2", null)))
        .addRow(50, objArray(objArray(510, "d5.1", "e5.1"), objArray(520, "d5.2", null)))
        .addRow(60, objArray(objArray(610, "d6.1", "e6.1"), objArray(620, "d6.2", null), objArray(630, "d6.3", "e6.3")))
        .build();
    new RowSetComparison(expected).verifyAndClearAll(actual);
    rsLoader.close();
}
Also used:
SingleRowSet (org.apache.drill.test.rowSet.RowSet.SingleRowSet)
RowSet (org.apache.drill.test.rowSet.RowSet)
RowSetComparison (org.apache.drill.test.rowSet.RowSetComparison)
ResultSetLoader (org.apache.drill.exec.physical.rowSet.ResultSetLoader)
RowSetLoader (org.apache.drill.exec.physical.rowSet.RowSetLoader)
TupleWriter (org.apache.drill.exec.vector.accessor.TupleWriter)
TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata)
SchemaBuilder (org.apache.drill.test.rowSet.schema.SchemaBuilder)
SubOperatorTest (org.apache.drill.test.SubOperatorTest)
Test (org.junit.Test)
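
Note: objArray and strArray above are statically imported test helpers whose import the listing omits; in this Drill snapshot they come from the row-set test utilities (RowSetUtilities). The sketch below is an illustrative stand-in, not the project's exact source, with the class name ArrayHelpers invented for the example:

    public class ArrayHelpers {

        // Boxes a mixed list of values into an Object[]. One inner objArray
        // per map instance; an outer objArray holds the whole repeated-map
        // value for a row.
        public static Object[] objArray(Object... values) {
            return values;
        }

        // Boxes strings into a String[] for a repeated VARCHAR column.
        public static String[] strArray(String... values) {
            return values;
        }
    }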

Example 2 with TupleMetadata

Use of org.apache.drill.exec.record.metadata.TupleMetadata in project drill by axbaretto.

From the class TestResultSetLoaderMapArray, the method testNestedArray:

@Test
public void testNestedArray() {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMapArray("m")
            .add("c", MinorType.INT)
            .addArray("d", MinorType.VARCHAR)
            .resumeSchema()
        .buildSchema();
    ResultSetLoaderImpl.ResultSetOptions options = new OptionBuilder().setSchema(schema).build();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    RowSetLoader rootWriter = rsLoader.writer();
    // Write a couple of rows with arrays within arrays.
    // (And, of course, the Varchar is actually an array of
    // bytes, so that's three array levels.)
    rsLoader.startBatch();
    rootWriter
        .addRow(10, objArray(
            objArray(110, strArray("d1.1.1", "d1.1.2")),
            objArray(120, strArray("d1.2.1", "d1.2.2"))))
        .addRow(20, objArray())
        .addRow(30, objArray(
            objArray(310, strArray("d3.1.1", "d3.2.2")),
            objArray(320, strArray()),
            objArray(330, strArray("d3.3.1", "d1.2.2"))));
    // Verify the batch
    RowSet actual = fixture.wrap(rsLoader.harvest());
    SingleRowSet expected = fixture.rowSetBuilder(schema)
        .addRow(10, objArray(
            objArray(110, strArray("d1.1.1", "d1.1.2")),
            objArray(120, strArray("d1.2.1", "d1.2.2"))))
        .addRow(20, objArray())
        .addRow(30, objArray(
            objArray(310, strArray("d3.1.1", "d3.2.2")),
            objArray(320, strArray()),
            objArray(330, strArray("d3.3.1", "d1.2.2"))))
        .build();
    new RowSetComparison(expected).verifyAndClearAll(actual);
    rsLoader.close();
}
Also used:
SingleRowSet (org.apache.drill.test.rowSet.RowSet.SingleRowSet)
RowSet (org.apache.drill.test.rowSet.RowSet)
RowSetComparison (org.apache.drill.test.rowSet.RowSetComparison)
ResultSetLoader (org.apache.drill.exec.physical.rowSet.ResultSetLoader)
RowSetLoader (org.apache.drill.exec.physical.rowSet.RowSetLoader)
TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata)
SchemaBuilder (org.apache.drill.test.rowSet.schema.SchemaBuilder)
SubOperatorTest (org.apache.drill.test.SubOperatorTest)
Test (org.junit.Test)
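
As the comment in testNestedArray notes, there are three levels of nesting here. Unpacking one row value for column "m" makes the levels explicit (same helpers as above; this simply restates the row from the test level by level):

    // Level 1: the repeated map, an Object[] with one entry per map instance.
    Object[] row30 = objArray(
        objArray(310, strArray("d3.1.1", "d3.2.2")),   // map instance: (c, d[])
        objArray(320, strArray()),                     // map instance with an empty "d" array
        objArray(330, strArray("d3.3.1", "d1.2.2")));
    // Level 2: each map instance pairs a scalar c with a VARCHAR array d.
    // Level 3: each VARCHAR value is itself stored as a byte array in the vector.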

Example 3 with TupleMetadata

Use of org.apache.drill.exec.record.metadata.TupleMetadata in project drill by axbaretto.

From the class TestResultSetLoaderMapArray, the method testOverwriteRow:

/**
 * Version of the {@link TestResultSetLoaderProtocol#testOverwriteRow()} test
 * that uses nested columns inside an array of maps. Here we must call
 * <tt>start()</tt> to reset the array back to the initial start position after
 * each "discard."
 */
@Test
public void testOverwriteRow() {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .addMapArray("m")
            .add("b", MinorType.INT)
            .add("c", MinorType.VARCHAR)
            .resumeSchema()
        .buildSchema();
    ResultSetLoaderImpl.ResultSetOptions options = new OptionBuilder()
        .setSchema(schema)
        .setRowCountLimit(ValueVector.MAX_ROW_COUNT)
        .build();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    RowSetLoader rootWriter = rsLoader.writer();
    // Can't use the shortcut to populate rows when doing overwrites.
    ScalarWriter aWriter = rootWriter.scalar("a");
    ArrayWriter maWriter = rootWriter.array("m");
    TupleWriter mWriter = maWriter.tuple();
    ScalarWriter bWriter = mWriter.scalar("b");
    ScalarWriter cWriter = mWriter.scalar("c");
    // Write 10,000 rows, overwriting 99% of them. This will cause vector
    // overflow and data corruption if overwrite does not work; but will happily
    // produce the correct result if everything works as it should.
    byte[] value = new byte[512];
    Arrays.fill(value, (byte) 'X');
    int count = 0;
    rsLoader.startBatch();
    while (count < 10_000) {
        rootWriter.start();
        count++;
        aWriter.setInt(count);
        for (int i = 0; i < 10; i++) {
            bWriter.setInt(count * 10 + i);
            cWriter.setBytes(value, value.length);
            maWriter.save();
        }
        if (count % 100 == 0) {
            rootWriter.save();
        }
    }
    // Verify using a reader.
    RowSet result = fixture.wrap(rsLoader.harvest());
    assertEquals(count / 100, result.rowCount());
    RowSetReader reader = result.reader();
    ArrayReader maReader = reader.array("m");
    TupleReader mReader = maReader.tuple();
    int rowId = 1;
    while (reader.next()) {
        assertEquals(rowId * 100, reader.scalar("a").getInt());
        assertEquals(10, maReader.size());
        for (int i = 0; i < 10; i++) {
            maReader.setPosn(i);
            assertEquals(rowId * 1000 + i, mReader.scalar("b").getInt());
            assertTrue(Arrays.equals(value, mReader.scalar("c").getBytes()));
        }
        rowId++;
    }
    result.clear();
    rsLoader.close();
}
Also used:
TupleReader (org.apache.drill.exec.vector.accessor.TupleReader)
SingleRowSet (org.apache.drill.test.rowSet.RowSet.SingleRowSet)
RowSet (org.apache.drill.test.rowSet.RowSet)
ArrayReader (org.apache.drill.exec.vector.accessor.ArrayReader)
ResultSetLoader (org.apache.drill.exec.physical.rowSet.ResultSetLoader)
TupleWriter (org.apache.drill.exec.vector.accessor.TupleWriter)
TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata)
SchemaBuilder (org.apache.drill.test.rowSet.schema.SchemaBuilder)
RowSetLoader (org.apache.drill.exec.physical.rowSet.RowSetLoader)
ArrayWriter (org.apache.drill.exec.vector.accessor.ArrayWriter)
RowSetReader (org.apache.drill.test.rowSet.RowSetReader)
ScalarWriter (org.apache.drill.exec.vector.accessor.ScalarWriter)
SubOperatorTest (org.apache.drill.test.SubOperatorTest)
Test (org.junit.Test)
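
The mechanism this test relies on: a row joins the batch only when rootWriter.save() runs, so calling rootWriter.start() again without a save() discards the in-flight row, and for the map array it also rewinds the array writer to its start position (which is why start() must be called after each "discard," per the Javadoc above). A condensed sketch of the same pattern, reusing the writers set up in the test (loader, schema, and value setup omitted):

    rsLoader.startBatch();
    for (int n = 1; n <= 1000; n++) {
        // (Re)position the writers at the current row slot; if the previous
        // row was never saved, its values are simply overwritten.
        rootWriter.start();
        aWriter.setInt(n);
        bWriter.setInt(n * 10);
        cWriter.setBytes(value, value.length);
        // Commit one element of the map array within this row.
        maWriter.save();
        if (n % 100 == 0) {
            // Commit the row itself; only every 100th row is kept.
            rootWriter.save();
        }
    }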

Example 4 with TupleMetadata

Use of org.apache.drill.exec.record.metadata.TupleMetadata in project drill by axbaretto.

From the class TestTupleSchema, the method testEmptyRootTuple:

/**
 * Test the basics of an empty root tuple (i.e. row) schema.
 */
@Test
public void testEmptyRootTuple() {
    TupleMetadata root = new TupleSchema();
    assertEquals(0, root.size());
    assertTrue(root.isEmpty());
    assertEquals(-1, root.index("foo"));
    try {
        root.metadata(0);
        fail();
    } catch (IndexOutOfBoundsException e) {
        // Expected
    }
    assertNull(root.metadata("foo"));
    try {
        root.column(0);
        fail();
    } catch (IndexOutOfBoundsException e) {
        // Expected
    }
    assertNull(root.column("foo"));
    try {
        root.fullName(0);
        fail();
    } catch (IndexOutOfBoundsException e) {
        // Expected
    }
    // The full name method does not check if the column is actually
    // in the tuple.
    MaterializedField field = SchemaBuilder.columnSchema("c", MinorType.INT, DataMode.REQUIRED);
    ColumnMetadata col = MetadataUtils.fromField(field);
    assertEquals("c", root.fullName(col));
    assertTrue(root.isEquivalent(root));
    assertNull(root.parent());
    assertTrue(root.toFieldList().isEmpty());
}
Also used:
MapColumnMetadata (org.apache.drill.exec.record.metadata.MapColumnMetadata)
ColumnMetadata (org.apache.drill.exec.record.metadata.ColumnMetadata)
PrimitiveColumnMetadata (org.apache.drill.exec.record.metadata.PrimitiveColumnMetadata)
VariantColumnMetadata (org.apache.drill.exec.record.metadata.VariantColumnMetadata)
TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata)
TupleSchema (org.apache.drill.exec.record.metadata.TupleSchema)
SubOperatorTest (org.apache.drill.test.SubOperatorTest)
Test (org.junit.Test)
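
For contrast with the empty-tuple checks, a brief sketch of the same accessors on a one-column root tuple, built with the SchemaBuilder API used throughout these examples (assertions are JUnit's, as in the tests; the expected values follow from the test above, e.g. index() returns -1 only for a missing column):

    TupleMetadata root = new SchemaBuilder()
        .add("c", MinorType.INT)
        .buildSchema();
    assertEquals(1, root.size());
    assertFalse(root.isEmpty());
    assertEquals(0, root.index("c"));
    // With a valid index, fullName() succeeds rather than throwing.
    assertEquals("c", root.fullName(0));
    assertNotNull(root.metadata("c"));
    assertNotNull(root.column("c"));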

Example 5 with TupleMetadata

Use of org.apache.drill.exec.record.metadata.TupleMetadata in project drill by axbaretto.

From the class TestTupleSchema, the method testNestedSchema:

@Test
public void testNestedSchema() {
    TupleMetadata schema = new SchemaBuilder()
        .addList("list")
            .addType(MinorType.BIGINT)
            .addType(MinorType.VARCHAR)
            .addMap()
                .add("a", MinorType.INT)
                .add("b", MinorType.VARCHAR)
                .resumeUnion()
            .addList()
                .addType(MinorType.FLOAT8)
                .addType(MinorType.DECIMAL18)
                .buildNested()
            .resumeSchema()
        .buildSchema();
    assertEquals(1, schema.size());
    ColumnMetadata col = schema.metadata(0);
    assertTrue(col.isVariant());
    VariantMetadata union = col.variantSchema();
    assertNotNull(union);
    assertEquals(4, union.size());
    assertTrue(union.hasType(MinorType.MAP));
    assertTrue(union.hasType(MinorType.LIST));
    ColumnMetadata mapCol = union.member(MinorType.MAP);
    TupleMetadata mapSchema = mapCol.mapSchema();
    assertEquals(2, mapSchema.size());
    ColumnMetadata listCol = union.member(MinorType.LIST);
    VariantMetadata listSchema = listCol.variantSchema();
    assertEquals(2, listSchema.size());
    assertTrue(listSchema.hasType(MinorType.FLOAT8));
    assertTrue(listSchema.hasType(MinorType.DECIMAL18));
}
Also used:
VariantMetadata (org.apache.drill.exec.record.metadata.VariantMetadata)
MapColumnMetadata (org.apache.drill.exec.record.metadata.MapColumnMetadata)
ColumnMetadata (org.apache.drill.exec.record.metadata.ColumnMetadata)
PrimitiveColumnMetadata (org.apache.drill.exec.record.metadata.PrimitiveColumnMetadata)
VariantColumnMetadata (org.apache.drill.exec.record.metadata.VariantColumnMetadata)
TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata)
SchemaBuilder (org.apache.drill.test.rowSet.schema.SchemaBuilder)
SubOperatorTest (org.apache.drill.test.SubOperatorTest)
Test (org.junit.Test)

Aggregations

TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata): 1235
Test (org.junit.Test): 1126
SchemaBuilder (org.apache.drill.exec.record.metadata.SchemaBuilder): 1008
RowSet (org.apache.drill.exec.physical.rowSet.RowSet): 598
SubOperatorTest (org.apache.drill.test.SubOperatorTest): 460
RowSetBuilder (org.apache.drill.exec.physical.rowSet.RowSetBuilder): 293
SingleRowSet (org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet): 264
ClusterTest (org.apache.drill.test.ClusterTest): 261
EvfTest (org.apache.drill.categories.EvfTest): 230
RowSetComparison (org.apache.drill.test.rowSet.RowSetComparison): 211
ResultSetLoader (org.apache.drill.exec.physical.resultSet.ResultSetLoader): 111
JsonTest (org.apache.drill.categories.JsonTest): 110
DirectRowSet (org.apache.drill.exec.physical.rowSet.DirectRowSet): 109
BaseTest (org.apache.drill.test.BaseTest): 106
ColumnMetadata (org.apache.drill.exec.record.metadata.ColumnMetadata): 100
RowSetLoader (org.apache.drill.exec.physical.resultSet.RowSetLoader): 89
ScalarReader (org.apache.drill.exec.vector.accessor.ScalarReader): 72
ScalarWriter (org.apache.drill.exec.vector.accessor.ScalarWriter): 69
UserException (org.apache.drill.common.exceptions.UserException): 67
SchemaBuilder (org.apache.drill.test.rowSet.schema.SchemaBuilder): 65