Example 16 with RowSetReader

Use of org.apache.drill.exec.physical.rowSet.RowSetReader in project drill by apache.

Class TestResultSetLoaderProtocol, method testOverwriteRow.

/**
 * The writer protocol allows a client to write to a row any number of times
 * before invoking {@code save()}. In this case, each new value simply
 * overwrites the previous value. Here, we test the most basic case: a simple,
 * flat tuple with no arrays. We use a very large Varchar that would, if
 * overwrite were not working, cause vector overflow.
 * <p>
 * The ability to overwrite rows is seldom needed except in one future use
 * case: writing a row, then applying a filter "in-place" to discard unwanted
 * rows, without having to send the row downstream.
 * <p>
 * Because of this use case, specific rules apply when discarding rows or
 * overwriting values.
 * <ul>
 * <li>Each value may be written only once per row. Fixed-width columns
 * tolerate multiple writes, but, because of the way variable-width columns
 * work, multiple writes to them produce undefined results.</li>
 * <li>To overwrite a row, call {@code start()} without calling
 * {@code save()} on the previous row. Doing so discards the data for the
 * previous row and starts a new row in place of the old one.</li>
 * </ul>
 * Note that there is no explicit method to discard a row. Instead,
 * the rule is that a row is not saved until {@code save()} is called.
 */
@Test
public void testOverwriteRow() {
    TupleMetadata schema = new SchemaBuilder()
        .add("a", MinorType.INT)
        .add("b", MinorType.VARCHAR)
        .buildSchema();
    ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
        .readerSchema(schema)
        .rowCountLimit(ValueVector.MAX_ROW_COUNT)
        .build();
    ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
    RowSetLoader rootWriter = rsLoader.writer();
    // Can't use the addRow() shortcut to populate rows when doing overwrites;
    // set each column through its ScalarWriter instead.
    ScalarWriter aWriter = rootWriter.scalar("a");
    ScalarWriter bWriter = rootWriter.scalar("b");
    // Write 100,000 rows, overwriting 99% of them. This will cause vector
    // overflow and data corruption if overwrite does not work; but will happily
    // produce the correct result if everything works as it should.
    byte[] value = new byte[512];
    Arrays.fill(value, (byte) 'X');
    int count = 0;
    rsLoader.startBatch();
    while (count < 100_000) {
        rootWriter.start();
        count++;
        aWriter.setInt(count);
        bWriter.setBytes(value, value.length);
        if (count % 100 == 0) {
            rootWriter.save();
        }
    }
    // Verify using a reader.
    RowSet result = fixture.wrap(rsLoader.harvest());
    assertEquals(count / 100, result.rowCount());
    RowSetReader reader = result.reader();
    int rowId = 1;
    while (reader.next()) {
        assertEquals(rowId * 100, reader.scalar("a").getInt());
        assertTrue(Arrays.equals(value, reader.scalar("b").getBytes()));
        rowId++;
    }
    result.clear();
    rsLoader.close();
}
Also used : ResultSetLoader(org.apache.drill.exec.physical.resultSet.ResultSetLoader) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) SingleRowSet(org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) RowSetLoader(org.apache.drill.exec.physical.resultSet.RowSetLoader) RowSetReader(org.apache.drill.exec.physical.rowSet.RowSetReader) ScalarWriter(org.apache.drill.exec.vector.accessor.ScalarWriter) SubOperatorTest(org.apache.drill.test.SubOperatorTest) Test(org.junit.Test)
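
The "filter in-place" use case mentioned in the javadoc follows directly from these rules: write each row, then call save() only when the row passes a predicate. A minimal sketch under that reading, reusing the loader pattern from the test above; the keepRow() predicate is a hypothetical placeholder, not part of the Drill API:

private void writeFiltered(ResultSetLoader rsLoader, int rowCount) {
    RowSetLoader rootWriter = rsLoader.writer();
    ScalarWriter aWriter = rootWriter.scalar("a");
    rsLoader.startBatch();
    for (int i = 0; i < rowCount; i++) {
        // start() overwrites the prior row if it was never saved.
        rootWriter.start();
        aWriter.setInt(i);
        if (keepRow(i)) {
            // Only saved rows appear in the harvested batch.
            rootWriter.save();
        }
        // There is no explicit discard: an unsaved row is simply
        // replaced by the next call to start().
    }
}

// Hypothetical filter predicate, used only for illustration.
private boolean keepRow(int i) {
    return i % 2 == 0;
}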

Example 17 with RowSetReader

Use of org.apache.drill.exec.physical.rowSet.RowSetReader in project drill by apache.

Class TestMockPlugin, method testVarChar.

@Test
public void testVarChar() throws RpcException {
    String sql = "SELECT name_s17 FROM `mock`.`employee_100`";
    RowSet result = client.queryBuilder().sql(sql).rowSet();
    TupleMetadata schema = result.schema();
    assertEquals(1, schema.size());
    ColumnMetadata col = schema.metadata(0);
    assertEquals("name_s17", col.name());
    assertEquals(MinorType.VARCHAR, col.type());
    assertEquals(DataMode.REQUIRED, col.mode());
    assertEquals(100, result.rowCount());
    RowSetReader reader = result.reader();
    while (reader.next()) {
        assertEquals(17, reader.scalar(0).getString().length());
    }
    result.clear();
}
Also used : ColumnMetadata(org.apache.drill.exec.record.metadata.ColumnMetadata) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) RowSetReader(org.apache.drill.exec.physical.rowSet.RowSetReader) ClusterTest(org.apache.drill.test.ClusterTest) Test(org.junit.Test) UnlikelyTest(org.apache.drill.categories.UnlikelyTest)
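
The mock plugin encodes test data shapes in names: the table suffix gives the row count (employee_100 yields 100 rows) and the column suffix gives the type and width (name_s17 is a REQUIRED VARCHAR of width 17). A sketch of the same check for an INT column, assuming the plugin's `_i` suffix selects INT; the id_i column and employee_10 table names are illustrative, not taken from the test class:

@Test
public void testInt() throws RpcException {
    // Assumes the mock plugin's naming convention: `_i` requests an INT
    // column and the table suffix requests 10 rows. Names are illustrative.
    String sql = "SELECT id_i FROM `mock`.`employee_10`";
    RowSet result = client.queryBuilder().sql(sql).rowSet();
    TupleMetadata schema = result.schema();
    assertEquals(1, schema.size());
    ColumnMetadata col = schema.metadata(0);
    assertEquals("id_i", col.name());
    assertEquals(MinorType.INT, col.type());
    assertEquals(DataMode.REQUIRED, col.mode());
    assertEquals(10, result.rowCount());
    result.clear();
}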

Example 18 with RowSetReader

Use of org.apache.drill.exec.physical.rowSet.RowSetReader in project drill by apache.

Class TestCsvWithHeaders, method testHugeColumn.

@Test
public void testHugeColumn() throws IOException {
    String fileName = buildBigColFile(true);
    RowSet actual = client.queryBuilder().sql(makeStatement(fileName)).rowSet();
    assertEquals(10, actual.rowCount());
    RowSetReader reader = actual.reader();
    while (reader.next()) {
        int i = reader.logicalIndex();
        assertEquals(Integer.toString(i + 1), reader.scalar(0).getString());
        String big = reader.scalar(1).getString();
        assertEquals(BIG_COL_SIZE, big.length());
        for (int j = 0; j < BIG_COL_SIZE; j++) {
            assertEquals((char) ((j + i) % 26 + 'A'), big.charAt(j));
        }
        assertEquals(Integer.toString((i + 1) * 10), reader.scalar(2).getString());
    }
    actual.clear();
}
Also used : DirectRowSet(org.apache.drill.exec.physical.rowSet.DirectRowSet) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) RowSetReader(org.apache.drill.exec.physical.rowSet.RowSetReader) Test(org.junit.Test) EvfTest(org.apache.drill.categories.EvfTest)
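
The assertion loop implies the generator pattern behind buildBigColFile(): for row i, the big column is BIG_COL_SIZE characters long, and character j is 'A' plus (j + i) mod 26, a rotating alphabet shifted by the row index. A sketch of a generator that satisfies those assertions; the real buildBigColFile() helper may format its output file differently:

// Produces the big-column value the assertions above expect for a given
// row index. This mirrors only the checked pattern, not the actual helper.
private static String bigColValue(int rowIndex, int bigColSize) {
    StringBuilder buf = new StringBuilder(bigColSize);
    for (int j = 0; j < bigColSize; j++) {
        // Rotating alphabet, shifted by the row index.
        buf.append((char) ((j + rowIndex) % 26 + 'A'));
    }
    return buf.toString();
}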

Example 19 with RowSetReader

Use of org.apache.drill.exec.physical.rowSet.RowSetReader in project drill by apache.

Class TestCsvWithHeaders, method testWildcardAndPartitionsMultiFiles.

/**
 * Test the use of partition columns with the wildcard. This works for file
 * metadata columns, but confuses the project operator when used for
 * partition columns. DRILL-7080. Still broken in V3 because this appears
 * to be a Project operator issue, not a reader issue. Note that the
 * partition columns move to after the data columns.
 */
@Test
public void testWildcardAndPartitionsMultiFiles() {
    String sql = "SELECT *, dir0, dir1 FROM `dfs.data`.`%s`";
    Iterator<DirectRowSet> iter = client.queryBuilder().sql(sql, PART_DIR).rowSetIterator();
    TupleMetadata expectedSchema = new SchemaBuilder()
        .add("a", MinorType.VARCHAR)
        .add("b", MinorType.VARCHAR)
        .add("c", MinorType.VARCHAR)
        .addNullable("dir0", MinorType.VARCHAR)
        .addNullable("dir1", MinorType.VARCHAR)
        .addNullable("dir00", MinorType.VARCHAR)
        .addNullable("dir10", MinorType.VARCHAR)
        .buildSchema();
    RowSet rowSet;
    if (SCHEMA_BATCH_ENABLED) {
        // First batch is empty; just carries the schema.
        assertTrue(iter.hasNext());
        rowSet = iter.next();
        RowSetUtilities.verify(new RowSetBuilder(client.allocator(), expectedSchema).build(), rowSet);
    }
    // Read the two batches.
    for (int i = 0; i < 2; i++) {
        assertTrue(iter.hasNext());
        rowSet = iter.next();
        // Batches may arrive in either order; identify this one by its data
        // and test accordingly.
        RowSetReader reader = rowSet.reader();
        assertTrue(reader.next());
        String aCol = reader.scalar("a").getString();
        if (aCol.equals("10")) {
            RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema).addRow("10", "foo", "bar", null, null, null, null).build();
            RowSetUtilities.verify(expected, rowSet);
        } else {
            RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema).addRow("20", "fred", "wilma", NESTED_DIR, null, NESTED_DIR, null).build();
            RowSetUtilities.verify(expected, rowSet);
        }
    }
    assertFalse(iter.hasNext());
}
Also used : RowSetBuilder(org.apache.drill.exec.physical.rowSet.RowSetBuilder) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) DirectRowSet(org.apache.drill.exec.physical.rowSet.DirectRowSet) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) RowSetReader(org.apache.drill.exec.physical.rowSet.RowSetReader) Test(org.junit.Test) EvfTest(org.apache.drill.categories.EvfTest)

Example 20 with RowSetReader

Use of org.apache.drill.exec.physical.rowSet.RowSetReader in project drill by apache.

Class TestCsvWithHeaders, method doTestExplicitPartitionsMultiFiles.

/**
 * Test using partition columns with partitioned files in V3. Although the
 * files are nested only one level deep, both dir0 and dir1 are nullable
 * VARCHAR. See {@link TestPartitionRace}, which shows that the types and
 * schemas remain consistent even across multiple scans.
 */
@Test
public void doTestExplicitPartitionsMultiFiles() {
    String sql = "SELECT a, b, c, dir0, dir1 FROM `dfs.data`.`%s`";
    Iterator<DirectRowSet> iter = client.queryBuilder().sql(sql, PART_DIR).rowSetIterator();
    TupleMetadata expectedSchema = new SchemaBuilder()
        .add("a", MinorType.VARCHAR)
        .add("b", MinorType.VARCHAR)
        .add("c", MinorType.VARCHAR)
        .addNullable("dir0", MinorType.VARCHAR)
        .addNullable("dir1", MinorType.VARCHAR)
        .buildSchema();
    RowSet rowSet;
    if (SCHEMA_BATCH_ENABLED) {
        // First batch is empty; just carries the schema.
        assertTrue(iter.hasNext());
        rowSet = iter.next();
        RowSetUtilities.verify(new RowSetBuilder(client.allocator(), expectedSchema).build(), rowSet);
    }
    // Read the two batches.
    for (int i = 0; i < 2; i++) {
        assertTrue(iter.hasNext());
        rowSet = iter.next();
        // Batches may arrive in either order; identify this one by its data
        // and test accordingly.
        RowSetReader reader = rowSet.reader();
        assertTrue(reader.next());
        String aCol = reader.scalar("a").getString();
        if (aCol.equals("10")) {
            RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema).addRow("10", "foo", "bar", null, null).build();
            RowSetUtilities.verify(expected, rowSet);
        } else {
            RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema).addRow("20", "fred", "wilma", NESTED_DIR, null).build();
            RowSetUtilities.verify(expected, rowSet);
        }
    }
    assertFalse(iter.hasNext());
}
Also used : RowSetBuilder(org.apache.drill.exec.physical.rowSet.RowSetBuilder) TupleMetadata(org.apache.drill.exec.record.metadata.TupleMetadata) DirectRowSet(org.apache.drill.exec.physical.rowSet.DirectRowSet) SchemaBuilder(org.apache.drill.exec.record.metadata.SchemaBuilder) RowSet(org.apache.drill.exec.physical.rowSet.RowSet) RowSetReader(org.apache.drill.exec.physical.rowSet.RowSetReader) Test(org.junit.Test) EvfTest(org.apache.drill.categories.EvfTest)

Aggregations

RowSetReader (org.apache.drill.exec.physical.rowSet.RowSetReader): 33
Test (org.junit.Test): 26
RowSet (org.apache.drill.exec.physical.rowSet.RowSet): 25
TupleMetadata (org.apache.drill.exec.record.metadata.TupleMetadata): 22
SchemaBuilder (org.apache.drill.exec.record.metadata.SchemaBuilder): 21
SubOperatorTest (org.apache.drill.test.SubOperatorTest): 16
ResultSetLoader (org.apache.drill.exec.physical.resultSet.ResultSetLoader): 15
RowSetLoader (org.apache.drill.exec.physical.resultSet.RowSetLoader): 15
DirectRowSet (org.apache.drill.exec.physical.rowSet.DirectRowSet): 11
SingleRowSet (org.apache.drill.exec.physical.rowSet.RowSet.SingleRowSet): 10
ArrayReader (org.apache.drill.exec.vector.accessor.ArrayReader): 10
ScalarWriter (org.apache.drill.exec.vector.accessor.ScalarWriter): 10
EvfTest (org.apache.drill.categories.EvfTest): 8
VectorContainer (org.apache.drill.exec.record.VectorContainer): 7
ResultSetOptions (org.apache.drill.exec.physical.resultSet.impl.ResultSetLoaderImpl.ResultSetOptions): 6
RowSetBuilder (org.apache.drill.exec.physical.rowSet.RowSetBuilder): 6
TupleReader (org.apache.drill.exec.vector.accessor.TupleReader): 6
ScalarReader (org.apache.drill.exec.vector.accessor.ScalarReader): 4
TupleWriter (org.apache.drill.exec.vector.accessor.TupleWriter): 4
BatchSchema (org.apache.drill.exec.record.BatchSchema): 3