use of org.apache.drill.exec.physical.rowSet.RowSetReader in project drill by apache.
the class TestResultSetLoaderProtocol method testOverwriteRow.
/**
* The writer protocol allows a client to write to a row any number of times
* before invoking {@code save()}. In this case, each new value simply
* overwrites the previous value. Here, we test the most basic case: a simple,
* flat tuple with no arrays. We use a very large Varchar that would, if
* overwrite were not working, cause vector overflow.
* <p>
* The ability to overwrite rows is seldom needed except in one future use
* case: writing a row, then applying a filter "in-place" to discard unwanted
* rows, without having to send the row downstream.
* <p>
* Because of this use case, specific rules apply when discarding a row or
* overwriting values.
* <ul>
* <li>Values can be written once per row. Fixed-width columns actually allow
* multiple writes, but, because of the way variable-width columns work,
* multiple writes to those columns cause undefined results.</li>
* <li>To overwrite a row, call <tt>start()</tt> without calling
* <tt>save()</tt> on the previous row. Doing so ignores data for the
* previous row and starts a new row in place of the old one.</li>
* </ul>
* Note that there is no explicit method to discard a row. Instead,
* the rule is that a row is not saved until <tt>save()</tt> is called.
*/
@Test
public void testOverwriteRow() {
  TupleMetadata schema = new SchemaBuilder()
      .add("a", MinorType.INT)
      .add("b", MinorType.VARCHAR)
      .buildSchema();
  ResultSetLoaderImpl.ResultSetOptions options = new ResultSetOptionBuilder()
      .readerSchema(schema)
      .rowCountLimit(ValueVector.MAX_ROW_COUNT)
      .build();
  ResultSetLoader rsLoader = new ResultSetLoaderImpl(fixture.allocator(), options);
  RowSetLoader rootWriter = rsLoader.writer();

  // Can't use the shortcut to populate rows when doing overwrites.
  ScalarWriter aWriter = rootWriter.scalar("a");
  ScalarWriter bWriter = rootWriter.scalar("b");

  // Write 100,000 rows, overwriting 99% of them. This will cause vector
  // overflow and data corruption if overwrite does not work; but will happily
  // produce the correct result if everything works as it should.
  byte[] value = new byte[512];
  Arrays.fill(value, (byte) 'X');
  int count = 0;
  rsLoader.startBatch();
  while (count < 100_000) {
    rootWriter.start();
    count++;
    aWriter.setInt(count);
    bWriter.setBytes(value, value.length);
    if (count % 100 == 0) {
      rootWriter.save();
    }
  }

  // Verify using a reader.
  RowSet result = fixture.wrap(rsLoader.harvest());
  assertEquals(count / 100, result.rowCount());
  RowSetReader reader = result.reader();
  int rowId = 1;
  while (reader.next()) {
    assertEquals(rowId * 100, reader.scalar("a").getInt());
    assertTrue(Arrays.equals(value, reader.scalar("b").getBytes()));
    rowId++;
  }
  result.clear();
  rsLoader.close();
}
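The in-test comment ("Can't use the shortcut to populate rows") presumably refers to RowSetLoader.addRow(), which bundles start(), the column writes, and save() into a single call and therefore keeps every row. A minimal sketch of that shortcut, assuming the same schema, rsLoader, and rootWriter setup as in the test above:

// Sketch only: same loader setup as testOverwriteRow(). addRow() starts a
// row, writes each column in declared order, and saves it, so there is no
// opportunity to overwrite; every row written here is kept.
rsLoader.startBatch();
rootWriter.addRow(1, "first row");
rootWriter.addRow(2, "second row");
RowSet all = fixture.wrap(rsLoader.harvest());
assertEquals(2, all.rowCount());
all.clear();
rsLoader.close();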
use of org.apache.drill.exec.physical.rowSet.RowSetReader in project drill by apache.
the class TestMockPlugin method testVarChar.
@Test
public void testVarChar() throws RpcException {
  String sql = "SELECT name_s17 FROM `mock`.`employee_100`";
  RowSet result = client.queryBuilder().sql(sql).rowSet();
  TupleMetadata schema = result.schema();
  assertEquals(1, schema.size());

  ColumnMetadata col = schema.metadata(0);
  assertEquals("name_s17", col.name());
  assertEquals(MinorType.VARCHAR, col.type());
  assertEquals(DataMode.REQUIRED, col.mode());
  assertEquals(100, result.rowCount());

  RowSetReader reader = result.reader();
  while (reader.next()) {
    assertEquals(17, reader.scalar(0).getString().length());
  }
  result.clear();
}
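Judging from the assertions above, the mock plugin encodes the data shape in the names themselves: the _s17 column suffix yields a required VARCHAR whose values are 17 characters long, and the _100 table suffix yields 100 rows. A small variation as a sketch; the table and column names below are hypothetical, not taken from the Drill sources:

// Hypothetical query against the mock plugin. The _s25 and _50 suffixes are
// assumed to behave like _s17 and _100 in testVarChar(): a required VARCHAR
// of 25 characters over a 50-row table.
String sql = "SELECT comment_s25 FROM `mock`.`orders_50`";
RowSet result = client.queryBuilder().sql(sql).rowSet();
assertEquals(50, result.rowCount());
RowSetReader reader = result.reader();
while (reader.next()) {
  assertEquals(25, reader.scalar(0).getString().length());
}
result.clear();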
use of org.apache.drill.exec.physical.rowSet.RowSetReader in project drill by apache.
the class TestCsvWithHeaders method testHugeColumn.
@Test
public void testHugeColumn() throws IOException {
  String fileName = buildBigColFile(true);
  RowSet actual = client.queryBuilder().sql(makeStatement(fileName)).rowSet();
  assertEquals(10, actual.rowCount());

  RowSetReader reader = actual.reader();
  while (reader.next()) {
    int i = reader.logicalIndex();
    assertEquals(Integer.toString(i + 1), reader.scalar(0).getString());
    String big = reader.scalar(1).getString();
    assertEquals(BIG_COL_SIZE, big.length());
    for (int j = 0; j < BIG_COL_SIZE; j++) {
      assertEquals((char) ((j + i) % 26 + 'A'), big.charAt(j));
    }
    assertEquals(Integer.toString((i + 1) * 10), reader.scalar(2).getString());
  }
  actual.clear();
}
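The verification loop implies the shape of the file that buildBigColFile() generates (that helper lives in the Drill test sources and is not shown here): for each row index, the second column holds BIG_COL_SIZE characters cycling through A to Z, offset by the row index. A hedged reconstruction of one such value:

// Hypothetical reconstruction of one big-column value, inferred from the
// assertions above; not the actual buildBigColFile() implementation.
static String bigColValue(int rowIndex, int bigColSize) {
  StringBuilder sb = new StringBuilder(bigColSize);
  for (int j = 0; j < bigColSize; j++) {
    sb.append((char) ((j + rowIndex) % 26 + 'A'));
  }
  return sb.toString();
}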
use of org.apache.drill.exec.physical.rowSet.RowSetReader in project drill by apache.
the class TestCsvWithHeaders method testWildcardAndPartitionsMultiFiles.
/**
 * Test the use of partition columns with the wildcard. This works for file
 * metadata columns, but confuses the Project operator when used for
 * partition columns. DRILL-7080. Still broken in V3 because this appears
 * to be a Project operator issue, not a reader issue. Note that the
 * partition columns move after the data columns.
 */
@Test
public void testWildcardAndPartitionsMultiFiles() {
  String sql = "SELECT *, dir0, dir1 FROM `dfs.data`.`%s`";
  Iterator<DirectRowSet> iter = client.queryBuilder().sql(sql, PART_DIR).rowSetIterator();
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a", MinorType.VARCHAR)
      .add("b", MinorType.VARCHAR)
      .add("c", MinorType.VARCHAR)
      .addNullable("dir0", MinorType.VARCHAR)
      .addNullable("dir1", MinorType.VARCHAR)
      .addNullable("dir00", MinorType.VARCHAR)
      .addNullable("dir10", MinorType.VARCHAR)
      .buildSchema();
  RowSet rowSet;
  if (SCHEMA_BATCH_ENABLED) {
    // First batch is empty; just carries the schema.
    assertTrue(iter.hasNext());
    rowSet = iter.next();
    RowSetUtilities.verify(new RowSetBuilder(client.allocator(), expectedSchema).build(), rowSet);
  }

  // Read the two batches.
  for (int i = 0; i < 2; i++) {
    assertTrue(iter.hasNext());
    rowSet = iter.next();

    // Figure out which record this is and test accordingly.
    RowSetReader reader = rowSet.reader();
    assertTrue(reader.next());
    String aCol = reader.scalar("a").getString();
    if (aCol.equals("10")) {
      RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
          .addRow("10", "foo", "bar", null, null, null, null)
          .build();
      RowSetUtilities.verify(expected, rowSet);
    } else {
      RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
          .addRow("20", "fred", "wilma", NESTED_DIR, null, NESTED_DIR, null)
          .build();
      RowSetUtilities.verify(expected, rowSet);
    }
  }
  assertFalse(iter.hasNext());
}
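The difference between this test and the next one is the projection list. With the wildcard plus explicit partition columns, the Project operator emits the partition columns twice, which is why the expected schema above carries the generated dir00 and dir10 columns (DRILL-7080). Listing the data columns explicitly avoids the duplication, as the next test verifies. The two queries side by side, taken from the tests themselves:

// Wildcard plus explicit partition columns: the partition columns come back
// twice (dir0/dir1 plus the generated dir00/dir10); see DRILL-7080.
String withWildcard = "SELECT *, dir0, dir1 FROM `dfs.data`.`%s`";

// Explicit column list: exactly one dir0 and one dir1 in the result schema.
String explicitCols = "SELECT a, b, c, dir0, dir1 FROM `dfs.data`.`%s`";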
use of org.apache.drill.exec.physical.rowSet.RowSetReader in project drill by apache.
the class TestCsvWithHeaders method doTestExplicitPartitionsMultiFiles.
/**
 * Test using partition columns with partitioned files in V3. Although the
 * file is nested to only one level, both dir0 and dir1 are nullable VARCHAR.
 * See {@link TestPartitionRace}, which shows that the types and schemas
 * remain consistent even when used across multiple scans.
 */
@Test
public void doTestExplicitPartitionsMultiFiles() {
  String sql = "SELECT a, b, c, dir0, dir1 FROM `dfs.data`.`%s`";
  Iterator<DirectRowSet> iter = client.queryBuilder().sql(sql, PART_DIR).rowSetIterator();
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a", MinorType.VARCHAR)
      .add("b", MinorType.VARCHAR)
      .add("c", MinorType.VARCHAR)
      .addNullable("dir0", MinorType.VARCHAR)
      .addNullable("dir1", MinorType.VARCHAR)
      .buildSchema();
  RowSet rowSet;
  if (SCHEMA_BATCH_ENABLED) {
    // First batch is empty; just carries the schema.
    assertTrue(iter.hasNext());
    rowSet = iter.next();
    RowSetUtilities.verify(new RowSetBuilder(client.allocator(), expectedSchema).build(), rowSet);
  }

  // Read the two batches.
  for (int i = 0; i < 2; i++) {
    assertTrue(iter.hasNext());
    rowSet = iter.next();

    // Figure out which record this is and test accordingly.
    RowSetReader reader = rowSet.reader();
    assertTrue(reader.next());
    String aCol = reader.scalar("a").getString();
    if (aCol.equals("10")) {
      RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
          .addRow("10", "foo", "bar", null, null)
          .build();
      RowSetUtilities.verify(expected, rowSet);
    } else {
      RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
          .addRow("20", "fred", "wilma", NESTED_DIR, null)
          .build();
      RowSetUtilities.verify(expected, rowSet);
    }
  }
  assertFalse(iter.hasNext());
}