Use of org.apache.drill.exec.physical.rowSet.RowSetReader in the Apache Drill project: class TestCsvWithoutHeaders, method testHugeColumn.
/**
 * Verifies that a very wide column survives the scan intact. Each of the
 * ten rows carries a {@code BIG_COL_SIZE}-character value whose characters
 * follow a rotating A-Z pattern, bracketed by two small numeric columns.
 */
@Test
public void testHugeColumn() throws IOException {
  String fileName = buildBigColFile(false);
  String sql = "SELECT * FROM `dfs.data`.`%s`";
  RowSet actual = client.queryBuilder().sql(sql, fileName).rowSet();
  assertEquals(10, actual.rowCount());
  RowSetReader rowReader = actual.reader();
  ArrayReader colsReader = rowReader.array(0);
  while (rowReader.next()) {
    int rowIndex = rowReader.logicalIndex();
    // First field: the 1-based row number.
    colsReader.next();
    assertEquals(Integer.toString(rowIndex + 1), colsReader.scalar().getString());
    // Second field: the huge column; check its length and every character.
    colsReader.next();
    String bigValue = colsReader.scalar().getString();
    assertEquals(BIG_COL_SIZE, bigValue.length());
    for (int pos = 0; pos < BIG_COL_SIZE; pos++) {
      assertEquals((char) ((pos + rowIndex) % 26 + 'A'), bigValue.charAt(pos));
    }
    // Third field: ten times the row number.
    colsReader.next();
    assertEquals(Integer.toString((rowIndex + 1) * 10), colsReader.scalar().getString());
  }
  actual.clear();
}
Use of org.apache.drill.exec.physical.rowSet.RowSetReader in the Apache Drill project: class TestCsvWithoutHeaders, method testPartitionExpansion.
/**
 * Partition expansion for headerless CSV.
 * <p>
 * Starting with V3 (as in V2 before Drill 1.12), partition columns come
 * after data columns, so data column positions stay stable when files
 * are nested one level deeper.
 */
@Test
public void testPartitionExpansion() throws IOException {
  String sql = "SELECT * FROM `dfs.data`.`%s`";
  Iterator<DirectRowSet> batches = client.queryBuilder().sql(sql, PART_DIR).rowSetIterator();
  TupleMetadata expectedSchema = new SchemaBuilder()
      .addArray("columns", MinorType.VARCHAR)
      .addNullable("dir0", MinorType.VARCHAR)
      .buildSchema();
  RowSet batch;
  if (SCHEMA_BATCH_ENABLED) {
    // The leading batch carries only the schema, never data.
    assertTrue(batches.hasNext());
    batch = batches.next();
    assertEquals(0, batch.rowCount());
    batch.clear();
  }
  // File read order is not fixed, so peek at the first value of each
  // data batch to decide which expected row set applies.
  for (int batchNo = 0; batchNo < 2; batchNo++) {
    assertTrue(batches.hasNext());
    batch = batches.next();
    RowSetReader rowReader = batch.reader();
    assertTrue(rowReader.next());
    ArrayReader colsReader = rowReader.array(0);
    assertTrue(colsReader.next());
    String firstValue = colsReader.scalar().getString();
    RowSet expected;
    if (firstValue.equals("10")) {
      expected = new RowSetBuilder(client.allocator(), expectedSchema)
          .addRow(strArray("10", "foo", "bar"), null)
          .addRow(strArray("20", "fred", "wilma"), null)
          .build();
    } else {
      expected = new RowSetBuilder(client.allocator(), expectedSchema)
          .addRow(strArray("30", "barney", "betty"), NESTED_DIR)
          .build();
    }
    RowSetUtilities.verify(expected, batch);
  }
  assertFalse(batches.hasNext());
}
Use of org.apache.drill.exec.physical.rowSet.RowSetReader in the Apache Drill project: class TestPartitionRace, method testSingleScan.
/**
 * Oddly, when run in a single fragment, the files occur in a
 * stable order, the partition column always appears, and it shows
 * up in the first column position.
 */
@Test
public void testSingleScan() throws IOException {
  String sql = "SELECT * FROM `dfs.data`.`%s`";
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a", MinorType.VARCHAR)
      .add("b", MinorType.VARCHAR)
      .add("c", MinorType.VARCHAR)
      .addNullable("dir0", MinorType.VARCHAR)
      .buildSchema();
  Iterator<DirectRowSet> batches = client.queryBuilder().sql(sql, PART_DIR).rowSetIterator();
  RowSet batch;
  if (SCHEMA_BATCH_ENABLED) {
    // The leading batch carries only the schema, never data.
    assertTrue(batches.hasNext());
    batch = batches.next();
    assertEquals(0, batch.rowCount());
    batch.clear();
  }
  // Two data batches follow; identify each by its first "a" value.
  for (int batchNo = 0; batchNo < 2; batchNo++) {
    assertTrue(batches.hasNext());
    batch = batches.next();
    RowSetReader rowReader = batch.reader();
    assertTrue(rowReader.next());
    String firstValue = rowReader.scalar("a").getString();
    RowSet expected;
    if (firstValue.equals("10")) {
      expected = new RowSetBuilder(client.allocator(), expectedSchema)
          .addRow("10", "foo", "bar", null)
          .build();
    } else {
      expected = new RowSetBuilder(client.allocator(), expectedSchema)
          .addRow("20", "fred", "wilma", NESTED_DIR)
          .build();
    }
    RowSetUtilities.verify(expected, batch);
  }
  assertFalse(batches.hasNext());
}
Use of org.apache.drill.exec.physical.rowSet.RowSetReader in the Apache Drill project: class DrillSeparatePlanningTest, method getResultsHelper.
/**
 * Runs each plan fragment on its assigned Drillbit and sums the record
 * counts. Before executing a fragment, verifies via {@code sys.drillbits}
 * that the per-node client really is connected to the assigned endpoint.
 *
 * @param planFragments the fragments produced by separate planning
 * @return total number of records returned across all fragments
 * @throws Exception if a query fails or a client cannot connect/close
 */
private int getResultsHelper(final QueryPlanFragments planFragments) throws Exception {
  int totalRows = 0;
  for (PlanFragment fragment : planFragments.getFragmentsList()) {
    DrillbitEndpoint assignedNode = fragment.getAssignment();
    // try-with-resources guarantees the per-node client is closed even when
    // an assertion below fails; the original leaked the client on failure.
    try (ClientFixture fragmentClient =
        cluster.client(assignedNode.getAddress(), assignedNode.getUserPort())) {
      RowSet rowSet = fragmentClient.queryBuilder()
          .sql("select hostname, user_port from sys.drillbits where `current`=true")
          .rowSet();
      assertEquals(1, rowSet.rowCount());
      RowSetReader reader = rowSet.reader();
      assertTrue(reader.next());
      String host = reader.scalar("hostname").getString();
      int port = reader.scalar("user_port").getInt();
      rowSet.clear();
      // Sanity check: the client must be talking to the assigned node.
      assertEquals(assignedNode.getAddress(), host);
      assertEquals(assignedNode.getUserPort(), port);
      List<PlanFragment> fragmentList = Lists.newArrayList();
      fragmentList.add(fragment);
      QuerySummary summary = fragmentClient.queryBuilder().plan(fragmentList).run();
      totalRows += summary.recordCount();
    }
  }
  return totalRows;
}
Use of org.apache.drill.exec.physical.rowSet.RowSetReader in the Apache Drill project: class TestCsvWithHeaders, method testPartitionExpansion.
/**
 * Partition expansion for CSV with headers.
 * <p>
 * This test is tricky because it returns two data batches (preceded
 * by an empty schema batch). File read order is random, so the files
 * must be accepted in either order.
 * <p>
 * V3 puts partition columns after data columns, so data column
 * positions stay stable when files are nested one level deeper.
 */
@Test
public void testPartitionExpansion() {
  Iterator<DirectRowSet> batches = client.queryBuilder().sql(makeStatement(PART_DIR)).rowSetIterator();
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a", MinorType.VARCHAR)
      .add("b", MinorType.VARCHAR)
      .add("c", MinorType.VARCHAR)
      .addNullable("dir0", MinorType.VARCHAR)
      .buildSchema();
  RowSet batch;
  if (SCHEMA_BATCH_ENABLED) {
    // The leading batch carries only the schema, never data.
    assertTrue(batches.hasNext());
    batch = batches.next();
    assertEquals(0, batch.rowCount());
    batch.clear();
  }
  // Two data batches follow; identify each by its first column value.
  for (int batchNo = 0; batchNo < 2; batchNo++) {
    assertTrue(batches.hasNext());
    batch = batches.next();
    RowSetReader rowReader = batch.reader();
    assertTrue(rowReader.next());
    String firstValue = rowReader.scalar(0).getString();
    RowSet expected;
    if (firstValue.equals("10")) {
      expected = new RowSetBuilder(client.allocator(), expectedSchema)
          .addRow("10", "foo", "bar", null)
          .build();
    } else {
      expected = new RowSetBuilder(client.allocator(), expectedSchema)
          .addRow("20", "fred", "wilma", NESTED_DIR)
          .build();
    }
    RowSetUtilities.verify(expected, batch);
  }
  assertFalse(batches.hasNext());
}
Aggregations