Usage example of org.apache.drill.exec.physical.rowSet.RowSetBuilder in the Apache Drill project, taken from the class TestPartitionRace, method testSingleScan.
/**
 * Oddly, when run in a single fragment, the files occur in a
 * stable order, the partition always appears, and it appears in
 * the first column position.
 */
@Test
public void testSingleScan() throws IOException {
  String sql = "SELECT * FROM `dfs.data`.`%s`";
  // Wildcard expansion: three data columns plus the (nullable) partition column.
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a", MinorType.VARCHAR)
      .add("b", MinorType.VARCHAR)
      .add("c", MinorType.VARCHAR)
      .addNullable("dir0", MinorType.VARCHAR)
      .buildSchema();
  Iterator<DirectRowSet> results = client.queryBuilder().sql(sql, PART_DIR).rowSetIterator();
  if (SCHEMA_BATCH_ENABLED) {
    // The initial batch carries only the schema and no rows.
    assertTrue(results.hasNext());
    RowSet schemaBatch = results.next();
    assertEquals(0, schemaBatch.rowCount());
    schemaBatch.clear();
  }
  // Two data batches follow, one per file. Each batch is identified by
  // the first value of column "a" before being verified.
  for (int batch = 0; batch < 2; batch++) {
    assertTrue(results.hasNext());
    RowSet actual = results.next();
    RowSetReader reader = actual.reader();
    assertTrue(reader.next());
    String firstA = reader.scalar("a").getString();
    RowSet expected = firstA.equals("10")
        ? new RowSetBuilder(client.allocator(), expectedSchema)
            .addRow("10", "foo", "bar", null)
            .build()
        : new RowSetBuilder(client.allocator(), expectedSchema)
            .addRow("20", "fred", "wilma", NESTED_DIR)
            .build();
    RowSetUtilities.verify(expected, actual);
  }
  assertFalse(results.hasNext());
}
Usage example of org.apache.drill.exec.physical.rowSet.RowSetBuilder in the Apache Drill project, taken from the class TestLogReaderIssue, method testIssue7853UseValidDatetimeFormat.
// Regression test for DRILL-7853: the log reader must accept a valid
// datetime format and surface the `time` column as a TIMESTAMP.
@Test
public void testIssue7853UseValidDatetimeFormat() throws Exception {
  RowSet actual = client.queryBuilder()
      .sql("SELECT type, `time` FROM `dfs.data`.`root/issue7853.log`")
      .rowSet();
  TupleMetadata expectedSchema = new SchemaBuilder()
      .addNullable("type", MinorType.VARCHAR)
      .addNullable("time", MinorType.TIMESTAMP)
      .buildSchema();
  // Timestamps are epoch milliseconds parsed from the log file.
  RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
      .addRow("h2", 1611446100664L)
      .addRow("h2", 1611446100666L)
      .build();
  new RowSetComparison(expected).verifyAndClearAll(actual);
}
Usage example of org.apache.drill.exec.physical.rowSet.RowSetBuilder in the Apache Drill project, taken from the class TestCsvWithHeaders, method testColumnsCol.
/**
 * The column name `columns` is treated as a plain old
 * column when using column headers.
 */
@Test
public void testColumnsCol() throws IOException {
  RowSet actual = client.queryBuilder()
      .sql("SELECT author, columns FROM `dfs.data`.`%s`", COLUMNS_FILE_NAME)
      .rowSet();
  // With headers enabled, `columns` is an ordinary VARCHAR column,
  // not the special array column used in headerless mode.
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("author", MinorType.VARCHAR)
      .add("columns", MinorType.VARCHAR)
      .buildSchema();
  RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
      .addRow("fred", "Rocks Today,Dino Wrangling")
      .addRow("barney", "Bowlarama")
      .build();
  RowSetUtilities.verify(expected, actual);
}
Usage example of org.apache.drill.exec.physical.rowSet.RowSetBuilder in the Apache Drill project, taken from the class TestCsvWithHeaders, method testCsvHeadersCaseInsensitive.
// Regression test for DRILL-5590: CSV header matching is case-insensitive,
// and the projected columns keep the case used in the SELECT list.
@Test
public void testCsvHeadersCaseInsensitive() throws IOException {
  RowSet actual = client.queryBuilder()
      .sql("SELECT A, b, C FROM `dfs.data`.`%s`", TEST_FILE_NAME)
      .rowSet();
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("A", MinorType.VARCHAR)
      .add("b", MinorType.VARCHAR)
      .add("C", MinorType.VARCHAR)
      .buildSchema();
  RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
      .addRow("10", "foo", "bar")
      .build();
  RowSetUtilities.verify(expected, actual);
}
Usage example of org.apache.drill.exec.physical.rowSet.RowSetBuilder in the Apache Drill project, taken from the class TestCsvWithHeaders, method testRaggedRows.
/**
 * Test that ragged rows result in the "missing" columns being filled
 * in with the moral equivalent of a null column for CSV: a blank string.
 */
@Test
public void testRaggedRows() throws IOException {
  String fileName = "case4.csv";
  buildFile(fileName, raggedRows);
  RowSet actual = client.queryBuilder().sql(makeStatement(fileName)).rowSet();
  TupleMetadata expectedSchema = new SchemaBuilder()
      .add("a", MinorType.VARCHAR)
      .add("b", MinorType.VARCHAR)
      .add("c", MinorType.VARCHAR)
      .buildSchema();
  // Short rows are padded with empty strings, never with SQL NULLs,
  // because CSV columns are non-nullable VARCHAR.
  RowSet expected = new RowSetBuilder(client.allocator(), expectedSchema)
      .addRow("10", "dino", "")
      .addRow("20", "foo", "bar")
      .addRow("30", "", "")
      .build();
  RowSetUtilities.verify(expected, actual);
}
End of aggregated RowSetBuilder usage examples.