Example usage of org.apache.drill.exec.physical.rowSet.RowSetBuilder in the Apache Drill project, taken from the class TestCsvWithoutHeaders, method testColumnsAndMetadata.
@Test
public void testColumnsAndMetadata() throws IOException {
  // Project the `columns` array alongside the implicit `filename` column.
  String sql = "SELECT columns, filename FROM `dfs.data`.`%s`";
  RowSet results = client.queryBuilder().sql(sql, TEST_FILE_NAME).rowSet();

  // Expected schema: a VARCHAR array plus the file-name metadata column.
  TupleMetadata schema = new SchemaBuilder()
      .addArray("columns", MinorType.VARCHAR)
      .add("filename", MinorType.VARCHAR)
      .buildSchema();

  RowSetBuilder builder = new RowSetBuilder(client.allocator(), schema);
  builder.addRow(strArray("10", "foo", "bar"), TEST_FILE_NAME);
  builder.addRow(strArray("20", "fred", "wilma"), TEST_FILE_NAME);
  RowSetUtilities.verify(builder.build(), results);
}
Example usage of org.apache.drill.exec.physical.rowSet.RowSetBuilder in the Apache Drill project, taken from the class TestCsvWithoutHeaders, method testWildcard.
/**
 * Verify that a wildcard projection against a headerless CSV file
 * expands to the single `columns` array.
 */
@Test
public void testWildcard() throws IOException {
  String sql = "SELECT * FROM `dfs.data`.`%s`";
  RowSet results = client.queryBuilder().sql(sql, TEST_FILE_NAME).rowSet();

  // The wildcard should resolve to just the `columns` VARCHAR array.
  TupleMetadata schema = new SchemaBuilder()
      .addArray("columns", MinorType.VARCHAR)
      .buildSchema();

  RowSetBuilder builder = new RowSetBuilder(client.allocator(), schema);
  builder.addSingleCol(strArray("10", "foo", "bar"));
  builder.addSingleCol(strArray("20", "fred", "wilma"));
  RowSetUtilities.verify(builder.build(), results);
}
Example usage of org.apache.drill.exec.physical.rowSet.RowSetBuilder in the Apache Drill project, taken from the class TestCsvWithoutHeaders, method testRaggedRows.
@Test
public void testRaggedRows() throws IOException {
  // Build a CSV file whose rows have differing numbers of fields.
  String fileName = "ragged.csv";
  buildFile(fileName, raggedRows);

  String sql = "SELECT columns FROM `dfs.data`.`%s`";
  RowSet results = client.queryBuilder().sql(sql, fileName).rowSet();

  TupleMetadata schema = new SchemaBuilder()
      .addArray("columns", MinorType.VARCHAR)
      .buildSchema();

  // Each row's array keeps its own length; no padding is expected.
  RowSetBuilder builder = new RowSetBuilder(client.allocator(), schema);
  builder.addSingleCol(strArray("10", "dino"));
  builder.addSingleCol(strArray("20", "foo", "bar"));
  builder.addSingleCol(strArray("30"));
  RowSetUtilities.verify(builder.build(), results);
}
Example usage of org.apache.drill.exec.physical.rowSet.RowSetBuilder in the Apache Drill project, taken from the class TestCsvWithoutHeaders, method testPartitionExpansion.
/**
 * Test partition expansion.
 * <p>
 * V3, as in V2 before Drill 1.12, puts partition columns after
 * data columns (so that data columns don't shift positions if
 * files are nested to another level.)
 */
@Test
public void testPartitionExpansion() throws IOException {
  String sql = "SELECT * FROM `dfs.data`.`%s`";
  Iterator<DirectRowSet> batches =
      client.queryBuilder().sql(sql, PART_DIR).rowSetIterator();

  // Partition column `dir0` appears after the data `columns` array.
  TupleMetadata schema = new SchemaBuilder()
      .addArray("columns", MinorType.VARCHAR)
      .addNullable("dir0", MinorType.VARCHAR)
      .buildSchema();

  if (SCHEMA_BATCH_ENABLED) {
    // First batch is empty; just carries the schema.
    assertTrue(batches.hasNext());
    RowSet schemaBatch = batches.next();
    assertEquals(0, schemaBatch.rowCount());
    schemaBatch.clear();
  }

  // Two data batches follow, one per file, in no guaranteed order.
  // Peek at the first value of the first row to tell them apart.
  for (int batchIndex = 0; batchIndex < 2; batchIndex++) {
    assertTrue(batches.hasNext());
    RowSet batch = batches.next();

    RowSetReader reader = batch.reader();
    assertTrue(reader.next());
    ArrayReader columnsReader = reader.array(0);
    assertTrue(columnsReader.next());
    String firstValue = columnsReader.scalar().getString();

    RowSet expected;
    if (firstValue.equals("10")) {
      // Root-level file: dir0 is null for its rows.
      expected = new RowSetBuilder(client.allocator(), schema)
          .addRow(strArray("10", "foo", "bar"), null)
          .addRow(strArray("20", "fred", "wilma"), null)
          .build();
    } else {
      // Nested file: dir0 carries the subdirectory name.
      expected = new RowSetBuilder(client.allocator(), schema)
          .addRow(strArray("30", "barney", "betty"), NESTED_DIR)
          .build();
    }
    RowSetUtilities.verify(expected, batch);
  }
  assertFalse(batches.hasNext());
}
Example usage of org.apache.drill.exec.physical.rowSet.RowSetBuilder in the Apache Drill project, taken from the class TestCsvWithoutHeaders, method testColumns.
@Test
public void testColumns() throws IOException {
  // Explicitly select the `columns` array from a headerless CSV file.
  String sql = "SELECT columns FROM `dfs.data`.`%s`";
  RowSet results = client.queryBuilder().sql(sql, TEST_FILE_NAME).rowSet();

  TupleMetadata schema = new SchemaBuilder()
      .addArray("columns", MinorType.VARCHAR)
      .buildSchema();

  RowSetBuilder builder = new RowSetBuilder(client.allocator(), schema);
  builder.addSingleCol(strArray("10", "foo", "bar"));
  builder.addSingleCol(strArray("20", "fred", "wilma"));
  RowSetUtilities.verify(builder.build(), results);
}
Aggregations