Use of org.apache.drill.exec.rpc.user.QueryDataBatch in project drill by axbaretto.
From class TestJsonReader, method testProjectPushdown:
// The project pushdown rule correctly adds the projected columns to the scan, but it does not remove the
// redundant project operator after the scan. This test runs a physical plan generated from one of the other
// tests to ensure that the scan alone filters out the correct data.
@Test
public void testProjectPushdown() throws Exception {
  String[] queries = { Files.toString(DrillFileUtils.getResourceAsFile("/store/json/project_pushdown_json_physical_plan.json"), Charsets.UTF_8) };
  long[] rowCounts = { 3 };
  String filename = "/store/json/schema_change_int_to_string.json";
  test("alter system set `store.json.all_text_mode` = false");
  runTestsOnFile(filename, UserBitShared.QueryType.PHYSICAL, queries, rowCounts);
  List<QueryDataBatch> results = testPhysicalWithResults(queries[0]);
  assertEquals(1, results.size());
  // Projected columns: "`field_1`", "`field_3`.`inner_1`", "`field_3`.`inner_2`", "`field_4`.`inner_1`"
  RecordBatchLoader batchLoader = new RecordBatchLoader(getAllocator());
  QueryDataBatch batch = results.get(0);
  assertTrue(batchLoader.load(batch.getHeader().getDef(), batch.getData()));
  // This used to be five but is now three, because the plan doesn't include a project. Scanners are not
  // responsible for projecting non-existent columns (as long as they project at least one column).
  assertEquals(3, batchLoader.getSchema().getFieldCount());
  testExistentColumns(batchLoader);
  batch.release();
  batchLoader.clear();
}
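The load/assert/release sequence in this test repeats in several of the examples below. A minimal sketch of it factored into a shared assertion helper, assuming the getAllocator() utility and JUnit 4 asserts already visible in this test class; the helper name assertSingleBatchFieldCount is an assumption, not part of Drill's test framework:

import java.util.List;
import org.apache.drill.exec.record.RecordBatchLoader;
import org.apache.drill.exec.rpc.user.QueryDataBatch;

// Loads the single expected batch, checks its schema width, then frees
// both the batch buffers and the loader's vectors.
private void assertSingleBatchFieldCount(List<QueryDataBatch> results, int expectedFields) throws Exception {
  assertEquals(1, results.size());
  RecordBatchLoader batchLoader = new RecordBatchLoader(getAllocator());
  QueryDataBatch batch = results.get(0);
  assertTrue(batchLoader.load(batch.getHeader().getDef(), batch.getData()));
  assertEquals(expectedFields, batchLoader.getSchema().getFieldCount());
  batch.release();
  batchLoader.clear();
}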
Use of org.apache.drill.exec.rpc.user.QueryDataBatch in project drill by axbaretto.
From class TestJsonReader, method readComplexWithStar:
@Test
public void readComplexWithStar() throws Exception {
  List<QueryDataBatch> results = testSqlWithResults("select * from cp.`store/json/test_complex_read_with_star.json`");
  assertEquals(1, results.size());
  RecordBatchLoader batchLoader = new RecordBatchLoader(getAllocator());
  QueryDataBatch batch = results.get(0);
  assertTrue(batchLoader.load(batch.getHeader().getDef(), batch.getData()));
  assertEquals(3, batchLoader.getSchema().getFieldCount());
  testExistentColumns(batchLoader);
  batch.release();
  batchLoader.clear();
}
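Beyond the schema-width check, the loaded vectors can be read back out. A minimal sketch of dumping the batch contents, assuming Drill's vector accessor API (RecordBatchLoader is iterable over its VectorWrapper entries); this fragment is an illustration, not part of the original test:

import org.apache.drill.exec.record.VectorWrapper;
import org.apache.drill.exec.vector.ValueVector;

// Prints each materialized field's name followed by its values for the
// rows in the currently loaded batch.
for (VectorWrapper<?> wrapper : batchLoader) {
  ValueVector vector = wrapper.getValueVector();
  System.out.println(wrapper.getField().getName());
  for (int i = 0; i < batchLoader.getRecordCount(); i++) {
    System.out.println("  " + vector.getAccessor().getObject(i));
  }
}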
Use of org.apache.drill.exec.rpc.user.QueryDataBatch in project drill by axbaretto.
From class TestSpoolingBuffer, method testMultipleExchangesSingleThread:
@Test
public void testMultipleExchangesSingleThread() throws Exception {
  RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
  DrillConfig conf = DrillConfig.create("drill-spool-test-module.conf");
  try (Drillbit bit1 = new Drillbit(conf, serviceSet);
       DrillClient client = new DrillClient(conf, serviceSet.getCoordinator())) {
    bit1.run();
    client.connect();
    List<QueryDataBatch> results = client.runQuery(org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL, Files.toString(DrillFileUtils.getResourceAsFile("/work/batch/multiple_exchange.json"), Charsets.UTF_8));
    int count = 0;
    for (QueryDataBatch b : results) {
      if (b.getHeader().getRowCount() != 0) {
        count += b.getHeader().getRowCount();
      }
      b.release();
    }
    assertEquals(500024, count);
  }
}
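The embedded-Drillbit boilerplate here (local service set, try-with-resources over the Drillbit and client, then draining the batches) generalizes to any physical-plan test. A minimal sketch of it factored into a reusable method, built only from the calls used above; the method name and signature are assumptions:

// Starts a single local Drillbit, runs a physical plan read from a
// classpath resource, and returns the total row count across batches.
private static int runPhysicalPlanAndCountRows(DrillConfig conf, String planResource) throws Exception {
  RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
  try (Drillbit bit = new Drillbit(conf, serviceSet);
       DrillClient client = new DrillClient(conf, serviceSet.getCoordinator())) {
    bit.run();
    client.connect();
    List<QueryDataBatch> results = client.runQuery(org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL, Files.toString(DrillFileUtils.getResourceAsFile(planResource), Charsets.UTF_8));
    int count = 0;
    for (QueryDataBatch b : results) {
      count += b.getHeader().getRowCount();  // empty batches contribute zero, so no guard is needed
      b.release();
    }
    return count;
  }
}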
Use of org.apache.drill.exec.rpc.user.QueryDataBatch in project drill by axbaretto.
From class ParquetRecordReaderTest, method testLimitMultipleRowGroupsBeyondRowCount:
@Test
public void testLimitMultipleRowGroupsBeyondRowCount() throws Exception {
  HashMap<String, FieldInfo> fields = new HashMap<>();
  ParquetTestProperties props = new ParquetTestProperties(3, 100, 1024 * 1024, fields);
  populateFieldInfoMap(props);
  TestFileGenerator.generateParquetFile("/tmp/testLimit.parquet", props);
  List<QueryDataBatch> results = testSqlWithResults("SELECT * FROM dfs.`/tmp/testLimit.parquet` LIMIT 500");
  int recordsInOutput = 0;
  for (QueryDataBatch batch : results) {
    recordsInOutput += batch.getHeader().getDef().getRecordCount();
    batch.release();
  }
  assertEquals(String.format("Number of records in output is wrong: expected=%d, actual=%d", 300, recordsInOutput), 300, recordsInOutput);
}
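The counting loop in this test is repeated verbatim in the next one; a minimal sketch of it as a shared helper (the name countRecordsAndRelease is an assumption):

import java.util.List;
import org.apache.drill.exec.rpc.user.QueryDataBatch;

// Sums the record counts reported in each batch's definition and releases
// the batch buffers as it goes.
private static int countRecordsAndRelease(List<QueryDataBatch> results) {
  int recordsInOutput = 0;
  for (QueryDataBatch batch : results) {
    recordsInOutput += batch.getHeader().getDef().getRecordCount();
    batch.release();
  }
  return recordsInOutput;
}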
Use of org.apache.drill.exec.rpc.user.QueryDataBatch in project drill by axbaretto.
From class ParquetRecordReaderTest, method testLimitBeyondRowCount:
@Test
public void testLimitBeyondRowCount() throws Exception {
  List<QueryDataBatch> results = testSqlWithResults("SELECT * FROM cp.`parquet/tpch/nation/01.parquet` LIMIT 100");
  int recordsInOutput = 0;
  for (QueryDataBatch batch : results) {
    recordsInOutput += batch.getHeader().getDef().getRecordCount();
    batch.release();
  }
  assertEquals(String.format("Number of records in output is wrong: expected=%d, actual=%d", 9, recordsInOutput), 9, recordsInOutput);
}
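With the helper above, the body of this test reduces to two lines. A hypothetical companion check, assuming the same testSqlWithResults utility, that confirms the nation file really holds the 9 rows the assertion above expects, so LIMIT 100 is bounded by the data rather than by the limit:

@Test
public void testNationRowCount() throws Exception {
  // Without any LIMIT, the file should yield exactly the 9 records
  // observed in testLimitBeyondRowCount above.
  List<QueryDataBatch> results = testSqlWithResults("SELECT * FROM cp.`parquet/tpch/nation/01.parquet`");
  assertEquals(9, countRecordsAndRelease(results));
}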