Example usage of org.apache.drill.exec.rpc.user.QueryDataBatch in the Apache Drill project: class ParquetRecordReaderTest, method testLimitMultipleRowGroups.
@Test
public void testLimitMultipleRowGroups() throws Exception {
  // Verifies that LIMIT is honored when the limit spans more than one parquet row group.
  HashMap<String, FieldInfo> fields = new HashMap<>();
  // NOTE(review): args appear to be (rowGroups, recordsPerRowGroup, bytesPerPage, fields) — confirm against ParquetTestProperties.
  ParquetTestProperties props = new ParquetTestProperties(3, 100, 1024 * 1024, fields);
  populateFieldInfoMap(props);
  TestFileGenerator.generateParquetFile("/tmp/testLimit.parquet", props);
  List<QueryDataBatch> results = testSqlWithResults("SELECT * FROM dfs.`/tmp/testLimit.parquet` LIMIT 225");
  int recordsInOutput = 0;
  for (QueryDataBatch batch : results) {
    recordsInOutput += batch.getHeader().getDef().getRecordCount();
    batch.release(); // release each batch's buffers to avoid allocator leaks
  }
  // assertEquals reports expected vs. actual on failure; clearer than assertTrue on a
  // pre-formatted string (the original also used %s for an int actual value).
  assertEquals("Number of records in output is wrong", 225, recordsInOutput);
}
Example usage of org.apache.drill.exec.rpc.user.QueryDataBatch in the Apache Drill project: class ParquetRecordReaderTest, method testLimit.
@Test
public void testLimit() throws Exception {
  // Verifies that LIMIT 1 returns exactly one record from a parquet file on the classpath.
  List<QueryDataBatch> results = testSqlWithResults("SELECT * FROM cp.`parquet/tpch/nation/01.parquet` LIMIT 1");
  int recordsInOutput = 0;
  for (QueryDataBatch batch : results) {
    recordsInOutput += batch.getHeader().getDef().getRecordCount();
    batch.release(); // release each batch's buffers to avoid allocator leaks
  }
  // assertEquals reports expected vs. actual on failure; clearer than assertTrue on a
  // pre-formatted string (the original also used %s for an int actual value).
  assertEquals("Number of records in output is wrong", 1, recordsInOutput);
}
Example usage of org.apache.drill.exec.rpc.user.QueryDataBatch in the Apache Drill project: class ParquetRecordReaderTest, method testNullableFilter.
@Test
public void testNullableFilter() throws Exception {
  // Verifies a count aggregate over a filtered nullable column returns the expected value.
  final List<QueryDataBatch> result = testSqlWithResults("select count(wr_return_quantity) as row_count from dfs.`tmp/web_returns` where wr_return_quantity = 1");
  assertEquals("Only expected one batch with data, and then the empty finishing batch.", 2, result.size());
  final RecordBatchLoader loader = new RecordBatchLoader(getDrillbitContext().getAllocator());
  final QueryDataBatch b = result.get(0);
  try {
    loader.load(b.getHeader().getDef(), b.getData());
    // Wildcard instead of the original raw VectorWrapper; the accessor path is unchanged.
    final VectorWrapper<?> vw = loader.getValueAccessorById(BigIntVector.class, loader.getValueVectorId(SchemaPath.getCompoundPath("row_count")).getFieldIds());
    // Uppercase L suffix: the original's lowercase 'l' (3573l) is easily misread as 35731.
    assertEquals(3573L, vw.getValueVector().getAccessor().getObject(0));
  } finally {
    // Release buffers even when an assertion above throws, so the allocator does not leak.
    b.release();
    loader.clear();
  }
}
Example usage of org.apache.drill.exec.rpc.user.QueryDataBatch in the Apache Drill project: class ParquetRecordReaderTest, method testLimitBeyondRowCount.
@Test
public void testLimitBeyondRowCount() throws Exception {
  // Verifies that a LIMIT larger than the file's row count returns every row (9 in this file)
  // rather than failing or padding the output.
  List<QueryDataBatch> results = testSqlWithResults("SELECT * FROM cp.`parquet/tpch/nation/01.parquet` LIMIT 100");
  int recordsInOutput = 0;
  for (QueryDataBatch batch : results) {
    recordsInOutput += batch.getHeader().getDef().getRecordCount();
    batch.release(); // release each batch's buffers to avoid allocator leaks
  }
  // assertEquals reports expected vs. actual on failure; clearer than assertTrue on a
  // pre-formatted string (the original also used %s for an int actual value).
  assertEquals("Number of records in output is wrong", 9, recordsInOutput);
}
Example usage of org.apache.drill.exec.rpc.user.QueryDataBatch in the Apache Drill project: class TestHashJoin, method hjWithExchange1.
@Test
public void hjWithExchange1() throws Throwable {
  // Runs a physical hash-join plan that includes exchanges against a single local
  // drillbit and checks the total number of rows returned.
  try (final RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
       final Drillbit bit = new Drillbit(CONFIG, serviceSet);
       final DrillClient client = new DrillClient(CONFIG, serviceSet.getCoordinator())) {
    bit.run();
    client.connect();
    // Load the plan from the test resources, then submit it as a physical plan.
    final String plan = Files.asCharSource(DrillFileUtils.getResourceAsFile("/join/hj_exchanges1.json"), Charsets.UTF_8).read();
    final List<QueryDataBatch> batches = client.runQuery(org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL, plan);
    int totalRows = 0;
    for (final QueryDataBatch batch : batches) {
      // Empty batches contribute 0 rows, so an explicit non-zero guard is unnecessary.
      totalRows += batch.getHeader().getRowCount();
      batch.release();
    }
    assertEquals(272, totalRows);
  }
}
Aggregations