Use of org.apache.drill.exec.record.RecordBatchLoader in project drill by apache.
From the class TestMergingReceiver, method twoBitTwoExchange:
// private static final org.slf4j.Logger logger = org.slf4j.LoggerFactory.getLogger(TestMergingReceiver.class);
@Test
public void twoBitTwoExchange() throws Exception {
  @SuppressWarnings("resource") final RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
  try (final Drillbit bit1 = new Drillbit(CONFIG, serviceSet);
      final Drillbit bit2 = new Drillbit(CONFIG, serviceSet);
      final DrillClient client = new DrillClient(CONFIG, serviceSet.getCoordinator())) {
    bit1.run();
    bit2.run();
    client.connect();
    final List<QueryDataBatch> results = client.runQuery(org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL, Files.toString(FileUtils.getResourceAsFile("/mergerecv/merging_receiver.json"), Charsets.UTF_8));
    int count = 0;
    final RecordBatchLoader batchLoader = new RecordBatchLoader(client.getAllocator());
    // print the results
    for (final QueryDataBatch b : results) {
      final QueryData queryData = b.getHeader();
      final int rowCount = queryData.getRowCount();
      count += rowCount;
      // loaded but not used, just to test
      batchLoader.load(queryData.getDef(), b.getData());
      b.release();
      batchLoader.clear();
    }
    assertEquals(200000, count);
  }
}
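The same RecordBatchLoader lifecycle (load the batch definition and data, read what you need, release the batch, clear the loader) recurs in every snippet on this page. Below is a minimal sketch of that pattern, factored into a hypothetical countRecords helper; it uses only the calls shown in the test above.
// Hypothetical helper, not part of the Drill sources: sums row counts across the
// returned batches while exercising the load/clear lifecycle shown above.
private static int countRecords(final DrillClient client, final List<QueryDataBatch> results) throws Exception {
  final RecordBatchLoader batchLoader = new RecordBatchLoader(client.getAllocator());
  int count = 0;
  for (final QueryDataBatch b : results) {
    final QueryData queryData = b.getHeader();
    count += queryData.getRowCount();
    batchLoader.load(queryData.getDef(), b.getData());  // materialize the batch into value vectors
    b.release();                                        // release the raw batch buffers
    batchLoader.clear();                                // release the loader's vectors before the next batch
  }
  return count;
}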
Use of org.apache.drill.exec.record.RecordBatchLoader in project drill by apache.
From the class TestMergingReceiver, method testMultipleProvidersMixedSizes:
@Test
public void testMultipleProvidersMixedSizes() throws Exception {
  @SuppressWarnings("resource") final RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
  try (final Drillbit bit1 = new Drillbit(CONFIG, serviceSet);
      final Drillbit bit2 = new Drillbit(CONFIG, serviceSet);
      final DrillClient client = new DrillClient(CONFIG, serviceSet.getCoordinator())) {
    bit1.run();
    bit2.run();
    client.connect();
    final List<QueryDataBatch> results = client.runQuery(org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL, Files.toString(FileUtils.getResourceAsFile("/mergerecv/multiple_providers.json"), Charsets.UTF_8));
    int count = 0;
    final RecordBatchLoader batchLoader = new RecordBatchLoader(client.getAllocator());
    // print the results
    Long lastBlueValue = null;
    for (final QueryDataBatch b : results) {
      final QueryData queryData = b.getHeader();
      final int batchRowCount = queryData.getRowCount();
      count += batchRowCount;
      batchLoader.load(queryData.getDef(), b.getData());
      for (final VectorWrapper<?> vw : batchLoader) {
        @SuppressWarnings("resource") final ValueVector vv = vw.getValueVector();
        final ValueVector.Accessor va = vv.getAccessor();
        final MaterializedField materializedField = vv.getField();
        final int numValues = va.getValueCount();
        for (int valueIdx = 0; valueIdx < numValues; ++valueIdx) {
          if (materializedField.getPath().equals("blue")) {
            final long longValue = ((Long) va.getObject(valueIdx)).longValue();
            // check that order is ascending
            if (lastBlueValue != null) {
              assertTrue(longValue >= lastBlueValue);
            }
            lastBlueValue = longValue;
          }
        }
      }
      b.release();
      batchLoader.clear();
    }
    assertEquals(400000, count);
  }
}
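The inner loops above (VectorWrapper to ValueVector to Accessor) are the standard way to read column values out of a loaded batch. The following is a hedged sketch that generalizes the "blue" column check into a hypothetical columnValues helper, again using only the accessor calls that appear in the test; it assumes java.util.List and java.util.ArrayList are imported.
// Hypothetical helper, not part of the Drill sources: collects every value of the named
// column from an already loaded RecordBatchLoader.
private static List<Object> columnValues(final RecordBatchLoader batchLoader, final String columnName) {
  final List<Object> values = new ArrayList<>();
  for (final VectorWrapper<?> vw : batchLoader) {
    final ValueVector vv = vw.getValueVector();
    if (vv.getField().getPath().equals(columnName)) {  // same field-name comparison as in the test above
      final ValueVector.Accessor va = vv.getAccessor();
      for (int i = 0; i < va.getValueCount(); ++i) {
        values.add(va.getObject(i));
      }
    }
  }
  return values;
}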
Use of org.apache.drill.exec.record.RecordBatchLoader in project drill by apache.
From the class TestJsonReader, method testProjectPushdown:
// The project pushdown rule is correctly adding the projected columns to the scan; however, it is not removing
// the redundant project operator after the scan. This test runs a physical plan generated from one of the tests to
// ensure that the project is filtering out the correct data in the scan alone.
@Test
public void testProjectPushdown() throws Exception {
  String[] queries = { Files.toString(FileUtils.getResourceAsFile("/store/json/project_pushdown_json_physical_plan.json"), Charsets.UTF_8) };
  long[] rowCounts = { 3 };
  String filename = "/store/json/schema_change_int_to_string.json";
  test("alter system set `store.json.all_text_mode` = false");
  runTestsOnFile(filename, UserBitShared.QueryType.PHYSICAL, queries, rowCounts);
  List<QueryDataBatch> results = testPhysicalWithResults(queries[0]);
  assertEquals(1, results.size());
  // "`field_1`", "`field_3`.`inner_1`", "`field_3`.`inner_2`", "`field_4`.`inner_1`"
  RecordBatchLoader batchLoader = new RecordBatchLoader(getAllocator());
  QueryDataBatch batch = results.get(0);
  assertTrue(batchLoader.load(batch.getHeader().getDef(), batch.getData()));
  // this used to be five. It is now three. This is because the plan doesn't have a project.
  // Scanners are not responsible for projecting non-existent columns (as long as they project one column)
  assertEquals(3, batchLoader.getSchema().getFieldCount());
  testExistentColumns(batchLoader);
  batch.release();
  batchLoader.clear();
}
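If you want to see which columns actually survived the pushdown rather than just counting them, the loader itself is iterable, as the TestMergingReceiver snippet above shows. A small hedged companion sketch, meant to be dropped into the test before batchLoader.clear():
// Hypothetical debugging snippet, not part of testProjectPushdown: print the path of each
// column left in the loaded batch after project pushdown.
for (final VectorWrapper<?> vw : batchLoader) {
  System.out.println("projected column: " + vw.getValueVector().getField().getPath());
}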
Use of org.apache.drill.exec.record.RecordBatchLoader in project drill by apache.
From the class TestJsonReaderWithSparseFiles, method query:
protected void query(final String query, final Function<RecordBatchLoader> testBody) throws Exception {
  final List<QueryDataBatch> batches = testSqlWithResults(query);
  final RecordBatchLoader loader = new RecordBatchLoader(client.getAllocator());
  try {
    // first batch at index 0 is empty and used for fast schema return. Load the second one for the tests
    final QueryDataBatch batch = batches.get(1);
    loader.load(batch.getHeader().getDef(), batch.getData());
    testBody.apply(loader);
  } finally {
    for (final QueryDataBatch batch : batches) {
      batch.release();
    }
    loader.clear();
  }
}
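A hedged usage sketch of this helper follows: the test name and query string are hypothetical, and Function is assumed to be the test class's own single-method callback with a void apply(RecordBatchLoader) method, as implied by the testBody.apply(loader) call above.
@Test
public void exampleUsageOfQueryHelper() throws Exception {  // hypothetical test name
  query("select * from dfs.`some/sparse/file.json`",        // hypothetical query and path
      new Function<RecordBatchLoader>() {
        @Override
        public void apply(final RecordBatchLoader loader) {
          // the helper has already loaded the batch; just inspect it here
          assertTrue(loader.getSchema().getFieldCount() > 0);
        }
      });
}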
Use of org.apache.drill.exec.record.RecordBatchLoader in project drill by apache.
From the class TestParquetPhysicalPlan, method testParseParquetPhysicalPlan:
@Test
@Ignore
public void testParseParquetPhysicalPlan() throws Exception {
  RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
  DrillConfig config = DrillConfig.create();
  try (Drillbit bit1 = new Drillbit(config, serviceSet);
      DrillClient client = new DrillClient(config, serviceSet.getCoordinator())) {
    bit1.run();
    client.connect();
    List<QueryDataBatch> results = client.runQuery(org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL, Resources.toString(Resources.getResource(fileName), Charsets.UTF_8));
    RecordBatchLoader loader = new RecordBatchLoader(bit1.getContext().getAllocator());
    int count = 0;
    for (QueryDataBatch b : results) {
      System.out.println(String.format("Got %d results", b.getHeader().getRowCount()));
      count += b.getHeader().getRowCount();
      loader.load(b.getHeader().getDef(), b.getData());
      for (VectorWrapper vw : loader) {
        System.out.print(vw.getValueVector().getField().getPath() + ": ");
        ValueVector vv = vw.getValueVector();
        for (int i = 0; i < vv.getAccessor().getValueCount(); i++) {
          Object o = vv.getAccessor().getObject(i);
          if (o instanceof byte[]) {
            System.out.print(" [" + new String((byte[]) o) + "]");
          } else {
            System.out.print(" [" + vv.getAccessor().getObject(i) + "]");
          }
          // break;
        }
        System.out.println();
      }
      loader.clear();
      b.release();
    }
    client.close();
    System.out.println(String.format("Got %d total results", count));
  }
}
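The instanceof check above exists because some vector accessors hand back raw byte[] values (hence the byte[] branch in the print loop). A small hypothetical helper could factor that formatting out of the loop:
// Hypothetical helper, not part of the Drill sources: renders an accessor value for printing,
// decoding byte[] the same way the loop above does and falling back to toString otherwise.
private static String formatValue(final Object o) {
  if (o instanceof byte[]) {
    return new String((byte[]) o);
  }
  return String.valueOf(o);
}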