Use of org.apache.drill.exec.rpc.user.QueryDataBatch in project drill by apache.
From the class TestParquetPhysicalPlan, method testParseParquetPhysicalPlan:
@Test
@Ignore
public void testParseParquetPhysicalPlan() throws Exception {
  final StringBuilder sb = new StringBuilder();
  RemoteServiceSet serviceSet = RemoteServiceSet.getLocalServiceSet();
  DrillConfig config = DrillConfig.create();
  try (Drillbit bit1 = new Drillbit(config, serviceSet);
       DrillClient client = new DrillClient(config, serviceSet.getCoordinator())) {
    bit1.run();
    client.connect();
    List<QueryDataBatch> results = client.runQuery(
        org.apache.drill.exec.proto.UserBitShared.QueryType.PHYSICAL,
        Resources.toString(Resources.getResource(fileName), Charsets.UTF_8));
    RecordBatchLoader loader = new RecordBatchLoader(bit1.getContext().getAllocator());
    int count = 0;
    for (QueryDataBatch b : results) {
      sb.append(String.format("Got %d results\n", b.getHeader().getRowCount()));
      count += b.getHeader().getRowCount();
      loader.load(b.getHeader().getDef(), b.getData());
      for (VectorWrapper vw : loader) {
        sb.append(vw.getValueVector().getField().getName() + ": ");
        ValueVector vv = vw.getValueVector();
        for (int i = 0; i < vv.getAccessor().getValueCount(); i++) {
          Object o = vv.getAccessor().getObject(i);
          if (o instanceof byte[]) {
            sb.append(" [" + new String((byte[]) o) + "]");
          } else {
            sb.append(" [" + vv.getAccessor().getObject(i) + "]");
          }
        }
        sb.append('\n');
      }
      loader.clear();
      b.release();
    }
    client.close();
    sb.append(String.format("Got %d total results\n", count));
  }
  logger.debug(sb.toString());
}
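Every snippet on this page repeats the same bookkeeping around QueryDataBatch: load the batch into a RecordBatchLoader, read the vectors, then clear the loader and release the batch so direct-memory buffers go back to the allocator. A minimal reusable sketch of that pattern is shown below; the class name, the consume method, and the Consumer-based callback are illustrative assumptions, not Drill API.

import java.util.List;
import java.util.function.Consumer;

import org.apache.drill.exec.memory.BufferAllocator;
import org.apache.drill.exec.record.RecordBatchLoader;
import org.apache.drill.exec.rpc.user.QueryDataBatch;

public final class BatchConsumer {

  // Hypothetical helper: load each QueryDataBatch, hand the populated loader to
  // the callback, and always release buffers, even when the callback throws.
  public static void consume(BufferAllocator allocator,
                             List<QueryDataBatch> batches,
                             Consumer<RecordBatchLoader> perBatch) throws Exception {
    RecordBatchLoader loader = new RecordBatchLoader(allocator);
    try {
      for (QueryDataBatch batch : batches) {
        loader.load(batch.getHeader().getDef(), batch.getData());
        perBatch.accept(loader);
        loader.clear();
      }
    } finally {
      loader.clear();
      for (QueryDataBatch batch : batches) {
        batch.release();
      }
    }
  }
}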
Use of org.apache.drill.exec.rpc.user.QueryDataBatch in project drill by apache.
From the class TestJsonReaderWithSparseFiles, method query:
protected void query(String query, Function<RecordBatchLoader> testBody) throws Exception {
  List<QueryDataBatch> batches = testSqlWithResults(query);
  RecordBatchLoader loader = new RecordBatchLoader(client.getAllocator());
  try {
    // first batch at index 0 is empty and used for fast schema return. Load the second one for the tests
    QueryDataBatch batch = batches.get(0);
    loader.load(batch.getHeader().getDef(), batch.getData());
    testBody.apply(loader);
  } finally {
    for (QueryDataBatch batch : batches) {
      batch.release();
    }
    loader.clear();
  }
}
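The comment above refers to Drill's fast schema return, where the first batch delivered to the client may carry only the schema and no rows, while the snippet itself loads the batch at index 0. When a test needs to skip over a schema-only batch, a small sketch like the following picks the first batch that actually carries rows; the helper name is an assumption and not part of the test class.

// Hypothetical helper: return the first batch that carries rows, falling back
// to the first (schema-only) batch when the result set is empty.
private static QueryDataBatch firstDataBatch(List<QueryDataBatch> batches) {
  for (QueryDataBatch batch : batches) {
    if (batch.getHeader().getRowCount() > 0) {
      return batch;
    }
  }
  return batches.get(0);
}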
Use of org.apache.drill.exec.rpc.user.QueryDataBatch in project drill by apache.
From the class TestSimpleExternalSort, method sortOneKeyDescendingMergeSort:
@Test
public void sortOneKeyDescendingMergeSort() throws Throwable {
  ClusterFixtureBuilder builder = ClusterFixture.builder(dirTestWatcher);
  try (ClusterFixture cluster = builder.build();
       ClientFixture client = cluster.clientFixture()) {
    List<QueryDataBatch> results = client.queryBuilder()
        .physicalResource("xsort/one_key_sort_descending.json")
        .results();
    assertEquals(1_000_000, client.countResults(results));
    validateResults(client.allocator(), results);
  }
}
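validateResults is a helper of the test class and is not shown on this page. A sketch of the kind of descending-order check it performs is given below; the column name "blue" and the assertion mirror the loop in the next snippet, and the body as a whole is an assumption rather than the actual helper.

// Hypothetical validator: walk every batch and assert that the BigInt sort key
// never increases, then release all buffers.
private void validateResults(BufferAllocator allocator, List<QueryDataBatch> results) throws Exception {
  long previous = Long.MAX_VALUE;
  RecordBatchLoader loader = new RecordBatchLoader(allocator);
  for (QueryDataBatch batch : results) {
    if (batch.getHeader().getRowCount() > 0) {
      loader.load(batch.getHeader().getDef(), batch.getData());
      BigIntVector vector = (BigIntVector) loader.getValueAccessorById(
          BigIntVector.class,
          loader.getValueVectorId(new SchemaPath("blue", ExpressionPosition.UNKNOWN)).getFieldIds())
          .getValueVector();
      BigIntVector.Accessor accessor = vector.getAccessor();
      for (int i = 0; i < accessor.getValueCount(); i++) {
        assertTrue(previous >= accessor.get(i));
        previous = accessor.get(i);
      }
    }
    loader.clear();
    batch.release();
  }
}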
Use of org.apache.drill.exec.rpc.user.QueryDataBatch in project drill by apache.
From the class TestSimpleExternalSort, method outOfMemoryExternalSort:
@Test
public void outOfMemoryExternalSort() throws Throwable {
  ClusterFixtureBuilder builder = ClusterFixture.builder(dirTestWatcher)
      .configProperty("drill.memory.fragment.max", 50_000_000)
      .configProperty("drill.memory.fragment.initial", 2_000_000)
      .configProperty("drill.memory.operator.max", 30_000_000)
      .configProperty("drill.memory.operator.initial", 2_000_000);
  try (ClusterFixture cluster = builder.build();
       ClientFixture client = cluster.clientFixture()) {
    List<QueryDataBatch> results = client.queryBuilder()
        .physicalResource("/xsort/oom_sort_test.json")
        .results();
    assertEquals(10_000_000, client.countResults(results));
    long previousBigInt = Long.MAX_VALUE;
    for (QueryDataBatch b : results) {
      RecordBatchLoader loader = new RecordBatchLoader(client.allocator());
      if (b.getHeader().getRowCount() > 0) {
        loader.load(b.getHeader().getDef(), b.getData());
        BigIntVector c1 = (BigIntVector) loader.getValueAccessorById(
            BigIntVector.class,
            loader.getValueVectorId(new SchemaPath("blue", ExpressionPosition.UNKNOWN)).getFieldIds())
            .getValueVector();
        BigIntVector.Accessor a1 = c1.getAccessor();
        for (int i = 0; i < c1.getAccessor().getValueCount(); i++) {
          assertTrue(String.format("%d < %d", previousBigInt, a1.get(i)), previousBigInt >= a1.get(i));
          previousBigInt = a1.get(i);
        }
        assertTrue(String.format("%d == %d", a1.get(0), a1.get(a1.getValueCount() - 1)),
            a1.get(0) != a1.get(a1.getValueCount() - 1));
      }
      loader.clear();
      b.release();
    }
  }
}
Use of org.apache.drill.exec.rpc.user.QueryDataBatch in project drill by apache.
From the class QueryBatchIterator, method loadBatch:
private boolean loadBatch(QueryEvent event) {
  batchCount++;
  recordCount += event.batch.getHeader().getRowCount();
  QueryDataBatch inputBatch = Preconditions.checkNotNull(event.batch);
  // Unload the batch and convert to a row set.
  loader.load(inputBatch.getHeader().getDef(), inputBatch.getData());
  inputBatch.release();
  VectorContainer batch = loader.getContainer();
  batch.setRecordCount(loader.getRecordCount());
  // A batch with no rows and no columns signals a null result (nothing was returned at all),
  // as opposed to an empty result set which has a schema, but no rows.
  if (batch.getRecordCount() == 0 && batch.getNumberOfColumns() == 0) {
    release();
    return false;
  }
  if (state == State.START || batch.isSchemaChanged()) {
    schemaVersion++;
  }
  state = State.RUN;
  return true;
}
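The zero-rows, zero-columns check above separates a null result, where Drill returned nothing at all, from an empty result set that still carries a schema. The same distinction written as a standalone predicate looks like this; the method name is an assumption, not part of QueryBatchIterator.

// Hypothetical predicate: true only when the loaded container has neither rows nor
// columns, i.e. no result set was produced. An empty-but-typed result set
// (columns > 0, rows == 0) is still a real result and should be surfaced.
private static boolean isNullResultSet(VectorContainer container) {
  return container.getRecordCount() == 0 && container.getNumberOfColumns() == 0;
}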