Example usage of org.apache.drill.exec.rpc.user.QueryDataBatch in project drill (fork: axbaretto):
class DrillTestWrapper, method addToHyperVectorMap.
/**
 * Merges the value vectors from every batch in {@code records} into one
 * {@link HyperVectorValueIterator} per column, keyed by the column's expression
 * path, so that all rows across batches can be iterated as a single logical set.
 *
 * @param records the query result batches to combine; each is loaded into {@code loader}
 * @param loader  batch loader used to materialize each batch's vectors
 * @return map from column expression path to an iterator over that column's
 *         values across all batches (sorted by column name via TreeMap)
 * @throws SchemaChangeException if a batch cannot be loaded
 * @throws UnsupportedEncodingException declared for caller compatibility
 */
private Map<String, HyperVectorValueIterator> addToHyperVectorMap(final List<QueryDataBatch> records,
    final RecordBatchLoader loader) throws SchemaChangeException, UnsupportedEncodingException {
  // TODO - this does not handle schema changes
  final Map<String, HyperVectorValueIterator> combinedVectors = new TreeMap<>();
  long totalRecords = 0;
  for (final QueryDataBatch batch : records) {
    loader.load(batch.getHeader().getDef(), batch.getData());
    // Parameterized logging: no string concatenation cost when debug is disabled.
    logger.debug("reading batch with {} rows, total read so far {}", loader.getRecordCount(), totalRecords);
    totalRecords += loader.getRecordCount();
    for (final VectorWrapper<?> w : loader) {
      final String field = SchemaPath.getSimplePath(w.getField().getName()).toExpr();
      // Single lookup instead of containsKey() followed by get().
      final HyperVectorValueIterator existing = combinedVectors.get(field);
      if (existing == null) {
        // First time we see this column: wrap its vector in a fresh hyper-vector.
        final MaterializedField mf = w.getField();
        final ValueVector[] vvList = (ValueVector[]) Array.newInstance(mf.getValueClass(), 1);
        vvList[0] = w.getValueVector();
        combinedVectors.put(field, new HyperVectorValueIterator(mf, new HyperVectorWrapper<>(mf, vvList)));
      } else {
        // Subsequent batches: append this batch's vector for the column.
        existing.getHyperVector().addVector(w.getValueVector());
      }
    }
  }
  for (final HyperVectorValueIterator hvi : combinedVectors.values()) {
    hvi.determineTotalSize();
  }
  return combinedVectors;
}
use of org.apache.drill.exec.rpc.user.QueryDataBatch in project drill by axbaretto.
the class DrillTestWrapper method compareUnorderedResults.
/**
* Use this method only if necessary to validate one query against another. If you are just validating against a
* baseline file use one of the simpler interfaces that will write the validation query for you.
*
* @throws Exception if running the test query, building the baseline, or comparing the results fails
*/
protected void compareUnorderedResults() throws Exception {
// Loader is shared for materializing both the actual and baseline batches.
RecordBatchLoader loader = new RecordBatchLoader(getAllocator());
// Initialized to empty lists so cleanupBatches() in the finally block is
// always safe, even if testRunAndReturn throws before assignment.
List<QueryDataBatch> actual = Collections.emptyList();
List<QueryDataBatch> expected = Collections.emptyList();
List<Map<String, Object>> expectedRecords = new ArrayList<>();
List<Map<String, Object>> actualRecords = new ArrayList<>();
try {
// Session option setup must run before the query under test.
test(testOptionSettingQueries);
actual = testRunAndReturn(queryType, query);
checkNumBatches(actual);
// Fill in column type info from the first batch if the test builder lacks it.
addTypeInfoIfMissing(actual.get(0), testBuilder);
addToMaterializedResults(actualRecords, actual, loader);
// the cases where the baseline is stored in a file.
// NOTE(review): comment above appears inverted — this branch runs the
// validation QUERY when no in-memory baselineRecords were supplied; confirm.
if (baselineRecords == null) {
test(baselineOptionSettingQueries);
expected = testRunAndReturn(baselineQueryType, testBuilder.getValidationQuery());
addToMaterializedResults(expectedRecords, expected, loader);
} else {
// Baseline was provided directly (e.g. from a file or builder), no query needed.
expectedRecords = baselineRecords;
}
// Order-insensitive comparison of the two materialized record sets.
compareResults(expectedRecords, actualRecords);
} finally {
// Always release batch buffers to avoid allocator leaks, pass or fail.
cleanupBatches(actual, expected);
}
}
use of org.apache.drill.exec.rpc.user.QueryDataBatch in project drill by axbaretto.
the class QueryBuilder method rowSet.
/**
* Run the query and return the first non-empty batch as a
* {@link DirectRowSet} object that can be inspected directly
* by the code using a {@link RowSetReader}.
* <p>
*
* @see #rowSetIterator() for a version that reads a series of
* batches as row sets.
* @return a row set that represents the first non-empty batch returned from
* the query
* @throws RpcException if anything goes wrong
*/
public DirectRowSet rowSet() throws RpcException {
  // Ignore all but the first non-empty batch; release the others immediately
  // so their buffers are returned to the allocator.
  QueryDataBatch dataBatch = null;
  for (QueryDataBatch batch : results()) {
    if (dataBatch == null && batch.getHeader().getRowCount() != 0) {
      dataBatch = batch;
    } else {
      batch.release();
    }
  }
  // Query produced no rows at all.
  if (dataBatch == null) {
    return null;
  }
  // Unload the batch and convert to a row set.
  final RecordBatchLoader loader = new RecordBatchLoader(client.allocator());
  try {
    loader.load(dataBatch.getHeader().getDef(), dataBatch.getData());
    VectorContainer container = loader.getContainer();
    container.setRecordCount(loader.getRecordCount());
    return DirectRowSet.fromContainer(container);
  } catch (SchemaChangeException e) {
    throw new IllegalStateException(e);
  } finally {
    // Release in finally: the original leaked the batch buffer when
    // load() threw SchemaChangeException before reaching release().
    dataBatch.release();
  }
}
use of org.apache.drill.exec.rpc.user.QueryDataBatch in project drill by axbaretto.
the class QueryTestUtil method createClient.
/**
* Create a DrillClient that can be used to query a drill cluster.
*
* @param drillConfig
* @param remoteServiceSet remote service set
* @param maxWidth maximum width per node
* @param props Connection properties contains properties such as "user", "password", "schema" etc
* @return the newly created client
* @throws RpcException if there is a problem setting up the client
*/
public static DrillClient createClient(final DrillConfig drillConfig, final RemoteServiceSet remoteServiceSet,
    final int maxWidth, final Properties props) throws RpcException, OutOfMemoryException {
  final DrillClient drillClient = new DrillClient(drillConfig, remoteServiceSet.getCoordinator());
  try {
    drillClient.connect(props);
    // Cap per-node parallelism for the session before handing the client back.
    final List<QueryDataBatch> results = drillClient.runQuery(QueryType.SQL,
        String.format("alter session set `%s` = %d", ExecConstants.MAX_WIDTH_PER_NODE_KEY, maxWidth));
    // The ALTER SESSION result is not needed; release its buffers.
    for (QueryDataBatch queryDataBatch : results) {
      queryDataBatch.release();
    }
    return drillClient;
  } catch (RpcException | RuntimeException e) {
    // Don't leak the client (and its allocator/connections) if setup fails.
    drillClient.close();
    throw e;
  }
}
use of org.apache.drill.exec.rpc.user.QueryDataBatch in project drill by axbaretto.
the class BaseTestQuery method printResult.
/**
 * Prints the contents of every batch in {@code results} to standard output and
 * returns the total number of rows across all batches. Each batch is released
 * after printing.
 *
 * @param results the query result batches to display
 * @return total row count over all batches
 * @throws SchemaChangeException declared for historical reasons; see TODO below
 */
protected int printResult(List<QueryDataBatch> results) throws SchemaChangeException {
  final RecordBatchLoader batchLoader = new RecordBatchLoader(getAllocator());
  int totalRows = 0;
  for (final QueryDataBatch batch : results) {
    totalRows += batch.getHeader().getRowCount();
    batchLoader.load(batch.getHeader().getDef(), batch.getData());
    // TODO: Clean: DRILL-2933: That load(...) no longer throws
    // SchemaChangeException, so check/clean throw clause above.
    VectorUtil.showVectorAccessibleContent(batchLoader, columnWidths);
    // Free loader state and the batch's buffers before moving on.
    batchLoader.clear();
    batch.release();
  }
  System.out.println("Total record count: " + totalRows);
  return totalRows;
}
Aggregations