Use of com.linkedin.pinot.core.operator.blocks.IntermediateResultsBlock in project pinot by LinkedIn.
The class AggregationOperator, method getNextBlock:
@Override
public Block getNextBlock() {
  int numDocsScanned = 0;

  // Perform aggregation on all the blocks.
  AggregationExecutor aggregationExecutor = new DefaultAggregationExecutor(_aggregationFunctionContexts);
  aggregationExecutor.init();
  TransformBlock transformBlock;
  while ((transformBlock = (TransformBlock) _transformOperator.nextBlock()) != null) {
    numDocsScanned += transformBlock.getNumDocs();
    aggregationExecutor.aggregate(transformBlock);
  }
  aggregationExecutor.finish();

  // Create execution statistics. Cast to long before multiplying to avoid int overflow.
  long numEntriesScannedInFilter = _transformOperator.getExecutionStatistics().getNumEntriesScannedInFilter();
  long numEntriesScannedPostFilter = (long) numDocsScanned * _transformOperator.getNumProjectionColumns();
  _executionStatistics =
      new ExecutionStatistics(numDocsScanned, numEntriesScannedInFilter, numEntriesScannedPostFilter,
          _numTotalRawDocs);

  // Build an intermediate results block based on the aggregation result from the executor.
  return new IntermediateResultsBlock(_aggregationFunctionContexts, aggregationExecutor.getResult(), false);
}
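The method above follows the pull-based operator contract: call nextBlock() on the child operator until it returns null, folding each block into the running result. Below is a minimal, self-contained sketch of that loop, using simplified stand-in types (SketchBlock, BlockSource), not the actual Pinot classes.

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

// Simplified stand-in for a data block; not the Pinot Block interface.
interface SketchBlock {
  int getNumDocs();
}

final class SimpleBlock implements SketchBlock {
  private final int _numDocs;
  SimpleBlock(int numDocs) { _numDocs = numDocs; }
  @Override public int getNumDocs() { return _numDocs; }
}

// An operator returns one block per call and null when exhausted,
// mirroring the while ((block = op.nextBlock()) != null) loop above.
final class BlockSource {
  private final Iterator<SketchBlock> _iterator;
  BlockSource(List<SketchBlock> blocks) { _iterator = blocks.iterator(); }
  SketchBlock nextBlock() { return _iterator.hasNext() ? _iterator.next() : null; }
}

public final class OperatorLoopSketch {
  public static void main(String[] args) {
    List<SketchBlock> blocks = new ArrayList<>();
    blocks.add(new SimpleBlock(10000));
    blocks.add(new SimpleBlock(2500));
    BlockSource source = new BlockSource(blocks);

    long numDocsScanned = 0;
    SketchBlock block;
    while ((block = source.nextBlock()) != null) {
      numDocsScanned += block.getNumDocs(); // fold each block in, as in getNextBlock()
    }
    System.out.println("numDocsScanned = " + numDocsScanned); // 12500
  }
}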
Use of com.linkedin.pinot.core.operator.blocks.IntermediateResultsBlock in project pinot by LinkedIn.
The class MSelectionOnlyOperator, method getNextBlock:
@Override
public Block getNextBlock() {
  int numDocsScanned = 0;
  ProjectionBlock projectionBlock;
  while ((projectionBlock = (ProjectionBlock) _projectionOperator.nextBlock()) != null) {
    for (int i = 0; i < _dataSchema.size(); i++) {
      _blocks[i] = projectionBlock.getBlock(_dataSchema.getColumnName(i));
    }
    SelectionFetcher selectionFetcher = new SelectionFetcher(_blocks, _dataSchema);
    DocIdSetBlock docIdSetBlock = projectionBlock.getDocIdSetBlock();
    // Fetch at most the number of documents still needed to reach the limit.
    int numDocsToFetch = Math.min(docIdSetBlock.getSearchableLength(), _limitDocs - _rowEvents.size());
    numDocsScanned += numDocsToFetch;
    int[] docIdSet = docIdSetBlock.getDocIdSet();
    for (int i = 0; i < numDocsToFetch; i++) {
      _rowEvents.add(selectionFetcher.getRow(docIdSet[i]));
    }
    if (_rowEvents.size() == _limitDocs) {
      break;
    }
  }

  // Create execution statistics. Cast to long before multiplying to avoid int overflow.
  long numEntriesScannedInFilter = _projectionOperator.getExecutionStatistics().getNumEntriesScannedInFilter();
  long numEntriesScannedPostFilter = (long) numDocsScanned * _projectionOperator.getNumProjectionColumns();
  long numTotalRawDocs = _indexSegment.getSegmentMetadata().getTotalRawDocs();
  _executionStatistics =
      new ExecutionStatistics(numDocsScanned, numEntriesScannedInFilter, numEntriesScannedPostFilter,
          numTotalRawDocs);

  return new IntermediateResultsBlock(_dataSchema, _rowEvents);
}
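The loop above caps the fetch per block at the number of rows still needed and stops as soon as the limit is reached, so blocks past the limit are never touched. A minimal sketch of that LIMIT-aware pattern, with plain ints standing in for fetched rows:

import java.util.ArrayList;
import java.util.List;

public final class LimitFetchSketch {
  public static void main(String[] args) {
    int limit = 5;
    int[][] docIdBlocks = {{1, 2, 3}, {4, 5, 6, 7}, {8, 9}};
    List<Integer> rows = new ArrayList<>();
    int numDocsScanned = 0;
    for (int[] docIdSet : docIdBlocks) {
      // Each block contributes at most (limit - rows.size()) documents.
      int numDocsToFetch = Math.min(docIdSet.length, limit - rows.size());
      numDocsScanned += numDocsToFetch;
      for (int i = 0; i < numDocsToFetch; i++) {
        rows.add(docIdSet[i]); // stand-in for selectionFetcher.getRow(docIdSet[i])
      }
      if (rows.size() == limit) {
        break; // limit reached: remaining blocks are never materialized
      }
    }
    System.out.println(rows + ", scanned=" + numDocsScanned); // [1, 2, 3, 4, 5], scanned=5
  }
}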
Use of com.linkedin.pinot.core.operator.blocks.IntermediateResultsBlock in project pinot by LinkedIn.
The class MCombineGroupByOperator, method combineBlocks:
/**
 * This method combines the result blocks from the underlying operators and builds a
 * merged, sorted and trimmed result block.
 * 1. Result blocks from the underlying operators are merged concurrently into a
 *    HashMap, with appropriate synchronization. The result blocks themselves are stored
 *    in the specified blocks[].
 *    - The key in this concurrent map is the group-by key, and the value is an array of
 *      Objects (one for each aggregation function).
 *    - Synchronization is provided by locking on the group key that is to be modified.
 *
 * 2. The contents of the concurrent map are then translated into the form expected by
 *    the broker (List<Map<String, Object>>).
 *
 * 3. This result is then sorted and trimmed as per 'TOP N' in the brokerRequest.
 *
 * @return IntermediateResultsBlock containing the final results from the combine operation.
 */
private IntermediateResultsBlock combineBlocks() throws InterruptedException {
  int numOperators = _operators.size();
  final CountDownLatch operatorLatch = new CountDownLatch(numOperators);
  final Map<String, Object[]> resultsMap = new ConcurrentHashMap<>();
  final ConcurrentLinkedQueue<ProcessingException> mergedProcessingExceptions = new ConcurrentLinkedQueue<>();
  List<AggregationInfo> aggregationInfos = _brokerRequest.getAggregationsInfo();
  final AggregationFunctionContext[] aggregationFunctionContexts =
      AggregationFunctionUtils.getAggregationFunctionContexts(aggregationInfos, null);
  final int numAggregationFunctions = aggregationFunctionContexts.length;

  for (int i = 0; i < numOperators; i++) {
    final int index = i;
    _executorService.execute(new TraceRunnable() {
      @SuppressWarnings("unchecked")
      @Override
      public void runJob() {
        AggregationGroupByResult aggregationGroupByResult;
        try {
          IntermediateResultsBlock intermediateResultsBlock =
              (IntermediateResultsBlock) _operators.get(index).nextBlock();

          // Merge processing exceptions.
          List<ProcessingException> processingExceptionsToMerge = intermediateResultsBlock.getProcessingExceptions();
          if (processingExceptionsToMerge != null) {
            mergedProcessingExceptions.addAll(processingExceptionsToMerge);
          }

          // Merge aggregation group-by result.
          aggregationGroupByResult = intermediateResultsBlock.getAggregationGroupByResult();
          if (aggregationGroupByResult != null) {
            // Iterate over the group-by keys; for each key, update the group-by result in the resultsMap.
            Iterator<GroupKeyGenerator.GroupKey> groupKeyIterator = aggregationGroupByResult.getGroupKeyIterator();
            while (groupKeyIterator.hasNext()) {
              GroupKeyGenerator.GroupKey groupKey = groupKeyIterator.next();
              String groupKeyString = groupKey.getStringKey();

              // hashCode() might return a negative value; mask the sign bit to make it non-negative.
              int lockIndex = (groupKeyString.hashCode() & Integer.MAX_VALUE) % NUM_LOCKS;
              synchronized (LOCKS[lockIndex]) {
                Object[] results = resultsMap.get(groupKeyString);
                if (results == null) {
                  results = new Object[numAggregationFunctions];
                  for (int j = 0; j < numAggregationFunctions; j++) {
                    results[j] = aggregationGroupByResult.getResultForKey(groupKey, j);
                  }
                  resultsMap.put(groupKeyString, results);
                } else {
                  for (int j = 0; j < numAggregationFunctions; j++) {
                    results[j] = aggregationFunctionContexts[j].getAggregationFunction()
                        .merge(results[j], aggregationGroupByResult.getResultForKey(groupKey, j));
                  }
                }
              }
            }
          }
        } catch (Exception e) {
          LOGGER.error("Exception processing CombineGroupBy for index {}, operator {}", index,
              _operators.get(index).getClass().getName(), e);
          mergedProcessingExceptions.add(QueryException.getException(QueryException.QUERY_EXECUTION_ERROR, e));
        }
        operatorLatch.countDown();
      }
    });
  }

  boolean opCompleted = operatorLatch.await(_timeOutMs, TimeUnit.MILLISECONDS);
  if (!opCompleted) {
    // If this happens, the broker side should already have timed out; just log the error on the server side.
    LOGGER.error("Timed out while combining group-by results, after {}ms.", _timeOutMs);
    return new IntermediateResultsBlock(new TimeoutException("CombineGroupBy timed out."));
  }

  // Trim the results map.
  AggregationGroupByTrimmingService aggregationGroupByTrimmingService =
      new AggregationGroupByTrimmingService(aggregationFunctionContexts, (int) _brokerRequest.getGroupBy().getTopN());
  List<Map<String, Object>> trimmedResults =
      aggregationGroupByTrimmingService.trimIntermediateResultsMap(resultsMap);
  IntermediateResultsBlock mergedBlock =
      new IntermediateResultsBlock(aggregationFunctionContexts, trimmedResults, true);

  // Set the processing exceptions.
  if (!mergedProcessingExceptions.isEmpty()) {
    mergedBlock.setProcessingExceptions(new ArrayList<>(mergedProcessingExceptions));
  }

  // Set the execution statistics.
  ExecutionStatistics executionStatistics = new ExecutionStatistics();
  for (Operator operator : _operators) {
    ExecutionStatistics executionStatisticsToMerge = operator.getExecutionStatistics();
    if (executionStatisticsToMerge != null) {
      executionStatistics.merge(executionStatisticsToMerge);
    }
  }
  mergedBlock.setNumDocsScanned(executionStatistics.getNumDocsScanned());
  mergedBlock.setNumEntriesScannedInFilter(executionStatistics.getNumEntriesScannedInFilter());
  mergedBlock.setNumEntriesScannedPostFilter(executionStatistics.getNumEntriesScannedPostFilter());
  mergedBlock.setNumTotalRawDocs(executionStatistics.getNumTotalRawDocs());
  return mergedBlock;
}
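The synchronization scheme described in the Javadoc above is classic lock striping: a fixed pool of lock objects, with the stripe picked by a non-negative hash of the group key, so writers to different keys rarely contend. A minimal stand-alone sketch of that scheme (LockStripingSketch is illustrative, not the actual Pinot class):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public final class LockStripingSketch {
  private static final int NUM_LOCKS = 16;
  private static final Object[] LOCKS = new Object[NUM_LOCKS];
  static {
    for (int i = 0; i < NUM_LOCKS; i++) {
      LOCKS[i] = new Object();
    }
  }

  private final Map<String, long[]> _resultsMap = new ConcurrentHashMap<>();

  // Merge a per-segment count into the shared map, synchronizing only on
  // the stripe that owns this key.
  public void merge(String groupKey, long count) {
    // hashCode() may be negative; mask the sign bit to get a valid array index.
    int lockIndex = (groupKey.hashCode() & Integer.MAX_VALUE) % NUM_LOCKS;
    synchronized (LOCKS[lockIndex]) {
      long[] result = _resultsMap.get(groupKey);
      if (result == null) {
        _resultsMap.put(groupKey, new long[]{count});
      } else {
        result[0] += count;
      }
    }
  }

  public static void main(String[] args) {
    LockStripingSketch sketch = new LockStripingSketch();
    sketch.merge("w\t3836469\t204", 1);
    sketch.merge("w\t3836469\t204", 2);
    System.out.println(sketch._resultsMap.get("w\t3836469\t204")[0]); // 3
  }
}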
Use of com.linkedin.pinot.core.operator.blocks.IntermediateResultsBlock in project pinot by LinkedIn.
The class MCombineOperator, method getNextBlock:
@Override
public Block getNextBlock() {
  final long startTime = System.currentTimeMillis();
  final long queryEndTime = System.currentTimeMillis() + _timeOutMs;
  int numGroups = Math.max(MIN_THREADS_PER_QUERY,
      Math.min(MAX_THREADS_PER_QUERY, (_operators.size() + MIN_SEGMENTS_PER_THREAD - 1) / MIN_SEGMENTS_PER_THREAD));
  // Ensure that the number of groups is not more than the number of segments.
  numGroups = Math.min(_operators.size(), numGroups);
  final List<List<Operator>> operatorGroups = new ArrayList<List<Operator>>(numGroups);
  for (int i = 0; i < numGroups; i++) {
    operatorGroups.add(new ArrayList<Operator>());
  }
  // Round-robin the operators across the groups.
  for (int i = 0; i < _operators.size(); i++) {
    operatorGroups.get(i % numGroups).add(_operators.get(i));
  }
  final BlockingQueue<Block> blockingQueue = new ArrayBlockingQueue<>(operatorGroups.size());

  // Submit operators.
  for (final List<Operator> operatorGroup : operatorGroups) {
    _executorService.submit(new TraceRunnable() {
      @Override
      public void runJob() {
        IntermediateResultsBlock mergedBlock = null;
        try {
          for (Operator operator : operatorGroup) {
            IntermediateResultsBlock blockToMerge = (IntermediateResultsBlock) operator.nextBlock();
            if (mergedBlock == null) {
              mergedBlock = blockToMerge;
            } else {
              try {
                CombineService.mergeTwoBlocks(_brokerRequest, mergedBlock, blockToMerge);
              } catch (Exception e) {
                LOGGER.error("Caught exception while merging two blocks (step 1).", e);
                mergedBlock.addToProcessingExceptions(
                    QueryException.getException(QueryException.MERGE_RESPONSE_ERROR, e));
              }
            }
          }
        } catch (Exception e) {
          LOGGER.error("Caught exception while executing query.", e);
          mergedBlock = new IntermediateResultsBlock(e);
        }
        blockingQueue.offer(mergedBlock);
      }
    });
  }
  LOGGER.debug("Submitting operators to be run in parallel took: {}ms", (System.currentTimeMillis() - startTime));

  // Submit the merger job.
  Future<IntermediateResultsBlock> mergedBlockFuture =
      _executorService.submit(new TraceCallable<IntermediateResultsBlock>() {
        @Override
        public IntermediateResultsBlock callJob() throws Exception {
          int mergedBlocksNumber = 0;
          IntermediateResultsBlock mergedBlock = null;
          while ((queryEndTime > System.currentTimeMillis()) && (mergedBlocksNumber < operatorGroups.size())) {
            if (mergedBlock == null) {
              mergedBlock = (IntermediateResultsBlock) blockingQueue.poll(
                  queryEndTime - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
              if (mergedBlock != null) {
                mergedBlocksNumber++;
              }
              LOGGER.debug("Got response from operator 0 after: {}", (System.currentTimeMillis() - startTime));
            } else {
              IntermediateResultsBlock blockToMerge = (IntermediateResultsBlock) blockingQueue.poll(
                  queryEndTime - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
              if (blockToMerge != null) {
                try {
                  LOGGER.debug("Got response from operator {} after: {}", mergedBlocksNumber,
                      (System.currentTimeMillis() - startTime));
                  CombineService.mergeTwoBlocks(_brokerRequest, mergedBlock, blockToMerge);
                  LOGGER.debug("Merged response from operator {} after: {}", mergedBlocksNumber,
                      (System.currentTimeMillis() - startTime));
                } catch (Exception e) {
                  LOGGER.error("Caught exception while merging two blocks (step 2).", e);
                  mergedBlock.addToProcessingExceptions(
                      QueryException.getException(QueryException.MERGE_RESPONSE_ERROR, e));
                }
                mergedBlocksNumber++;
              }
            }
          }
          return mergedBlock;
        }
      });

  // Get the merged results.
  IntermediateResultsBlock mergedBlock;
  try {
    mergedBlock = mergedBlockFuture.get(queryEndTime - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
  } catch (InterruptedException e) {
    LOGGER.error("Caught InterruptedException.", e);
    mergedBlock = new IntermediateResultsBlock(QueryException.getException(QueryException.FUTURE_CALL_ERROR, e));
  } catch (ExecutionException e) {
    LOGGER.error("Caught ExecutionException.", e);
    mergedBlock = new IntermediateResultsBlock(QueryException.getException(QueryException.MERGE_RESPONSE_ERROR, e));
  } catch (TimeoutException e) {
    LOGGER.error("Caught TimeoutException.", e);
    mergedBlock = new IntermediateResultsBlock(QueryException.getException(QueryException.EXECUTION_TIMEOUT_ERROR, e));
  }

  // Update the execution statistics.
  ExecutionStatistics executionStatistics = new ExecutionStatistics();
  for (Operator operator : _operators) {
    ExecutionStatistics executionStatisticsToMerge = operator.getExecutionStatistics();
    if (executionStatisticsToMerge != null) {
      executionStatistics.merge(executionStatisticsToMerge);
    }
  }
  mergedBlock.setNumDocsScanned(executionStatistics.getNumDocsScanned());
  mergedBlock.setNumEntriesScannedInFilter(executionStatistics.getNumEntriesScannedInFilter());
  mergedBlock.setNumEntriesScannedPostFilter(executionStatistics.getNumEntriesScannedPostFilter());
  mergedBlock.setNumTotalRawDocs(executionStatistics.getNumTotalRawDocs());
  return mergedBlock;
}
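The merger above polls the queue with the time remaining until queryEndTime rather than a fresh timeout per block, so the whole combine shares one end-to-end deadline. A minimal sketch of that deadline-driven producer/consumer pattern, with plain longs standing in for result blocks:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

public final class DeadlinePollSketch {
  public static void main(String[] args) throws InterruptedException {
    int numWorkers = 4;
    long timeOutMs = 1000;
    long queryEndTime = System.currentTimeMillis() + timeOutMs;
    BlockingQueue<Long> queue = new ArrayBlockingQueue<>(numWorkers);

    // Workers hand their results to the bounded queue, like the operator groups above.
    for (int i = 0; i < numWorkers; i++) {
      final long value = i + 1;
      new Thread(() -> queue.offer(value)).start();
    }

    long merged = 0;
    int received = 0;
    // Each poll gets only the time remaining until the deadline, not a fresh
    // full timeout, so slow workers cannot extend the overall budget.
    while (received < numWorkers && queryEndTime > System.currentTimeMillis()) {
      Long value = queue.poll(queryEndTime - System.currentTimeMillis(), TimeUnit.MILLISECONDS);
      if (value != null) {
        merged += value; // stand-in for CombineService.mergeTwoBlocks(...)
        received++;
      }
    }
    System.out.println("merged=" + merged + ", received=" + received);
  }
}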
Use of com.linkedin.pinot.core.operator.blocks.IntermediateResultsBlock in project pinot by LinkedIn.
The class InnerSegmentAggregationMultiValueQueriesTest, method testMediumAggregationGroupBy:
@Test
public void testMediumAggregationGroupBy() {
  String query = "SELECT" + AGGREGATION + " FROM testTable" + MEDIUM_GROUP_BY;
  // NOTE: here we assume the first group key returned from the iterator is constant.

  // Test query without filter.
  AggregationGroupByOperator aggregationGroupByOperator = getOperatorForQuery(query);
  IntermediateResultsBlock resultsBlock = (IntermediateResultsBlock) aggregationGroupByOperator.nextBlock();
  ExecutionStatistics executionStatistics = aggregationGroupByOperator.getExecutionStatistics();
  Assert.assertEquals(executionStatistics.getNumDocsScanned(), 100000L);
  Assert.assertEquals(executionStatistics.getNumEntriesScannedInFilter(), 0L);
  Assert.assertEquals(executionStatistics.getNumEntriesScannedPostFilter(), 700000L);
  Assert.assertEquals(executionStatistics.getNumTotalRawDocs(), 100000L);
  AggregationGroupByResult aggregationGroupByResult = resultsBlock.getAggregationGroupByResult();
  GroupKeyGenerator.GroupKey firstGroupKey = aggregationGroupByResult.getGroupKeyIterator().next();
  Assert.assertEquals(firstGroupKey.getStringKey(), "w\t3836469\t204");
  Assert.assertEquals(((Number) aggregationGroupByResult.getResultForKey(firstGroupKey, 0)).longValue(), 1L);
  Assert.assertEquals(((Number) aggregationGroupByResult.getResultForKey(firstGroupKey, 1)).longValue(), 1415527660L);
  Assert.assertEquals(((Number) aggregationGroupByResult.getResultForKey(firstGroupKey, 2)).intValue(), 1747635671);
  Assert.assertEquals(((Number) aggregationGroupByResult.getResultForKey(firstGroupKey, 3)).intValue(), 1298457813);
  AvgPair avgResult = (AvgPair) aggregationGroupByResult.getResultForKey(firstGroupKey, 4);
  Assert.assertEquals((long) avgResult.getSum(), 1235208236L);
  Assert.assertEquals(avgResult.getCount(), 1L);

  // Test query with filter.
  aggregationGroupByOperator = getOperatorForQueryWithFilter(query);
  resultsBlock = (IntermediateResultsBlock) aggregationGroupByOperator.nextBlock();
  executionStatistics = aggregationGroupByOperator.getExecutionStatistics();
  Assert.assertEquals(executionStatistics.getNumDocsScanned(), 15620L);
  Assert.assertEquals(executionStatistics.getNumEntriesScannedInFilter(), 282430L);
  Assert.assertEquals(executionStatistics.getNumEntriesScannedPostFilter(), 109340L);
  Assert.assertEquals(executionStatistics.getNumTotalRawDocs(), 100000L);
  aggregationGroupByResult = resultsBlock.getAggregationGroupByResult();
  firstGroupKey = aggregationGroupByResult.getGroupKeyIterator().next();
  Assert.assertEquals(firstGroupKey.getStringKey(), "L\t1483645\t2147483647");
  Assert.assertEquals(((Number) aggregationGroupByResult.getResultForKey(firstGroupKey, 0)).longValue(), 1L);
  Assert.assertEquals(((Number) aggregationGroupByResult.getResultForKey(firstGroupKey, 1)).longValue(), 650650103L);
  Assert.assertEquals(((Number) aggregationGroupByResult.getResultForKey(firstGroupKey, 2)).intValue(), 108417107);
  Assert.assertEquals(((Number) aggregationGroupByResult.getResultForKey(firstGroupKey, 3)).intValue(), 674022574);
  avgResult = (AvgPair) aggregationGroupByResult.getResultForKey(firstGroupKey, 4);
  Assert.assertEquals((long) avgResult.getSum(), 674022574L);
  Assert.assertEquals(avgResult.getCount(), 1L);
}
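The assertions on avgResult.getSum() and avgResult.getCount() reflect that AVG is carried as a (sum, count) intermediate pair so partial results can be merged exactly across segments. A minimal sketch of that idea (AvgPairSketch is a hypothetical stand-in, not Pinot's AvgPair):

public final class AvgPairSketch {
  private double _sum;
  private long _count;

  AvgPairSketch(double sum, long count) { _sum = sum; _count = count; }

  // Merging two partial averages is exact because sums and counts simply add.
  void merge(AvgPairSketch other) {
    _sum += other._sum;
    _count += other._count;
  }

  // The final average is computed only once, after all merges.
  double finish() { return _count == 0 ? 0.0 : _sum / _count; }

  public static void main(String[] args) {
    AvgPairSketch a = new AvgPairSketch(1235208236L, 1);
    AvgPairSketch b = new AvgPairSketch(650650103L, 1);
    a.merge(b);
    System.out.println(a.finish()); // (1235208236 + 650650103) / 2
  }
}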