Example 1 with ProcessingException

Use of com.linkedin.pinot.common.response.ProcessingException in project pinot by linkedin.

The class MCombineGroupByOperator, method combineBlocks.

/**
   * This method combines the result blocks from the underlying operators and builds a
   * merged, sorted and trimmed result block.
   * 1. Result blocks from the underlying operators are merged concurrently into a
   *    ConcurrentHashMap.
   *   - The key in this concurrent map is the group-by key, and the value is an array of
   *     Objects (one for each aggregation function).
   *   - Synchronization is provided by striped locks: each group key hashes to one of a
   *     fixed set of locks (LOCKS), which is held while that key's entry is modified.
   *
   * 2. The contents of the concurrent map are then translated into what is expected by
   *    the broker (List<Map<String, Object>>).
   *
   * 3. This result is sorted and trimmed down to the 'TOP N' specified in the brokerRequest.
   *
   * @return IntermediateResultsBlock containing the final results of the combine operation.
   */
private IntermediateResultsBlock combineBlocks() throws InterruptedException {
    int numOperators = _operators.size();
    final CountDownLatch operatorLatch = new CountDownLatch(numOperators);
    final Map<String, Object[]> resultsMap = new ConcurrentHashMap<>();
    final ConcurrentLinkedQueue<ProcessingException> mergedProcessingExceptions = new ConcurrentLinkedQueue<>();
    List<AggregationInfo> aggregationInfos = _brokerRequest.getAggregationsInfo();
    final AggregationFunctionContext[] aggregationFunctionContexts = AggregationFunctionUtils.getAggregationFunctionContexts(aggregationInfos, null);
    final int numAggregationFunctions = aggregationFunctionContexts.length;
    for (int i = 0; i < numOperators; i++) {
        final int index = i;
        _executorService.execute(new TraceRunnable() {

            @SuppressWarnings("unchecked")
            @Override
            public void runJob() {
                AggregationGroupByResult aggregationGroupByResult;
                try {
                    IntermediateResultsBlock intermediateResultsBlock = (IntermediateResultsBlock) _operators.get(index).nextBlock();
                    // Merge processing exceptions.
                    List<ProcessingException> processingExceptionsToMerge = intermediateResultsBlock.getProcessingExceptions();
                    if (processingExceptionsToMerge != null) {
                        mergedProcessingExceptions.addAll(processingExceptionsToMerge);
                    }
                    // Merge aggregation group-by result.
                    aggregationGroupByResult = intermediateResultsBlock.getAggregationGroupByResult();
                    if (aggregationGroupByResult != null) {
                        // Iterate over the group-by keys, for each key, update the group-by result in the resultsMap.
                        Iterator<GroupKeyGenerator.GroupKey> groupKeyIterator = aggregationGroupByResult.getGroupKeyIterator();
                        while (groupKeyIterator.hasNext()) {
                            GroupKeyGenerator.GroupKey groupKey = groupKeyIterator.next();
                            String groupKeyString = groupKey.getStringKey();
                            // hashCode() might return a negative value; mask off the sign bit before indexing the lock array.
                            int lockIndex = (groupKeyString.hashCode() & Integer.MAX_VALUE) % NUM_LOCKS;
                            synchronized (LOCKS[lockIndex]) {
                                Object[] results = resultsMap.get(groupKeyString);
                                if (results == null) {
                                    results = new Object[numAggregationFunctions];
                                    for (int j = 0; j < numAggregationFunctions; j++) {
                                        results[j] = aggregationGroupByResult.getResultForKey(groupKey, j);
                                    }
                                    resultsMap.put(groupKeyString, results);
                                } else {
                                    for (int j = 0; j < numAggregationFunctions; j++) {
                                        results[j] = aggregationFunctionContexts[j].getAggregationFunction().merge(results[j], aggregationGroupByResult.getResultForKey(groupKey, j));
                                    }
                                }
                            }
                        }
                    }
                } catch (Exception e) {
                    LOGGER.error("Exception processing CombineGroupBy for index {}, operator {}", index, _operators.get(index).getClass().getName(), e);
                    mergedProcessingExceptions.add(QueryException.getException(QueryException.QUERY_EXECUTION_ERROR, e));
                }
                operatorLatch.countDown();
            }
        });
    }
    boolean opCompleted = operatorLatch.await(_timeOutMs, TimeUnit.MILLISECONDS);
    if (!opCompleted) {
        // If this happens, the broker side should have already timed out; just log the error on the server side.
        LOGGER.error("Timed out while combining group-by results, after {}ms.", _timeOutMs);
        return new IntermediateResultsBlock(new TimeoutException("CombineGroupBy timed out."));
    }
    // Trim the results map.
    AggregationGroupByTrimmingService aggregationGroupByTrimmingService = new AggregationGroupByTrimmingService(aggregationFunctionContexts, (int) _brokerRequest.getGroupBy().getTopN());
    List<Map<String, Object>> trimmedResults = aggregationGroupByTrimmingService.trimIntermediateResultsMap(resultsMap);
    IntermediateResultsBlock mergedBlock = new IntermediateResultsBlock(aggregationFunctionContexts, trimmedResults, true);
    // Set the processing exceptions.
    if (!mergedProcessingExceptions.isEmpty()) {
        mergedBlock.setProcessingExceptions(new ArrayList<>(mergedProcessingExceptions));
    }
    // Set the execution statistics.
    ExecutionStatistics executionStatistics = new ExecutionStatistics();
    for (Operator operator : _operators) {
        ExecutionStatistics executionStatisticsToMerge = operator.getExecutionStatistics();
        if (executionStatisticsToMerge != null) {
            executionStatistics.merge(executionStatisticsToMerge);
        }
    }
    mergedBlock.setNumDocsScanned(executionStatistics.getNumDocsScanned());
    mergedBlock.setNumEntriesScannedInFilter(executionStatistics.getNumEntriesScannedInFilter());
    mergedBlock.setNumEntriesScannedPostFilter(executionStatistics.getNumEntriesScannedPostFilter());
    mergedBlock.setNumTotalRawDocs(executionStatistics.getNumTotalRawDocs());
    return mergedBlock;
}
Also used : Operator(com.linkedin.pinot.core.common.Operator) AggregationGroupByTrimmingService(com.linkedin.pinot.core.query.aggregation.groupby.AggregationGroupByTrimmingService) AggregationGroupByResult(com.linkedin.pinot.core.query.aggregation.groupby.AggregationGroupByResult) TraceRunnable(com.linkedin.pinot.core.util.trace.TraceRunnable) Iterator(java.util.Iterator) ArrayList(java.util.ArrayList) List(java.util.List) AggregationInfo(com.linkedin.pinot.common.request.AggregationInfo) IntermediateResultsBlock(com.linkedin.pinot.core.operator.blocks.IntermediateResultsBlock) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) AggregationFunctionContext(com.linkedin.pinot.core.query.aggregation.AggregationFunctionContext) GroupKeyGenerator(com.linkedin.pinot.core.query.aggregation.groupby.GroupKeyGenerator) ProcessingException(com.linkedin.pinot.common.response.ProcessingException) TimeoutException(java.util.concurrent.TimeoutException) CountDownLatch(java.util.concurrent.CountDownLatch) QueryException(com.linkedin.pinot.common.exception.QueryException) ConcurrentLinkedQueue(java.util.concurrent.ConcurrentLinkedQueue) Map(java.util.Map)
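
The striped-lock merge that the javadoc describes is easy to lift out of its Pinot context. Below is a minimal, self-contained sketch of the pattern, assuming an arbitrary value type and merge function; StripedLockMerger, NUM_LOCKS and LOCKS here are illustrative names, not part of the Pinot API:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.BinaryOperator;

public class StripedLockMerger<V> {
    // A fixed pool of locks; each key hashes to exactly one of them.
    private static final int NUM_LOCKS = 1024;
    private static final Object[] LOCKS = new Object[NUM_LOCKS];
    static {
        for (int i = 0; i < NUM_LOCKS; i++) {
            LOCKS[i] = new Object();
        }
    }

    private final ConcurrentMap<String, V> _resultsMap = new ConcurrentHashMap<>();
    private final BinaryOperator<V> _mergeFunction;

    public StripedLockMerger(BinaryOperator<V> mergeFunction) {
        _mergeFunction = mergeFunction;
    }

    public void merge(String key, V value) {
        // hashCode() can be negative; mask off the sign bit before the modulo.
        int lockIndex = (key.hashCode() & Integer.MAX_VALUE) % NUM_LOCKS;
        synchronized (LOCKS[lockIndex]) {
            V existing = _resultsMap.get(key);
            _resultsMap.put(key, existing == null ? value : _mergeFunction.apply(existing, value));
        }
    }

    public ConcurrentMap<String, V> results() {
        return _resultsMap;
    }
}

Worker threads would call merge(groupKey, partialResult) concurrently, exactly as the TraceRunnable instances above do; the striping bounds lock contention without paying for one lock object per group key.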

Example 2 with ProcessingException

Use of com.linkedin.pinot.common.response.ProcessingException in project pinot by linkedin.

The class DataTableSerDeTest, method testException.

@Test
public void testException() throws IOException {
    Exception exception = new UnsupportedOperationException("Caught exception.");
    ProcessingException processingException = QueryException.getException(QueryException.QUERY_EXECUTION_ERROR, exception);
    String expected = processingException.getMessage();
    DataTable dataTable = new DataTableImplV2();
    dataTable.addException(processingException);
    DataTable newDataTable = DataTableFactory.getDataTable(dataTable.toBytes());
    Assert.assertNull(newDataTable.getDataSchema());
    Assert.assertEquals(newDataTable.getNumberOfRows(), 0);
    String actual = newDataTable.getMetadata().get(DataTable.EXCEPTION_METADATA_KEY + QueryException.QUERY_EXECUTION_ERROR.getErrorCode());
    Assert.assertEquals(actual, expected);
}
Also used : DataTable(com.linkedin.pinot.common.utils.DataTable) IOException(java.io.IOException) ProcessingException(com.linkedin.pinot.common.response.ProcessingException) QueryException(com.linkedin.pinot.common.exception.QueryException) Test(org.testng.annotations.Test)
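
The assertion above reads the exception back under the key EXCEPTION_METADATA_KEY + errorCode, which suggests one metadata entry per distinct error code. A small extension of the test, assuming that layout holds when several exceptions are attached (DATA_TABLE_DESERIALIZATION_ERROR is taken from Example 5 below):

@Test
public void testMultipleExceptions() throws IOException {
    DataTable dataTable = new DataTableImplV2();
    dataTable.addException(QueryException.getException(QueryException.QUERY_EXECUTION_ERROR, new RuntimeException("first")));
    dataTable.addException(QueryException.getException(QueryException.DATA_TABLE_DESERIALIZATION_ERROR, new RuntimeException("second")));
    DataTable newDataTable = DataTableFactory.getDataTable(dataTable.toBytes());
    // Each error code should surface under its own metadata key.
    Assert.assertNotNull(newDataTable.getMetadata().get(DataTable.EXCEPTION_METADATA_KEY + QueryException.QUERY_EXECUTION_ERROR.getErrorCode()));
    Assert.assertNotNull(newDataTable.getMetadata().get(DataTable.EXCEPTION_METADATA_KEY + QueryException.DATA_TABLE_DESERIALIZATION_ERROR.getErrorCode()));
}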

Example 3 with ProcessingException

Use of com.linkedin.pinot.common.response.ProcessingException in project pinot by linkedin.

The class CombineService, method mergeTwoBlocks.

public static void mergeTwoBlocks(@Nonnull BrokerRequest brokerRequest, @Nonnull IntermediateResultsBlock mergedBlock, @Nonnull IntermediateResultsBlock blockToMerge) {
    // Combine processing exceptions.
    List<ProcessingException> mergedProcessingExceptions = mergedBlock.getProcessingExceptions();
    List<ProcessingException> processingExceptionsToMerge = blockToMerge.getProcessingExceptions();
    if (mergedProcessingExceptions == null) {
        mergedBlock.setProcessingExceptions(processingExceptionsToMerge);
    } else if (processingExceptionsToMerge != null) {
        mergedProcessingExceptions.addAll(processingExceptionsToMerge);
    }
    // Combine result.
    if (brokerRequest.isSetAggregationsInfo()) {
        if (!brokerRequest.isSetGroupBy()) {
            // Combine aggregation only result.
            // Might be null if an exception was caught during query execution.
            List<Object> aggregationResultToMerge = blockToMerge.getAggregationResult();
            if (aggregationResultToMerge == null) {
                // No data in block to merge.
                return;
            }
            AggregationFunctionContext[] mergedAggregationFunctionContexts = mergedBlock.getAggregationFunctionContexts();
            if (mergedAggregationFunctionContexts == null) {
                // No data in merged block: adopt the contexts and results from the block to merge, then we are done.
                mergedBlock.setAggregationFunctionContexts(blockToMerge.getAggregationFunctionContexts());
                mergedBlock.setAggregationResults(aggregationResultToMerge);
                // Without this return, the loop below would NPE on the still-null local contexts array.
                return;
            }
            // Merge the two blocks.
            List<Object> mergedAggregationResult = mergedBlock.getAggregationResult();
            int numAggregationFunctions = mergedAggregationFunctionContexts.length;
            for (int i = 0; i < numAggregationFunctions; i++) {
                mergedAggregationResult.set(i, mergedAggregationFunctionContexts[i].getAggregationFunction().merge(mergedAggregationResult.get(i), aggregationResultToMerge.get(i)));
            }
        } else {
            // Combine aggregation group-by result, which should not come into CombineService.
            throw new UnsupportedOperationException();
        }
    } else {
        // Combine selection result.
        // Data schema will be null if exceptions were caught during query processing.
        // Result set size will be zero if no row matches the predicate.
        DataSchema mergedBlockSchema = mergedBlock.getSelectionDataSchema();
        DataSchema blockToMergeSchema = blockToMerge.getSelectionDataSchema();
        Collection<Serializable[]> mergedBlockResultSet = mergedBlock.getSelectionResult();
        Collection<Serializable[]> blockToMergeResultSet = blockToMerge.getSelectionResult();
        if (mergedBlockSchema == null || mergedBlockResultSet.size() == 0) {
            // If block to merge schema is not null, set its data schema and result to the merged block.
            if (blockToMergeSchema != null) {
                mergedBlock.setSelectionDataSchema(blockToMergeSchema);
                mergedBlock.setSelectionResult(blockToMergeResultSet);
            }
        } else {
            // Some data in merged block.
            Selection selection = brokerRequest.getSelections();
            boolean isSelectionOrderBy = selection.isSetSelectionSortSequence();
            int selectionSize = selection.getSize();
            // No need to merge if already got enough rows for selection only.
            if (!isSelectionOrderBy && (mergedBlockResultSet.size() == selectionSize)) {
                return;
            }
            // Merge only if there is data in the block to merge.
            if (blockToMergeSchema != null && blockToMergeResultSet.size() > 0) {
                if (mergedBlockSchema.isTypeCompatibleWith(blockToMergeSchema)) {
                    // Two blocks are mergeable.
                    // Upgrade the merged block schema if necessary.
                    mergedBlockSchema.upgradeToCover(blockToMergeSchema);
                    // Merge two blocks.
                    if (isSelectionOrderBy) {
                        // Combine selection order-by.
                        SelectionOperatorUtils.mergeWithOrdering((PriorityQueue<Serializable[]>) mergedBlockResultSet, blockToMergeResultSet, selection.getOffset() + selectionSize);
                    } else {
                        // Combine selection only.
                        SelectionOperatorUtils.mergeWithoutOrdering(mergedBlockResultSet, blockToMergeResultSet, selectionSize);
                    }
                    mergedBlock.setSelectionResult(mergedBlockResultSet);
                } else {
                    // Two blocks are not mergeable.
                    throw new RuntimeException("Data schema inconsistency between merged block schema: " + mergedBlockSchema + " and block to merge schema: " + blockToMergeSchema + ", drop block to merge.");
                }
            }
        }
    }
}
Also used : Selection(com.linkedin.pinot.common.request.Selection) DataSchema(com.linkedin.pinot.common.utils.DataSchema) AggregationFunctionContext(com.linkedin.pinot.core.query.aggregation.AggregationFunctionContext) ProcessingException(com.linkedin.pinot.common.response.ProcessingException)
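
For the selection-only branch, mergeWithoutOrdering needs nothing more than appending rows until the size cap is reached. Here is a minimal sketch of that behavior; it is a plausible stand-in for SelectionOperatorUtils.mergeWithoutOrdering, not Pinot's actual implementation:

import java.io.Serializable;
import java.util.Collection;

public final class SelectionMergeSketch {
    private SelectionMergeSketch() {
    }

    // Append rows from the block to merge until the merged set holds maxRows.
    public static void mergeWithoutOrdering(Collection<Serializable[]> mergedResultSet,
            Collection<Serializable[]> resultSetToMerge, int maxRows) {
        for (Serializable[] row : resultSetToMerge) {
            if (mergedResultSet.size() >= maxRows) {
                return;
            }
            mergedResultSet.add(row);
        }
    }
}

The order-by variant presumably maintains a bounded priority queue of size offset + selectionSize instead, which would explain why mergeTwoBlocks casts mergedBlockResultSet to PriorityQueue<Serializable[]> before calling mergeWithOrdering.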

Example 4 with ProcessingException

Use of com.linkedin.pinot.common.response.ProcessingException in project pinot by linkedin.

The class BrokerRequestHandler, method processOptimizedBrokerRequests.

/**
   * Process the optimized broker requests for both OFFLINE and REALTIME table.
   *
   * @param originalBrokerRequest original broker request.
   * @param offlineBrokerRequest broker request for OFFLINE table.
   * @param realtimeBrokerRequest broker request for REALTIME table.
   * @param reduceService reduce service.
   * @param bucketingSelection customized bucketing selection.
   * @param scatterGatherStats scatter-gather statistics.
   * @param requestId request ID.
   * @return broker response.
   * @throws InterruptedException
   */
@Nonnull
private BrokerResponse processOptimizedBrokerRequests(@Nonnull BrokerRequest originalBrokerRequest, @Nullable BrokerRequest offlineBrokerRequest, @Nullable BrokerRequest realtimeBrokerRequest, @Nonnull ReduceService reduceService, @Nonnull ScatterGatherStats scatterGatherStats, @Nullable BucketingSelection bucketingSelection, long requestId) throws InterruptedException {
    String originalTableName = originalBrokerRequest.getQuerySource().getTableName();
    ResponseType serverResponseType = BrokerResponseFactory.getResponseType(originalBrokerRequest.getResponseFormat());
    PhaseTimes phaseTimes = new PhaseTimes();
    // Step 1: find the candidate servers to be queried for each set of segments from the routing table.
    // Step 2: select servers for each segment set and scatter request to the servers.
    String offlineTableName = null;
    CompositeFuture<ServerInstance, ByteBuf> offlineCompositeFuture = null;
    if (offlineBrokerRequest != null) {
        offlineTableName = offlineBrokerRequest.getQuerySource().getTableName();
        offlineCompositeFuture = routeAndScatterBrokerRequest(offlineBrokerRequest, phaseTimes, scatterGatherStats, true, bucketingSelection, requestId);
    }
    String realtimeTableName = null;
    CompositeFuture<ServerInstance, ByteBuf> realtimeCompositeFuture = null;
    if (realtimeBrokerRequest != null) {
        realtimeTableName = realtimeBrokerRequest.getQuerySource().getTableName();
        realtimeCompositeFuture = routeAndScatterBrokerRequest(realtimeBrokerRequest, phaseTimes, scatterGatherStats, false, bucketingSelection, requestId);
    }
    if ((offlineCompositeFuture == null) && (realtimeCompositeFuture == null)) {
        // No server found in either OFFLINE or REALTIME table.
        return BrokerResponseFactory.getStaticEmptyBrokerResponse(serverResponseType);
    }
    // Step 3: gather response from the servers.
    int numServersQueried = 0;
    long gatherStartTime = System.nanoTime();
    List<ProcessingException> processingExceptions = new ArrayList<>();
    Map<ServerInstance, ByteBuf> offlineServerResponseMap = null;
    Map<ServerInstance, ByteBuf> realtimeServerResponseMap = null;
    if (offlineCompositeFuture != null) {
        numServersQueried += offlineCompositeFuture.getNumFutures();
        offlineServerResponseMap = gatherServerResponses(offlineCompositeFuture, scatterGatherStats, true, offlineTableName, processingExceptions);
    }
    if (realtimeCompositeFuture != null) {
        numServersQueried += realtimeCompositeFuture.getNumFutures();
        realtimeServerResponseMap = gatherServerResponses(realtimeCompositeFuture, scatterGatherStats, false, realtimeTableName, processingExceptions);
    }
    phaseTimes.addToGatherTime(System.nanoTime() - gatherStartTime);
    if ((offlineServerResponseMap == null) && (realtimeServerResponseMap == null)) {
        // No response gathered.
        return BrokerResponseFactory.getBrokerResponseWithExceptions(serverResponseType, processingExceptions);
    }
    // Step 4: deserialize the server responses.
    int numServersResponded = 0;
    long deserializationStartTime = System.nanoTime();
    Map<ServerInstance, DataTable> dataTableMap = new HashMap<>();
    if (offlineServerResponseMap != null) {
        numServersResponded += offlineServerResponseMap.size();
        deserializeServerResponses(offlineServerResponseMap, true, dataTableMap, offlineTableName, processingExceptions);
    }
    if (realtimeServerResponseMap != null) {
        numServersResponded += realtimeServerResponseMap.size();
        deserializeServerResponses(realtimeServerResponseMap, false, dataTableMap, realtimeTableName, processingExceptions);
    }
    phaseTimes.addToDeserializationTime(System.nanoTime() - deserializationStartTime);
    // Step 5: reduce (merge) the server responses and create a broker response to be returned.
    long reduceStartTime = System.nanoTime();
    BrokerResponse brokerResponse = reduceService.reduceOnDataTable(originalBrokerRequest, dataTableMap, _brokerMetrics);
    phaseTimes.addToReduceTime(System.nanoTime() - reduceStartTime);
    // Set processing exceptions and number of servers queried/responded.
    brokerResponse.setExceptions(processingExceptions);
    brokerResponse.setNumServersQueried(numServersQueried);
    brokerResponse.setNumServersResponded(numServersResponded);
    // Update broker metrics.
    phaseTimes.addPhaseTimesToBrokerMetrics(_brokerMetrics, originalTableName);
    if (brokerResponse.getExceptionsSize() > 0) {
        _brokerMetrics.addMeteredTableValue(originalTableName, BrokerMeter.BROKER_RESPONSES_WITH_PROCESSING_EXCEPTIONS, 1);
    }
    if (numServersQueried > numServersResponded) {
        _brokerMetrics.addMeteredTableValue(originalTableName, BrokerMeter.BROKER_RESPONSES_WITH_PARTIAL_SERVERS_RESPONDED, 1);
    }
    return brokerResponse;
}
Also used : DataTable(com.linkedin.pinot.common.utils.DataTable) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) ByteBuf(io.netty.buffer.ByteBuf) ResponseType(com.linkedin.pinot.common.response.BrokerResponseFactory.ResponseType) BrokerResponse(com.linkedin.pinot.common.response.BrokerResponse) ServerInstance(com.linkedin.pinot.common.response.ServerInstance) ProcessingException(com.linkedin.pinot.common.response.ProcessingException) Nonnull(javax.annotation.Nonnull)
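
Each of steps 3 through 5 wraps its work in a System.nanoTime() delta that is accumulated into PhaseTimes and later flushed to broker metrics via addPhaseTimesToBrokerMetrics. A minimal, self-contained sketch of that bookkeeping pattern; this PhaseTimesSketch is an illustrative stand-in, since Pinot's own PhaseTimes also knows how to report itself to BrokerMetrics:

public final class PhaseTimesSketch {
    private long _gatherNanos;
    private long _deserializationNanos;
    private long _reduceNanos;

    public void addToGatherTime(long nanos) {
        _gatherNanos += nanos;
    }

    public void addToDeserializationTime(long nanos) {
        _deserializationNanos += nanos;
    }

    public void addToReduceTime(long nanos) {
        _reduceNanos += nanos;
    }

    public long totalNanos() {
        return _gatherNanos + _deserializationNanos + _reduceNanos;
    }
}

Usage mirrors the method above: record System.nanoTime() before a phase, then pass the delta to the matching accumulator once the phase completes.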

Example 5 with ProcessingException

Use of com.linkedin.pinot.common.response.ProcessingException in project pinot by linkedin.

The class BrokerRequestHandler, method deserializeServerResponses.

/**
   * Deserialize the server responses, put the de-serialized data table into the data table map passed in, append
   * processing exceptions to the processing exception list passed in.
   * <p>For hybrid use case, multiple responses might be from the same instance. Use response sequence to distinguish
   * them.
   *
   * @param responseMap map from server to response.
   * @param isOfflineTable whether the responses are from an OFFLINE table.
   * @param dataTableMap map from server to data table.
   * @param tableName table name.
   * @param processingExceptions list of processing exceptions.
   */
private void deserializeServerResponses(@Nonnull Map<ServerInstance, ByteBuf> responseMap, boolean isOfflineTable, @Nonnull Map<ServerInstance, DataTable> dataTableMap, @Nonnull String tableName, @Nonnull List<ProcessingException> processingExceptions) {
    for (Entry<ServerInstance, ByteBuf> entry : responseMap.entrySet()) {
        ServerInstance serverInstance = entry.getKey();
        if (!isOfflineTable) {
            serverInstance = new ServerInstance(serverInstance.getHostname(), serverInstance.getPort(), 1);
        }
        ByteBuf byteBuf = entry.getValue();
        try {
            byte[] byteArray = new byte[byteBuf.readableBytes()];
            byteBuf.readBytes(byteArray);
            dataTableMap.put(serverInstance, DataTableFactory.getDataTable(byteArray));
        } catch (Exception e) {
            LOGGER.error("Caught exceptions while deserializing response for table: {} from server: {}", tableName, serverInstance, e);
            _brokerMetrics.addMeteredTableValue(tableName, BrokerMeter.DATA_TABLE_DESERIALIZATION_EXCEPTIONS, 1);
            processingExceptions.add(QueryException.getException(QueryException.DATA_TABLE_DESERIALIZATION_ERROR, e));
        }
    }
}
Also used : ServerInstance(com.linkedin.pinot.common.response.ServerInstance) ByteBuf(io.netty.buffer.ByteBuf) ProcessingException(com.linkedin.pinot.common.response.ProcessingException) QueryException(com.linkedin.pinot.common.exception.QueryException) UnknownHostException(java.net.UnknownHostException)
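
A note on the ServerInstance rewrite at the top of the loop: dataTableMap is keyed by ServerInstance, and for a hybrid table the same host and port can answer both the OFFLINE and the REALTIME request. Re-keying realtime responses with sequence 1 (the third constructor argument) keeps the two data tables from colliding in the map; this is exactly the "response sequence" the javadoc mentions.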

Aggregations

ProcessingException (com.linkedin.pinot.common.response.ProcessingException): 7
QueryException (com.linkedin.pinot.common.exception.QueryException): 3
ServerInstance (com.linkedin.pinot.common.response.ServerInstance): 2
DataTable (com.linkedin.pinot.common.utils.DataTable): 2
AggregationFunctionContext (com.linkedin.pinot.core.query.aggregation.AggregationFunctionContext): 2
ByteBuf (io.netty.buffer.ByteBuf): 2
ArrayList (java.util.ArrayList): 2
AggregationInfo (com.linkedin.pinot.common.request.AggregationInfo): 1
Selection (com.linkedin.pinot.common.request.Selection): 1
BrokerResponse (com.linkedin.pinot.common.response.BrokerResponse): 1
ResponseType (com.linkedin.pinot.common.response.BrokerResponseFactory.ResponseType): 1
DataSchema (com.linkedin.pinot.common.utils.DataSchema): 1
Operator (com.linkedin.pinot.core.common.Operator): 1
IntermediateResultsBlock (com.linkedin.pinot.core.operator.blocks.IntermediateResultsBlock): 1
AggregationGroupByResult (com.linkedin.pinot.core.query.aggregation.groupby.AggregationGroupByResult): 1
AggregationGroupByTrimmingService (com.linkedin.pinot.core.query.aggregation.groupby.AggregationGroupByTrimmingService): 1
GroupKeyGenerator (com.linkedin.pinot.core.query.aggregation.groupby.GroupKeyGenerator): 1
TraceRunnable (com.linkedin.pinot.core.util.trace.TraceRunnable): 1
IOException (java.io.IOException): 1
PrintWriter (java.io.PrintWriter): 1