Search in sources:

Example 1 with Plan

use of com.linkedin.pinot.core.plan.Plan in project pinot by linkedin.

The class BaseQueriesTest defines the method getBrokerResponseForQuery.

/**
 * Runs the given PQL query against multiple index segments and reduces the
 * per-server results on the broker side.
 * <p>Use this to exercise the whole flow from server to broker.
 * <p>The result should be equivalent to querying 4 identical index segments.
 *
 * @param query PQL query string.
 * @return reduced broker response.
 */
protected BrokerResponseNative getBrokerResponseForQuery(String query) {
    BrokerRequest request = COMPILER.compileToBrokerRequest(query);

    // Server side: build and execute the inter-segment plan, then collect the
    // instance response.
    Plan queryPlan = PLAN_MAKER.makeInterSegmentPlan(getSegmentDataManagers(), request, EXECUTOR_SERVICE, 10_000);
    queryPlan.execute();
    DataTable serverResponse = queryPlan.getInstanceResponse();

    // Broker side: simulate two servers returning the same response and reduce.
    Map<ServerInstance, DataTable> responsesByServer = new HashMap<>();
    responsesByServer.put(new ServerInstance("localhost:0000"), serverResponse);
    responsesByServer.put(new ServerInstance("localhost:1111"), serverResponse);
    return new BrokerReduceService().reduceOnDataTable(request, responsesByServer);
}
Also used : BrokerReduceService(com.linkedin.pinot.core.query.reduce.BrokerReduceService) DataTable(com.linkedin.pinot.common.utils.DataTable) HashMap(java.util.HashMap) BrokerRequest(com.linkedin.pinot.common.request.BrokerRequest) Plan(com.linkedin.pinot.core.plan.Plan) ServerInstance(com.linkedin.pinot.common.response.ServerInstance)

Example 2 with Plan

use of com.linkedin.pinot.core.plan.Plan in project pinot by linkedin.

The class ServerQueryExecutorV1Impl defines the method processQuery.

/**
 * Processes the given query request: prunes segments, builds and executes the
 * inter-segment query plan, and returns the instance response as a DataTable.
 * <p>On failure, a DataTable carrying the exception is returned instead of
 * throwing, so the caller always receives a response.
 *
 * @param queryRequest query request holding the instance request and timer context.
 * @param executorService executor used to run the query plan.
 * @return instance response; contains an exception entry (never throws) on error.
 */
@Override
public DataTable processQuery(final QueryRequest queryRequest, ExecutorService executorService) {
    TimerContext timerContext = queryRequest.getTimerContext();
    // The scheduler-wait phase was started upstream; close it out here if present.
    TimerContext.Timer schedulerWaitTimer = timerContext.getPhaseTimer(ServerQueryPhase.SCHEDULER_WAIT);
    if (schedulerWaitTimer != null) {
        schedulerWaitTimer.stopAndRecord();
    }
    TimerContext.Timer queryProcessingTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.QUERY_PROCESSING);
    DataTable dataTable;
    List<SegmentDataManager> queryableSegmentDataManagerList = null;
    InstanceRequest instanceRequest = queryRequest.getInstanceRequest();
    final long requestId = instanceRequest.getRequestId();
    try {
        TraceContext.register(instanceRequest);
        final BrokerRequest brokerRequest = instanceRequest.getQuery();
        LOGGER.debug("Incoming query is : {}", brokerRequest);
        TimerContext.Timer segmentPruneTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.SEGMENT_PRUNING);
        final String tableName = instanceRequest.getQuery().getQuerySource().getTableName();
        TableDataManager tableDataManager = _instanceDataManager.getTableDataManager(tableName);
        // Acquired segments are released in the finally block below.
        queryableSegmentDataManagerList = acquireQueryableSegments(tableDataManager, instanceRequest);
        long totalRawDocs = pruneSegments(tableDataManager, queryableSegmentDataManagerList, instanceRequest.getQuery());
        segmentPruneTimer.stopAndRecord();
        int numSegmentsMatched = queryableSegmentDataManagerList.size();
        queryRequest.setSegmentCountAfterPruning(numSegmentsMatched);
        LOGGER.debug("Matched {} segments", numSegmentsMatched);
        if (numSegmentsMatched == 0) {
            // Nothing to scan: short-circuit with an empty table that still reports total docs.
            DataTable emptyDataTable = DataTableBuilder.buildEmptyDataTable(brokerRequest);
            emptyDataTable.getMetadata().put(DataTable.TOTAL_DOCS_METADATA_KEY, String.valueOf(totalRawDocs));
            return emptyDataTable;
        }
        TimerContext.Timer planBuildTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.BUILD_QUERY_PLAN);
        final Plan globalQueryPlan = _planMaker.makeInterSegmentPlan(queryableSegmentDataManagerList, brokerRequest, executorService, getResourceTimeOut(instanceRequest.getQuery()));
        planBuildTimer.stopAndRecord();
        if (_printQueryPlan) {
            LOGGER.debug("***************************** Query Plan for Request {} ***********************************", instanceRequest.getRequestId());
            globalQueryPlan.print();
            LOGGER.debug("*********************************** End Query Plan ***********************************");
        }
        TimerContext.Timer planExecTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.QUERY_PLAN_EXECUTION);
        globalQueryPlan.execute();
        planExecTimer.stopAndRecord();
        dataTable = globalQueryPlan.getInstanceResponse();
        Map<String, String> dataTableMetadata = dataTable.getMetadata();
        queryProcessingTimer.stopAndRecord();
        // Log in milliseconds for consistency with the error-path log and the metadata below.
        LOGGER.debug("Searching Instance for Request Id - {}, browse took: {}", instanceRequest.getRequestId(), queryProcessingTimer.getDurationMs());
        LOGGER.debug("InstanceResponse for Request Id - {} : {}", instanceRequest.getRequestId(), dataTable.toString());
        dataTableMetadata.put(DataTable.TIME_USED_MS_METADATA_KEY, Long.toString(queryProcessingTimer.getDurationMs()));
        dataTableMetadata.put(DataTable.REQUEST_ID_METADATA_KEY, Long.toString(instanceRequest.getRequestId()));
        dataTableMetadata.put(DataTable.TRACE_INFO_METADATA_KEY, TraceContext.getTraceInfoOfRequestId(instanceRequest.getRequestId()));
        // Update the total docs in the metadata based on un-pruned segments.
        dataTableMetadata.put(DataTable.TOTAL_DOCS_METADATA_KEY, String.valueOf(totalRawDocs));
        return dataTable;
    } catch (Exception e) {
        _serverMetrics.addMeteredQueryValue(instanceRequest.getQuery(), ServerMeter.QUERY_EXECUTION_EXCEPTIONS, 1);
        LOGGER.error("Exception processing requestId {}", requestId, e);
        // Return a data table carrying the exception rather than propagating it.
        dataTable = new DataTableImplV2();
        Map<String, String> dataTableMetadata = dataTable.getMetadata();
        dataTable.addException(QueryException.getException(QueryException.QUERY_EXECUTION_ERROR, e));
        TraceContext.logException("ServerQueryExecutorV1Impl", "Exception occurs in processQuery");
        queryProcessingTimer.stopAndRecord();
        LOGGER.info("Searching Instance for Request Id - {}, browse took: {}, instanceResponse: {}", requestId, queryProcessingTimer.getDurationMs(), dataTable.toString());
        // Bug fix: the TIME_USED_MS key must carry milliseconds, not nanoseconds
        // (the success path above already uses getDurationMs()).
        dataTableMetadata.put(DataTable.TIME_USED_MS_METADATA_KEY, Long.toString(queryProcessingTimer.getDurationMs()));
        dataTableMetadata.put(DataTable.REQUEST_ID_METADATA_KEY, Long.toString(instanceRequest.getRequestId()));
        dataTableMetadata.put(DataTable.TRACE_INFO_METADATA_KEY, TraceContext.getTraceInfoOfRequestId(instanceRequest.getRequestId()));
        return dataTable;
    } finally {
        // Always release the acquired segments and unregister the trace, even on error.
        TableDataManager tableDataManager = _instanceDataManager.getTableDataManager(queryRequest.getTableName());
        if (tableDataManager != null && queryableSegmentDataManagerList != null) {
            for (SegmentDataManager segmentDataManager : queryableSegmentDataManagerList) {
                tableDataManager.releaseSegment(segmentDataManager);
            }
        }
        TraceContext.unregister(instanceRequest);
    }
}
Also used : DataTable(com.linkedin.pinot.common.utils.DataTable) DataTableImplV2(com.linkedin.pinot.core.common.datatable.DataTableImplV2) Plan(com.linkedin.pinot.core.plan.Plan) QueryException(com.linkedin.pinot.common.exception.QueryException) ConfigurationException(org.apache.commons.configuration.ConfigurationException) SegmentDataManager(com.linkedin.pinot.core.data.manager.offline.SegmentDataManager) TimerContext(com.linkedin.pinot.common.query.context.TimerContext) TableDataManager(com.linkedin.pinot.core.data.manager.offline.TableDataManager) BrokerRequest(com.linkedin.pinot.common.request.BrokerRequest) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) InstanceRequest(com.linkedin.pinot.common.request.InstanceRequest)

Aggregations

BrokerRequest (com.linkedin.pinot.common.request.BrokerRequest)2 DataTable (com.linkedin.pinot.common.utils.DataTable)2 Plan (com.linkedin.pinot.core.plan.Plan)2 QueryException (com.linkedin.pinot.common.exception.QueryException)1 TimerContext (com.linkedin.pinot.common.query.context.TimerContext)1 InstanceRequest (com.linkedin.pinot.common.request.InstanceRequest)1 ServerInstance (com.linkedin.pinot.common.response.ServerInstance)1 DataTableImplV2 (com.linkedin.pinot.core.common.datatable.DataTableImplV2)1 SegmentDataManager (com.linkedin.pinot.core.data.manager.offline.SegmentDataManager)1 TableDataManager (com.linkedin.pinot.core.data.manager.offline.TableDataManager)1 BrokerReduceService (com.linkedin.pinot.core.query.reduce.BrokerReduceService)1 HashMap (java.util.HashMap)1 Map (java.util.Map)1 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)1 ConfigurationException (org.apache.commons.configuration.ConfigurationException)1