Use of com.linkedin.pinot.core.plan.Plan in project pinot by linkedin.
In class BaseQueriesTest, the method getBrokerResponseForQuery:
/**
* Run query on multiple index segments.
* <p>Use this to test the whole flow from server to broker.
* <p>The result should be equivalent to querying 4 identical index segments.
*
* @param query PQL query.
* @return broker response.
*/
protected BrokerResponseNative getBrokerResponseForQuery(String query) {
  BrokerRequest brokerRequest = COMPILER.compileToBrokerRequest(query);

  // Server side.
  Plan plan = PLAN_MAKER.makeInterSegmentPlan(getSegmentDataManagers(), brokerRequest, EXECUTOR_SERVICE, 10_000);
  plan.execute();
  DataTable instanceResponse = plan.getInstanceResponse();

  // Broker side.
  BrokerReduceService brokerReduceService = new BrokerReduceService();
  Map<ServerInstance, DataTable> dataTableMap = new HashMap<>();
  dataTableMap.put(new ServerInstance("localhost:0000"), instanceResponse);
  dataTableMap.put(new ServerInstance("localhost:1111"), instanceResponse);
  return brokerReduceService.reduceOnDataTable(brokerRequest, dataTableMap);
}
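As a usage sketch only: the test class and assertion below are hypothetical (a concrete test extends BaseQueriesTest and loads its own index segments), and getNumDocsScanned() is assumed to be the relevant BrokerResponseNative accessor in this Pinot version.

import com.linkedin.pinot.common.response.broker.BrokerResponseNative;
import org.testng.Assert;
import org.testng.annotations.Test;

// Hypothetical test: assumes the BaseQueriesTest subclass has already created
// index segments so that getSegmentDataManagers() returns them.
public class ExampleCountQueriesTest extends BaseQueriesTest {

  @Test
  public void testCountStar() {
    // Runs the full flow: compile PQL, execute the inter-segment plan
    // (server side), then reduce the instance responses (broker side).
    BrokerResponseNative brokerResponse = getBrokerResponseForQuery("SELECT COUNT(*) FROM testTable");

    // Per the javadoc above, the reduced result should be equivalent to
    // querying 4 identical index segments (the same instance response is
    // credited to two simulated servers).
    Assert.assertTrue(brokerResponse.getNumDocsScanned() >= 0);
  }
}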
Use of com.linkedin.pinot.core.plan.Plan in project pinot by linkedin.
In class ServerQueryExecutorV1Impl, the method processQuery:
@Override
public DataTable processQuery(final QueryRequest queryRequest, ExecutorService executorService) {
  TimerContext timerContext = queryRequest.getTimerContext();
  TimerContext.Timer schedulerWaitTimer = timerContext.getPhaseTimer(ServerQueryPhase.SCHEDULER_WAIT);
  if (schedulerWaitTimer != null) {
    schedulerWaitTimer.stopAndRecord();
  }
  TimerContext.Timer queryProcessingTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.QUERY_PROCESSING);
  DataTable dataTable;
  List<SegmentDataManager> queryableSegmentDataManagerList = null;
  InstanceRequest instanceRequest = queryRequest.getInstanceRequest();
  final long requestId = instanceRequest.getRequestId();
  try {
    TraceContext.register(instanceRequest);
    final BrokerRequest brokerRequest = instanceRequest.getQuery();
    LOGGER.debug("Incoming query is : {}", brokerRequest);
    // Acquire the table's segments and prune those that cannot match the query.
    TimerContext.Timer segmentPruneTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.SEGMENT_PRUNING);
    final String tableName = instanceRequest.getQuery().getQuerySource().getTableName();
    TableDataManager tableDataManager = _instanceDataManager.getTableDataManager(tableName);
    queryableSegmentDataManagerList = acquireQueryableSegments(tableDataManager, instanceRequest);
    long totalRawDocs = pruneSegments(tableDataManager, queryableSegmentDataManagerList, instanceRequest.getQuery());
    segmentPruneTimer.stopAndRecord();
    int numSegmentsMatched = queryableSegmentDataManagerList.size();
    queryRequest.setSegmentCountAfterPruning(numSegmentsMatched);
    LOGGER.debug("Matched {} segments", numSegmentsMatched);
    if (numSegmentsMatched == 0) {
      // Nothing to execute; return an empty response that still reports total docs.
      DataTable emptyDataTable = DataTableBuilder.buildEmptyDataTable(brokerRequest);
      emptyDataTable.getMetadata().put(DataTable.TOTAL_DOCS_METADATA_KEY, String.valueOf(totalRawDocs));
      return emptyDataTable;
    }
    TimerContext.Timer planBuildTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.BUILD_QUERY_PLAN);
    final Plan globalQueryPlan =
        _planMaker.makeInterSegmentPlan(queryableSegmentDataManagerList, brokerRequest, executorService,
            getResourceTimeOut(instanceRequest.getQuery()));
    planBuildTimer.stopAndRecord();
    if (_printQueryPlan) {
      LOGGER.debug("***************************** Query Plan for Request {} ***********************************",
          instanceRequest.getRequestId());
      globalQueryPlan.print();
      LOGGER.debug("*********************************** End Query Plan ***********************************");
    }
    TimerContext.Timer planExecTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.QUERY_PLAN_EXECUTION);
    globalQueryPlan.execute();
    planExecTimer.stopAndRecord();
    dataTable = globalQueryPlan.getInstanceResponse();
    Map<String, String> dataTableMetadata = dataTable.getMetadata();
    queryProcessingTimer.stopAndRecord();
    LOGGER.debug("Searching Instance for Request Id - {}, browse took: {}", instanceRequest.getRequestId(),
        queryProcessingTimer.getDurationNs());
    LOGGER.debug("InstanceResponse for Request Id - {} : {}", instanceRequest.getRequestId(), dataTable.toString());
    dataTableMetadata.put(DataTable.TIME_USED_MS_METADATA_KEY, Long.toString(queryProcessingTimer.getDurationMs()));
    dataTableMetadata.put(DataTable.REQUEST_ID_METADATA_KEY, Long.toString(instanceRequest.getRequestId()));
    dataTableMetadata.put(DataTable.TRACE_INFO_METADATA_KEY,
        TraceContext.getTraceInfoOfRequestId(instanceRequest.getRequestId()));
    // Update the total docs in the metadata based on un-pruned segments.
    dataTableMetadata.put(DataTable.TOTAL_DOCS_METADATA_KEY, String.valueOf(totalRawDocs));
    return dataTable;
  } catch (Exception e) {
    _serverMetrics.addMeteredQueryValue(instanceRequest.getQuery(), ServerMeter.QUERY_EXECUTION_EXCEPTIONS, 1);
    LOGGER.error("Exception processing requestId {}", requestId, e);
    // Return a data table that carries the exception instead of results.
    dataTable = new DataTableImplV2();
    Map<String, String> dataTableMetadata = dataTable.getMetadata();
    dataTable.addException(QueryException.getException(QueryException.QUERY_EXECUTION_ERROR, e));
    TraceContext.logException("ServerQueryExecutorV1Impl", "Exception occurred in processQuery");
    queryProcessingTimer.stopAndRecord();
    LOGGER.info("Searching Instance for Request Id - {}, browse took: {}, instanceResponse: {}", requestId,
        queryProcessingTimer.getDurationMs(), dataTable.toString());
    // Elapsed time in milliseconds, matching the TIME_USED_MS metadata key.
    dataTableMetadata.put(DataTable.TIME_USED_MS_METADATA_KEY, Long.toString(queryProcessingTimer.getDurationMs()));
    dataTableMetadata.put(DataTable.REQUEST_ID_METADATA_KEY, Long.toString(instanceRequest.getRequestId()));
    dataTableMetadata.put(DataTable.TRACE_INFO_METADATA_KEY,
        TraceContext.getTraceInfoOfRequestId(instanceRequest.getRequestId()));
    return dataTable;
  } finally {
    // Always release acquired segments and unregister the trace, even on failure.
    TableDataManager tableDataManager = _instanceDataManager.getTableDataManager(queryRequest.getTableName());
    if (tableDataManager != null && queryableSegmentDataManagerList != null) {
      for (SegmentDataManager segmentDataManager : queryableSegmentDataManagerList) {
        tableDataManager.releaseSegment(segmentDataManager);
      }
    }
    TraceContext.unregister(instanceRequest);
  }
}
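For context, a minimal caller-side sketch follows, showing where processQuery() sits in the request path. Everything here is an illustrative assumption, not the project's actual scheduler code: the QueryDriverSketch class, the pool size, and the InstanceRequest/QueryRequest construction are hypothetical stand-ins for the server's real request wiring.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import com.linkedin.pinot.common.request.BrokerRequest;
import com.linkedin.pinot.common.request.InstanceRequest;
import com.linkedin.pinot.common.utils.DataTable;

// Hypothetical driver: assumes _queryExecutor is an initialized QueryExecutor
// (e.g. a ServerQueryExecutorV1Impl wired up with its instance data manager).
public final class QueryDriverSketch {

  private final QueryExecutor _queryExecutor;
  private final ExecutorService _workerPool = Executors.newFixedThreadPool(8);

  public QueryDriverSketch(QueryExecutor queryExecutor) {
    _queryExecutor = queryExecutor;
  }

  public DataTable run(long requestId, BrokerRequest brokerRequest) {
    // Assumed constructors: a real server deserializes the InstanceRequest from
    // the wire and wraps it in a QueryRequest that carries the per-phase timers
    // (SCHEDULER_WAIT, QUERY_PROCESSING, ...) read inside processQuery().
    InstanceRequest instanceRequest = new InstanceRequest(requestId, brokerRequest);
    QueryRequest queryRequest = new QueryRequest(instanceRequest);
    return _queryExecutor.processQuery(queryRequest, _workerPool);
  }
}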