Example 1 with TimerContext

Use of com.linkedin.pinot.common.query.context.TimerContext in project pinot by linkedin.

From class ServerQueryExecutorV1Impl, method processQuery. Each server-side execution phase (scheduler wait, segment pruning, query-plan building, plan execution) is timed through the request's TimerContext.

@Override
public DataTable processQuery(final QueryRequest queryRequest, ExecutorService executorService) {
    TimerContext timerContext = queryRequest.getTimerContext();
    TimerContext.Timer schedulerWaitTimer = timerContext.getPhaseTimer(ServerQueryPhase.SCHEDULER_WAIT);
    if (schedulerWaitTimer != null) {
        schedulerWaitTimer.stopAndRecord();
    }
    TimerContext.Timer queryProcessingTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.QUERY_PROCESSING);
    DataTable dataTable;
    List<SegmentDataManager> queryableSegmentDataManagerList = null;
    InstanceRequest instanceRequest = queryRequest.getInstanceRequest();
    final long requestId = instanceRequest.getRequestId();
    try {
        TraceContext.register(instanceRequest);
        final BrokerRequest brokerRequest = instanceRequest.getQuery();
        LOGGER.debug("Incoming query is : {}", brokerRequest);
        TimerContext.Timer segmentPruneTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.SEGMENT_PRUNING);
        final String tableName = instanceRequest.getQuery().getQuerySource().getTableName();
        TableDataManager tableDataManager = _instanceDataManager.getTableDataManager(tableName);
        queryableSegmentDataManagerList = acquireQueryableSegments(tableDataManager, instanceRequest);
        long totalRawDocs = pruneSegments(tableDataManager, queryableSegmentDataManagerList, instanceRequest.getQuery());
        segmentPruneTimer.stopAndRecord();
        int numSegmentsMatched = queryableSegmentDataManagerList.size();
        queryRequest.setSegmentCountAfterPruning(numSegmentsMatched);
        LOGGER.debug("Matched {} segments", numSegmentsMatched);
        if (numSegmentsMatched == 0) {
            DataTable emptyDataTable = DataTableBuilder.buildEmptyDataTable(brokerRequest);
            emptyDataTable.getMetadata().put(DataTable.TOTAL_DOCS_METADATA_KEY, String.valueOf(totalRawDocs));
            return emptyDataTable;
        }
        TimerContext.Timer planBuildTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.BUILD_QUERY_PLAN);
        final Plan globalQueryPlan = _planMaker.makeInterSegmentPlan(queryableSegmentDataManagerList, brokerRequest, executorService, getResourceTimeOut(instanceRequest.getQuery()));
        planBuildTimer.stopAndRecord();
        if (_printQueryPlan) {
            LOGGER.debug("***************************** Query Plan for Request {} ***********************************", instanceRequest.getRequestId());
            globalQueryPlan.print();
            LOGGER.debug("*********************************** End Query Plan ***********************************");
        }
        TimerContext.Timer planExecTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.QUERY_PLAN_EXECUTION);
        globalQueryPlan.execute();
        planExecTimer.stopAndRecord();
        dataTable = globalQueryPlan.getInstanceResponse();
        Map<String, String> dataTableMetadata = dataTable.getMetadata();
        queryProcessingTimer.stopAndRecord();
        LOGGER.debug("Searching Instance for Request Id - {}, browse took: {}", instanceRequest.getRequestId(), queryProcessingTimer.getDurationNs());
        LOGGER.debug("InstanceResponse for Request Id - {} : {}", instanceRequest.getRequestId(), dataTable.toString());
        dataTableMetadata.put(DataTable.TIME_USED_MS_METADATA_KEY, Long.toString(queryProcessingTimer.getDurationMs()));
        dataTableMetadata.put(DataTable.REQUEST_ID_METADATA_KEY, Long.toString(instanceRequest.getRequestId()));
        dataTableMetadata.put(DataTable.TRACE_INFO_METADATA_KEY, TraceContext.getTraceInfoOfRequestId(instanceRequest.getRequestId()));
        // Update the total docs in the metadata based on un-pruned segments.
        dataTableMetadata.put(DataTable.TOTAL_DOCS_METADATA_KEY, String.valueOf(totalRawDocs));
        return dataTable;
    } catch (Exception e) {
        _serverMetrics.addMeteredQueryValue(instanceRequest.getQuery(), ServerMeter.QUERY_EXECUTION_EXCEPTIONS, 1);
        LOGGER.error("Exception processing requestId {}", requestId, e);
        dataTable = new DataTableImplV2();
        Map<String, String> dataTableMetadata = dataTable.getMetadata();
        dataTable.addException(QueryException.getException(QueryException.QUERY_EXECUTION_ERROR, e));
        TraceContext.logException("ServerQueryExecutorV1Impl", "Exception occurs in processQuery");
        queryProcessingTimer.stopAndRecord();
        LOGGER.info("Searching Instance for Request Id - {}, browse took: {}, instanceResponse: {}", requestId, queryProcessingTimer.getDurationMs(), dataTable.toString());
        dataTableMetadata.put(DataTable.TIME_USED_MS_METADATA_KEY, Long.toString(queryProcessingTimer.getDurationNs()));
        dataTableMetadata.put(DataTable.REQUEST_ID_METADATA_KEY, Long.toString(instanceRequest.getRequestId()));
        dataTableMetadata.put(DataTable.TRACE_INFO_METADATA_KEY, TraceContext.getTraceInfoOfRequestId(instanceRequest.getRequestId()));
        return dataTable;
    } finally {
        TableDataManager tableDataManager = _instanceDataManager.getTableDataManager(queryRequest.getTableName());
        if (tableDataManager != null && queryableSegmentDataManagerList != null) {
            for (SegmentDataManager segmentDataManager : queryableSegmentDataManagerList) {
                tableDataManager.releaseSegment(segmentDataManager);
            }
        }
        TraceContext.unregister(instanceRequest);
    }
}
Also used : DataTable(com.linkedin.pinot.common.utils.DataTable) DataTableImplV2(com.linkedin.pinot.core.common.datatable.DataTableImplV2) Plan(com.linkedin.pinot.core.plan.Plan) QueryException(com.linkedin.pinot.common.exception.QueryException) ConfigurationException(org.apache.commons.configuration.ConfigurationException) SegmentDataManager(com.linkedin.pinot.core.data.manager.offline.SegmentDataManager) TimerContext(com.linkedin.pinot.common.query.context.TimerContext) TableDataManager(com.linkedin.pinot.core.data.manager.offline.TableDataManager) BrokerRequest(com.linkedin.pinot.common.request.BrokerRequest) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) InstanceRequest(com.linkedin.pinot.common.request.InstanceRequest)
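
Taken together, the timers in processQuery follow one pattern: obtain a named phase timer from the request's TimerContext, run the phase's work, then call stopAndRecord() to publish the measured duration. Below is a minimal sketch of that pattern, assuming only the TimerContext methods visible in the snippet; the timePhase helper and the Runnable phase body are illustrative, not part of Pinot.

import com.linkedin.pinot.common.query.QueryRequest;
import com.linkedin.pinot.common.query.context.TimerContext;

// Minimal sketch of the phase-timer pattern used in processQuery.
// ServerQueryPhase's import is omitted; its package does not appear in the
// usage lists above. timePhase is a hypothetical helper, not a Pinot method.
final class PhaseTimerSketch {

    static long timePhase(QueryRequest queryRequest, ServerQueryPhase phase, Runnable phaseBody) {
        TimerContext timerContext = queryRequest.getTimerContext();
        TimerContext.Timer timer = timerContext.startNewPhaseTimer(phase);
        try {
            phaseBody.run(); // the phase's actual work, e.g. segment pruning
        } finally {
            // record in finally so a throwing phase body still publishes its duration
            timer.stopAndRecord();
        }
        return timer.getDurationMs();
    }
}

Wrapping stopAndRecord() in finally is a small hardening over the snippet above, where the catch block has to stop queryProcessingTimer explicitly.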

Example 2 with TimerContext

Use of com.linkedin.pinot.common.query.context.TimerContext in project pinot by linkedin.

From class ScheduledRequestHandler, method serializeDataTable. Response serialization is timed as its own phase, and the TOTAL_QUERY_TIME phase is recorded retroactively from the query's arrival timestamp.

static byte[] serializeDataTable(QueryRequest queryRequest, DataTable instanceResponse) {
    byte[] responseByte;
    InstanceRequest instanceRequest = queryRequest.getInstanceRequest();
    ServerMetrics metrics = queryRequest.getServerMetrics();
    TimerContext timerContext = queryRequest.getTimerContext();
    timerContext.startNewPhaseTimer(ServerQueryPhase.RESPONSE_SERIALIZATION);
    long requestId = instanceRequest != null ? instanceRequest.getRequestId() : -1;
    String brokerId = instanceRequest != null ? instanceRequest.getBrokerId() : "null";
    try {
        if (instanceResponse == null) {
            LOGGER.warn("Instance response is null for requestId: {}, brokerId: {}", requestId, brokerId);
            responseByte = new byte[0];
        } else {
            responseByte = instanceResponse.toBytes();
        }
    } catch (Exception e) {
        metrics.addMeteredGlobalValue(ServerMeter.RESPONSE_SERIALIZATION_EXCEPTIONS, 1);
        LOGGER.error("Got exception while serializing response for requestId: {}, brokerId: {}", requestId, brokerId, e);
        responseByte = null;
    }
    timerContext.getPhaseTimer(ServerQueryPhase.RESPONSE_SERIALIZATION).stopAndRecord();
    timerContext.startNewPhaseTimerAtNs(ServerQueryPhase.TOTAL_QUERY_TIME, timerContext.getQueryArrivalTimeNs());
    timerContext.getPhaseTimer(ServerQueryPhase.TOTAL_QUERY_TIME).stopAndRecord();
    return responseByte;
}
Also used : TimerContext(com.linkedin.pinot.common.query.context.TimerContext) ServerMetrics(com.linkedin.pinot.common.metrics.ServerMetrics) InstanceRequest(com.linkedin.pinot.common.request.InstanceRequest) QueryException(com.linkedin.pinot.common.exception.QueryException)
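
The retroactive start is the notable detail here: startNewPhaseTimerAtNs lets the TOTAL_QUERY_TIME phase begin at the recorded arrival timestamp rather than at the call site, so the recorded duration spans the query's entire server-side lifetime. A condensed sketch of just that measurement, using only methods visible in these snippets; totalQueryTimeMs is a hypothetical helper name, and the ServerQueryPhase import is again omitted because its package is not shown above.

import com.linkedin.pinot.common.query.context.TimerContext;

// Sketch: record TOTAL_QUERY_TIME from a previously set arrival timestamp.
final class TotalTimeSketch {

    static long totalQueryTimeMs(TimerContext timerContext) {
        // start the phase retroactively at the query's arrival time
        timerContext.startNewPhaseTimerAtNs(ServerQueryPhase.TOTAL_QUERY_TIME,
                timerContext.getQueryArrivalTimeNs());
        // stopping now makes the recorded duration span arrival to present
        timerContext.getPhaseTimer(ServerQueryPhase.TOTAL_QUERY_TIME).stopAndRecord();
        return timerContext.getPhaseDurationMs(ServerQueryPhase.TOTAL_QUERY_TIME);
    }
}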

Example 3 with TimerContext

Use of com.linkedin.pinot.common.query.context.TimerContext in project pinot by linkedin.

From class ScheduledRequestHandler, method processRequest. The arrival timestamp is captured up front, request deserialization is recorded against it, and Guava future combinators ensure the broker always receives a serialized response even if query execution throws (see the sketch after this example).

@Override
public ListenableFuture<byte[]> processRequest(ChannelHandlerContext channelHandlerContext, ByteBuf request) {
    final long queryStartTimeNs = System.nanoTime();
    serverMetrics.addMeteredGlobalValue(ServerMeter.QUERIES, 1);
    LOGGER.debug("Processing request : {}", request);
    byte[] byteArray = new byte[request.readableBytes()];
    request.readBytes(byteArray);
    SerDe serDe = new SerDe(new TCompactProtocol.Factory());
    final InstanceRequest instanceRequest = new InstanceRequest();
    if (!serDe.deserialize(instanceRequest, byteArray)) {
        LOGGER.error("Failed to deserialize query request from broker ip: {}", ((InetSocketAddress) channelHandlerContext.channel().remoteAddress()).getAddress().getHostAddress());
        DataTable result = new DataTableImplV2();
        result.addException(QueryException.INTERNAL_ERROR);
        serverMetrics.addMeteredGlobalValue(ServerMeter.REQUEST_DESERIALIZATION_EXCEPTIONS, 1);
        QueryRequest queryRequest = new QueryRequest(null, serverMetrics);
        queryRequest.getTimerContext().setQueryArrivalTimeNs(queryStartTimeNs);
        return Futures.immediateFuture(serializeDataTable(queryRequest, result));
    }
    final QueryRequest queryRequest = new QueryRequest(instanceRequest, serverMetrics);
    final TimerContext timerContext = queryRequest.getTimerContext();
    timerContext.setQueryArrivalTimeNs(queryStartTimeNs);
    TimerContext.Timer deserializationTimer = timerContext.startNewPhaseTimerAtNs(ServerQueryPhase.REQUEST_DESERIALIZATION, queryStartTimeNs);
    deserializationTimer.stopAndRecord();
    LOGGER.debug("Processing requestId:{},request={}", instanceRequest.getRequestId(), instanceRequest);
    ListenableFuture<DataTable> queryTask = queryScheduler.submit(queryRequest);
    // the following future provides a default response in case of uncaught
    // exceptions from query processing
    ListenableFuture<DataTable> queryResponse = Futures.catching(queryTask, Throwable.class, new Function<Throwable, DataTable>() {

        @Nullable
        @Override
        public DataTable apply(@Nullable Throwable input) {
            // called only if queryTask fails with an unhandled exception
            serverMetrics.addMeteredGlobalValue(ServerMeter.UNCAUGHT_EXCEPTIONS, 1);
            DataTable result = new DataTableImplV2();
            result.addException(QueryException.INTERNAL_ERROR);
            return result;
        }
    });
    // transform the DataTable to serialized byte[] to send back to broker
    ListenableFuture<byte[]> serializedQueryResponse = Futures.transform(queryResponse, new Function<DataTable, byte[]>() {

        @Nullable
        @Override
        public byte[] apply(@Nullable DataTable instanceResponse) {
            byte[] responseData = serializeDataTable(queryRequest, instanceResponse);
            LOGGER.info("Processed requestId {},reqSegments={},prunedToSegmentCount={},deserTimeMs={},planTimeMs={},planExecTimeMs={},totalExecMs={},serTimeMs={}TotalTimeMs={},broker={}", queryRequest.getInstanceRequest().getRequestId(), queryRequest.getInstanceRequest().getSearchSegments().size(), queryRequest.getSegmentCountAfterPruning(), timerContext.getPhaseDurationMs(ServerQueryPhase.REQUEST_DESERIALIZATION), timerContext.getPhaseDurationMs(ServerQueryPhase.BUILD_QUERY_PLAN), timerContext.getPhaseDurationMs(ServerQueryPhase.QUERY_PLAN_EXECUTION), timerContext.getPhaseDurationMs(ServerQueryPhase.QUERY_PROCESSING), timerContext.getPhaseDurationMs(ServerQueryPhase.RESPONSE_SERIALIZATION), timerContext.getPhaseDurationMs(ServerQueryPhase.TOTAL_QUERY_TIME), queryRequest.getBrokerId());
            return responseData;
        }
    });
    return serializedQueryResponse;
}
Also used : SerDe(com.linkedin.pinot.serde.SerDe) DataTable(com.linkedin.pinot.common.utils.DataTable) QueryRequest(com.linkedin.pinot.common.query.QueryRequest) InetSocketAddress(java.net.InetSocketAddress) DataTableImplV2(com.linkedin.pinot.core.common.datatable.DataTableImplV2) TCompactProtocol(org.apache.thrift.protocol.TCompactProtocol) TimerContext(com.linkedin.pinot.common.query.context.TimerContext) InstanceRequest(com.linkedin.pinot.common.request.InstanceRequest) Nullable(javax.annotation.Nullable)
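
The two future combinators at the end of processRequest are what make the response unconditional: Futures.catching replaces any uncaught Throwable from the scheduled query with a fallback DataTable carrying INTERNAL_ERROR, and Futures.transform then serializes whichever DataTable results. The same chain, extracted into a standalone helper as a sketch; toSerializedResponse is a hypothetical name, serializeDataTable is the package-private method from Example 2 (assumed to be in the same package), and these are the older Guava overloads used in the snippet (newer Guava versions require an explicit Executor argument).

import com.google.common.base.Function;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.linkedin.pinot.common.exception.QueryException;
import com.linkedin.pinot.common.query.QueryRequest;
import com.linkedin.pinot.common.utils.DataTable;
import com.linkedin.pinot.core.common.datatable.DataTableImplV2;

// Sketch of the catching-then-transform chain from processRequest.
final class ResponseChainSketch {

    static ListenableFuture<byte[]> toSerializedResponse(
            final QueryRequest queryRequest, ListenableFuture<DataTable> queryTask) {
        // any uncaught Throwable becomes a fallback DataTable carrying the error
        ListenableFuture<DataTable> safeResponse = Futures.catching(queryTask, Throwable.class,
                new Function<Throwable, DataTable>() {
                    @Override
                    public DataTable apply(Throwable input) {
                        DataTable fallback = new DataTableImplV2();
                        fallback.addException(QueryException.INTERNAL_ERROR);
                        return fallback;
                    }
                });
        // serialize whichever DataTable the chain produced, using the
        // serializeDataTable helper shown in Example 2
        return Futures.transform(safeResponse, new Function<DataTable, byte[]>() {
            @Override
            public byte[] apply(DataTable instanceResponse) {
                return ScheduledRequestHandler.serializeDataTable(queryRequest, instanceResponse);
            }
        });
    }
}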

Aggregations

TimerContext (com.linkedin.pinot.common.query.context.TimerContext): 3 usages
InstanceRequest (com.linkedin.pinot.common.request.InstanceRequest): 3 usages
QueryException (com.linkedin.pinot.common.exception.QueryException): 2 usages
DataTable (com.linkedin.pinot.common.utils.DataTable): 2 usages
DataTableImplV2 (com.linkedin.pinot.core.common.datatable.DataTableImplV2): 2 usages
ServerMetrics (com.linkedin.pinot.common.metrics.ServerMetrics): 1 usage
QueryRequest (com.linkedin.pinot.common.query.QueryRequest): 1 usage
BrokerRequest (com.linkedin.pinot.common.request.BrokerRequest): 1 usage
SegmentDataManager (com.linkedin.pinot.core.data.manager.offline.SegmentDataManager): 1 usage
TableDataManager (com.linkedin.pinot.core.data.manager.offline.TableDataManager): 1 usage
Plan (com.linkedin.pinot.core.plan.Plan): 1 usage
SerDe (com.linkedin.pinot.serde.SerDe): 1 usage
InetSocketAddress (java.net.InetSocketAddress): 1 usage
Map (java.util.Map): 1 usage
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 1 usage
Nullable (javax.annotation.Nullable): 1 usage
ConfigurationException (org.apache.commons.configuration.ConfigurationException): 1 usage
TCompactProtocol (org.apache.thrift.protocol.TCompactProtocol): 1 usage