Example 1 with DataTableImplV2

Use of com.linkedin.pinot.core.common.datatable.DataTableImplV2 in project pinot by linkedin.

In class ServerQueryExecutorV1Impl, method processQuery:

@Override
public DataTable processQuery(final QueryRequest queryRequest, ExecutorService executorService) {
    TimerContext timerContext = queryRequest.getTimerContext();
    TimerContext.Timer schedulerWaitTimer = timerContext.getPhaseTimer(ServerQueryPhase.SCHEDULER_WAIT);
    if (schedulerWaitTimer != null) {
        schedulerWaitTimer.stopAndRecord();
    }
    TimerContext.Timer queryProcessingTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.QUERY_PROCESSING);
    DataTable dataTable;
    List<SegmentDataManager> queryableSegmentDataManagerList = null;
    InstanceRequest instanceRequest = queryRequest.getInstanceRequest();
    final long requestId = instanceRequest.getRequestId();
    try {
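        // Register the request for tracing so per-request trace info can be attached to the response metadata.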
        TraceContext.register(instanceRequest);
        final BrokerRequest brokerRequest = instanceRequest.getQuery();
        LOGGER.debug("Incoming query: {}", brokerRequest);
        TimerContext.Timer segmentPruneTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.SEGMENT_PRUNING);
        final String tableName = instanceRequest.getQuery().getQuerySource().getTableName();
        TableDataManager tableDataManager = _instanceDataManager.getTableDataManager(tableName);
        queryableSegmentDataManagerList = acquireQueryableSegments(tableDataManager, instanceRequest);
        long totalRawDocs = pruneSegments(tableDataManager, queryableSegmentDataManagerList, instanceRequest.getQuery());
        segmentPruneTimer.stopAndRecord();
        int numSegmentsMatched = queryableSegmentDataManagerList.size();
        queryRequest.setSegmentCountAfterPruning(numSegmentsMatched);
        LOGGER.debug("Matched {} segments", numSegmentsMatched);
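        // No segments survived pruning: short-circuit with an empty data table that still reports total docs.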
        if (numSegmentsMatched == 0) {
            DataTable emptyDataTable = DataTableBuilder.buildEmptyDataTable(brokerRequest);
            emptyDataTable.getMetadata().put(DataTable.TOTAL_DOCS_METADATA_KEY, String.valueOf(totalRawDocs));
            return emptyDataTable;
        }
        TimerContext.Timer planBuildTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.BUILD_QUERY_PLAN);
        final Plan globalQueryPlan = _planMaker.makeInterSegmentPlan(queryableSegmentDataManagerList, brokerRequest, executorService, getResourceTimeOut(instanceRequest.getQuery()));
        planBuildTimer.stopAndRecord();
        if (_printQueryPlan) {
            LOGGER.debug("***************************** Query Plan for Request {} ***********************************", instanceRequest.getRequestId());
            globalQueryPlan.print();
            LOGGER.debug("*********************************** End Query Plan ***********************************");
        }
        TimerContext.Timer planExecTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.QUERY_PLAN_EXECUTION);
        globalQueryPlan.execute();
        planExecTimer.stopAndRecord();
        dataTable = globalQueryPlan.getInstanceResponse();
        Map<String, String> dataTableMetadata = dataTable.getMetadata();
        queryProcessingTimer.stopAndRecord();
        LOGGER.debug("Searching Instance for Request Id - {}, query processing took: {} ns", instanceRequest.getRequestId(), queryProcessingTimer.getDurationNs());
        LOGGER.debug("InstanceResponse for Request Id - {} : {}", instanceRequest.getRequestId(), dataTable.toString());
        dataTableMetadata.put(DataTable.TIME_USED_MS_METADATA_KEY, Long.toString(queryProcessingTimer.getDurationMs()));
        dataTableMetadata.put(DataTable.REQUEST_ID_METADATA_KEY, Long.toString(instanceRequest.getRequestId()));
        dataTableMetadata.put(DataTable.TRACE_INFO_METADATA_KEY, TraceContext.getTraceInfoOfRequestId(instanceRequest.getRequestId()));
        // Update the total docs in the metadata based on un-pruned segments.
        dataTableMetadata.put(DataTable.TOTAL_DOCS_METADATA_KEY, String.valueOf(totalRawDocs));
        return dataTable;
    } catch (Exception e) {
        _serverMetrics.addMeteredQueryValue(instanceRequest.getQuery(), ServerMeter.QUERY_EXECUTION_EXCEPTIONS, 1);
        LOGGER.error("Exception processing requestId {}", requestId, e);
        dataTable = new DataTableImplV2();
        Map<String, String> dataTableMetadata = dataTable.getMetadata();
        dataTable.addException(QueryException.getException(QueryException.QUERY_EXECUTION_ERROR, e));
        TraceContext.logException("ServerQueryExecutorV1Impl", "Exception occurred in processQuery");
        queryProcessingTimer.stopAndRecord();
        LOGGER.info("Searching Instance for Request Id - {}, query processing took: {} ms, instanceResponse: {}", requestId, queryProcessingTimer.getDurationMs(), dataTable.toString());
        dataTableMetadata.put(DataTable.TIME_USED_MS_METADATA_KEY, Long.toString(queryProcessingTimer.getDurationMs()));
        dataTableMetadata.put(DataTable.REQUEST_ID_METADATA_KEY, Long.toString(instanceRequest.getRequestId()));
        dataTableMetadata.put(DataTable.TRACE_INFO_METADATA_KEY, TraceContext.getTraceInfoOfRequestId(instanceRequest.getRequestId()));
        return dataTable;
    } finally {
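        // Always release the acquired segments and unregister the trace, even when processing failed.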
        TableDataManager tableDataManager = _instanceDataManager.getTableDataManager(queryRequest.getTableName());
        if (tableDataManager != null && queryableSegmentDataManagerList != null) {
            for (SegmentDataManager segmentDataManager : queryableSegmentDataManagerList) {
                tableDataManager.releaseSegment(segmentDataManager);
            }
        }
        TraceContext.unregister(instanceRequest);
    }
}
Also used: DataTable (com.linkedin.pinot.common.utils.DataTable), DataTableImplV2 (com.linkedin.pinot.core.common.datatable.DataTableImplV2), Plan (com.linkedin.pinot.core.plan.Plan), QueryException (com.linkedin.pinot.common.exception.QueryException), ConfigurationException (org.apache.commons.configuration.ConfigurationException), SegmentDataManager (com.linkedin.pinot.core.data.manager.offline.SegmentDataManager), TimerContext (com.linkedin.pinot.common.query.context.TimerContext), TableDataManager (com.linkedin.pinot.core.data.manager.offline.TableDataManager), BrokerRequest (com.linkedin.pinot.common.request.BrokerRequest), Map (java.util.Map), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), InstanceRequest (com.linkedin.pinot.common.request.InstanceRequest)
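
The catch block above shows the error-response pattern that recurs throughout these examples: construct an empty DataTableImplV2, attach the exception, and populate the same metadata keys the success path writes, so the broker can merge the response like any other. A minimal sketch of that pattern, using only calls that appear in the examples; ErrorResponses and buildErrorResponse are illustrative names, not Pinot API:

import com.linkedin.pinot.common.exception.QueryException;
import com.linkedin.pinot.common.utils.DataTable;
import com.linkedin.pinot.core.common.datatable.DataTableImplV2;

import java.util.Map;

// Sketch only: ErrorResponses/buildErrorResponse are hypothetical names, not Pinot API.
public final class ErrorResponses {

    public static DataTable buildErrorResponse(long requestId, long timeUsedMs, Exception cause) {
        DataTable dataTable = new DataTableImplV2();
        // Attach the processing error so the broker can surface it to the client.
        dataTable.addException(QueryException.getException(QueryException.QUERY_EXECUTION_ERROR, cause));
        // Fill the same metadata keys the success path writes.
        Map<String, String> metadata = dataTable.getMetadata();
        metadata.put(DataTable.TIME_USED_MS_METADATA_KEY, Long.toString(timeUsedMs));
        metadata.put(DataTable.REQUEST_ID_METADATA_KEY, Long.toString(requestId));
        return dataTable;
    }
}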

Example 2 with DataTableImplV2

Use of com.linkedin.pinot.core.common.datatable.DataTableImplV2 in project pinot by linkedin.

In class ScheduledRequestHandler, method processRequest:

@Override
public ListenableFuture<byte[]> processRequest(ChannelHandlerContext channelHandlerContext, ByteBuf request) {
    final long queryStartTimeNs = System.nanoTime();
    serverMetrics.addMeteredGlobalValue(ServerMeter.QUERIES, 1);
    LOGGER.debug("Processing request: {}", request);
    byte[] byteArray = new byte[request.readableBytes()];
    request.readBytes(byteArray);
    SerDe serDe = new SerDe(new TCompactProtocol.Factory());
    final InstanceRequest instanceRequest = new InstanceRequest();
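    // A request that fails to deserialize cannot be scheduled; reply immediately with an INTERNAL_ERROR data table.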
    if (!serDe.deserialize(instanceRequest, byteArray)) {
        LOGGER.error("Failed to deserialize query request from broker ip: {}", ((InetSocketAddress) channelHandlerContext.channel().remoteAddress()).getAddress().getHostAddress());
        DataTable result = new DataTableImplV2();
        result.addException(QueryException.INTERNAL_ERROR);
        serverMetrics.addMeteredGlobalValue(ServerMeter.REQUEST_DESERIALIZATION_EXCEPTIONS, 1);
        QueryRequest queryRequest = new QueryRequest(null, serverMetrics);
        queryRequest.getTimerContext().setQueryArrivalTimeNs(queryStartTimeNs);
        return Futures.immediateFuture(serializeDataTable(queryRequest, result));
    }
    final QueryRequest queryRequest = new QueryRequest(instanceRequest, serverMetrics);
    final TimerContext timerContext = queryRequest.getTimerContext();
    timerContext.setQueryArrivalTimeNs(queryStartTimeNs);
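    // Record the deserialization phase retroactively, measured from the query arrival timestamp.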
    TimerContext.Timer deserializationTimer = timerContext.startNewPhaseTimerAtNs(ServerQueryPhase.REQUEST_DESERIALIZATION, queryStartTimeNs);
    deserializationTimer.stopAndRecord();
    LOGGER.debug("Processing requestId:{},request={}", instanceRequest.getRequestId(), instanceRequest);
    ListenableFuture<DataTable> queryTask = queryScheduler.submit(queryRequest);
    // the following future provides a default response in case of uncaught
    // exceptions from query processing
    ListenableFuture<DataTable> queryResponse = Futures.catching(queryTask, Throwable.class, new Function<Throwable, DataTable>() {

        @Nullable
        @Override
        public DataTable apply(@Nullable Throwable input) {
            // this is called iff queryTask fails with unhandled exception
            serverMetrics.addMeteredGlobalValue(ServerMeter.UNCAUGHT_EXCEPTIONS, 1);
            DataTable result = new DataTableImplV2();
            result.addException(QueryException.INTERNAL_ERROR);
            return result;
        }
    });
    // transform the DataTable to serialized byte[] to send back to broker
    ListenableFuture<byte[]> serializedQueryResponse = Futures.transform(queryResponse, new Function<DataTable, byte[]>() {

        @Nullable
        @Override
        public byte[] apply(@Nullable DataTable instanceResponse) {
            byte[] responseData = serializeDataTable(queryRequest, instanceResponse);
            LOGGER.info("Processed requestId {},reqSegments={},prunedToSegmentCount={},deserTimeMs={},planTimeMs={},planExecTimeMs={},totalExecMs={},serTimeMs={},totalTimeMs={},broker={}",
                    queryRequest.getInstanceRequest().getRequestId(),
                    queryRequest.getInstanceRequest().getSearchSegments().size(),
                    queryRequest.getSegmentCountAfterPruning(),
                    timerContext.getPhaseDurationMs(ServerQueryPhase.REQUEST_DESERIALIZATION),
                    timerContext.getPhaseDurationMs(ServerQueryPhase.BUILD_QUERY_PLAN),
                    timerContext.getPhaseDurationMs(ServerQueryPhase.QUERY_PLAN_EXECUTION),
                    timerContext.getPhaseDurationMs(ServerQueryPhase.QUERY_PROCESSING),
                    timerContext.getPhaseDurationMs(ServerQueryPhase.RESPONSE_SERIALIZATION),
                    timerContext.getPhaseDurationMs(ServerQueryPhase.TOTAL_QUERY_TIME),
                    queryRequest.getBrokerId());
            return responseData;
        }
    });
    return serializedQueryResponse;
}
Also used: SerDe (com.linkedin.pinot.serde.SerDe), DataTable (com.linkedin.pinot.common.utils.DataTable), QueryRequest (com.linkedin.pinot.common.query.QueryRequest), InetSocketAddress (java.net.InetSocketAddress), DataTableImplV2 (com.linkedin.pinot.core.common.datatable.DataTableImplV2), TCompactProtocol (org.apache.thrift.protocol.TCompactProtocol), TimerContext (com.linkedin.pinot.common.query.context.TimerContext), InstanceRequest (com.linkedin.pinot.common.request.InstanceRequest), Nullable (javax.annotation.Nullable)
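
The Futures.catching call in processRequest is plain Guava: when the input future fails, the fallback function supplies a substitute value instead of propagating the exception. A stripped-down sketch of the same pattern, assuming a Guava version with the three-argument overload used above (newer releases require an explicit Executor argument); the task and fallback values are illustrative, not Pinot code:

import com.google.common.base.Function;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

// Sketch only: demonstrates the Futures.catching fallback in isolation.
public final class CatchingSketch {

    public static void main(String[] args) throws Exception {
        ListeningExecutorService pool =
                MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
        // A task that always fails, standing in for a query that throws during execution.
        ListenableFuture<String> task = pool.submit(new Callable<String>() {
            @Override
            public String call() throws Exception {
                throw new RuntimeException("simulated query failure");
            }
        });
        // If the task fails with any Throwable, substitute a default value.
        ListenableFuture<String> safe = Futures.catching(task, Throwable.class,
                new Function<Throwable, String>() {
                    @Override
                    public String apply(Throwable input) {
                        return "fallback response";
                    }
                });
        // Prints "fallback response" instead of rethrowing the RuntimeException.
        System.out.println(safe.get());
        pool.shutdown();
    }
}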

Example 3 with DataTableImplV2

Use of com.linkedin.pinot.core.common.datatable.DataTableImplV2 in project pinot by linkedin.

In class ScheduledRequestHandlerTest, method testQueryProcessingException:

@Test
public void testQueryProcessingException() throws Exception {
    ScheduledRequestHandler handler = new ScheduledRequestHandler(new QueryScheduler(queryExecutor) {

        @Override
        public ListenableFuture<DataTable> submit(QueryRequest queryRequest) {
            return queryWorkers.submit(new Callable<DataTable>() {

                @Override
                public DataTable call() throws Exception {
                    throw new RuntimeException("query processing error");
                }
            });
        }
    }, serverMetrics);
    ByteBuf requestBuf = getSerializedInstanceRequest(getInstanceRequest());
    ListenableFuture<byte[]> responseFuture = handler.processRequest(channelHandlerContext, requestBuf);
    byte[] bytes = responseFuture.get(2, TimeUnit.SECONDS);
    // on a query processing exception, the response is still a DataTable carrying the exception information
    Assert.assertTrue(bytes.length > 0);
    DataTable expectedDT = new DataTableImplV2();
    expectedDT.addException(QueryException.INTERNAL_ERROR);
    Assert.assertEquals(bytes, expectedDT.toBytes());
}
Also used: DataTable (com.linkedin.pinot.common.utils.DataTable), QueryScheduler (com.linkedin.pinot.core.query.scheduler.QueryScheduler), QueryRequest (com.linkedin.pinot.common.query.QueryRequest), ListenableFuture (com.google.common.util.concurrent.ListenableFuture), DataTableImplV2 (com.linkedin.pinot.core.common.datatable.DataTableImplV2), ByteBuf (io.netty.buffer.ByteBuf), Callable (java.util.concurrent.Callable), Test (org.testng.annotations.Test), BeforeTest (org.testng.annotations.BeforeTest)

Example 4 with DataTableImplV2

Use of com.linkedin.pinot.core.common.datatable.DataTableImplV2 in project pinot by linkedin.

In class ScheduledRequestHandlerTest, method testBadRequest:

@Test
public void testBadRequest() throws Exception {
    ScheduledRequestHandler handler = new ScheduledRequestHandler(queryScheduler, serverMetrics);
    String requestBadString = "foobar";
    byte[] requestData = requestBadString.getBytes();
    ByteBuf buffer = Unpooled.wrappedBuffer(requestData);
    ListenableFuture<byte[]> response = handler.processRequest(channelHandlerContext, buffer);
    // The handler method is expected to return immediately
    Assert.assertTrue(response.isDone());
    byte[] responseBytes = response.get();
    Assert.assertTrue(responseBytes.length > 0);
    DataTable expectedDT = new DataTableImplV2();
    expectedDT.addException(QueryException.INTERNAL_ERROR);
    Assert.assertEquals(responseBytes, expectedDT.toBytes());
}
Also used: DataTable (com.linkedin.pinot.common.utils.DataTable), DataTableImplV2 (com.linkedin.pinot.core.common.datatable.DataTableImplV2), ByteBuf (io.netty.buffer.ByteBuf), Test (org.testng.annotations.Test), BeforeTest (org.testng.annotations.BeforeTest)
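
Examples 3 and 4 assert the same expectation: the response bytes are exactly a serialized DataTableImplV2 carrying QueryException.INTERNAL_ERROR. If more failure cases were added, that check could be factored into a helper along these lines inside the test class; assertInternalErrorResponse is a hypothetical name, not part of ScheduledRequestHandlerTest:

// Hypothetical helper: verify that responseBytes is a serialized
// DataTableImplV2 carrying QueryException.INTERNAL_ERROR.
private static void assertInternalErrorResponse(byte[] responseBytes) throws Exception {
    Assert.assertTrue(responseBytes.length > 0);
    DataTable expectedDT = new DataTableImplV2();
    expectedDT.addException(QueryException.INTERNAL_ERROR);
    Assert.assertEquals(responseBytes, expectedDT.toBytes());
}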

Aggregations

DataTable (com.linkedin.pinot.common.utils.DataTable): 4 uses
DataTableImplV2 (com.linkedin.pinot.core.common.datatable.DataTableImplV2): 4 uses
QueryRequest (com.linkedin.pinot.common.query.QueryRequest): 2 uses
TimerContext (com.linkedin.pinot.common.query.context.TimerContext): 2 uses
InstanceRequest (com.linkedin.pinot.common.request.InstanceRequest): 2 uses
ByteBuf (io.netty.buffer.ByteBuf): 2 uses
BeforeTest (org.testng.annotations.BeforeTest): 2 uses
Test (org.testng.annotations.Test): 2 uses
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 1 use
QueryException (com.linkedin.pinot.common.exception.QueryException): 1 use
BrokerRequest (com.linkedin.pinot.common.request.BrokerRequest): 1 use
SegmentDataManager (com.linkedin.pinot.core.data.manager.offline.SegmentDataManager): 1 use
TableDataManager (com.linkedin.pinot.core.data.manager.offline.TableDataManager): 1 use
Plan (com.linkedin.pinot.core.plan.Plan): 1 use
QueryScheduler (com.linkedin.pinot.core.query.scheduler.QueryScheduler): 1 use
SerDe (com.linkedin.pinot.serde.SerDe): 1 use
InetSocketAddress (java.net.InetSocketAddress): 1 use
Map (java.util.Map): 1 use
Callable (java.util.concurrent.Callable): 1 use
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 1 use