Search in sources :

Example 26 with DataTable

Example usage of com.linkedin.pinot.common.utils.DataTable in the pinot project by LinkedIn.

From the class BrokerReduceServiceTest, method testDistinctCountQuery0.

/**
 * Verifies that a DISTINCTCOUNT aggregation on column "dim0" reduces correctly
 * across multiple server responses: two instances answer the same query and the
 * broker-side reduce must still report 10 distinct values for "dim0".
 */
@Test
public void testDistinctCountQuery0() {
    BrokerRequest brokerRequest = getDistinctCountQuery("dim0");
    QuerySource querySource = new QuerySource();
    querySource.setTableName("midas");
    brokerRequest.setQuerySource(querySource);
    InstanceRequest instanceRequest = new InstanceRequest(0, brokerRequest);
    instanceRequest.setSearchSegments(new ArrayList<String>());
    for (IndexSegment segment : _indexSegmentList) {
        instanceRequest.addToSearchSegments(segment.getSegmentName());
    }
    Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<ServerInstance, DataTable>();
    try {
        QueryRequest queryRequest = new QueryRequest(instanceRequest, TableDataManagerProvider.getServerMetrics());
        // Simulate two servers returning results for the same query; the reduce
        // step must merge their distinct-count sets without double counting.
        DataTable instanceResponse1 = _queryExecutor.processQuery(queryRequest, queryRunners);
        instanceResponseMap.put(new ServerInstance("localhost:0000"), instanceResponse1);
        DataTable instanceResponse2 = _queryExecutor.processQuery(queryRequest, queryRunners);
        instanceResponseMap.put(new ServerInstance("localhost:1111"), instanceResponse2);
        BrokerResponseNative brokerResponse = _reduceService.reduceOnDataTable(brokerRequest, instanceResponseMap);
        AggregationResult aggregationResult = brokerResponse.getAggregationResults().get(0);
        // Parameterized logging avoids eager string concatenation.
        LOGGER.info("BrokerResponse is {}", aggregationResult);
        checkAggregationResult(aggregationResult, "distinctCount_dim0", 10.0);
        LOGGER.info("Time used for BrokerResponse is {}", brokerResponse.getTimeUsedMs());
    } catch (Exception e) {
        // Should never happen. Rethrow with the cause preserved; the previous
        // e.printStackTrace() was redundant since the wrapped exception already
        // carries the full stack trace.
        throw new RuntimeException(e.toString(), e);
    }
}
Also used : DataTable(com.linkedin.pinot.common.utils.DataTable) QueryRequest(com.linkedin.pinot.common.query.QueryRequest) HashMap(java.util.HashMap) IndexSegment(com.linkedin.pinot.core.indexsegment.IndexSegment) BrokerResponseNative(com.linkedin.pinot.common.response.broker.BrokerResponseNative) AggregationResult(com.linkedin.pinot.common.response.broker.AggregationResult) QuerySource(com.linkedin.pinot.common.request.QuerySource) BrokerRequest(com.linkedin.pinot.common.request.BrokerRequest) ServerInstance(com.linkedin.pinot.common.response.ServerInstance) InstanceRequest(com.linkedin.pinot.common.request.InstanceRequest) Test(org.testng.annotations.Test)

Example 27 with DataTable

Example usage of com.linkedin.pinot.common.utils.DataTable in the pinot project by LinkedIn.

From the class BrokerReduceServiceTest, method testMaxQuery.

/**
 * Smoke test for a MAX aggregation: two simulated server instances answer the
 * same query against table "midas" and the broker reduce is exercised. The
 * result is only logged, not asserted (see checkAggregationResult-based tests
 * for value verification).
 */
@Test
public void testMaxQuery() {
    BrokerRequest brokerRequest = getMaxQuery();
    QuerySource querySource = new QuerySource();
    querySource.setTableName("midas");
    brokerRequest.setQuerySource(querySource);
    InstanceRequest instanceRequest = new InstanceRequest(0, brokerRequest);
    instanceRequest.setSearchSegments(new ArrayList<String>());
    for (IndexSegment segment : _indexSegmentList) {
        instanceRequest.addToSearchSegments(segment.getSegmentName());
    }
    Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<ServerInstance, DataTable>();
    try {
        QueryRequest queryRequest = new QueryRequest(instanceRequest, TableDataManagerProvider.getServerMetrics());
        // Two instances respond with identical data tables; reduce must merge them.
        DataTable instanceResponse1 = _queryExecutor.processQuery(queryRequest, queryRunners);
        instanceResponseMap.put(new ServerInstance("localhost:0000"), instanceResponse1);
        DataTable instanceResponse2 = _queryExecutor.processQuery(queryRequest, queryRunners);
        instanceResponseMap.put(new ServerInstance("localhost:1111"), instanceResponse2);
        BrokerResponseNative brokerResponse = _reduceService.reduceOnDataTable(brokerRequest, instanceResponseMap);
        // Parameterized logging avoids eager string concatenation.
        LOGGER.info("BrokerResponse is {}", brokerResponse.getAggregationResults().get(0));
        LOGGER.info("Time used for BrokerResponse is {}", brokerResponse.getTimeUsedMs());
    } catch (Exception e) {
        // Should never happen. Rethrow with the cause preserved; the previous
        // e.printStackTrace() was redundant since the wrapped exception already
        // carries the full stack trace.
        throw new RuntimeException(e.toString(), e);
    }
}
Also used : DataTable(com.linkedin.pinot.common.utils.DataTable) QueryRequest(com.linkedin.pinot.common.query.QueryRequest) HashMap(java.util.HashMap) IndexSegment(com.linkedin.pinot.core.indexsegment.IndexSegment) BrokerResponseNative(com.linkedin.pinot.common.response.broker.BrokerResponseNative) QuerySource(com.linkedin.pinot.common.request.QuerySource) BrokerRequest(com.linkedin.pinot.common.request.BrokerRequest) ServerInstance(com.linkedin.pinot.common.response.ServerInstance) InstanceRequest(com.linkedin.pinot.common.request.InstanceRequest) Test(org.testng.annotations.Test)

Example 28 with DataTable

Example usage of com.linkedin.pinot.common.utils.DataTable in the pinot project by LinkedIn.

From the class BrokerReduceServiceTest, method testMultiAggregationQuery.

/**
 * End-to-end reduce test for a query carrying seven aggregation functions at
 * once (count, sum, max, min, avg and two distinct counts). Ten simulated
 * server instances all answer with the same data table; the broker reduce
 * must combine them and every aggregation must land on its expected value.
 */
@Test
public void testMultiAggregationQuery() {
    BrokerRequest brokerRequest = getMultiAggregationQuery();
    QuerySource querySource = new QuerySource();
    querySource.setTableName("midas");
    brokerRequest.setQuerySource(querySource);
    InstanceRequest instanceRequest = new InstanceRequest(0, brokerRequest);
    instanceRequest.setSearchSegments(new ArrayList<String>());
    for (IndexSegment segment : _indexSegmentList) {
        instanceRequest.addToSearchSegments(segment.getSegmentName());
    }
    Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<ServerInstance, DataTable>();
    try {
        QueryRequest queryRequest = new QueryRequest(instanceRequest, TableDataManagerProvider.getServerMetrics());
        // Ten simulated instances ("localhost:0000" .. "localhost:9999"), each
        // returning an independently-executed response for the same query.
        for (int i = 0; i < 10; i++) {
            String port = String.valueOf(i) + i + i + i;  // "0000", "1111", ... "9999"
            instanceResponseMap.put(new ServerInstance("localhost:" + port),
                _queryExecutor.processQuery(queryRequest, queryRunners));
        }
        BrokerResponseNative brokerResponse = _reduceService.reduceOnDataTable(brokerRequest, instanceResponseMap);
        // Expected results per aggregation index; order matches the functions in
        // getMultiAggregationQuery().
        String[] expectedFunctions = {
            "count_star", "sum_met", "max_met", "min_met", "avg_met",
            "distinctCount_dim0", "distinctCount_dim1"
        };
        double[] expectedValues = {
            4000020.0, 400002000000.0, 200000.0, 0.0, 100000.0, 10.0, 100.0
        };
        for (int i = 0; i < expectedFunctions.length; i++) {
            AggregationResult aggregationResult = brokerResponse.getAggregationResults().get(i);
            LOGGER.info("BrokerResponse is {}", aggregationResult);
            checkAggregationResult(aggregationResult, expectedFunctions[i], expectedValues[i]);
        }
        LOGGER.info("Time Used for BrokerResponse is {}", brokerResponse.getTimeUsedMs());
        LOGGER.info("Num Docs Scanned is {}", brokerResponse.getNumDocsScanned());
        LOGGER.info("Total Docs for BrokerResponse is {}", brokerResponse.getTotalDocs());
    } catch (Exception e) {
        // Should never happen. Rethrow with the cause preserved; the previous
        // e.printStackTrace() was redundant since the wrapped exception already
        // carries the full stack trace.
        throw new RuntimeException(e.toString(), e);
    }
}
Also used : DataTable(com.linkedin.pinot.common.utils.DataTable) QueryRequest(com.linkedin.pinot.common.query.QueryRequest) HashMap(java.util.HashMap) IndexSegment(com.linkedin.pinot.core.indexsegment.IndexSegment) BrokerResponseNative(com.linkedin.pinot.common.response.broker.BrokerResponseNative) AggregationResult(com.linkedin.pinot.common.response.broker.AggregationResult) QuerySource(com.linkedin.pinot.common.request.QuerySource) BrokerRequest(com.linkedin.pinot.common.request.BrokerRequest) ServerInstance(com.linkedin.pinot.common.response.ServerInstance) InstanceRequest(com.linkedin.pinot.common.request.InstanceRequest) Test(org.testng.annotations.Test)

Example 29 with DataTable

Example usage of com.linkedin.pinot.common.utils.DataTable in the pinot project by LinkedIn.

From the class QueryExecutorTest, method testMaxQuery.

/**
 * Executes a MAX query directly against the query executor (no broker reduce)
 * and asserts the single-instance response holds the expected maximum, 200000.0,
 * at cell (0, 0) of the result data table.
 */
@Test
public void testMaxQuery() {
    BrokerRequest brokerRequest = getMaxQuery();
    QuerySource querySource = new QuerySource();
    querySource.setTableName("midas");
    brokerRequest.setQuerySource(querySource);
    InstanceRequest instanceRequest = new InstanceRequest(0, brokerRequest);
    instanceRequest.setSearchSegments(new ArrayList<String>());
    for (IndexSegment segment : _indexSegmentList) {
        // Use the Thrift-generated helper, consistent with the sibling tests,
        // instead of mutating the list returned by getSearchSegments().
        instanceRequest.addToSearchSegments(segment.getSegmentName());
    }
    QueryRequest queryRequest = new QueryRequest(instanceRequest, serverMetrics);
    DataTable instanceResponse = _queryExecutor.processQuery(queryRequest, queryRunners);
    // Parameterized logging avoids eager string concatenation.
    LOGGER.info("InstanceResponse is {}", instanceResponse.getDouble(0, 0));
    Assert.assertEquals(instanceResponse.getDouble(0, 0), 200000.0);
    LOGGER.info("Time used for instanceResponse is {}",
        instanceResponse.getMetadata().get(DataTable.TIME_USED_MS_METADATA_KEY));
}
Also used : DataTable(com.linkedin.pinot.common.utils.DataTable) QueryRequest(com.linkedin.pinot.common.query.QueryRequest) IndexSegment(com.linkedin.pinot.core.indexsegment.IndexSegment) QuerySource(com.linkedin.pinot.common.request.QuerySource) BrokerRequest(com.linkedin.pinot.common.request.BrokerRequest) InstanceRequest(com.linkedin.pinot.common.request.InstanceRequest) Test(org.testng.annotations.Test)

Example 30 with DataTable

Example usage of com.linkedin.pinot.common.utils.DataTable in the pinot project by LinkedIn.

From the class ServerQueryExecutorV1Impl, method processQuery.

/**
 * Executes a query on this server instance: prunes segments, builds and runs an
 * inter-segment query plan, and returns the resulting {@link DataTable} with
 * timing/trace metadata attached.
 *
 * <p>On any failure the method does NOT propagate the exception; it returns a
 * {@link DataTableImplV2} carrying the wrapped {@link QueryException} so the
 * broker can surface the error. Acquired segments are always released and the
 * trace context is always unregistered in the {@code finally} block.
 *
 * @param queryRequest wrapper around the incoming {@link InstanceRequest} plus timing context
 * @param executorService executor used to run the per-segment plan nodes
 * @return the query result table (never {@code null}); may be empty or carry an exception
 */
@Override
public DataTable processQuery(final QueryRequest queryRequest, ExecutorService executorService) {
    TimerContext timerContext = queryRequest.getTimerContext();
    // Close out the scheduler-wait phase (started by the scheduler, if any).
    TimerContext.Timer schedulerWaitTimer = timerContext.getPhaseTimer(ServerQueryPhase.SCHEDULER_WAIT);
    if (schedulerWaitTimer != null) {
        schedulerWaitTimer.stopAndRecord();
    }
    TimerContext.Timer queryProcessingTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.QUERY_PROCESSING);
    DataTable dataTable;
    List<SegmentDataManager> queryableSegmentDataManagerList = null;
    InstanceRequest instanceRequest = queryRequest.getInstanceRequest();
    final long requestId = instanceRequest.getRequestId();
    try {
        TraceContext.register(instanceRequest);
        final BrokerRequest brokerRequest = instanceRequest.getQuery();
        LOGGER.debug("Incoming query is : {}", brokerRequest);
        TimerContext.Timer segmentPruneTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.SEGMENT_PRUNING);
        final String tableName = instanceRequest.getQuery().getQuerySource().getTableName();
        TableDataManager tableDataManager = _instanceDataManager.getTableDataManager(tableName);
        // Acquire ref-counted segment managers; released in the finally block.
        queryableSegmentDataManagerList = acquireQueryableSegments(tableDataManager, instanceRequest);
        long totalRawDocs = pruneSegments(tableDataManager, queryableSegmentDataManagerList, instanceRequest.getQuery());
        segmentPruneTimer.stopAndRecord();
        int numSegmentsMatched = queryableSegmentDataManagerList.size();
        queryRequest.setSegmentCountAfterPruning(numSegmentsMatched);
        LOGGER.debug("Matched {} segments", numSegmentsMatched);
        if (numSegmentsMatched == 0) {
            // Nothing to scan: return an empty table that still reports the
            // un-pruned total doc count so broker-side stats stay correct.
            DataTable emptyDataTable = DataTableBuilder.buildEmptyDataTable(brokerRequest);
            emptyDataTable.getMetadata().put(DataTable.TOTAL_DOCS_METADATA_KEY, String.valueOf(totalRawDocs));
            return emptyDataTable;
        }
        TimerContext.Timer planBuildTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.BUILD_QUERY_PLAN);
        final Plan globalQueryPlan = _planMaker.makeInterSegmentPlan(queryableSegmentDataManagerList, brokerRequest, executorService, getResourceTimeOut(instanceRequest.getQuery()));
        planBuildTimer.stopAndRecord();
        if (_printQueryPlan) {
            LOGGER.debug("***************************** Query Plan for Request {} ***********************************", instanceRequest.getRequestId());
            globalQueryPlan.print();
            LOGGER.debug("*********************************** End Query Plan ***********************************");
        }
        TimerContext.Timer planExecTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.QUERY_PLAN_EXECUTION);
        globalQueryPlan.execute();
        planExecTimer.stopAndRecord();
        dataTable = globalQueryPlan.getInstanceResponse();
        Map<String, String> dataTableMetadata = dataTable.getMetadata();
        queryProcessingTimer.stopAndRecord();
        LOGGER.debug("Searching Instance for Request Id - {}, browse took: {}", instanceRequest.getRequestId(), queryProcessingTimer.getDurationNs());
        LOGGER.debug("InstanceResponse for Request Id - {} : {}", instanceRequest.getRequestId(), dataTable.toString());
        dataTableMetadata.put(DataTable.TIME_USED_MS_METADATA_KEY, Long.toString(queryProcessingTimer.getDurationMs()));
        dataTableMetadata.put(DataTable.REQUEST_ID_METADATA_KEY, Long.toString(instanceRequest.getRequestId()));
        dataTableMetadata.put(DataTable.TRACE_INFO_METADATA_KEY, TraceContext.getTraceInfoOfRequestId(instanceRequest.getRequestId()));
        // Update the total docs in the metadata based on un-pruned segments.
        dataTableMetadata.put(DataTable.TOTAL_DOCS_METADATA_KEY, String.valueOf(totalRawDocs));
        return dataTable;
    } catch (Exception e) {
        _serverMetrics.addMeteredQueryValue(instanceRequest.getQuery(), ServerMeter.QUERY_EXECUTION_EXCEPTIONS, 1);
        LOGGER.error("Exception processing requestId {}", requestId, e);
        // Build an error response instead of propagating, so the broker receives
        // a well-formed table with the exception attached.
        dataTable = new DataTableImplV2();
        Map<String, String> dataTableMetadata = dataTable.getMetadata();
        dataTable.addException(QueryException.getException(QueryException.QUERY_EXECUTION_ERROR, e));
        TraceContext.logException("ServerQueryExecutorV1Impl", "Exception occurs in processQuery");
        queryProcessingTimer.stopAndRecord();
        LOGGER.info("Searching Instance for Request Id - {}, browse took: {}, instanceResponse: {}", requestId, queryProcessingTimer.getDurationMs(), dataTable.toString());
        // BUG FIX: the TIME_USED_MS key was previously populated with
        // getDurationNs() on this error path (nanoseconds, off by 10^6);
        // use milliseconds to match the success path above.
        dataTableMetadata.put(DataTable.TIME_USED_MS_METADATA_KEY, Long.toString(queryProcessingTimer.getDurationMs()));
        dataTableMetadata.put(DataTable.REQUEST_ID_METADATA_KEY, Long.toString(instanceRequest.getRequestId()));
        dataTableMetadata.put(DataTable.TRACE_INFO_METADATA_KEY, TraceContext.getTraceInfoOfRequestId(instanceRequest.getRequestId()));
        return dataTable;
    } finally {
        // Always release segment refs and unregister tracing, even on the
        // early return for zero matched segments.
        TableDataManager tableDataManager = _instanceDataManager.getTableDataManager(queryRequest.getTableName());
        if (tableDataManager != null && queryableSegmentDataManagerList != null) {
            for (SegmentDataManager segmentDataManager : queryableSegmentDataManagerList) {
                tableDataManager.releaseSegment(segmentDataManager);
            }
        }
        TraceContext.unregister(instanceRequest);
    }
}
Also used : DataTable(com.linkedin.pinot.common.utils.DataTable) DataTableImplV2(com.linkedin.pinot.core.common.datatable.DataTableImplV2) Plan(com.linkedin.pinot.core.plan.Plan) QueryException(com.linkedin.pinot.common.exception.QueryException) ConfigurationException(org.apache.commons.configuration.ConfigurationException) SegmentDataManager(com.linkedin.pinot.core.data.manager.offline.SegmentDataManager) TimerContext(com.linkedin.pinot.common.query.context.TimerContext) TableDataManager(com.linkedin.pinot.core.data.manager.offline.TableDataManager) BrokerRequest(com.linkedin.pinot.common.request.BrokerRequest) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) InstanceRequest(com.linkedin.pinot.common.request.InstanceRequest)

Aggregations

DataTable (com.linkedin.pinot.common.utils.DataTable)49 Test (org.testng.annotations.Test)36 QueryRequest (com.linkedin.pinot.common.query.QueryRequest)35 InstanceRequest (com.linkedin.pinot.common.request.InstanceRequest)34 BrokerRequest (com.linkedin.pinot.common.request.BrokerRequest)33 HashMap (java.util.HashMap)27 ServerInstance (com.linkedin.pinot.common.response.ServerInstance)26 BrokerResponseNative (com.linkedin.pinot.common.response.broker.BrokerResponseNative)23 QuerySource (com.linkedin.pinot.common.request.QuerySource)18 IndexSegment (com.linkedin.pinot.core.indexsegment.IndexSegment)12 AggregationResult (com.linkedin.pinot.common.response.broker.AggregationResult)8 BeforeTest (org.testng.annotations.BeforeTest)8 DataSchema (com.linkedin.pinot.common.utils.DataSchema)7 ArrayList (java.util.ArrayList)7 Nonnull (javax.annotation.Nonnull)5 DataTableImplV2 (com.linkedin.pinot.core.common.datatable.DataTableImplV2)4 ByteBuf (io.netty.buffer.ByteBuf)4 Map (java.util.Map)4 AfterTest (org.testng.annotations.AfterTest)4 DataTableBuilder (com.linkedin.pinot.core.common.datatable.DataTableBuilder)3