Use of com.linkedin.pinot.common.utils.DataTable in project pinot by linkedin.
The class BrokerReduceServiceTest, method testDistinctCountQuery0:
@Test
public void testDistinctCountQuery0() {
  BrokerRequest brokerRequest = getDistinctCountQuery("dim0");
  QuerySource querySource = new QuerySource();
  querySource.setTableName("midas");
  brokerRequest.setQuerySource(querySource);
  InstanceRequest instanceRequest = new InstanceRequest(0, brokerRequest);
  instanceRequest.setSearchSegments(new ArrayList<String>());
  for (IndexSegment segment : _indexSegmentList) {
    instanceRequest.addToSearchSegments(segment.getSegmentName());
  }
  Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<ServerInstance, DataTable>();
  try {
    QueryRequest queryRequest = new QueryRequest(instanceRequest, TableDataManagerProvider.getServerMetrics());
    // Execute the same request twice to simulate responses from two server instances.
    DataTable instanceResponse1 = _queryExecutor.processQuery(queryRequest, queryRunners);
    instanceResponseMap.put(new ServerInstance("localhost:0000"), instanceResponse1);
    DataTable instanceResponse2 = _queryExecutor.processQuery(queryRequest, queryRunners);
    instanceResponseMap.put(new ServerInstance("localhost:1111"), instanceResponse2);
    BrokerResponseNative brokerResponse = _reduceService.reduceOnDataTable(brokerRequest, instanceResponseMap);
    AggregationResult aggregationResult = brokerResponse.getAggregationResults().get(0);
    LOGGER.info("BrokerResponse is " + aggregationResult);
    checkAggregationResult(aggregationResult, "distinctCount_dim0", 10.0);
    LOGGER.info("Time used for BrokerResponse is " + brokerResponse.getTimeUsedMs());
  } catch (Exception e) {
    e.printStackTrace();
    // Should never happen
    throw new RuntimeException(e.toString(), e);
  }
}
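The checkAggregationResult helper is elided from this snippet. A minimal sketch of what it plausibly asserts, assuming AggregationResult exposes getFunction() and getValue() and that the value is compared numerically; treat this as an illustration, not the project's actual helper:

private void checkAggregationResult(AggregationResult aggregationResult, String expectedFunction, double expectedValue) {
  // The function name encodes "<aggregation>_<column>", e.g. "distinctCount_dim0".
  Assert.assertEquals(aggregationResult.getFunction(), expectedFunction);
  // Assumption: the value arrives serialized, so parse it back to a double before comparing.
  Assert.assertEquals(Double.parseDouble(aggregationResult.getValue().toString()), expectedValue);
}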
Use of com.linkedin.pinot.common.utils.DataTable in project pinot by linkedin.
The class BrokerReduceServiceTest, method testMaxQuery:
@Test
public void testMaxQuery() {
  BrokerRequest brokerRequest = getMaxQuery();
  QuerySource querySource = new QuerySource();
  querySource.setTableName("midas");
  brokerRequest.setQuerySource(querySource);
  InstanceRequest instanceRequest = new InstanceRequest(0, brokerRequest);
  instanceRequest.setSearchSegments(new ArrayList<String>());
  for (IndexSegment segment : _indexSegmentList) {
    instanceRequest.addToSearchSegments(segment.getSegmentName());
  }
  Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<ServerInstance, DataTable>();
  try {
    QueryRequest queryRequest = new QueryRequest(instanceRequest, TableDataManagerProvider.getServerMetrics());
    DataTable instanceResponse1 = _queryExecutor.processQuery(queryRequest, queryRunners);
    instanceResponseMap.put(new ServerInstance("localhost:0000"), instanceResponse1);
    DataTable instanceResponse2 = _queryExecutor.processQuery(queryRequest, queryRunners);
    instanceResponseMap.put(new ServerInstance("localhost:1111"), instanceResponse2);
    BrokerResponseNative brokerResponse = _reduceService.reduceOnDataTable(brokerRequest, instanceResponseMap);
    LOGGER.info("BrokerResponse is " + brokerResponse.getAggregationResults().get(0));
    LOGGER.info("Time used for BrokerResponse is " + brokerResponse.getTimeUsedMs());
  } catch (Exception e) {
    e.printStackTrace();
    // Should never happen
    throw new RuntimeException(e.toString(), e);
  }
}
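getMaxQuery() is likewise not shown. A hedged sketch of how such a request could be assembled from the Thrift-generated classes in com.linkedin.pinot.common.request; the metric column "met" and the setter sequence are assumptions inferred from the max_met results elsewhere in these tests:

private BrokerRequest getMaxQuery() {
  BrokerRequest query = new BrokerRequest();
  AggregationInfo aggregationInfo = new AggregationInfo();
  aggregationInfo.setAggregationType("max");
  Map<String, String> params = new HashMap<String, String>();
  params.put("column", "met"); // assumed metric column
  aggregationInfo.setAggregationParams(params);
  query.setAggregationsInfo(Collections.singletonList(aggregationInfo));
  return query;
}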
Use of com.linkedin.pinot.common.utils.DataTable in project pinot by linkedin.
The class BrokerReduceServiceTest, method testMultiAggregationQuery:
@Test
public void testMultiAggregationQuery() {
  BrokerRequest brokerRequest = getMultiAggregationQuery();
  QuerySource querySource = new QuerySource();
  querySource.setTableName("midas");
  brokerRequest.setQuerySource(querySource);
  InstanceRequest instanceRequest = new InstanceRequest(0, brokerRequest);
  instanceRequest.setSearchSegments(new ArrayList<String>());
  for (IndexSegment segment : _indexSegmentList) {
    instanceRequest.addToSearchSegments(segment.getSegmentName());
  }
  Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<ServerInstance, DataTable>();
  try {
    QueryRequest queryRequest = new QueryRequest(instanceRequest, TableDataManagerProvider.getServerMetrics());
    // Simulate ten server instances, each answering with the same DataTable.
    instanceResponseMap.put(new ServerInstance("localhost:0000"), _queryExecutor.processQuery(queryRequest, queryRunners));
    instanceResponseMap.put(new ServerInstance("localhost:1111"), _queryExecutor.processQuery(queryRequest, queryRunners));
    instanceResponseMap.put(new ServerInstance("localhost:2222"), _queryExecutor.processQuery(queryRequest, queryRunners));
    instanceResponseMap.put(new ServerInstance("localhost:3333"), _queryExecutor.processQuery(queryRequest, queryRunners));
    instanceResponseMap.put(new ServerInstance("localhost:4444"), _queryExecutor.processQuery(queryRequest, queryRunners));
    instanceResponseMap.put(new ServerInstance("localhost:5555"), _queryExecutor.processQuery(queryRequest, queryRunners));
    instanceResponseMap.put(new ServerInstance("localhost:6666"), _queryExecutor.processQuery(queryRequest, queryRunners));
    instanceResponseMap.put(new ServerInstance("localhost:7777"), _queryExecutor.processQuery(queryRequest, queryRunners));
    instanceResponseMap.put(new ServerInstance("localhost:8888"), _queryExecutor.processQuery(queryRequest, queryRunners));
    instanceResponseMap.put(new ServerInstance("localhost:9999"), _queryExecutor.processQuery(queryRequest, queryRunners));
    BrokerResponseNative brokerResponse = _reduceService.reduceOnDataTable(brokerRequest, instanceResponseMap);
    AggregationResult aggregationResult = brokerResponse.getAggregationResults().get(0);
    LOGGER.info("BrokerResponse is " + aggregationResult);
    checkAggregationResult(aggregationResult, "count_star", 4000020.0);
    aggregationResult = brokerResponse.getAggregationResults().get(1);
    LOGGER.info("BrokerResponse is " + aggregationResult);
    checkAggregationResult(aggregationResult, "sum_met", 400002000000.0);
    aggregationResult = brokerResponse.getAggregationResults().get(2);
    LOGGER.info("BrokerResponse is " + aggregationResult);
    checkAggregationResult(aggregationResult, "max_met", 200000.0);
    aggregationResult = brokerResponse.getAggregationResults().get(3);
    LOGGER.info("BrokerResponse is " + aggregationResult);
    checkAggregationResult(aggregationResult, "min_met", 0.0);
    aggregationResult = brokerResponse.getAggregationResults().get(4);
    LOGGER.info("BrokerResponse is " + aggregationResult);
    checkAggregationResult(aggregationResult, "avg_met", 100000.0);
    aggregationResult = brokerResponse.getAggregationResults().get(5);
    LOGGER.info("BrokerResponse is " + aggregationResult);
    checkAggregationResult(aggregationResult, "distinctCount_dim0", 10.0);
    aggregationResult = brokerResponse.getAggregationResults().get(6);
    LOGGER.info("BrokerResponse is " + aggregationResult);
    checkAggregationResult(aggregationResult, "distinctCount_dim1", 100.0);
    LOGGER.info("Time Used for BrokerResponse is " + brokerResponse.getTimeUsedMs());
    LOGGER.info("Num Docs Scanned is " + brokerResponse.getNumDocsScanned());
    LOGGER.info("Total Docs for BrokerResponse is " + brokerResponse.getTotalDocs());
    // System.out.println(brokerResponse.toJson());
  } catch (Exception e) {
    e.printStackTrace();
    // Should never happen
    throw new RuntimeException(e.toString(), e);
  }
}
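The expected values follow from the reduce semantics, assuming the broker sums additive functions and merges the rest: each of the ten identical instance responses contributes count_star = 400,002 and sum_met = 40,000,200,000, so the reduced totals are 10 × 400,002 = 4,000,020 and 10 × 40,000,200,000 = 400,002,000,000, while max (200,000), min (0), avg (100,000), and the distinctCount set unions (10 and 100) are unchanged by combining identical responses.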
Use of com.linkedin.pinot.common.utils.DataTable in project pinot by linkedin.
The class QueryExecutorTest, method testMaxQuery:
@Test
public void testMaxQuery() {
  BrokerRequest brokerRequest = getMaxQuery();
  QuerySource querySource = new QuerySource();
  querySource.setTableName("midas");
  brokerRequest.setQuerySource(querySource);
  InstanceRequest instanceRequest = new InstanceRequest(0, brokerRequest);
  instanceRequest.setSearchSegments(new ArrayList<String>());
  for (IndexSegment segment : _indexSegmentList) {
    instanceRequest.getSearchSegments().add(segment.getSegmentName());
  }
  QueryRequest queryRequest = new QueryRequest(instanceRequest, serverMetrics);
  DataTable instanceResponse = _queryExecutor.processQuery(queryRequest, queryRunners);
  LOGGER.info("InstanceResponse is " + instanceResponse.getDouble(0, 0));
  Assert.assertEquals(instanceResponse.getDouble(0, 0), 200000.0);
  LOGGER.info("Time used for instanceResponse is " + instanceResponse.getMetadata().get(DataTable.TIME_USED_MS_METADATA_KEY));
}
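Besides the aggregation value in row 0, column 0, the server stamps response metadata into the DataTable's string map. A hedged sketch of reading the other keys that ServerQueryExecutorV1Impl.processQuery (shown next) populates:

Map<String, String> metadata = instanceResponse.getMetadata();
LOGGER.info("Request id is " + metadata.get(DataTable.REQUEST_ID_METADATA_KEY));
LOGGER.info("Total docs is " + metadata.get(DataTable.TOTAL_DOCS_METADATA_KEY));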
Use of com.linkedin.pinot.common.utils.DataTable in project pinot by linkedin.
The class ServerQueryExecutorV1Impl, method processQuery:
@Override
public DataTable processQuery(final QueryRequest queryRequest, ExecutorService executorService) {
  TimerContext timerContext = queryRequest.getTimerContext();
  // Close out the scheduler-wait phase (if one was started) and open the processing phase.
  TimerContext.Timer schedulerWaitTimer = timerContext.getPhaseTimer(ServerQueryPhase.SCHEDULER_WAIT);
  if (schedulerWaitTimer != null) {
    schedulerWaitTimer.stopAndRecord();
  }
  TimerContext.Timer queryProcessingTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.QUERY_PROCESSING);
  DataTable dataTable;
  List<SegmentDataManager> queryableSegmentDataManagerList = null;
  InstanceRequest instanceRequest = queryRequest.getInstanceRequest();
  final long requestId = instanceRequest.getRequestId();
  try {
    TraceContext.register(instanceRequest);
    final BrokerRequest brokerRequest = instanceRequest.getQuery();
    LOGGER.debug("Incoming query is : {}", brokerRequest);
    // Acquire the requested segments and prune the ones that cannot match the query.
    TimerContext.Timer segmentPruneTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.SEGMENT_PRUNING);
    final String tableName = instanceRequest.getQuery().getQuerySource().getTableName();
    TableDataManager tableDataManager = _instanceDataManager.getTableDataManager(tableName);
    queryableSegmentDataManagerList = acquireQueryableSegments(tableDataManager, instanceRequest);
    long totalRawDocs = pruneSegments(tableDataManager, queryableSegmentDataManagerList, instanceRequest.getQuery());
    segmentPruneTimer.stopAndRecord();
    int numSegmentsMatched = queryableSegmentDataManagerList.size();
    queryRequest.setSegmentCountAfterPruning(numSegmentsMatched);
    LOGGER.debug("Matched {} segments", numSegmentsMatched);
    if (numSegmentsMatched == 0) {
      // Nothing survived pruning: return an empty table that still reports the total doc count.
      DataTable emptyDataTable = DataTableBuilder.buildEmptyDataTable(brokerRequest);
      emptyDataTable.getMetadata().put(DataTable.TOTAL_DOCS_METADATA_KEY, String.valueOf(totalRawDocs));
      return emptyDataTable;
    }
    // Build and execute the inter-segment query plan.
    TimerContext.Timer planBuildTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.BUILD_QUERY_PLAN);
    final Plan globalQueryPlan = _planMaker.makeInterSegmentPlan(queryableSegmentDataManagerList, brokerRequest, executorService, getResourceTimeOut(instanceRequest.getQuery()));
    planBuildTimer.stopAndRecord();
    if (_printQueryPlan) {
      LOGGER.debug("***************************** Query Plan for Request {} ***********************************", instanceRequest.getRequestId());
      globalQueryPlan.print();
      LOGGER.debug("*********************************** End Query Plan ***********************************");
    }
    TimerContext.Timer planExecTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.QUERY_PLAN_EXECUTION);
    globalQueryPlan.execute();
    planExecTimer.stopAndRecord();
    dataTable = globalQueryPlan.getInstanceResponse();
    Map<String, String> dataTableMetadata = dataTable.getMetadata();
    queryProcessingTimer.stopAndRecord();
    LOGGER.debug("Searching Instance for Request Id - {}, browse took: {}", instanceRequest.getRequestId(), queryProcessingTimer.getDurationNs());
    LOGGER.debug("InstanceResponse for Request Id - {} : {}", instanceRequest.getRequestId(), dataTable.toString());
    // Stamp timing, request id, and trace info into the response metadata.
    dataTableMetadata.put(DataTable.TIME_USED_MS_METADATA_KEY, Long.toString(queryProcessingTimer.getDurationMs()));
    dataTableMetadata.put(DataTable.REQUEST_ID_METADATA_KEY, Long.toString(instanceRequest.getRequestId()));
    dataTableMetadata.put(DataTable.TRACE_INFO_METADATA_KEY, TraceContext.getTraceInfoOfRequestId(instanceRequest.getRequestId()));
    // Update the total docs in the metadata based on un-pruned segments.
    dataTableMetadata.put(DataTable.TOTAL_DOCS_METADATA_KEY, String.valueOf(totalRawDocs));
    return dataTable;
  } catch (Exception e) {
    // Errors are not rethrown: they are returned to the broker inside the DataTable.
    _serverMetrics.addMeteredQueryValue(instanceRequest.getQuery(), ServerMeter.QUERY_EXECUTION_EXCEPTIONS, 1);
    LOGGER.error("Exception processing requestId {}", requestId, e);
    dataTable = new DataTableImplV2();
    Map<String, String> dataTableMetadata = dataTable.getMetadata();
    dataTable.addException(QueryException.getException(QueryException.QUERY_EXECUTION_ERROR, e));
    TraceContext.logException("ServerQueryExecutorV1Impl", "Exception occurs in processQuery");
    queryProcessingTimer.stopAndRecord();
    LOGGER.info("Searching Instance for Request Id - {}, browse took: {}, instanceResponse: {}", requestId, queryProcessingTimer.getDurationMs(), dataTable.toString());
    // Record elapsed time in milliseconds, matching the TIME_USED_MS key.
    dataTableMetadata.put(DataTable.TIME_USED_MS_METADATA_KEY, Long.toString(queryProcessingTimer.getDurationMs()));
    dataTableMetadata.put(DataTable.REQUEST_ID_METADATA_KEY, Long.toString(instanceRequest.getRequestId()));
    dataTableMetadata.put(DataTable.TRACE_INFO_METADATA_KEY, TraceContext.getTraceInfoOfRequestId(instanceRequest.getRequestId()));
    return dataTable;
  } finally {
    // Always release acquired segments and unregister the trace, success or failure.
    TableDataManager tableDataManager = _instanceDataManager.getTableDataManager(queryRequest.getTableName());
    if (tableDataManager != null && queryableSegmentDataManagerList != null) {
      for (SegmentDataManager segmentDataManager : queryableSegmentDataManagerList) {
        tableDataManager.releaseSegment(segmentDataManager);
      }
    }
    TraceContext.unregister(instanceRequest);
  }
}
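Note the error contract: processQuery never rethrows; both paths return a DataTable, with failures attached via addException and the same metadata keys stamped as on success. A hedged caller-side sketch (the executor setup and variable names are illustrative):

ExecutorService queryRunners = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors());
DataTable instanceResponse = queryExecutor.processQuery(queryRequest, queryRunners);
// TIME_USED_MS is stamped on both the success and the error path.
LOGGER.info("Server-side time used (ms) is " + instanceResponse.getMetadata().get(DataTable.TIME_USED_MS_METADATA_KEY));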