Use of com.linkedin.pinot.common.request.BrokerRequest in project pinot by linkedin.
The class QueryExecutorTest, method testMaxQuery.
@Test
public void testMaxQuery() {
  BrokerRequest brokerRequest = getMaxQuery();
  QuerySource querySource = new QuerySource();
  querySource.setTableName("midas");
  brokerRequest.setQuerySource(querySource);
  InstanceRequest instanceRequest = new InstanceRequest(0, brokerRequest);
  // Query every segment that was loaded for this test.
  instanceRequest.setSearchSegments(new ArrayList<String>());
  for (IndexSegment segment : _indexSegmentList) {
    instanceRequest.getSearchSegments().add(segment.getSegmentName());
  }
  QueryRequest queryRequest = new QueryRequest(instanceRequest, serverMetrics);
  DataTable instanceResponse = _queryExecutor.processQuery(queryRequest, queryRunners);
  LOGGER.info("InstanceResponse is " + instanceResponse.getDouble(0, 0));
  // The max aggregation over the test data is expected to be 200000.0.
  Assert.assertEquals(instanceResponse.getDouble(0, 0), 200000.0);
  LOGGER.info("Time used for instanceResponse is " + instanceResponse.getMetadata().get(DataTable.TIME_USED_MS_METADATA_KEY));
}
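The helper getMaxQuery() is not shown in this snippet. A minimal sketch of such a helper, assuming the Pql2Compiler used elsewhere in this project and a hypothetical metric column met_impressionCount:

private BrokerRequest getMaxQuery() {
  // Sketch only: the PQL string and column name are assumptions, not taken from the test.
  Pql2Compiler compiler = new Pql2Compiler();
  return compiler.compileToBrokerRequest("select max(met_impressionCount) from midas");
}

The table name in the from-clause is irrelevant here, since the test overrides it via setQuerySource.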
Use of com.linkedin.pinot.common.request.BrokerRequest in project pinot by linkedin.
The class ScanBasedQueryProcessor, method processQuery.
public QueryResponse processQuery(String query) throws Exception {
  long startTimeInMillis = System.currentTimeMillis();
  Pql2Compiler pql2Compiler = new Pql2Compiler();
  BrokerRequest brokerRequest = pql2Compiler.compileToBrokerRequest(query);
  ResultTable results = null;
  Aggregation aggregation = null;
  List<AggregationInfo> aggregationsInfo = brokerRequest.getAggregationsInfo();
  if (aggregationsInfo != null) {
    // Pick up the group-by columns (if any) and the top-N limit, defaulting to 10.
    GroupBy groupBy = brokerRequest.getGroupBy();
    List<String> groupByColumns = brokerRequest.isSetGroupBy() ? groupBy.getColumns() : null;
    long topN = (groupByColumns != null) ? groupBy.getTopN() : 10;
    aggregation = new Aggregation(aggregationsInfo, groupByColumns, topN);
  }
  int numDocsScanned = 0;
  int totalDocs = 0;
  int numSegments = 0;
  LOGGER.info("Processing Query: {}", query);
  // Run the query against each segment and merge the per-segment results.
  List<ResultTable> resultTables = processSegments(query, brokerRequest);
  for (ResultTable segmentResults : resultTables) {
    numDocsScanned += segmentResults.getNumDocsScanned();
    totalDocs += segmentResults.getTotalDocs();
    ++numSegments;
    results = (results == null) ? segmentResults : results.append(segmentResults);
  }
  // Re-aggregate across segments only when more than one segment contributed rows.
  if (aggregation != null && numSegments > 1 && numDocsScanned > 0) {
    results = aggregation.aggregate(results);
  }
  if (results != null) {
    results.setNumDocsScanned(numDocsScanned);
    results.setTotalDocs(totalDocs);
    results.setProcessingTime(System.currentTimeMillis() - startTimeInMillis);
    results.seal();
  }
  return new QueryResponse(results);
}
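A minimal driver for this method might look as follows, assuming the single-argument constructor that takes a directory of segments; the path and query string are illustrative:

// Sketch only: the segments directory and PQL string are assumptions.
ScanBasedQueryProcessor processor = new ScanBasedQueryProcessor("/tmp/pinot/segments");
QueryResponse response = processor.processQuery("select count(*) from myTable");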
Use of com.linkedin.pinot.common.request.BrokerRequest in project pinot by linkedin.
The class ServerQueryExecutorV1Impl, method processQuery.
@Override
public DataTable processQuery(final QueryRequest queryRequest, ExecutorService executorService) {
  TimerContext timerContext = queryRequest.getTimerContext();
  TimerContext.Timer schedulerWaitTimer = timerContext.getPhaseTimer(ServerQueryPhase.SCHEDULER_WAIT);
  if (schedulerWaitTimer != null) {
    schedulerWaitTimer.stopAndRecord();
  }
  TimerContext.Timer queryProcessingTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.QUERY_PROCESSING);
  DataTable dataTable;
  List<SegmentDataManager> queryableSegmentDataManagerList = null;
  InstanceRequest instanceRequest = queryRequest.getInstanceRequest();
  final long requestId = instanceRequest.getRequestId();
  try {
    TraceContext.register(instanceRequest);
    final BrokerRequest brokerRequest = instanceRequest.getQuery();
    LOGGER.debug("Incoming query is : {}", brokerRequest);
    // Acquire the segments for the table and prune the ones the query cannot match.
    TimerContext.Timer segmentPruneTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.SEGMENT_PRUNING);
    final String tableName = brokerRequest.getQuerySource().getTableName();
    TableDataManager tableDataManager = _instanceDataManager.getTableDataManager(tableName);
    queryableSegmentDataManagerList = acquireQueryableSegments(tableDataManager, instanceRequest);
    long totalRawDocs = pruneSegments(tableDataManager, queryableSegmentDataManagerList, brokerRequest);
    segmentPruneTimer.stopAndRecord();
    int numSegmentsMatched = queryableSegmentDataManagerList.size();
    queryRequest.setSegmentCountAfterPruning(numSegmentsMatched);
    LOGGER.debug("Matched {} segments", numSegmentsMatched);
    if (numSegmentsMatched == 0) {
      // Nothing to scan: return an empty table that still carries the total doc count.
      DataTable emptyDataTable = DataTableBuilder.buildEmptyDataTable(brokerRequest);
      emptyDataTable.getMetadata().put(DataTable.TOTAL_DOCS_METADATA_KEY, String.valueOf(totalRawDocs));
      return emptyDataTable;
    }
    // Build and execute the inter-segment query plan.
    TimerContext.Timer planBuildTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.BUILD_QUERY_PLAN);
    final Plan globalQueryPlan = _planMaker.makeInterSegmentPlan(queryableSegmentDataManagerList, brokerRequest, executorService, getResourceTimeOut(brokerRequest));
    planBuildTimer.stopAndRecord();
    if (_printQueryPlan) {
      LOGGER.debug("***************************** Query Plan for Request {} ***********************************", requestId);
      globalQueryPlan.print();
      LOGGER.debug("*********************************** End Query Plan ***********************************");
    }
    TimerContext.Timer planExecTimer = timerContext.startNewPhaseTimer(ServerQueryPhase.QUERY_PLAN_EXECUTION);
    globalQueryPlan.execute();
    planExecTimer.stopAndRecord();
    dataTable = globalQueryPlan.getInstanceResponse();
    Map<String, String> dataTableMetadata = dataTable.getMetadata();
    queryProcessingTimer.stopAndRecord();
    LOGGER.debug("Searching Instance for Request Id - {}, browse took: {}", requestId, queryProcessingTimer.getDurationNs());
    LOGGER.debug("InstanceResponse for Request Id - {} : {}", requestId, dataTable.toString());
    dataTableMetadata.put(DataTable.TIME_USED_MS_METADATA_KEY, Long.toString(queryProcessingTimer.getDurationMs()));
    dataTableMetadata.put(DataTable.REQUEST_ID_METADATA_KEY, Long.toString(requestId));
    dataTableMetadata.put(DataTable.TRACE_INFO_METADATA_KEY, TraceContext.getTraceInfoOfRequestId(requestId));
    // Update the total docs in the metadata based on un-pruned segments.
    dataTableMetadata.put(DataTable.TOTAL_DOCS_METADATA_KEY, String.valueOf(totalRawDocs));
    return dataTable;
  } catch (Exception e) {
    _serverMetrics.addMeteredQueryValue(instanceRequest.getQuery(), ServerMeter.QUERY_EXECUTION_EXCEPTIONS, 1);
    LOGGER.error("Exception processing requestId {}", requestId, e);
    dataTable = new DataTableImplV2();
    Map<String, String> dataTableMetadata = dataTable.getMetadata();
    dataTable.addException(QueryException.getException(QueryException.QUERY_EXECUTION_ERROR, e));
    TraceContext.logException("ServerQueryExecutorV1Impl", "Exception occurs in processQuery");
    queryProcessingTimer.stopAndRecord();
    LOGGER.info("Searching Instance for Request Id - {}, browse took: {}, instanceResponse: {}", requestId, queryProcessingTimer.getDurationMs(), dataTable.toString());
    // Record the time used in milliseconds, matching the TIME_USED_MS key.
    dataTableMetadata.put(DataTable.TIME_USED_MS_METADATA_KEY, Long.toString(queryProcessingTimer.getDurationMs()));
    dataTableMetadata.put(DataTable.REQUEST_ID_METADATA_KEY, Long.toString(requestId));
    dataTableMetadata.put(DataTable.TRACE_INFO_METADATA_KEY, TraceContext.getTraceInfoOfRequestId(requestId));
    return dataTable;
  } finally {
    // Always release acquired segments and unregister the trace, even on early return.
    TableDataManager tableDataManager = _instanceDataManager.getTableDataManager(queryRequest.getTableName());
    if (tableDataManager != null && queryableSegmentDataManagerList != null) {
      for (SegmentDataManager segmentDataManager : queryableSegmentDataManagerList) {
        tableDataManager.releaseSegment(segmentDataManager);
      }
    }
    TraceContext.unregister(instanceRequest);
  }
}
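For callers, the metadata keys written above form the response contract. A minimal sketch of reading them back, assuming a queryRequest and executorService set up as in the test snippets in this listing:

DataTable result = queryExecutor.processQuery(queryRequest, executorService);
Map<String, String> metadata = result.getMetadata();
// Both values are written as strings above; note that not every path sets every key
// (e.g. the empty-table path sets only TOTAL_DOCS).
long timeUsedMs = Long.parseLong(metadata.get(DataTable.TIME_USED_MS_METADATA_KEY));
long totalDocs = Long.parseLong(metadata.get(DataTable.TOTAL_DOCS_METADATA_KEY));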
Use of com.linkedin.pinot.common.request.BrokerRequest in project pinot by linkedin.
The class BaseSumStarTreeIndexTest, method testHardCodedQueries.
protected void testHardCodedQueries(IndexSegment segment, Schema schema) {
  // Test against all metric columns, instead of just the aggregation column in the query.
  List<String> metricNames = schema.getMetricNames();
  SegmentMetadata segmentMetadata = segment.getSegmentMetadata();
  Pql2Compiler compiler = new Pql2Compiler();
  for (int i = 0; i < _hardCodedQueries.length; i++) {
    BrokerRequest brokerRequest = compiler.compileToBrokerRequest(_hardCodedQueries[i]);
    FilterQueryTree filterQueryTree = RequestUtils.generateFilterQueryTree(brokerRequest);
    // Every hard-coded query must be answerable from the star-tree index.
    Assert.assertTrue(RequestUtils.isFitForStarTreeIndex(segmentMetadata, filterQueryTree, brokerRequest));
    // Compare sums computed from raw documents against sums from aggregated (star-tree) documents.
    Map<String, double[]> expectedResult = computeSumUsingRawDocs(segment, metricNames, brokerRequest);
    Map<String, double[]> actualResult = computeSumUsingAggregatedDocs(segment, metricNames, brokerRequest);
    Assert.assertEquals(expectedResult.size(), actualResult.size(), "Mis-match in number of groups");
    for (Map.Entry<String, double[]> entry : expectedResult.entrySet()) {
      String expectedKey = entry.getKey();
      Assert.assertTrue(actualResult.containsKey(expectedKey));
      double[] expectedSums = entry.getValue();
      double[] actualSums = actualResult.get(expectedKey);
      for (int j = 0; j < expectedSums.length; j++) {
        Assert.assertEquals(actualSums[j], expectedSums[j],
            "Mis-match sum for key '" + expectedKey + "', Metric: " + metricNames.get(j) + ", Random Seed: " + _randomSeed);
      }
    }
  }
}
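_hardCodedQueries itself is not shown; its entries are plain PQL strings whose filters must fit the star-tree index. Hypothetical contents (table and column names are assumptions):

// Sketch only: names are illustrative, not from the test.
private static final String[] _hardCodedQueries = new String[] {
    "select sum(m1) from myTable",
    "select sum(m1) from myTable where d1 = 'v1'",
    "select sum(m1) from myTable where d1 = 'v1' and d2 = 'v2'"
};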
Use of com.linkedin.pinot.common.request.BrokerRequest in project pinot by linkedin.
The class ApproximateQueryTestUtil, method runQuery.
public static Object runQuery(QueryExecutor queryExecutor, List<String> segments, AvroQueryGenerator.TestAggreationQuery query, ServerMetrics metrics) {
  LOGGER.info("\nRunning: " + query.getPql());
  final BrokerRequest brokerRequest = REQUEST_COMPILER.compileToBrokerRequest(query.getPql());
  InstanceRequest instanceRequest = new InstanceRequest(counter++, brokerRequest);
  instanceRequest.setSearchSegments(new ArrayList<String>());
  for (String segment : segments) {
    instanceRequest.getSearchSegments().add(segment);
  }
  QueryRequest queryRequest = new QueryRequest(instanceRequest, metrics);
  final DataTable instanceResponse = queryExecutor.processQuery(queryRequest, queryRunners);
  // Reduce the single-instance response the same way the broker would.
  final Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<ServerInstance, DataTable>();
  instanceResponseMap.put(new ServerInstance("localhost:0000"), instanceResponse);
  final BrokerResponseNative brokerResponse = REDUCE_SERVICE.reduceOnDataTable(brokerRequest, instanceResponseMap);
  AggregationResult result = brokerResponse.getAggregationResults().get(0);
  Assert.assertNotNull(result);
  if (result.getValue() != null) {
    LOGGER.info("Aggregation Result is " + result.getValue().toString());
  } else if (result.getGroupByResult() != null) {
    LOGGER.info("GroupBy Result is " + result.getGroupByResult().toString());
  } else {
    throw new RuntimeException("Aggregation and GroupBy Results are both null.");
  }
  // Extract the value: a double for simple aggregations, the group-by result otherwise.
  if (query instanceof AvroQueryGenerator.TestSimpleAggreationQuery) {
    return Double.parseDouble(result.getValue().toString());
  }
  return result.getGroupByResult();
}
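A hypothetical invocation; the constructor arguments and segment name are assumptions (TestSimpleAggreationQuery keeps the project's original spelling):

// Sketch only: assumes TestSimpleAggreationQuery(pql, expectedResult) and an already-built queryExecutor.
AvroQueryGenerator.TestSimpleAggreationQuery aggQuery =
    new AvroQueryGenerator.TestSimpleAggreationQuery("select sum(count) from testTable", 1000.0);
Object value = ApproximateQueryTestUtil.runQuery(queryExecutor, Collections.singletonList("testTable_0"), aggQuery, serverMetrics);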