Use of com.linkedin.pinot.common.response.broker.BrokerResponseNative in project pinot by linkedin.
In the class BrokerReduceServiceTest, the method testCountQuery:
@Test
public void testCountQuery() {
  BrokerRequest brokerRequest = getCountQuery();
  QuerySource querySource = new QuerySource();
  querySource.setTableName("midas");
  brokerRequest.setQuerySource(querySource);
  InstanceRequest instanceRequest = new InstanceRequest(0, brokerRequest);
  instanceRequest.setSearchSegments(new ArrayList<String>());
  for (IndexSegment segment : _indexSegmentList) {
    instanceRequest.addToSearchSegments(segment.getSegmentName());
  }
  Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<ServerInstance, DataTable>();
  QueryRequest queryRequest = new QueryRequest(instanceRequest, TableDataManagerProvider.getServerMetrics());
  DataTable instanceResponse1 = _queryExecutor.processQuery(queryRequest, queryRunners);
  instanceResponseMap.put(new ServerInstance("localhost:0000"), instanceResponse1);
  DataTable instanceResponse2 = _queryExecutor.processQuery(queryRequest, queryRunners);
  instanceResponseMap.put(new ServerInstance("localhost:1111"), instanceResponse2);
  BrokerResponseNative brokerResponse = _reduceService.reduceOnDataTable(brokerRequest, instanceResponseMap);
  AggregationResult aggregationResult = brokerResponse.getAggregationResults().get(0);
  LOGGER.info("BrokerResponse is " + aggregationResult);
  checkAggregationResult(aggregationResult, "count_star", 800004.0);
  LOGGER.info("Time used for BrokerResponse is " + brokerResponse.getTimeUsedMs());
}
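The getCountQuery() helper is not part of this snippet. Below is a minimal sketch of how such a helper could assemble a count(*) BrokerRequest from the Thrift-generated request classes; the helper body is an assumption for illustration, not the project's actual code.

// Hypothetical sketch only: one way to build a BrokerRequest for "SELECT count(*) FROM midas".
// The real getCountQuery() in BrokerReduceServiceTest may be implemented differently.
private BrokerRequest getCountQuery() {
  AggregationInfo aggregationInfo = new AggregationInfo();
  aggregationInfo.setAggregationType("count");
  Map<String, String> aggregationParams = new HashMap<String, String>();
  aggregationParams.put("column", "*");
  aggregationInfo.setAggregationParams(aggregationParams);
  List<AggregationInfo> aggregationsInfo = new ArrayList<AggregationInfo>();
  aggregationsInfo.add(aggregationInfo);
  BrokerRequest brokerRequest = new BrokerRequest();
  brokerRequest.setAggregationsInfo(aggregationsInfo);
  return brokerRequest;
}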
Use of com.linkedin.pinot.common.response.broker.BrokerResponseNative in project pinot by linkedin.
In the class BrokerReduceServiceTest, the method testDistinctCountQuery0:
@Test
public void testDistinctCountQuery0() {
  BrokerRequest brokerRequest = getDistinctCountQuery("dim0");
  QuerySource querySource = new QuerySource();
  querySource.setTableName("midas");
  brokerRequest.setQuerySource(querySource);
  InstanceRequest instanceRequest = new InstanceRequest(0, brokerRequest);
  instanceRequest.setSearchSegments(new ArrayList<String>());
  for (IndexSegment segment : _indexSegmentList) {
    instanceRequest.addToSearchSegments(segment.getSegmentName());
  }
  Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<ServerInstance, DataTable>();
  try {
    QueryRequest queryRequest = new QueryRequest(instanceRequest, TableDataManagerProvider.getServerMetrics());
    DataTable instanceResponse1 = _queryExecutor.processQuery(queryRequest, queryRunners);
    instanceResponseMap.put(new ServerInstance("localhost:0000"), instanceResponse1);
    DataTable instanceResponse2 = _queryExecutor.processQuery(queryRequest, queryRunners);
    instanceResponseMap.put(new ServerInstance("localhost:1111"), instanceResponse2);
    BrokerResponseNative brokerResponse = _reduceService.reduceOnDataTable(brokerRequest, instanceResponseMap);
    AggregationResult aggregationResult = brokerResponse.getAggregationResults().get(0);
    LOGGER.info("BrokerResponse is " + aggregationResult);
    checkAggregationResult(aggregationResult, "distinctCount_dim0", 10.0);
    LOGGER.info("Time used for BrokerResponse is " + brokerResponse.getTimeUsedMs());
  } catch (Exception e) {
    e.printStackTrace();
    // Should never happen
    throw new RuntimeException(e.toString(), e);
  }
}
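The checkAggregationResult() assertion helper is also not shown in these snippets. Presumably it verifies the aggregation function name and the reduced value carried by the AggregationResult; a hedged sketch under that assumption (including the assumption that the tests use TestNG's Assert) could look like this:

// Hypothetical sketch of the assertion helper; the actual implementation is not shown here.
private void checkAggregationResult(AggregationResult aggregationResult, String expectedFunction, double expectedValue) {
  Assert.assertEquals(aggregationResult.getFunction(), expectedFunction);
  Assert.assertEquals(Double.parseDouble(aggregationResult.getValue().toString()), expectedValue, 1e-5);
}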
Use of com.linkedin.pinot.common.response.broker.BrokerResponseNative in project pinot by linkedin.
In the class BrokerReduceServiceTest, the method testMaxQuery:
@Test
public void testMaxQuery() {
  BrokerRequest brokerRequest = getMaxQuery();
  QuerySource querySource = new QuerySource();
  querySource.setTableName("midas");
  brokerRequest.setQuerySource(querySource);
  InstanceRequest instanceRequest = new InstanceRequest(0, brokerRequest);
  instanceRequest.setSearchSegments(new ArrayList<String>());
  for (IndexSegment segment : _indexSegmentList) {
    instanceRequest.addToSearchSegments(segment.getSegmentName());
  }
  Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<ServerInstance, DataTable>();
  try {
    QueryRequest queryRequest = new QueryRequest(instanceRequest, TableDataManagerProvider.getServerMetrics());
    DataTable instanceResponse1 = _queryExecutor.processQuery(queryRequest, queryRunners);
    instanceResponseMap.put(new ServerInstance("localhost:0000"), instanceResponse1);
    DataTable instanceResponse2 = _queryExecutor.processQuery(queryRequest, queryRunners);
    instanceResponseMap.put(new ServerInstance("localhost:1111"), instanceResponse2);
    BrokerResponseNative brokerResponse = _reduceService.reduceOnDataTable(brokerRequest, instanceResponseMap);
    LOGGER.info("BrokerResponse is " + brokerResponse.getAggregationResults().get(0));
    LOGGER.info("Time used for BrokerResponse is " + brokerResponse.getTimeUsedMs());
  } catch (Exception e) {
    e.printStackTrace();
    // Should never happen
    throw new RuntimeException(e.toString(), e);
  }
}
Use of com.linkedin.pinot.common.response.broker.BrokerResponseNative in project pinot by linkedin.
In the class BrokerReduceServiceTest, the method testMultiAggregationQuery:
@Test
public void testMultiAggregationQuery() {
  BrokerRequest brokerRequest = getMultiAggregationQuery();
  QuerySource querySource = new QuerySource();
  querySource.setTableName("midas");
  brokerRequest.setQuerySource(querySource);
  InstanceRequest instanceRequest = new InstanceRequest(0, brokerRequest);
  instanceRequest.setSearchSegments(new ArrayList<String>());
  for (IndexSegment segment : _indexSegmentList) {
    instanceRequest.addToSearchSegments(segment.getSegmentName());
  }
  Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<ServerInstance, DataTable>();
  try {
    QueryRequest queryRequest = new QueryRequest(instanceRequest, TableDataManagerProvider.getServerMetrics());
    instanceResponseMap.put(new ServerInstance("localhost:0000"), _queryExecutor.processQuery(queryRequest, queryRunners));
    instanceResponseMap.put(new ServerInstance("localhost:1111"), _queryExecutor.processQuery(queryRequest, queryRunners));
    instanceResponseMap.put(new ServerInstance("localhost:2222"), _queryExecutor.processQuery(queryRequest, queryRunners));
    instanceResponseMap.put(new ServerInstance("localhost:3333"), _queryExecutor.processQuery(queryRequest, queryRunners));
    instanceResponseMap.put(new ServerInstance("localhost:4444"), _queryExecutor.processQuery(queryRequest, queryRunners));
    instanceResponseMap.put(new ServerInstance("localhost:5555"), _queryExecutor.processQuery(queryRequest, queryRunners));
    instanceResponseMap.put(new ServerInstance("localhost:6666"), _queryExecutor.processQuery(queryRequest, queryRunners));
    instanceResponseMap.put(new ServerInstance("localhost:7777"), _queryExecutor.processQuery(queryRequest, queryRunners));
    instanceResponseMap.put(new ServerInstance("localhost:8888"), _queryExecutor.processQuery(queryRequest, queryRunners));
    instanceResponseMap.put(new ServerInstance("localhost:9999"), _queryExecutor.processQuery(queryRequest, queryRunners));
    BrokerResponseNative brokerResponse = _reduceService.reduceOnDataTable(brokerRequest, instanceResponseMap);
    AggregationResult aggregationResult = brokerResponse.getAggregationResults().get(0);
    LOGGER.info("BrokerResponse is " + aggregationResult);
    checkAggregationResult(aggregationResult, "count_star", 4000020.0);
    aggregationResult = brokerResponse.getAggregationResults().get(1);
    LOGGER.info("BrokerResponse is " + aggregationResult);
    checkAggregationResult(aggregationResult, "sum_met", 400002000000.0);
    aggregationResult = brokerResponse.getAggregationResults().get(2);
    LOGGER.info("BrokerResponse is " + aggregationResult);
    checkAggregationResult(aggregationResult, "max_met", 200000.0);
    aggregationResult = brokerResponse.getAggregationResults().get(3);
    LOGGER.info("BrokerResponse is " + aggregationResult);
    checkAggregationResult(aggregationResult, "min_met", 0.0);
    aggregationResult = brokerResponse.getAggregationResults().get(4);
    LOGGER.info("BrokerResponse is " + aggregationResult);
    checkAggregationResult(aggregationResult, "avg_met", 100000.0);
    aggregationResult = brokerResponse.getAggregationResults().get(5);
    LOGGER.info("BrokerResponse is " + aggregationResult);
    checkAggregationResult(aggregationResult, "distinctCount_dim0", 10.0);
    aggregationResult = brokerResponse.getAggregationResults().get(6);
    LOGGER.info("BrokerResponse is " + aggregationResult);
    checkAggregationResult(aggregationResult, "distinctCount_dim1", 100.0);
    LOGGER.info("Time Used for BrokerResponse is " + brokerResponse.getTimeUsedMs());
    LOGGER.info("Num Docs Scanned is " + brokerResponse.getNumDocsScanned());
    LOGGER.info("Total Docs for BrokerResponse is " + brokerResponse.getTotalDocs());
    // System.out.println(brokerResponse.toJson());
  } catch (Exception e) {
    e.printStackTrace();
    // Should never happen
    throw new RuntimeException(e.toString(), e);
  }
}
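Note that the additive statistics scale with the number of instance responses in the map: count_star is 800004.0 with two responses in testCountQuery and 4000020.0 with ten responses here, i.e. 400002 docs per response. The getMultiAggregationQuery() helper itself is not shown; a hypothetical sketch that matches the result names asserted above (count_star, sum_met, max_met, min_met, avg_met, distinctCount_dim0, distinctCount_dim1) could look as follows. The construction is an assumption for illustration, not the project's code.

// Hypothetical sketch only; the real getMultiAggregationQuery() may be built differently.
private BrokerRequest getMultiAggregationQuery() {
  List<AggregationInfo> aggregationsInfo = new ArrayList<AggregationInfo>();
  aggregationsInfo.add(buildAggregationInfo("count", "*"));
  aggregationsInfo.add(buildAggregationInfo("sum", "met"));
  aggregationsInfo.add(buildAggregationInfo("max", "met"));
  aggregationsInfo.add(buildAggregationInfo("min", "met"));
  aggregationsInfo.add(buildAggregationInfo("avg", "met"));
  aggregationsInfo.add(buildAggregationInfo("distinctCount", "dim0"));
  aggregationsInfo.add(buildAggregationInfo("distinctCount", "dim1"));
  BrokerRequest brokerRequest = new BrokerRequest();
  brokerRequest.setAggregationsInfo(aggregationsInfo);
  return brokerRequest;
}

// Hypothetical helper used only by the sketch above.
private AggregationInfo buildAggregationInfo(String type, String column) {
  AggregationInfo aggregationInfo = new AggregationInfo();
  aggregationInfo.setAggregationType(type);
  Map<String, String> aggregationParams = new HashMap<String, String>();
  aggregationParams.put("column", column);
  aggregationInfo.setAggregationParams(aggregationParams);
  return aggregationInfo;
}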
Use of com.linkedin.pinot.common.response.broker.BrokerResponseNative in project pinot by linkedin.
In the class BrokerReduceService, the method reduceOnDataTable:
@Nonnull
@Override
public BrokerResponseNative reduceOnDataTable(@Nonnull BrokerRequest brokerRequest,
    @Nonnull Map<ServerInstance, DataTable> dataTableMap, @Nullable BrokerMetrics brokerMetrics) {
  if (dataTableMap.size() == 0) {
    // Empty response.
    return BrokerResponseNative.empty();
  }
  BrokerResponseNative brokerResponseNative = new BrokerResponseNative();
  List<QueryProcessingException> processingExceptions = brokerResponseNative.getProcessingExceptions();
  long numDocsScanned = 0L;
  long numEntriesScannedInFilter = 0L;
  long numEntriesScannedPostFilter = 0L;
  long numTotalRawDocs = 0L;
  // Cache a data schema from data tables (try to cache one with data rows associated with it).
  DataSchema cachedDataSchema = null;
  // Process server response metadata.
  Iterator<Map.Entry<ServerInstance, DataTable>> iterator = dataTableMap.entrySet().iterator();
  while (iterator.hasNext()) {
    Map.Entry<ServerInstance, DataTable> entry = iterator.next();
    ServerInstance serverInstance = entry.getKey();
    DataTable dataTable = entry.getValue();
    Map<String, String> metadata = dataTable.getMetadata();
    // Reduce on trace info.
    if (brokerRequest.isEnableTrace()) {
      brokerResponseNative.getTraceInfo().put(serverInstance.getHostname(), metadata.get(DataTable.TRACE_INFO_METADATA_KEY));
    }
    // Reduce on exceptions.
    for (String key : metadata.keySet()) {
      if (key.startsWith(DataTable.EXCEPTION_METADATA_KEY)) {
        processingExceptions.add(new QueryProcessingException(Integer.parseInt(key.substring(9)), metadata.get(key)));
      }
    }
    // Reduce on execution statistics.
    String numDocsScannedString = metadata.get(DataTable.NUM_DOCS_SCANNED_METADATA_KEY);
    if (numDocsScannedString != null) {
      numDocsScanned += Long.parseLong(numDocsScannedString);
    }
    String numEntriesScannedInFilterString = metadata.get(DataTable.NUM_ENTRIES_SCANNED_IN_FILTER_METADATA_KEY);
    if (numEntriesScannedInFilterString != null) {
      numEntriesScannedInFilter += Long.parseLong(numEntriesScannedInFilterString);
    }
    String numEntriesScannedPostFilterString = metadata.get(DataTable.NUM_ENTRIES_SCANNED_POST_FILTER_METADATA_KEY);
    if (numEntriesScannedPostFilterString != null) {
      numEntriesScannedPostFilter += Long.parseLong(numEntriesScannedPostFilterString);
    }
    String numTotalRawDocsString = metadata.get(DataTable.TOTAL_DOCS_METADATA_KEY);
    if (numTotalRawDocsString != null) {
      numTotalRawDocs += Long.parseLong(numTotalRawDocsString);
    }
    // After processing the metadata, remove data tables without data rows inside.
    DataSchema dataSchema = dataTable.getDataSchema();
    if (dataSchema == null) {
      iterator.remove();
    } else {
      // Try to cache a data table with data rows inside, or cache one with data schema inside.
      if (dataTable.getNumberOfRows() == 0) {
        if (cachedDataSchema == null) {
          cachedDataSchema = dataSchema;
        }
        iterator.remove();
      } else {
        cachedDataSchema = dataSchema;
      }
    }
  }
  // Set execution statistics.
  brokerResponseNative.setNumDocsScanned(numDocsScanned);
  brokerResponseNative.setNumEntriesScannedInFilter(numEntriesScannedInFilter);
  brokerResponseNative.setNumEntriesScannedPostFilter(numEntriesScannedPostFilter);
  brokerResponseNative.setTotalDocs(numTotalRawDocs);
  // Update broker metrics.
  String tableName = brokerRequest.getQuerySource().getTableName();
  if (brokerMetrics != null) {
    brokerMetrics.addMeteredTableValue(tableName, BrokerMeter.DOCUMENTS_SCANNED, numDocsScanned);
    brokerMetrics.addMeteredTableValue(tableName, BrokerMeter.ENTRIES_SCANNED_IN_FILTER, numEntriesScannedInFilter);
    brokerMetrics.addMeteredTableValue(tableName, BrokerMeter.ENTRIES_SCANNED_POST_FILTER, numEntriesScannedPostFilter);
  }
  if (dataTableMap.isEmpty()) {
    // This will only happen for a selection query.
    if (cachedDataSchema != null) {
      List<String> selectionColumns =
          SelectionOperatorUtils.getSelectionColumns(brokerRequest.getSelections().getSelectionColumns(), cachedDataSchema);
      brokerResponseNative.setSelectionResults(new SelectionResults(selectionColumns, new ArrayList<Serializable[]>(0)));
    }
  } else {
    // Reduce server response data and set query results into the broker response.
    assert cachedDataSchema != null;
    if (brokerRequest.isSetSelections()) {
      // Selection query.
      // For a data table map with more than one data table, remove conflicting data tables.
      DataSchema masterDataSchema = cachedDataSchema.clone();
      if (dataTableMap.size() > 1) {
        List<String> droppedServers = removeConflictingResponses(masterDataSchema, dataTableMap);
        if (!droppedServers.isEmpty()) {
          String errorMessage = QueryException.MERGE_RESPONSE_ERROR.getMessage() + ": responses for table: " + tableName
              + " from servers: " + droppedServers + " got dropped due to data schema inconsistency.";
          LOGGER.error(errorMessage);
          if (brokerMetrics != null) {
            brokerMetrics.addMeteredTableValue(tableName, BrokerMeter.RESPONSE_MERGE_EXCEPTIONS, 1);
          }
          brokerResponseNative.addToExceptions(new QueryProcessingException(QueryException.MERGE_RESPONSE_ERROR_CODE, errorMessage));
        }
      }
      setSelectionResults(brokerResponseNative, brokerRequest.getSelections(), dataTableMap, masterDataSchema);
    } else {
      // Aggregation query.
      AggregationFunction[] aggregationFunctions =
          AggregationFunctionUtils.getAggregationFunctions(brokerRequest.getAggregationsInfo());
      if (!brokerRequest.isSetGroupBy()) {
        // Aggregation only query.
        setAggregationResults(brokerResponseNative, aggregationFunctions, dataTableMap, cachedDataSchema);
      } else {
        // Aggregation group-by query.
        setGroupByResults(brokerResponseNative, aggregationFunctions, brokerRequest.getGroupBy(), dataTableMap);
      }
    }
  }
  return brokerResponseNative;
}
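The tests above call a two-argument reduceOnDataTable(brokerRequest, instanceResponseMap). Presumably that overload simply delegates to the three-argument method shown here with no broker metrics, roughly as sketched below; this is an assumption, not necessarily the actual overload in the project.

// Hypothetical sketch of the two-argument convenience overload used by BrokerReduceServiceTest.
// Assumption: it delegates to the three-argument method with a null BrokerMetrics.
@Nonnull
public BrokerResponseNative reduceOnDataTable(@Nonnull BrokerRequest brokerRequest,
    @Nonnull Map<ServerInstance, DataTable> dataTableMap) {
  return reduceOnDataTable(brokerRequest, dataTableMap, null);
}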