Use of com.linkedin.pinot.common.utils.DataTable in the project pinot by linkedin.
From the class IntegrationTest, method testMaxQuery.
@Test
public void testMaxQuery() {
  // Build a MAX aggregation request against the test table.
  BrokerRequest brokerRequest = getMaxQuery();
  QuerySource querySource = new QuerySource();
  querySource.setTableName("testTable");
  brokerRequest.setQuerySource(querySource);
  InstanceRequest instanceRequest = new InstanceRequest(0, brokerRequest);
  addTestTableSearchSegmentsToInstanceRequest(instanceRequest);
  try {
    QueryRequest queryRequest = new QueryRequest(instanceRequest, _serverInstance.getServerMetrics());
    // Execute the query; the test passes as long as execution does not throw.
    DataTable instanceResponse = _queryExecutor.processQuery(queryRequest, queryRunners);
  } catch (Exception e) {
    // Rethrow with context; the cause is preserved, so no separate printStackTrace() is needed.
    throw new RuntimeException("Caught exception while processing MAX query", e);
  }
}
Use of com.linkedin.pinot.common.utils.DataTable in the project pinot by linkedin.
From the class ApproximateQueryTestUtil, method runQuery.
/**
 * Compiles the given PQL aggregation query, executes it against the provided segments on the
 * query executor, reduces the single-server response into a broker response, and returns the
 * aggregation value.
 *
 * @param queryExecutor query executor to run the compiled request on.
 * @param segments segment names to search.
 * @param query test aggregation query holding the PQL to run.
 * @param metrics server metrics to attach to the query request.
 * @return the aggregation value as a {@code Double} for simple aggregation queries, otherwise
 *         the group-by result object.
 * @throws RuntimeException if the broker response contains neither an aggregation value nor a
 *         group-by result.
 */
public static Object runQuery(QueryExecutor queryExecutor, List<String> segments, AvroQueryGenerator.TestAggreationQuery query, ServerMetrics metrics) {
  // Parameterized logging avoids eager string concatenation.
  LOGGER.info("\nRunning: {}", query.getPql());
  final BrokerRequest brokerRequest = REQUEST_COMPILER.compileToBrokerRequest(query.getPql());
  InstanceRequest instanceRequest = new InstanceRequest(counter++, brokerRequest);
  // Copy the segment list in one shot instead of adding elements one-by-one.
  instanceRequest.setSearchSegments(new ArrayList<String>(segments));
  QueryRequest queryRequest = new QueryRequest(instanceRequest, metrics);
  final DataTable instanceResponse = queryExecutor.processQuery(queryRequest, queryRunners);
  // Reduce the single-server response as if it came from a real server instance.
  final Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<ServerInstance, DataTable>();
  instanceResponseMap.put(new ServerInstance("localhost:0000"), instanceResponse);
  final BrokerResponseNative brokerResponse = REDUCE_SERVICE.reduceOnDataTable(brokerRequest, instanceResponseMap);
  AggregationResult result = brokerResponse.getAggregationResults().get(0);
  Assert.assertNotNull(result);
  if (result.getValue() != null) {
    LOGGER.info("Aggregation Result is {}", result.getValue());
  } else if (result.getGroupByResult() != null) {
    LOGGER.info("GroupBy Result is {}", result.getGroupByResult());
  } else {
    throw new RuntimeException("Aggregation and GroupBy Results both null.");
  }
  // Compute the return value from the already-fetched result instead of re-reading the response.
  if (query instanceof AvroQueryGenerator.TestSimpleAggreationQuery) {
    return Double.parseDouble(result.getValue().toString());
  }
  return result.getGroupByResult();
}
Use of com.linkedin.pinot.common.utils.DataTable in the project pinot by linkedin.
From the class BrokerRequestHandler, method processOptimizedBrokerRequests.
/**
 * Processes the optimized broker requests for both OFFLINE and REALTIME tables: routes and
 * scatters each non-null request, gathers and deserializes the per-server responses, reduces
 * them into a single broker response, and records per-phase timings and metrics.
 *
 * <p>Either {@code offlineBrokerRequest} or {@code realtimeBrokerRequest} may be null (but not
 * both, per the caller's contract); a null request simply skips that table type.
 *
 * @param originalBrokerRequest original broker request (used for table name, response format and reduce).
 * @param offlineBrokerRequest broker request for the OFFLINE table, or null to skip OFFLINE.
 * @param realtimeBrokerRequest broker request for the REALTIME table, or null to skip REALTIME.
 * @param reduceService reduce service used to merge the gathered data tables.
 * @param scatterGatherStats scatter-gather statistics, updated during routing and gathering.
 * @param bucketingSelection customized bucketing selection, or null for the default.
 * @param requestId request ID used for routing and logging.
 * @return broker response; never null (an empty or exception-only response is returned when no
 *         server could be reached or no response was gathered).
 * @throws InterruptedException if interrupted while waiting on the scatter-gather futures.
 */
@Nonnull
private BrokerResponse processOptimizedBrokerRequests(@Nonnull BrokerRequest originalBrokerRequest, @Nullable BrokerRequest offlineBrokerRequest, @Nullable BrokerRequest realtimeBrokerRequest, @Nonnull ReduceService reduceService, @Nonnull ScatterGatherStats scatterGatherStats, @Nullable BucketingSelection bucketingSelection, long requestId) throws InterruptedException {
String originalTableName = originalBrokerRequest.getQuerySource().getTableName();
ResponseType serverResponseType = BrokerResponseFactory.getResponseType(originalBrokerRequest.getResponseFormat());
PhaseTimes phaseTimes = new PhaseTimes();
// Step 1: find the candidate servers to be queried for each set of segments from the routing table.
// Step 2: select servers for each segment set and scatter request to the servers.
String offlineTableName = null;
CompositeFuture<ServerInstance, ByteBuf> offlineCompositeFuture = null;
if (offlineBrokerRequest != null) {
offlineTableName = offlineBrokerRequest.getQuerySource().getTableName();
offlineCompositeFuture = routeAndScatterBrokerRequest(offlineBrokerRequest, phaseTimes, scatterGatherStats, true, bucketingSelection, requestId);
}
String realtimeTableName = null;
CompositeFuture<ServerInstance, ByteBuf> realtimeCompositeFuture = null;
if (realtimeBrokerRequest != null) {
realtimeTableName = realtimeBrokerRequest.getQuerySource().getTableName();
realtimeCompositeFuture = routeAndScatterBrokerRequest(realtimeBrokerRequest, phaseTimes, scatterGatherStats, false, bucketingSelection, requestId);
}
if ((offlineCompositeFuture == null) && (realtimeCompositeFuture == null)) {
// No server found in either OFFLINE or REALTIME table; return a static empty response.
return BrokerResponseFactory.getStaticEmptyBrokerResponse(serverResponseType);
}
// Step 3: gather raw (serialized) responses from the servers; exceptions are accumulated
// into processingExceptions rather than aborting the whole request.
int numServersQueried = 0;
long gatherStartTime = System.nanoTime();
List<ProcessingException> processingExceptions = new ArrayList<>();
Map<ServerInstance, ByteBuf> offlineServerResponseMap = null;
Map<ServerInstance, ByteBuf> realtimeServerResponseMap = null;
if (offlineCompositeFuture != null) {
numServersQueried += offlineCompositeFuture.getNumFutures();
offlineServerResponseMap = gatherServerResponses(offlineCompositeFuture, scatterGatherStats, true, offlineTableName, processingExceptions);
}
if (realtimeCompositeFuture != null) {
numServersQueried += realtimeCompositeFuture.getNumFutures();
realtimeServerResponseMap = gatherServerResponses(realtimeCompositeFuture, scatterGatherStats, false, realtimeTableName, processingExceptions);
}
phaseTimes.addToGatherTime(System.nanoTime() - gatherStartTime);
if ((offlineServerResponseMap == null) && (realtimeServerResponseMap == null)) {
// No response gathered; surface the accumulated processing exceptions to the caller.
return BrokerResponseFactory.getBrokerResponseWithExceptions(serverResponseType, processingExceptions);
}
// Step 4: deserialize the server responses into data tables; OFFLINE and REALTIME results
// are merged into a single map for the reduce phase.
int numServersResponded = 0;
long deserializationStartTime = System.nanoTime();
Map<ServerInstance, DataTable> dataTableMap = new HashMap<>();
if (offlineServerResponseMap != null) {
numServersResponded += offlineServerResponseMap.size();
deserializeServerResponses(offlineServerResponseMap, true, dataTableMap, offlineTableName, processingExceptions);
}
if (realtimeServerResponseMap != null) {
numServersResponded += realtimeServerResponseMap.size();
deserializeServerResponses(realtimeServerResponseMap, false, dataTableMap, realtimeTableName, processingExceptions);
}
phaseTimes.addToDeserializationTime(System.nanoTime() - deserializationStartTime);
// Step 5: reduce (merge) the server responses and create a broker response to be returned.
long reduceStartTime = System.nanoTime();
BrokerResponse brokerResponse = reduceService.reduceOnDataTable(originalBrokerRequest, dataTableMap, _brokerMetrics);
phaseTimes.addToReduceTime(System.nanoTime() - reduceStartTime);
// Set processing exceptions and number of servers queried/responded.
brokerResponse.setExceptions(processingExceptions);
brokerResponse.setNumServersQueried(numServersQueried);
brokerResponse.setNumServersResponded(numServersResponded);
// Update broker metrics: per-phase timings, plus meters for responses that carried
// exceptions or came back from only a subset of the queried servers.
phaseTimes.addPhaseTimesToBrokerMetrics(_brokerMetrics, originalTableName);
if (brokerResponse.getExceptionsSize() > 0) {
_brokerMetrics.addMeteredTableValue(originalTableName, BrokerMeter.BROKER_RESPONSES_WITH_PROCESSING_EXCEPTIONS, 1);
}
if (numServersQueried > numServersResponded) {
_brokerMetrics.addMeteredTableValue(originalTableName, BrokerMeter.BROKER_RESPONSES_WITH_PARTIAL_SERVERS_RESPONDED, 1);
}
return brokerResponse;
}
Use of com.linkedin.pinot.common.utils.DataTable in the project pinot by linkedin.
From the class IntermediateResultsBlock, method getAggregationGroupByResultDataTable.
@Nonnull
private DataTable getAggregationGroupByResultDataTable() throws Exception {
  // Schema has two columns: the aggregation column name (STRING) and its combined
  // group-by result (OBJECT).
  DataSchema schema = new DataSchema(new String[] { "functionName", "GroupByResultMap" }, new FieldSpec.DataType[] { FieldSpec.DataType.STRING, FieldSpec.DataType.OBJECT });
  DataTableBuilder builder = new DataTableBuilder(schema);
  // Emit one row per aggregation function, in declaration order.
  for (int index = 0; index < _aggregationFunctionContexts.length; index++) {
    builder.startRow();
    builder.setColumn(0, _aggregationFunctionContexts[index].getAggregationColumnName());
    builder.setColumn(1, _combinedAggregationGroupByResult.get(index));
    builder.finishRow();
  }
  // Attach the block's metadata before handing the table back.
  return attachMetadataToDataTable(builder.build());
}
Aggregations