Use of com.linkedin.pinot.common.response.ServerInstance in the pinot project by LinkedIn.
The class BrokerRequestHandler, method processOptimizedBrokerRequests.
/**
 * Process the optimized broker requests for both the OFFLINE and the REALTIME table.
 *
 * @param originalBrokerRequest original broker request.
 * @param offlineBrokerRequest broker request for the OFFLINE table, or null if that table is not queried.
 * @param realtimeBrokerRequest broker request for the REALTIME table, or null if that table is not queried.
 * @param reduceService reduce service used to merge the server data tables.
 * @param scatterGatherStats scatter-gather statistics.
 * @param bucketingSelection customized bucketing selection.
 * @param requestId request ID.
 * @return broker response.
 * @throws InterruptedException
 */
@Nonnull
private BrokerResponse processOptimizedBrokerRequests(@Nonnull BrokerRequest originalBrokerRequest, @Nullable BrokerRequest offlineBrokerRequest, @Nullable BrokerRequest realtimeBrokerRequest, @Nonnull ReduceService reduceService, @Nonnull ScatterGatherStats scatterGatherStats, @Nullable BucketingSelection bucketingSelection, long requestId) throws InterruptedException {
String rawTableName = originalBrokerRequest.getQuerySource().getTableName();
ResponseType responseType = BrokerResponseFactory.getResponseType(originalBrokerRequest.getResponseFormat());
PhaseTimes timings = new PhaseTimes();
// Steps 1 & 2: look up the candidate servers for each segment set in the routing table, then
// scatter the per-table requests to the selected servers.
CompositeFuture<ServerInstance, ByteBuf> offlineFuture = null;
String offlineTable = null;
if (offlineBrokerRequest != null) {
offlineTable = offlineBrokerRequest.getQuerySource().getTableName();
offlineFuture = routeAndScatterBrokerRequest(offlineBrokerRequest, timings, scatterGatherStats, true, bucketingSelection, requestId);
}
CompositeFuture<ServerInstance, ByteBuf> realtimeFuture = null;
String realtimeTable = null;
if (realtimeBrokerRequest != null) {
realtimeTable = realtimeBrokerRequest.getQuerySource().getTableName();
realtimeFuture = routeAndScatterBrokerRequest(realtimeBrokerRequest, timings, scatterGatherStats, false, bucketingSelection, requestId);
}
if (offlineFuture == null && realtimeFuture == null) {
// Neither the OFFLINE nor the REALTIME table has any server to query.
return BrokerResponseFactory.getStaticEmptyBrokerResponse(responseType);
}
// Step 3: gather the raw responses from the servers.
List<ProcessingException> exceptions = new ArrayList<>();
int serversQueried = 0;
long gatherStart = System.nanoTime();
Map<ServerInstance, ByteBuf> offlineResponses = null;
if (offlineFuture != null) {
serversQueried += offlineFuture.getNumFutures();
offlineResponses = gatherServerResponses(offlineFuture, scatterGatherStats, true, offlineTable, exceptions);
}
Map<ServerInstance, ByteBuf> realtimeResponses = null;
if (realtimeFuture != null) {
serversQueried += realtimeFuture.getNumFutures();
realtimeResponses = gatherServerResponses(realtimeFuture, scatterGatherStats, false, realtimeTable, exceptions);
}
timings.addToGatherTime(System.nanoTime() - gatherStart);
if (offlineResponses == null && realtimeResponses == null) {
// Nothing came back from any server; surface the collected exceptions.
return BrokerResponseFactory.getBrokerResponseWithExceptions(responseType, exceptions);
}
// Step 4: deserialize the raw server responses into data tables.
int serversResponded = 0;
long deserializeStart = System.nanoTime();
Map<ServerInstance, DataTable> dataTableMap = new HashMap<>();
if (offlineResponses != null) {
serversResponded += offlineResponses.size();
deserializeServerResponses(offlineResponses, true, dataTableMap, offlineTable, exceptions);
}
if (realtimeResponses != null) {
serversResponded += realtimeResponses.size();
deserializeServerResponses(realtimeResponses, false, dataTableMap, realtimeTable, exceptions);
}
timings.addToDeserializationTime(System.nanoTime() - deserializeStart);
// Step 5: reduce (merge) the data tables into the broker response to be returned.
long reduceStart = System.nanoTime();
BrokerResponse brokerResponse = reduceService.reduceOnDataTable(originalBrokerRequest, dataTableMap, _brokerMetrics);
timings.addToReduceTime(System.nanoTime() - reduceStart);
// Attach processing exceptions and server counts, then publish broker metrics.
brokerResponse.setExceptions(exceptions);
brokerResponse.setNumServersQueried(serversQueried);
brokerResponse.setNumServersResponded(serversResponded);
timings.addPhaseTimesToBrokerMetrics(_brokerMetrics, rawTableName);
if (brokerResponse.getExceptionsSize() > 0) {
_brokerMetrics.addMeteredTableValue(rawTableName, BrokerMeter.BROKER_RESPONSES_WITH_PROCESSING_EXCEPTIONS, 1);
}
if (serversQueried > serversResponded) {
_brokerMetrics.addMeteredTableValue(rawTableName, BrokerMeter.BROKER_RESPONSES_WITH_PARTIAL_SERVERS_RESPONDED, 1);
}
return brokerResponse;
}
Use of com.linkedin.pinot.common.response.ServerInstance in the pinot project by LinkedIn.
The class BrokerRequestHandler, method deserializeServerResponses.
/**
 * Deserialize the server responses, putting each de-serialized data table into the data table map
 * passed in and appending any processing exception to the processing exception list passed in.
 * <p>For the hybrid use case, multiple responses might come from the same instance; a response
 * sequence is used to distinguish them.
 *
 * @param responseMap map from server to response.
 * @param isOfflineTable whether the responses are from an OFFLINE table.
 * @param dataTableMap map from server to data table.
 * @param tableName table name.
 * @param processingExceptions list of processing exceptions.
 */
private void deserializeServerResponses(@Nonnull Map<ServerInstance, ByteBuf> responseMap, boolean isOfflineTable, @Nonnull Map<ServerInstance, DataTable> dataTableMap, @Nonnull String tableName, @Nonnull List<ProcessingException> processingExceptions) {
for (Entry<ServerInstance, ByteBuf> response : responseMap.entrySet()) {
ServerInstance server = response.getKey();
if (!isOfflineTable) {
// Tag REALTIME responses with sequence 1 so that they do not collide with OFFLINE responses
// from the same instance in the data table map.
server = new ServerInstance(server.getHostname(), server.getPort(), 1);
}
ByteBuf content = response.getValue();
try {
byte[] bytes = new byte[content.readableBytes()];
content.readBytes(bytes);
dataTableMap.put(server, DataTableFactory.getDataTable(bytes));
} catch (Exception e) {
LOGGER.error("Caught exceptions while deserializing response for table: {} from server: {}", tableName, server, e);
_brokerMetrics.addMeteredTableValue(tableName, BrokerMeter.DATA_TABLE_DESERIALIZATION_EXCEPTIONS, 1);
processingExceptions.add(QueryException.getException(QueryException.DATA_TABLE_DESERIALIZATION_ERROR, e));
}
}
}
Use of com.linkedin.pinot.common.response.ServerInstance in the pinot project by LinkedIn.
The class BrokerServerBuilder, method buildNetwork.
/**
 * Build the broker network stack: metrics registry, Netty connection pool, routing table,
 * scatter-gather service and the broker request handler.
 *
 * @throws ConfigurationException if the transport configuration cannot be initialized.
 */
public void buildNetwork() throws ConfigurationException {
// Build the transport configuration.
Configuration transportConfigs = _config.subset(TRANSPORT_CONFIG_PREFIX);
TransportClientConf conf = new TransportClientConf();
conf.init(transportConfigs);
// Set up the metrics registry and broker metrics.
_registry = new MetricsRegistry();
MetricsHelper.initializeMetrics(_config.subset(METRICS_CONFIG_PREFIX));
MetricsHelper.registerMetricsRegistry(_registry);
_brokerMetrics = new BrokerMetrics(_registry, !emitTableLevelMetrics());
_brokerMetrics.initializeGlobalMeters();
_state.set(State.INIT);
_eventLoopGroup = new NioEventLoopGroup();
// NOTE: some of the client metrics use histograms, which perform synchronous operations.
// This is a fixed overhead per request/response. TODO: measure the overhead of this.
final NettyClientMetrics clientMetrics = new NettyClientMetrics(_registry, "client_");
// Set up the Netty connection pool.
_resourceManager = new PooledNettyClientResourceManager(_eventLoopGroup, new HashedWheelTimer(), clientMetrics);
// 50 threads for pool maintenance tasks (idle-connection timeouts etc.).
_poolTimeoutExecutor = new ScheduledThreadPoolExecutor(50);
_requestSenderPool = Executors.newCachedThreadPool();
final ConnectionPoolConfig connPoolCfg = conf.getConnPool();
_connPool = new KeyedPoolImpl<ServerInstance, NettyClientConnection>(connPoolCfg.getMinConnectionsPerServer(), connPoolCfg.getMaxConnectionsPerServer(), connPoolCfg.getIdleTimeoutMs(), connPoolCfg.getMaxBacklogPerServer(), _resourceManager, _poolTimeoutExecutor, _requestSenderPool, _registry);
_resourceManager.setPool(_connPool);
// Set up the routing table. For any mode other than CONFIG, Helix based routing is already
// initialized, so there is nothing to do here.
if (conf.getRoutingMode() == RoutingMode.CONFIG) {
final CfgBasedRouting cfgBasedRouting = new CfgBasedRouting();
cfgBasedRouting.init(conf.getCfgBasedRouting());
_routingTable = cfgBasedRouting;
}
// Set up scatter-gather.
_scatterGather = new ScatterGatherImpl(_connPool, _requestSenderPool);
// Set up the broker request handler.
ReduceServiceRegistry reduceServiceRegistry = buildReduceServiceRegistry();
_requestHandler = new BrokerRequestHandler(_routingTable, _timeBoundaryService, _scatterGather, reduceServiceRegistry, _brokerMetrics, _config);
LOGGER.info("Network initialized !!");
}
Use of com.linkedin.pinot.common.response.ServerInstance in the pinot project by LinkedIn.
The class QueriesSentinelTest, method testTrace.
/**
 * Verify that trace information is propagated through query execution and reduce when tracing is
 * enabled on the broker request and the instance request.
 */
@Test
public void testTrace() throws Exception {
String query = "select count(*) from testTable where column1='186154188'";
// Use parameterized logging instead of string concatenation.
LOGGER.info("running : {}", query);
final Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<>();
final BrokerRequest brokerRequest = REQUEST_COMPILER.compileToBrokerRequest(query);
// Tracing has to be enabled on both the broker request and the instance request.
brokerRequest.setEnableTrace(true);
InstanceRequest instanceRequest = new InstanceRequest(1, brokerRequest);
// TODO: add trace settings consistency
instanceRequest.setEnableTrace(true);
instanceRequest.setSearchSegments(new ArrayList<String>());
instanceRequest.getSearchSegments().add(segmentName);
QueryRequest queryRequest = new QueryRequest(instanceRequest, TableDataManagerProvider.getServerMetrics());
final DataTable instanceResponse = QUERY_EXECUTOR.processQuery(queryRequest, queryRunners);
instanceResponseMap.clear();
instanceResponseMap.put(new ServerInstance("localhost:0000"), instanceResponse);
final BrokerResponseNative brokerResponse = REDUCE_SERVICE.reduceOnDataTable(brokerRequest, instanceResponseMap);
LOGGER.info("BrokerResponse is {}", brokerResponse.getAggregationResults().get(0));
LOGGER.info("TraceInfo is {}", brokerResponse.getTraceInfo());
}
Use of com.linkedin.pinot.common.response.ServerInstance in the pinot project by LinkedIn.
The class QueriesSentinelTest, method testAggregationGroupBy.
/**
 * Run generated group-by aggregation queries through the query executor and reduce service, and
 * compare the broker results against the expected results derived from the Avro data.
 */
@Test
public void testAggregationGroupBy() throws Exception {
final List<TestGroupByAggreationQuery> groupByCalls = AVRO_QUERY_GENERATOR.giveMeNGroupByAggregationQueries(10000);
int counter = 0;
final Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<>();
for (final TestGroupByAggreationQuery groupBy : groupByCalls) {
// Use parameterized logging instead of string concatenation.
LOGGER.info("running {} : {}", counter, groupBy.pql);
final BrokerRequest brokerRequest = REQUEST_COMPILER.compileToBrokerRequest(groupBy.pql);
InstanceRequest instanceRequest = new InstanceRequest(counter++, brokerRequest);
instanceRequest.setSearchSegments(new ArrayList<String>());
instanceRequest.getSearchSegments().add(segmentName);
QueryRequest queryRequest = new QueryRequest(instanceRequest, TableDataManagerProvider.getServerMetrics());
final DataTable instanceResponse = QUERY_EXECUTOR.processQuery(queryRequest, queryRunners);
instanceResponseMap.clear();
instanceResponseMap.put(new ServerInstance("localhost:0000"), instanceResponse);
final BrokerResponseNative brokerResponse = REDUCE_SERVICE.reduceOnDataTable(brokerRequest, instanceResponseMap);
LOGGER.info("BrokerResponse is {}", brokerResponse.getAggregationResults().get(0));
LOGGER.info("Result from avro is : {}", groupBy.groupResults);
assertGroupByResults(brokerResponse.getAggregationResults().get(0).getGroupByResult(), groupBy.groupResults);
}
}
Aggregations