use of com.linkedin.pinot.common.utils.DataTable in project pinot by linkedin.
the class QueryExceptionTest method testQueryParsingFailedQuery.
@Test
public void testQueryParsingFailedQuery() throws Exception {
  // "sudm" is a deliberate misspelling of "sum", so query processing must fail on the server.
  String query = "select sudm(blablaa) from testTable where column1='24516187'";
  LOGGER.info("running : " + query);
  final Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<ServerInstance, DataTable>();
  final BrokerRequest brokerRequest = REQUEST_COMPILER.compileToBrokerRequest(query);
  InstanceRequest instanceRequest = new InstanceRequest(1, brokerRequest);
  instanceRequest.setSearchSegments(new ArrayList<String>());
  instanceRequest.getSearchSegments().add(segmentName);
  QueryRequest queryRequest = new QueryRequest(instanceRequest, TableDataManagerProvider.getServerMetrics());
  final DataTable instanceResponse = QUERY_EXECUTOR.processQuery(queryRequest, queryRunners);
  instanceResponseMap.clear();
  instanceResponseMap.put(new ServerInstance("localhost:0000"), instanceResponse);
  final BrokerResponseNative brokerResponse = REDUCE_SERVICE.reduceOnDataTable(brokerRequest, instanceResponseMap);
  LOGGER.info("BrokerResponse is {}", brokerResponse);
  // The failure must surface as at least one exception on the reduced broker response.
  Assert.assertTrue(brokerResponse.getExceptionsSize() > 0);
}
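The assertion only checks that some exception was attached to the reduced response. Below is a minimal sketch of the error path in isolation, assuming the DataTableImplV2 and QueryException APIs used elsewhere on this page; the QUERY_PARSING_ERROR constant and the import paths are assumptions, not taken from this snippet.

import com.linkedin.pinot.common.exception.QueryException; // assumed package
import com.linkedin.pinot.common.utils.DataTable;
import com.linkedin.pinot.core.common.datatable.DataTableImplV2; // assumed package

public class DataTableExceptionSketch {
  public static void main(String[] args) throws Exception {
    // Build the same kind of error-carrying DataTable a server returns when a
    // query fails; QUERY_PARSING_ERROR is assumed to exist alongside the
    // INTERNAL_ERROR constant used later on this page.
    DataTable errorTable = new DataTableImplV2();
    errorTable.addException(QueryException.QUERY_PARSING_ERROR);

    // Exceptions ride along in the DataTable, so they survive the
    // serialize/deserialize round trip back to the broker, where
    // reduceOnDataTable folds them into the BrokerResponse.
    byte[] wireFormat = errorTable.toBytes();
    System.out.println("serialized error response: " + wireFormat.length + " bytes");
  }
}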
use of com.linkedin.pinot.common.utils.DataTable in project pinot by linkedin.
the class RealtimeQueriesSentinelTest method test1.
@Test
public void test1() throws Exception {
  final Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<ServerInstance, DataTable>();
  String query = "select sum('count') from testTable where column13='1540094560' group by column3 top 100 limit 0";
  // Expected group-by sums, computed independently from the Avro input data.
  Map<Object, Double> fromAvro = new HashMap<Object, Double>();
  fromAvro.put(null, 2.0D);
  fromAvro.put("", 1.2469280068E10D);
  fromAvro.put("F", 127.0D);
  fromAvro.put("A", 20.0D);
  fromAvro.put("H", 29.0D);
  final BrokerRequest brokerRequest = REQUEST_COMPILER.compileToBrokerRequest(query);
  InstanceRequest instanceRequest = new InstanceRequest(485, brokerRequest);
  instanceRequest.setSearchSegments(new ArrayList<String>());
  instanceRequest.getSearchSegments().add("testTable_testTable");
  QueryRequest queryRequest = new QueryRequest(instanceRequest, TableDataManagerProvider.getServerMetrics());
  DataTable instanceResponse = QUERY_EXECUTOR.processQuery(queryRequest, queryRunners);
  instanceResponseMap.clear();
  instanceResponseMap.put(new ServerInstance("localhost:0000"), instanceResponse);
  BrokerResponseNative brokerResponse = REDUCE_SERVICE.reduceOnDataTable(brokerRequest, instanceResponseMap);
  JSONArray actual = brokerResponse.toJson().getJSONArray("aggregationResults").getJSONObject(0).getJSONArray("groupByResult");
  assertGroupByResults(actual, fromAvro);
}
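assertGroupByResults is not shown on this page. Here is a hedged sketch of what such a helper could look like, assuming each groupByResult entry is a JSON object with a single-element "group" array and a "value" field; this shape and the helper body are illustrative reconstructions, not the project's actual implementation.

import java.util.Map;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import org.testng.Assert;

public class GroupByAssertSketch {
  // Illustrative only: compares each returned group's value against the
  // expected map built from the Avro data, within a small tolerance.
  static void assertGroupByResults(JSONArray actual, Map<Object, Double> expected) throws JSONException {
    for (int i = 0; i < actual.length(); i++) {
      JSONObject entry = actual.getJSONObject(i);
      String groupKey = entry.getJSONArray("group").getString(0);
      double value = entry.getDouble("value");
      Double expectedValue = expected.get(groupKey);
      if (expectedValue != null) {
        Assert.assertEquals(value, expectedValue.doubleValue(), 1e-5, "mismatch for group " + groupKey);
      }
    }
  }
}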
use of com.linkedin.pinot.common.utils.DataTable in project pinot by linkedin.
the class RealtimeQueriesSentinelTest method testAggregation.
@Test
public void testAggregation() throws Exception {
  int counter = 0;
  final Map<ServerInstance, DataTable> instanceResponseMap = new HashMap<ServerInstance, DataTable>();
  // Each generated query carries its expected result, precomputed from the Avro input.
  final List<TestSimpleAggreationQuery> aggCalls = AVRO_QUERY_GENERATOR.giveMeNSimpleAggregationQueries(10000);
  for (final TestSimpleAggreationQuery aggCall : aggCalls) {
    LOGGER.info("running " + counter + " : " + aggCall.pql);
    final BrokerRequest brokerRequest = REQUEST_COMPILER.compileToBrokerRequest(aggCall.pql);
    InstanceRequest instanceRequest = new InstanceRequest(counter++, brokerRequest);
    instanceRequest.setSearchSegments(new ArrayList<String>());
    instanceRequest.getSearchSegments().add("testTable_testTable");
    QueryRequest queryRequest = new QueryRequest(instanceRequest, TableDataManagerProvider.getServerMetrics());
    DataTable instanceResponse = QUERY_EXECUTOR.processQuery(queryRequest, queryRunners);
    instanceResponseMap.clear();
    instanceResponseMap.put(new ServerInstance("localhost:0000"), instanceResponse);
    final BrokerResponseNative brokerResponse = REDUCE_SERVICE.reduceOnDataTable(brokerRequest, instanceResponseMap);
    LOGGER.info("BrokerResponse is " + brokerResponse.getAggregationResults().get(0));
    LOGGER.info("Result from avro is : " + aggCall.result);
    Double actual = Double.parseDouble(brokerResponse.getAggregationResults().get(0).getValue().toString());
    Double expected = aggCall.result;
    try {
      Assert.assertEquals(actual, expected);
    } catch (AssertionError e) {
      // Dump the failing query and both values before rethrowing, so the
      // offending generated query is easy to reproduce.
      System.out.println("********************************");
      System.out.println("query : " + aggCall.pql);
      System.out.println("actual : " + actual);
      System.out.println("expected : " + aggCall.result);
      System.out.println("********************************");
      throw e;
    }
  }
}
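TestSimpleAggreationQuery (the misspelling is the project's own class name) is used here only through its pql and result fields. A minimal sketch of the shape the loop relies on, reconstructed from usage above rather than from the actual nested class in AvroQueryGenerator:

// Reconstructed from usage; the real class in AvroQueryGenerator may carry
// additional fields or a different constructor.
public static class TestSimpleAggreationQuery {
  public final String pql;    // generated aggregation query, e.g. "select sum(...) from testTable ..."
  public final Double result; // expected value, computed independently from the Avro input

  public TestSimpleAggreationQuery(String pql, Double result) {
    this.pql = pql;
    this.result = result;
  }
}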
use of com.linkedin.pinot.common.utils.DataTable in project pinot by linkedin.
the class ScheduledRequestHandler method processRequest.
@Override
public ListenableFuture<byte[]> processRequest(ChannelHandlerContext channelHandlerContext, ByteBuf request) {
  final long queryStartTimeNs = System.nanoTime();
  serverMetrics.addMeteredGlobalValue(ServerMeter.QUERIES, 1);
  LOGGER.debug("Processing request : {}", request);

  byte[] byteArray = new byte[request.readableBytes()];
  request.readBytes(byteArray);
  SerDe serDe = new SerDe(new TCompactProtocol.Factory());
  final InstanceRequest instanceRequest = new InstanceRequest();
  if (!serDe.deserialize(instanceRequest, byteArray)) {
    LOGGER.error("Failed to deserialize query request from broker ip: {}",
        ((InetSocketAddress) channelHandlerContext.channel().remoteAddress()).getAddress().getHostAddress());
    // Reply with an error-carrying DataTable instead of failing the channel.
    DataTable result = new DataTableImplV2();
    result.addException(QueryException.INTERNAL_ERROR);
    serverMetrics.addMeteredGlobalValue(ServerMeter.REQUEST_DESERIALIZATION_EXCEPTIONS, 1);
    QueryRequest queryRequest = new QueryRequest(null, serverMetrics);
    queryRequest.getTimerContext().setQueryArrivalTimeNs(queryStartTimeNs);
    return Futures.immediateFuture(serializeDataTable(queryRequest, result));
  }

  final QueryRequest queryRequest = new QueryRequest(instanceRequest, serverMetrics);
  final TimerContext timerContext = queryRequest.getTimerContext();
  timerContext.setQueryArrivalTimeNs(queryStartTimeNs);
  TimerContext.Timer deserializationTimer =
      timerContext.startNewPhaseTimerAtNs(ServerQueryPhase.REQUEST_DESERIALIZATION, queryStartTimeNs);
  deserializationTimer.stopAndRecord();

  LOGGER.debug("Processing requestId:{},request={}", instanceRequest.getRequestId(), instanceRequest);
  ListenableFuture<DataTable> queryTask = queryScheduler.submit(queryRequest);

  // The following future provides a default response in case of uncaught
  // exceptions from query processing.
  ListenableFuture<DataTable> queryResponse =
      Futures.catching(queryTask, Throwable.class, new Function<Throwable, DataTable>() {
        @Nullable
        @Override
        public DataTable apply(@Nullable Throwable input) {
          // This is called iff queryTask fails with an unhandled exception.
          serverMetrics.addMeteredGlobalValue(ServerMeter.UNCAUGHT_EXCEPTIONS, 1);
          DataTable result = new DataTableImplV2();
          result.addException(QueryException.INTERNAL_ERROR);
          return result;
        }
      });

  // Transform the DataTable to a serialized byte[] to send back to the broker.
  ListenableFuture<byte[]> serializedQueryResponse =
      Futures.transform(queryResponse, new Function<DataTable, byte[]>() {
        @Nullable
        @Override
        public byte[] apply(@Nullable DataTable instanceResponse) {
          byte[] responseData = serializeDataTable(queryRequest, instanceResponse);
          LOGGER.info("Processed requestId={},reqSegments={},prunedToSegmentCount={},deserTimeMs={},planTimeMs={},"
                  + "planExecTimeMs={},totalExecMs={},serTimeMs={},totalTimeMs={},broker={}",
              queryRequest.getInstanceRequest().getRequestId(),
              queryRequest.getInstanceRequest().getSearchSegments().size(),
              queryRequest.getSegmentCountAfterPruning(),
              timerContext.getPhaseDurationMs(ServerQueryPhase.REQUEST_DESERIALIZATION),
              timerContext.getPhaseDurationMs(ServerQueryPhase.BUILD_QUERY_PLAN),
              timerContext.getPhaseDurationMs(ServerQueryPhase.QUERY_PLAN_EXECUTION),
              timerContext.getPhaseDurationMs(ServerQueryPhase.QUERY_PROCESSING),
              timerContext.getPhaseDurationMs(ServerQueryPhase.RESPONSE_SERIALIZATION),
              timerContext.getPhaseDurationMs(ServerQueryPhase.TOTAL_QUERY_TIME),
              queryRequest.getBrokerId());
          return responseData;
        }
      });
  return serializedQueryResponse;
}
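serializeDataTable is referenced above but not shown. Here is a hedged sketch of the work it has to do, built only from calls already visible on this page (DataTable.toBytes() from the test below, the startNewPhaseTimerAtNs timer API from processRequest); the real method likely also records response-size metrics.

// Sketch only, not the project's actual implementation.
private static byte[] serializeDataTable(QueryRequest queryRequest, DataTable instanceResponse) {
  TimerContext timerContext = queryRequest.getTimerContext();
  TimerContext.Timer serializationTimer =
      timerContext.startNewPhaseTimerAtNs(ServerQueryPhase.RESPONSE_SERIALIZATION, System.nanoTime());
  byte[] responseBytes;
  try {
    // DataTable.toBytes() produces the wire format the broker deserializes.
    responseBytes = instanceResponse.toBytes();
  } catch (Exception e) {
    // Fall back to an empty payload; the phase is still timed so the
    // serTimeMs entry in the processed-request log stays meaningful.
    responseBytes = new byte[0];
  }
  serializationTimer.stopAndRecord();
  return responseBytes;
}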
use of com.linkedin.pinot.common.utils.DataTable in project pinot by linkedin.
the class ScheduledRequestHandlerTest method testQueryProcessingException.
@Test
public void testQueryProcessingException() throws Exception {
  ScheduledRequestHandler handler = new ScheduledRequestHandler(new QueryScheduler(queryExecutor) {
    @Override
    public ListenableFuture<DataTable> submit(QueryRequest queryRequest) {
      return queryWorkers.submit(new Callable<DataTable>() {
        @Override
        public DataTable call() throws Exception {
          throw new RuntimeException("query processing error");
        }
      });
    }
  }, serverMetrics);
  ByteBuf requestBuf = getSerializedInstanceRequest(getInstanceRequest());
  ListenableFuture<byte[]> responseFuture = handler.processRequest(channelHandlerContext, requestBuf);
  byte[] bytes = responseFuture.get(2, TimeUnit.SECONDS);
  // we get DataTable with exception information in case of query processing exception
  Assert.assertTrue(bytes.length > 0);
  DataTable expectedDT = new DataTableImplV2();
  expectedDT.addException(QueryException.INTERNAL_ERROR);
  Assert.assertEquals(bytes, expectedDT.toBytes());
}
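getInstanceRequest and getSerializedInstanceRequest are not shown on this page. A hedged sketch of what they plausibly do, mirroring the SerDe setup that processRequest uses on the receiving side; the request id, broker id, and segment names are made up for illustration, and SerDe.serialize is assumed to be the counterpart of the deserialize call above.

import java.util.Arrays;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import org.apache.thrift.protocol.TCompactProtocol;

// Sketch reconstructed from processRequest's deserialization path;
// the test's real helpers may differ in detail.
static InstanceRequest getInstanceRequest() {
  InstanceRequest request = new InstanceRequest();
  request.setRequestId(1L);                                         // arbitrary id for the test
  request.setBrokerId("testBroker");                                // hypothetical broker id
  request.setSearchSegments(Arrays.asList("segment1", "segment2")); // made-up segment names
  return request;
}

static ByteBuf getSerializedInstanceRequest(InstanceRequest request) {
  // Must mirror the SerDe the handler constructs, or deserialization fails and
  // the handler takes the REQUEST_DESERIALIZATION_EXCEPTIONS path instead.
  SerDe serDe = new SerDe(new TCompactProtocol.Factory());
  return Unpooled.wrappedBuffer(serDe.serialize(request));
}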