Use of org.apache.druid.server.QueryStats in project druid by druid-io.
In the class DefaultRequestLogEventTest, the method testDefaultRequestLogEventSerde.
@Test
public void testDefaultRequestLogEventSerde() throws Exception
{
  RequestLogLine nativeLine = RequestLogLine.forNative(
      new TimeseriesQuery(
          new TableDataSource("dummy"),
          new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.of("2015-01-01/2015-01-02"))),
          true,
          VirtualColumns.EMPTY,
          null,
          Granularities.ALL,
          ImmutableList.of(),
          ImmutableList.of(),
          5,
          ImmutableMap.of("key", "value")
      ),
      DateTimes.of(2019, 12, 12, 3, 1),
      "127.0.0.1",
      new QueryStats(ImmutableMap.of("query/time", 13L, "query/bytes", 10L, "success", true, "identity", "allowAll"))
  );
  DefaultRequestLogEvent defaultRequestLogEvent = new DefaultRequestLogEvent(
      ImmutableMap.of("service", "druid-service", "host", "127.0.0.1"),
      "feed",
      nativeLine
  );
  String logEventJson = objectMapper.writeValueAsString(defaultRequestLogEvent);
  String expected = "{\"feed\":\"feed\",\"query\":{\"queryType\":\"timeseries\",\"dataSource\":{\"type\":\"table\",\"name\":\"dummy\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"2015-01-01T00:00:00.000Z/2015-01-02T00:00:00.000Z\"]},\"descending\":true,\"virtualColumns\":[],\"filter\":null,\"granularity\":{\"type\":\"all\"},\"aggregations\":[],\"postAggregations\":[],\"limit\":5,\"context\":{\"key\":\"value\"}},\"host\":\"127.0.0.1\",\"timestamp\":\"2019-12-12T03:01:00.000Z\",\"service\":\"druid-service\",\"sql\":null,\"sqlQueryContext\":{},\"remoteAddr\":\"127.0.0.1\",\"queryStats\":{\"query/time\":13,\"query/bytes\":10,\"success\":true,\"identity\":\"allowAll\"}}";
  Assert.assertEquals(objectMapper.readTree(expected), objectMapper.readTree(logEventJson));
}
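The assertion compares parsed JSON trees rather than raw strings, so the test is insensitive to property ordering. A minimal, self-contained sketch of the same pattern using only Jackson (the values here are illustrative, not taken from the test):

import com.fasterxml.jackson.databind.ObjectMapper;

public class JsonTreeEquality
{
  public static void main(String[] args) throws Exception
  {
    ObjectMapper mapper = new ObjectMapper();
    String a = "{\"query/time\":13,\"success\":true}";
    String b = "{\"success\":true,\"query/time\":13}";
    // readTree produces JsonNode trees whose equals() ignores object key order,
    // so two equivalent JSON documents compare equal regardless of field order.
    System.out.println(mapper.readTree(a).equals(mapper.readTree(b))); // true
  }
}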
Use of org.apache.druid.server.QueryStats in project druid by druid-io.
In the class FilteredRequestLoggerTest, the method testNotFilterAboveThreshold.
@Test
public void testNotFilterAboveThreshold() throws IOException
{
  RequestLogger delegate = EasyMock.createStrictMock(RequestLogger.class);
  delegate.logNativeQuery(EasyMock.anyObject());
  EasyMock.expectLastCall().times(2);
  delegate.logSqlQuery(EasyMock.anyObject());
  EasyMock.expectLastCall().times(2);

  // native threshold 1000 ms, SQL threshold 2000 ms; lines at or above the threshold must reach the delegate
  FilteredRequestLoggerProvider.FilteredRequestLogger logger =
      new FilteredRequestLoggerProvider.FilteredRequestLogger(delegate, 1000, 2000, ImmutableList.of());

  RequestLogLine nativeRequestLogLine = EasyMock.createMock(RequestLogLine.class);
  EasyMock.expect(nativeRequestLogLine.getQueryStats())
          .andReturn(new QueryStats(ImmutableMap.of("query/time", 10000)))
          .once();
  EasyMock.expect(nativeRequestLogLine.getQueryStats())
          .andReturn(new QueryStats(ImmutableMap.of("query/time", 1000)))
          .once();
  EasyMock.expect(nativeRequestLogLine.getQuery()).andReturn(testSegmentMetadataQuery).times(2);

  RequestLogLine sqlRequestLogLine = EasyMock.createMock(RequestLogLine.class);
  EasyMock.expect(sqlRequestLogLine.getQueryStats())
          .andReturn(new QueryStats(ImmutableMap.of("sqlQuery/time", 10000)))
          .once();
  EasyMock.expect(sqlRequestLogLine.getQueryStats())
          .andReturn(new QueryStats(ImmutableMap.of("sqlQuery/time", 2000)))
          .once();
  EasyMock.expect(sqlRequestLogLine.getQuery()).andReturn(testSegmentMetadataQuery).times(2);

  EasyMock.replay(nativeRequestLogLine, sqlRequestLogLine, delegate);
  logger.logNativeQuery(nativeRequestLogLine);
  logger.logNativeQuery(nativeRequestLogLine);
  logger.logSqlQuery(sqlRequestLogLine);
  logger.logSqlQuery(sqlRequestLogLine);
  EasyMock.verify(nativeRequestLogLine, sqlRequestLogLine, delegate);
}
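Both assertions hinge on the logger forwarding a line only when its reported time meets the configured threshold. A hypothetical simplification of that check, keyed on "query/time" or "sqlQuery/time" (a hedged sketch, not the actual FilteredRequestLogger, which also handles missing stats and a list of muted query types):

import java.util.Map;

final class ThresholdCheck
{
  static boolean shouldLog(Map<String, Object> stats, String timeKey, long thresholdMs)
  {
    Object time = stats.get(timeKey);
    // Forward to the delegate only when the recorded time reaches the threshold.
    return time instanceof Number && ((Number) time).longValue() >= thresholdMs;
  }

  public static void main(String[] args)
  {
    System.out.println(shouldLog(Map.of("query/time", 10000), "query/time", 1000));      // true: logged
    System.out.println(shouldLog(Map.of("sqlQuery/time", 1000), "sqlQuery/time", 2000)); // false: filtered
  }
}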
Use of org.apache.druid.server.QueryStats in project druid by druid-io.
In the class FilteredRequestLoggerTest, the method testFilterBelowThreshold.
@Test
public void testFilterBelowThreshold() throws IOException
{
  RequestLogger delegate = EasyMock.createStrictMock(RequestLogger.class);
  // The delegate throws if it is ever invoked, so the test fails unless below-threshold lines are filtered out.
  delegate.logNativeQuery(EasyMock.anyObject());
  EasyMock.expectLastCall().andThrow(new IOException());
  delegate.logSqlQuery(EasyMock.anyObject());
  EasyMock.expectLastCall().andThrow(new IOException());

  FilteredRequestLoggerProvider.FilteredRequestLogger logger =
      new FilteredRequestLoggerProvider.FilteredRequestLogger(delegate, 1000, 2000, ImmutableList.of());

  RequestLogLine nativeRequestLogLine = EasyMock.createMock(RequestLogLine.class);
  EasyMock.expect(nativeRequestLogLine.getQueryStats())
          .andReturn(new QueryStats(ImmutableMap.of("query/time", 100)))
          .once();

  RequestLogLine sqlRequestLogLine = EasyMock.createMock(RequestLogLine.class);
  EasyMock.expect(sqlRequestLogLine.getQueryStats())
          .andReturn(new QueryStats(ImmutableMap.of("sqlQuery/time", 1000)));

  EasyMock.replay(nativeRequestLogLine, sqlRequestLogLine, delegate);
  logger.logNativeQuery(nativeRequestLogLine);
  logger.logSqlQuery(sqlRequestLogLine);
}
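For context, the constructor arguments (delegate, 1000, 2000, muted query types) correspond to the filtered request logger's runtime configuration. A hedged example of what that might look like in runtime.properties, based on the Druid request-logging docs (the property names and the file-delegate settings are assumptions to verify against your Druid version):

druid.request.logging.type=filtered
druid.request.logging.queryTimeThresholdMs=1000
druid.request.logging.sqlQueryTimeThresholdMs=2000
druid.request.logging.mutedQueryTypes=[]
druid.request.logging.delegate.type=file
druid.request.logging.delegate.dir=/var/log/druid/requests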
Use of org.apache.druid.server.QueryStats in project druid by druid-io.
In the class MovingAverageQueryRunner, the method run.
@Override
public Sequence<Row> run(QueryPlus<Row> query, ResponseContext responseContext)
{
  MovingAverageQuery maq = (MovingAverageQuery) query.getQuery();
  List<Interval> intervals;
  final Period period;

  // Get the largest bucket count from the list of averagers
  Optional<Integer> opt = maq.getAveragerSpecs()
                             .stream()
                             .map(AveragerFactory::getNumBuckets)
                             .max(Integer::compare);
  int buckets = opt.orElse(0);

  // Extend the beginning of each interval by (buckets - 1) periods
  if (maq.getGranularity() instanceof PeriodGranularity) {
    period = ((PeriodGranularity) maq.getGranularity()).getPeriod();
    int offset = buckets <= 0 ? 0 : (1 - buckets);
    intervals = maq.getIntervals()
                   .stream()
                   .map(i -> new Interval(i.getStart().withPeriodAdded(period, offset), i.getEnd()))
                   .collect(Collectors.toList());
  } else {
    throw new ISE("Only PeriodGranularity is supported for movingAverage queries");
  }

  Sequence<Row> resultsSeq;
  DataSource dataSource = maq.getDataSource();
  if (maq.getDimensions() != null
      && !maq.getDimensions().isEmpty()
      && (dataSource instanceof TableDataSource || dataSource instanceof UnionDataSource || dataSource instanceof QueryDataSource)) {
    // build a groupBy query from the movingAverage query
    GroupByQuery.Builder builder = GroupByQuery.builder()
                                               .setDataSource(dataSource)
                                               .setInterval(intervals)
                                               .setDimFilter(maq.getFilter())
                                               .setGranularity(maq.getGranularity())
                                               .setDimensions(maq.getDimensions())
                                               .setAggregatorSpecs(maq.getAggregatorSpecs())
                                               .setPostAggregatorSpecs(maq.getPostAggregatorSpecs())
                                               .setContext(maq.getContext());
    GroupByQuery gbq = builder.build();

    ResponseContext gbqResponseContext = ResponseContext.createEmpty();
    gbqResponseContext.merge(responseContext);
    gbqResponseContext.putQueryFailDeadlineMs(System.currentTimeMillis() + QueryContexts.getTimeout(gbq));

    Sequence<ResultRow> results = gbq.getRunner(walker).run(QueryPlus.wrap(gbq), gbqResponseContext);
    try {
      // use localhost for the remote address
      requestLogger.logNativeQuery(RequestLogLine.forNative(
          gbq,
          DateTimes.nowUtc(),
          "127.0.0.1",
          new QueryStats(ImmutableMap.of("query/time", 0, "query/bytes", 0, "success", true))
      ));
    } catch (Exception e) {
      throw Throwables.propagate(e);
    }
    resultsSeq = results.map(row -> row.toMapBasedRow(gbq));
  } else {
    // no dimensions, so optimize this as a timeseries query
    TimeseriesQuery tsq = new TimeseriesQuery(
        dataSource,
        new MultipleIntervalSegmentSpec(intervals),
        false,
        null,
        maq.getFilter(),
        maq.getGranularity(),
        maq.getAggregatorSpecs(),
        maq.getPostAggregatorSpecs(),
        0,
        maq.getContext()
    );
    ResponseContext tsqResponseContext = ResponseContext.createEmpty();
    tsqResponseContext.merge(responseContext);
    tsqResponseContext.putQueryFailDeadlineMs(System.currentTimeMillis() + QueryContexts.getTimeout(tsq));

    Sequence<Result<TimeseriesResultValue>> results = tsq.getRunner(walker).run(QueryPlus.wrap(tsq), tsqResponseContext);
    try {
      // use localhost for the remote address
      requestLogger.logNativeQuery(RequestLogLine.forNative(
          tsq,
          DateTimes.nowUtc(),
          "127.0.0.1",
          new QueryStats(ImmutableMap.of("query/time", 0, "query/bytes", 0, "success", true))
      ));
    } catch (Exception e) {
      throw Throwables.propagate(e);
    }
    resultsSeq = Sequences.map(results, new TimeseriesResultToRow());
  }

  // Process into period buckets
  Sequence<RowBucket> bucketedMovingAvgResults =
      Sequences.simple(new RowBucketIterable(resultsSeq, intervals, period));

  // Apply the window analysis functions
  Sequence<Row> movingAvgResults = Sequences.simple(new MovingAverageIterable(
      bucketedMovingAvgResults,
      maq.getDimensions(),
      maq.getAveragerSpecs(),
      maq.getPostAggregatorSpecs(),
      maq.getAggregatorSpecs()
  ));

  // Apply any postAveragers
  Sequence<Row> movingAvgResultsWithPostAveragers =
      Sequences.map(movingAvgResults, new PostAveragerAggregatorCalculator(maq));

  // Remove rows outside the reporting window
  List<Interval> reportingIntervals = maq.getIntervals();
  movingAvgResults = Sequences.filter(
      movingAvgResultsWithPostAveragers,
      row -> reportingIntervals.stream().anyMatch(i -> i.contains(row.getTimestamp()))
  );

  // Apply any having, sorting, and limits
  movingAvgResults = maq.applyLimit(movingAvgResults);
  return movingAvgResults;
}
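The interval extension above shifts each interval's start back by (buckets - 1) periods so the first reporting bucket has a full window of history to average over. A standalone sketch of that arithmetic with Joda-Time (the 7-day window and dates are illustrative):

import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Interval;
import org.joda.time.Period;

public class ExtendIntervalSketch
{
  public static void main(String[] args)
  {
    Period period = Period.days(1);                 // granularity period
    int buckets = 7;                                // largest averager window
    int offset = buckets <= 0 ? 0 : (1 - buckets);  // -6
    DateTime start = new DateTime("2015-01-08T00:00:00Z", DateTimeZone.UTC);
    DateTime end = new DateTime("2015-01-09T00:00:00Z", DateTimeZone.UTC);
    // Shift the start back 6 days so a 7-day moving average of the
    // first reporting bucket has a complete window behind it.
    Interval extended = new Interval(start.withPeriodAdded(period, offset), end);
    System.out.println(extended); // 2015-01-02T00:00:00.000Z/2015-01-09T00:00:00.000Z
  }
}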
Use of org.apache.druid.server.QueryStats in project druid by druid-io.
In the class KafkaEmitterTest, the method testKafkaEmitter.
// There is a 1-second wait in the Kafka emitter before it starts sending events to the broker, so set a 5-second timeout.
@Test(timeout = 5_000)
public void testKafkaEmitter() throws InterruptedException
{
  final List<ServiceMetricEvent> serviceMetricEvents = ImmutableList.of(
      ServiceMetricEvent.builder().build("m1", 1).build("service", "host")
  );
  final List<AlertEvent> alertEvents = ImmutableList.of(new AlertEvent("service", "host", "description"));
  final List<RequestLogEvent> requestLogEvents = ImmutableList.of(
      DefaultRequestLogEventBuilderFactory.instance()
          .createRequestLogEventBuilder(
              "requests",
              RequestLogLine.forSql("", null, DateTimes.nowUtc(), null, new QueryStats(ImmutableMap.of()))
          )
          .build("service", "host")
  );
  int totalEvents = serviceMetricEvents.size() + alertEvents.size() + requestLogEvents.size();
  int totalEventsExcludingRequestLogEvents = totalEvents - requestLogEvents.size();
  final CountDownLatch countDownSentEvents =
      new CountDownLatch(requestTopic == null ? totalEventsExcludingRequestLogEvents : totalEvents);

  final KafkaProducer<String, String> producer = mock(KafkaProducer.class);
  final KafkaEmitter kafkaEmitter = new KafkaEmitter(
      new KafkaEmitterConfig("", "metrics", "alerts", requestTopic, "test-cluster", null),
      new ObjectMapper()
  )
  {
    @Override
    protected Producer<String, String> setKafkaProducer()
    {
      // override the send interval to 1 second
      sendInterval = 1;
      return producer;
    }
  };
  when(producer.send(any(), any())).then(invocation -> {
    countDownSentEvents.countDown();
    return null;
  });

  kafkaEmitter.start();
  for (Event event : serviceMetricEvents) {
    kafkaEmitter.emit(event);
  }
  for (Event event : alertEvents) {
    kafkaEmitter.emit(event);
  }
  for (Event event : requestLogEvents) {
    kafkaEmitter.emit(event);
  }
  countDownSentEvents.await();

  Assert.assertEquals(0, kafkaEmitter.getMetricLostCount());
  Assert.assertEquals(0, kafkaEmitter.getAlertLostCount());
  Assert.assertEquals(requestTopic == null ? requestLogEvents.size() : 0, kafkaEmitter.getRequestLostCount());
  Assert.assertEquals(0, kafkaEmitter.getInvalidLostCount());
}
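The test synchronizes on a CountDownLatch wired into the mocked producer's send(), so await() blocks until every expected event has been handed to the producer, bounded by the @Test timeout. The same pattern in isolation, assuming only Mockito and java.util.concurrent (the Sender interface is a hypothetical stand-in for KafkaProducer):

import java.util.concurrent.CountDownLatch;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;

public class LatchedMockExample
{
  // Hypothetical asynchronous sender, standing in for KafkaProducer.
  interface Sender
  {
    void send(String record);
  }

  public static void main(String[] args) throws InterruptedException
  {
    CountDownLatch latch = new CountDownLatch(3);
    Sender sender = mock(Sender.class);
    // Count down once per mocked send, mirroring the producer stub in the test.
    doAnswer(invocation -> {
      latch.countDown();
      return null;
    }).when(sender).send(any());

    for (int i = 0; i < 3; i++) {
      final int n = i;
      new Thread(() -> sender.send("event-" + n)).start();
    }
    latch.await(); // returns only after all three sends have happened
    System.out.println("all events sent");
  }
}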