
Example 1 with QueryStats

Use of org.apache.druid.server.QueryStats in project druid by druid-io.

From the class DefaultRequestLogEventTest, method testDefaultRequestLogEventSerde:

@Test
public void testDefaultRequestLogEventSerde() throws Exception {
    RequestLogLine nativeLine = RequestLogLine.forNative(
        new TimeseriesQuery(
            new TableDataSource("dummy"),
            new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.of("2015-01-01/2015-01-02"))),
            true,
            VirtualColumns.EMPTY,
            null,
            Granularities.ALL,
            ImmutableList.of(),
            ImmutableList.of(),
            5,
            ImmutableMap.of("key", "value")
        ),
        DateTimes.of(2019, 12, 12, 3, 1),
        "127.0.0.1",
        new QueryStats(ImmutableMap.of("query/time", 13L, "query/bytes", 10L, "success", true, "identity", "allowAll"))
    );
    DefaultRequestLogEvent defaultRequestLogEvent = new DefaultRequestLogEvent(ImmutableMap.of("service", "druid-service", "host", "127.0.0.1"), "feed", nativeLine);
    String logEventJson = objectMapper.writeValueAsString(defaultRequestLogEvent);
    String expected = "{\"feed\":\"feed\",\"query\":{\"queryType\":\"timeseries\",\"dataSource\":{\"type\":\"table\",\"name\":\"dummy\"},\"intervals\":{\"type\":\"intervals\",\"intervals\":[\"2015-01-01T00:00:00.000Z/2015-01-02T00:00:00.000Z\"]},\"descending\":true,\"virtualColumns\":[],\"filter\":null,\"granularity\":{\"type\":\"all\"},\"aggregations\":[],\"postAggregations\":[],\"limit\":5,\"context\":{\"key\":\"value\"}},\"host\":\"127.0.0.1\",\"timestamp\":\"2019-12-12T03:01:00.000Z\",\"service\":\"druid-service\",\"sql\":null,\"sqlQueryContext\":{},\"remoteAddr\":\"127.0.0.1\",\"queryStats\":{\"query/time\":13,\"query/bytes\":10,\"success\":true,\"identity\":\"allowAll\"}}";
    Assert.assertEquals(objectMapper.readTree(expected), objectMapper.readTree(logEventJson));
}
Also used : TimeseriesQuery(org.apache.druid.query.timeseries.TimeseriesQuery) TableDataSource(org.apache.druid.query.TableDataSource) QueryStats(org.apache.druid.server.QueryStats) RequestLogLine(org.apache.druid.server.RequestLogLine) MultipleIntervalSegmentSpec(org.apache.druid.query.spec.MultipleIntervalSegmentSpec) Test(org.junit.Test)
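
The expected string shows how a DefaultRequestLogEvent flattens: the queryStats field serializes as the plain stats map handed to the QueryStats constructor. A minimal sketch of just that fragment, assuming a stock Jackson ObjectMapper and the Druid server module on the classpath:

import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.collect.ImmutableMap;
import org.apache.druid.server.QueryStats;

public class QueryStatsSerdeSketch {
    public static void main(String[] args) throws Exception {
        ObjectMapper mapper = new ObjectMapper();
        QueryStats stats = new QueryStats(
            ImmutableMap.of("query/time", 13L, "query/bytes", 10L, "success", true, "identity", "allowAll"));
        // Should print the "queryStats" fragment of the expected JSON above:
        // {"query/time":13,"query/bytes":10,"success":true,"identity":"allowAll"}
        System.out.println(mapper.writeValueAsString(stats));
    }
}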

Example 2 with QueryStats

Use of org.apache.druid.server.QueryStats in project druid by druid-io.

From the class FilteredRequestLoggerTest, method testNotFilterAboveThreshold:

@Test
public void testNotFilterAboveThreshold() throws IOException {
    RequestLogger delegate = EasyMock.createStrictMock(RequestLogger.class);
    delegate.logNativeQuery(EasyMock.anyObject());
    EasyMock.expectLastCall().times(2);
    delegate.logSqlQuery(EasyMock.anyObject());
    EasyMock.expectLastCall().times(2);
    FilteredRequestLoggerProvider.FilteredRequestLogger logger = new FilteredRequestLoggerProvider.FilteredRequestLogger(delegate, 1000, 2000, ImmutableList.of());
    RequestLogLine nativeRequestLogLine = EasyMock.createMock(RequestLogLine.class);
    EasyMock.expect(nativeRequestLogLine.getQueryStats()).andReturn(new QueryStats(ImmutableMap.of("query/time", 10000))).once();
    EasyMock.expect(nativeRequestLogLine.getQueryStats()).andReturn(new QueryStats(ImmutableMap.of("query/time", 1000))).once();
    EasyMock.expect(nativeRequestLogLine.getQuery()).andReturn(testSegmentMetadataQuery).times(2);
    RequestLogLine sqlRequestLogLine = EasyMock.createMock(RequestLogLine.class);
    EasyMock.expect(sqlRequestLogLine.getQueryStats()).andReturn(new QueryStats(ImmutableMap.of("sqlQuery/time", 10000))).once();
    EasyMock.expect(sqlRequestLogLine.getQueryStats()).andReturn(new QueryStats(ImmutableMap.of("sqlQuery/time", 2000))).once();
    EasyMock.expect(sqlRequestLogLine.getQuery()).andReturn(testSegmentMetadataQuery).times(2);
    EasyMock.replay(nativeRequestLogLine, sqlRequestLogLine, delegate);
    logger.logNativeQuery(nativeRequestLogLine);
    logger.logNativeQuery(nativeRequestLogLine);
    logger.logSqlQuery(sqlRequestLogLine);
    logger.logSqlQuery(sqlRequestLogLine);
    EasyMock.verify(nativeRequestLogLine, sqlRequestLogLine, delegate);
}
Also used : QueryStats(org.apache.druid.server.QueryStats) RequestLogLine(org.apache.druid.server.RequestLogLine) Test(org.junit.Test)

Example 3 with QueryStats

Use of org.apache.druid.server.QueryStats in project druid by druid-io.

From the class FilteredRequestLoggerTest, method testFilterBelowThreshold:

@Test
public void testFilterBelowThreshold() throws IOException {
    RequestLogger delegate = EasyMock.createStrictMock(RequestLogger.class);
    delegate.logNativeQuery(EasyMock.anyObject());
    EasyMock.expectLastCall().andThrow(new IOException());
    delegate.logSqlQuery(EasyMock.anyObject());
    EasyMock.expectLastCall().andThrow(new IOException());
    FilteredRequestLoggerProvider.FilteredRequestLogger logger = new FilteredRequestLoggerProvider.FilteredRequestLogger(delegate, 1000, 2000, ImmutableList.of());
    RequestLogLine nativeRequestLogLine = EasyMock.createMock(RequestLogLine.class);
    EasyMock.expect(nativeRequestLogLine.getQueryStats()).andReturn(new QueryStats(ImmutableMap.of("query/time", 100))).once();
    RequestLogLine sqlRequestLogLine = EasyMock.createMock(RequestLogLine.class);
    EasyMock.expect(sqlRequestLogLine.getQueryStats()).andReturn(new QueryStats(ImmutableMap.of("sqlQuery/time", 1000)));
    EasyMock.replay(nativeRequestLogLine, sqlRequestLogLine, delegate);
    logger.logNativeQuery(nativeRequestLogLine);
    logger.logSqlQuery(sqlRequestLogLine);
}
Also used : QueryStats(org.apache.druid.server.QueryStats) RequestLogLine(org.apache.druid.server.RequestLogLine) IOException(java.io.IOException) Test(org.junit.Test)
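
Read together, the two tests pin down the filter's contract: a request log line is forwarded to the delegate only when its reported time meets or exceeds the matching threshold (1000 ms for native queries and 2000 ms for SQL in these tests), and lines below the threshold never reach the delegate at all, which is why the throwing delegate in testFilterBelowThreshold causes no failure. A self-contained sketch of that rule; the class and method names are illustrative, not Druid API:

public class ThresholdRuleSketch {

    // Illustrative only: mirrors the comparison the two tests above exercise.
    static boolean shouldForward(long reportedTimeMs, long thresholdMs) {
        return reportedTimeMs >= thresholdMs;
    }

    public static void main(String[] args) {
        long nativeThresholdMs = 1000; // second FilteredRequestLogger constructor argument above
        long sqlThresholdMs = 2000;    // third FilteredRequestLogger constructor argument above
        System.out.println(shouldForward(10000, nativeThresholdMs)); // true: delegated
        System.out.println(shouldForward(1000, nativeThresholdMs));  // true: delegated even at the threshold
        System.out.println(shouldForward(100, nativeThresholdMs));   // false: dropped
        System.out.println(shouldForward(1000, sqlThresholdMs));     // false: dropped
    }
}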

Example 4 with QueryStats

Use of org.apache.druid.server.QueryStats in project druid by druid-io.

From the class MovingAverageQueryRunner, method run:

@Override
public Sequence<Row> run(QueryPlus<Row> query, ResponseContext responseContext) {
    MovingAverageQuery maq = (MovingAverageQuery) query.getQuery();
    List<Interval> intervals;
    final Period period;
    // Get the largest bucket from the list of averagers
    Optional<Integer> opt = maq.getAveragerSpecs().stream().map(AveragerFactory::getNumBuckets).max(Integer::compare);
    int buckets = opt.orElse(0);
    // Extend the beginning of each interval backwards by (buckets - 1) periods
    if (maq.getGranularity() instanceof PeriodGranularity) {
        period = ((PeriodGranularity) maq.getGranularity()).getPeriod();
        int offset = buckets <= 0 ? 0 : (1 - buckets);
        intervals = maq.getIntervals().stream().map(i -> new Interval(i.getStart().withPeriodAdded(period, offset), i.getEnd())).collect(Collectors.toList());
    } else {
        throw new ISE("Only PeriodGranularity is supported for movingAverage queries");
    }
    Sequence<Row> resultsSeq;
    DataSource dataSource = maq.getDataSource();
    if (maq.getDimensions() != null && !maq.getDimensions().isEmpty() && (dataSource instanceof TableDataSource || dataSource instanceof UnionDataSource || dataSource instanceof QueryDataSource)) {
        // build groupBy query from movingAverage query
        GroupByQuery.Builder builder = GroupByQuery.builder()
            .setDataSource(dataSource)
            .setInterval(intervals)
            .setDimFilter(maq.getFilter())
            .setGranularity(maq.getGranularity())
            .setDimensions(maq.getDimensions())
            .setAggregatorSpecs(maq.getAggregatorSpecs())
            .setPostAggregatorSpecs(maq.getPostAggregatorSpecs())
            .setContext(maq.getContext());
        GroupByQuery gbq = builder.build();
        ResponseContext gbqResponseContext = ResponseContext.createEmpty();
        gbqResponseContext.merge(responseContext);
        gbqResponseContext.putQueryFailDeadlineMs(System.currentTimeMillis() + QueryContexts.getTimeout(gbq));
        Sequence<ResultRow> results = gbq.getRunner(walker).run(QueryPlus.wrap(gbq), gbqResponseContext);
        try {
            // use localhost for remote address
            requestLogger.logNativeQuery(RequestLogLine.forNative(
                gbq,
                DateTimes.nowUtc(),
                "127.0.0.1",
                new QueryStats(ImmutableMap.of("query/time", 0, "query/bytes", 0, "success", true))
            ));
        } catch (Exception e) {
            throw Throwables.propagate(e);
        }
        resultsSeq = results.map(row -> row.toMapBasedRow(gbq));
    } else {
        // no dimensions, so optimize this as a TimeSeries
        TimeseriesQuery tsq = new TimeseriesQuery(
            dataSource,
            new MultipleIntervalSegmentSpec(intervals),
            false,
            null,
            maq.getFilter(),
            maq.getGranularity(),
            maq.getAggregatorSpecs(),
            maq.getPostAggregatorSpecs(),
            0,
            maq.getContext()
        );
        ResponseContext tsqResponseContext = ResponseContext.createEmpty();
        tsqResponseContext.merge(responseContext);
        tsqResponseContext.putQueryFailDeadlineMs(System.currentTimeMillis() + QueryContexts.getTimeout(tsq));
        Sequence<Result<TimeseriesResultValue>> results = tsq.getRunner(walker).run(QueryPlus.wrap(tsq), tsqResponseContext);
        try {
            // use localhost for remote address
            requestLogger.logNativeQuery(RequestLogLine.forNative(
                tsq,
                DateTimes.nowUtc(),
                "127.0.0.1",
                new QueryStats(ImmutableMap.of("query/time", 0, "query/bytes", 0, "success", true))
            ));
        } catch (Exception e) {
            throw Throwables.propagate(e);
        }
        resultsSeq = Sequences.map(results, new TimeseriesResultToRow());
    }
    // Process into period buckets
    Sequence<RowBucket> bucketedMovingAvgResults = Sequences.simple(new RowBucketIterable(resultsSeq, intervals, period));
    // Apply the windows analysis functions
    Sequence<Row> movingAvgResults = Sequences.simple(new MovingAverageIterable(bucketedMovingAvgResults, maq.getDimensions(), maq.getAveragerSpecs(), maq.getPostAggregatorSpecs(), maq.getAggregatorSpecs()));
    // Apply any postAveragers
    Sequence<Row> movingAvgResultsWithPostAveragers = Sequences.map(movingAvgResults, new PostAveragerAggregatorCalculator(maq));
    // remove rows outside the reporting window
    List<Interval> reportingIntervals = maq.getIntervals();
    movingAvgResults = Sequences.filter(movingAvgResultsWithPostAveragers, row -> reportingIntervals.stream().anyMatch(i -> i.contains(row.getTimestamp())));
    // Apply any having, sorting, and limits
    movingAvgResults = maq.applyLimit(movingAvgResults);
    return movingAvgResults;
}
Also used : QueryPlus(org.apache.druid.query.QueryPlus) MapBasedRow(org.apache.druid.data.input.MapBasedRow) AveragerFactory(org.apache.druid.query.movingaverage.averagers.AveragerFactory) TimeseriesResultValue(org.apache.druid.query.timeseries.TimeseriesResultValue) Row(org.apache.druid.data.input.Row) QueryStats(org.apache.druid.server.QueryStats) Interval(org.joda.time.Interval) PeriodGranularity(org.apache.druid.java.util.common.granularity.PeriodGranularity) Map(java.util.Map) QueryRunner(org.apache.druid.query.QueryRunner) MultipleIntervalSegmentSpec(org.apache.druid.query.spec.MultipleIntervalSegmentSpec) GroupByQuery(org.apache.druid.query.groupby.GroupByQuery) QuerySegmentWalker(org.apache.druid.query.QuerySegmentWalker) Sequences(org.apache.druid.java.util.common.guava.Sequences) Nullable(javax.annotation.Nullable) DateTimes(org.apache.druid.java.util.common.DateTimes) Sequence(org.apache.druid.java.util.common.guava.Sequence) Period(org.joda.time.Period) Function(com.google.common.base.Function) ImmutableMap(com.google.common.collect.ImmutableMap) ResponseContext(org.apache.druid.query.context.ResponseContext) ResultRow(org.apache.druid.query.groupby.ResultRow) DataSource(org.apache.druid.query.DataSource) Throwables(com.google.common.base.Throwables) TimeseriesQuery(org.apache.druid.query.timeseries.TimeseriesQuery) RequestLogger(org.apache.druid.server.log.RequestLogger) ISE(org.apache.druid.java.util.common.ISE) Collectors(java.util.stream.Collectors) QueryContexts(org.apache.druid.query.QueryContexts) TableDataSource(org.apache.druid.query.TableDataSource) QueryDataSource(org.apache.druid.query.QueryDataSource) Result(org.apache.druid.query.Result) List(java.util.List) UnionDataSource(org.apache.druid.query.UnionDataSource) RequestLogLine(org.apache.druid.server.RequestLogLine) Optional(java.util.Optional)
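
The subtle step in run() is the interval arithmetic near the top: for an N-bucket averager, the start of each queried interval is moved back by N - 1 periods so that the first reported bucket already has a full window of history behind it; the rows fetched for those extra periods are filtered back out by the reporting-interval check near the end. A standalone Joda-Time sketch of that calculation, with made-up dates for illustration:

import org.joda.time.Interval;
import org.joda.time.Period;

public class IntervalExtensionSketch {
    public static void main(String[] args) {
        Period period = Period.days(1); // e.g. a P1D PeriodGranularity
        int buckets = 7;                // a seven-bucket (seven-day) averager
        int offset = buckets <= 0 ? 0 : (1 - buckets); // -6, same expression as in run() above
        Interval queried = Interval.parse("2020-01-08T00:00:00Z/2020-01-09T00:00:00Z");
        Interval extended = new Interval(
            queried.getStart().withPeriodAdded(period, offset),
            queried.getEnd());
        // The start moves back six days to 2020-01-02, so six extra daily
        // buckets are fetched to seed the moving-average window.
        System.out.println(extended);
    }
}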

Example 5 with QueryStats

Use of org.apache.druid.server.QueryStats in project druid by druid-io.

From the class KafkaEmitterTest, method testKafkaEmitter:

// There is a 1-second wait in the Kafka emitter before it starts sending events to the broker, so set a 5-second timeout.
@Test(timeout = 5_000)
public void testKafkaEmitter() throws InterruptedException {
    final List<ServiceMetricEvent> serviceMetricEvents = ImmutableList.of(ServiceMetricEvent.builder().build("m1", 1).build("service", "host"));
    final List<AlertEvent> alertEvents = ImmutableList.of(new AlertEvent("service", "host", "description"));
    final List<RequestLogEvent> requestLogEvents = ImmutableList.of(
        DefaultRequestLogEventBuilderFactory.instance()
            .createRequestLogEventBuilder(
                "requests",
                RequestLogLine.forSql("", null, DateTimes.nowUtc(), null, new QueryStats(ImmutableMap.of()))
            )
            .build("service", "host")
    );
    int totalEvents = serviceMetricEvents.size() + alertEvents.size() + requestLogEvents.size();
    int totalEventsExcludingRequestLogEvents = totalEvents - requestLogEvents.size();
    final CountDownLatch countDownSentEvents = new CountDownLatch(requestTopic == null ? totalEventsExcludingRequestLogEvents : totalEvents);
    final KafkaProducer<String, String> producer = mock(KafkaProducer.class);
    final KafkaEmitter kafkaEmitter = new KafkaEmitter(new KafkaEmitterConfig("", "metrics", "alerts", requestTopic, "test-cluster", null), new ObjectMapper()) {

        @Override
        protected Producer<String, String> setKafkaProducer() {
            // override send interval to 1 second
            sendInterval = 1;
            return producer;
        }
    };
    when(producer.send(any(), any())).then((invocation) -> {
        countDownSentEvents.countDown();
        return null;
    });
    kafkaEmitter.start();
    for (Event event : serviceMetricEvents) {
        kafkaEmitter.emit(event);
    }
    for (Event event : alertEvents) {
        kafkaEmitter.emit(event);
    }
    for (Event event : requestLogEvents) {
        kafkaEmitter.emit(event);
    }
    countDownSentEvents.await();
    Assert.assertEquals(0, kafkaEmitter.getMetricLostCount());
    Assert.assertEquals(0, kafkaEmitter.getAlertLostCount());
    Assert.assertEquals(requestTopic == null ? requestLogEvents.size() : 0, kafkaEmitter.getRequestLostCount());
    Assert.assertEquals(0, kafkaEmitter.getInvalidLostCount());
}
Also used : AlertEvent(org.apache.druid.java.util.emitter.service.AlertEvent) CountDownLatch(java.util.concurrent.CountDownLatch) QueryStats(org.apache.druid.server.QueryStats) RequestLogEvent(org.apache.druid.server.log.RequestLogEvent) ServiceMetricEvent(org.apache.druid.java.util.emitter.service.ServiceMetricEvent) Event(org.apache.druid.java.util.emitter.core.Event) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Test(org.junit.Test)
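
The assertions at the end imply the emitter's routing contract: metric and alert events always have a configured topic, while request log events are emitted only when a request topic is set and are counted as lost otherwise. A hedged sketch of that dispatch; topicFor is a made-up helper, not KafkaEmitter API:

import javax.annotation.Nullable;
import org.apache.druid.java.util.emitter.core.Event;
import org.apache.druid.java.util.emitter.service.AlertEvent;
import org.apache.druid.java.util.emitter.service.ServiceMetricEvent;
import org.apache.druid.server.log.RequestLogEvent;

public class TopicRoutingSketch {

    // Illustrative only: returns the topic an event would be sent to, or null
    // if it would be dropped (and counted toward a lost-event counter).
    @Nullable
    static String topicFor(Event event, @Nullable String requestTopic) {
        if (event instanceof ServiceMetricEvent) {
            return "metrics"; // the metric topic passed to KafkaEmitterConfig above
        }
        if (event instanceof AlertEvent) {
            return "alerts"; // the alert topic passed to KafkaEmitterConfig above
        }
        if (event instanceof RequestLogEvent) {
            return requestTopic; // null -> dropped, and getRequestLostCount() grows
        }
        return null; // anything else would count as invalid-lost
    }
}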

Aggregations

QueryStats (org.apache.druid.server.QueryStats): 8 usages
RequestLogLine (org.apache.druid.server.RequestLogLine): 7 usages
Test (org.junit.Test): 7 usages
TableDataSource (org.apache.druid.query.TableDataSource): 3 usages
MultipleIntervalSegmentSpec (org.apache.druid.query.spec.MultipleIntervalSegmentSpec): 3 usages
TimeseriesQuery (org.apache.druid.query.timeseries.TimeseriesQuery): 3 usages
IOException (java.io.IOException): 2 usages
HashMap (java.util.HashMap): 2 usages
DateTime (org.joda.time.DateTime): 2 usages
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 1 usage
Function (com.google.common.base.Function): 1 usage
Throwables (com.google.common.base.Throwables): 1 usage
ImmutableMap (com.google.common.collect.ImmutableMap): 1 usage
List (java.util.List): 1 usage
Map (java.util.Map): 1 usage
Optional (java.util.Optional): 1 usage
CountDownLatch (java.util.concurrent.CountDownLatch): 1 usage
Collectors (java.util.stream.Collectors): 1 usage
Nullable (javax.annotation.Nullable): 1 usage
MapBasedRow (org.apache.druid.data.input.MapBasedRow): 1 usage