Example 11 with TimeseriesQuery

Use of io.druid.query.timeseries.TimeseriesQuery in project druid by druid-io.

From class SpecificSegmentQueryRunnerTest, method testRetry.

@Test
public void testRetry() throws Exception {
    final ObjectMapper mapper = new DefaultObjectMapper();
    SegmentDescriptor descriptor = new SegmentDescriptor(new Interval("2012-01-01T00:00:00Z/P1D"), "version", 0);
    final SpecificSegmentQueryRunner queryRunner = new SpecificSegmentQueryRunner(new QueryRunner() {

        @Override
        public Sequence run(Query query, Map responseContext) {
            return new Sequence() {

                @Override
                public Object accumulate(Object initValue, Accumulator accumulator) {
                    throw new SegmentMissingException("FAILSAUCE");
                }

                @Override
                public Yielder<Object> toYielder(Object initValue, YieldingAccumulator accumulator) {
                    throw new SegmentMissingException("FAILSAUCE");
                }
            };
        }
    }, new SpecificSegmentSpec(descriptor));
    // from accumulate
    Map<String, Object> responseContext = Maps.newHashMap();
    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
                                  .dataSource("foo")
                                  .granularity(Granularities.ALL)
                                  .intervals(ImmutableList.of(new Interval("2012-01-01T00:00:00Z/P1D")))
                                  .aggregators(ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("rows")))
                                  .build();
    Sequence results = queryRunner.run(query, responseContext);
    Sequences.toList(results, Lists.newArrayList());
    validate(mapper, descriptor, responseContext);
    // from toYielder
    responseContext = Maps.newHashMap();
    results = queryRunner.run(query, responseContext);
    results.toYielder(null, new YieldingAccumulator() {

        final List lists = Lists.newArrayList();

        @Override
        public Object accumulate(Object accumulated, Object in) {
            lists.add(in);
            return in;
        }
    });
    validate(mapper, descriptor, responseContext);
}
Also used: Accumulator(io.druid.java.util.common.guava.Accumulator) YieldingAccumulator(io.druid.java.util.common.guava.YieldingAccumulator) Yielder(io.druid.java.util.common.guava.Yielder) TimeseriesQuery(io.druid.query.timeseries.TimeseriesQuery) Query(io.druid.query.Query) SegmentMissingException(io.druid.segment.SegmentMissingException) Sequence(io.druid.java.util.common.guava.Sequence) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) CountAggregatorFactory(io.druid.query.aggregation.CountAggregatorFactory) QueryRunner(io.druid.query.QueryRunner) SegmentDescriptor(io.druid.query.SegmentDescriptor) ImmutableList(com.google.common.collect.ImmutableList) List(java.util.List) DefaultObjectMapper(io.druid.jackson.DefaultObjectMapper) Map(java.util.Map) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) Interval(org.joda.time.Interval) Test(org.junit.Test)
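
The validate(mapper, descriptor, responseContext) helper is not shown in this snippet. Below is a plausible reconstruction, assuming the runner records the failed segment under Result.MISSING_SEGMENTS_KEY in the response context; this sketch and the io.druid.query.Result reference are assumptions, not the verbatim helper:

private void validate(ObjectMapper mapper, SegmentDescriptor descriptor, Map<String, Object> responseContext) throws IOException {
    // SpecificSegmentQueryRunner is expected to have recorded the missing segment (assumption).
    Object missingSegments = responseContext.get(Result.MISSING_SEGMENTS_KEY);
    Assert.assertTrue(missingSegments instanceof List);
    Object segmentDesc = ((List) missingSegments).get(0);
    Assert.assertTrue(segmentDesc instanceof SegmentDescriptor);
    // Round-trip through Jackson to confirm the descriptor serializes cleanly.
    SegmentDescriptor newDesc = mapper.readValue(mapper.writeValueAsString(segmentDesc), SegmentDescriptor.class);
    Assert.assertEquals(descriptor, newDesc);
}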

Example 12 with TimeseriesQuery

Use of io.druid.query.timeseries.TimeseriesQuery in project druid by druid-io.

From class SpecificSegmentQueryRunnerTest, method testRetry2.

@SuppressWarnings("unchecked")
@Test
public void testRetry2() throws Exception {
    final ObjectMapper mapper = new DefaultObjectMapper();
    SegmentDescriptor descriptor = new SegmentDescriptor(new Interval("2012-01-01T00:00:00Z/P1D"), "version", 0);
    TimeseriesResultBuilder builder = new TimeseriesResultBuilder(new DateTime("2012-01-01T00:00:00Z"));
    CountAggregator rows = new CountAggregator();
    rows.aggregate();
    builder.addMetric("rows", rows);
    final Result<TimeseriesResultValue> value = builder.build();
    final SpecificSegmentQueryRunner queryRunner = new SpecificSegmentQueryRunner(new QueryRunner() {

        @Override
        public Sequence run(Query query, Map responseContext) {
            return Sequences.withEffect(Sequences.simple(Arrays.asList(value)), new Runnable() {

                @Override
                public void run() {
                    throw new SegmentMissingException("FAILSAUCE");
                }
            }, MoreExecutors.sameThreadExecutor());
        }
    }, new SpecificSegmentSpec(descriptor));
    final Map<String, Object> responseContext = Maps.newHashMap();
    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
                                  .dataSource("foo")
                                  .granularity(Granularities.ALL)
                                  .intervals(ImmutableList.of(new Interval("2012-01-01T00:00:00Z/P1D")))
                                  .aggregators(ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("rows")))
                                  .build();
    Sequence results = queryRunner.run(query, responseContext);
    List<Result<TimeseriesResultValue>> res = Sequences.toList(results, Lists.<Result<TimeseriesResultValue>>newArrayList());
    Assert.assertEquals(1, res.size());
    Result<TimeseriesResultValue> theVal = res.get(0);
    Assert.assertTrue(1L == theVal.getValue().getLongMetric("rows"));
    validate(mapper, descriptor, responseContext);
}
Also used: TimeseriesResultValue(io.druid.query.timeseries.TimeseriesResultValue) TimeseriesQuery(io.druid.query.timeseries.TimeseriesQuery) Query(io.druid.query.Query) SegmentMissingException(io.druid.segment.SegmentMissingException) DateTime(org.joda.time.DateTime) Result(io.druid.query.Result) SegmentDescriptor(io.druid.query.SegmentDescriptor) DefaultObjectMapper(io.druid.jackson.DefaultObjectMapper) ObjectMapper(com.fasterxml.jackson.databind.ObjectMapper) TimeseriesResultBuilder(io.druid.query.timeseries.TimeseriesResultBuilder) Sequence(io.druid.java.util.common.guava.Sequence) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) CountAggregatorFactory(io.druid.query.aggregation.CountAggregatorFactory) QueryRunner(io.druid.query.QueryRunner) CountAggregator(io.druid.query.aggregation.CountAggregator) Map(java.util.Map) Interval(org.joda.time.Interval) Test(org.junit.Test)
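
For context, Sequences.withEffect defers the given Runnable until the wrapped sequence has been fully consumed, which is why this test sees the value before the SegmentMissingException fires. A minimal standalone sketch of that behavior (illustrative only, not part of the test):

Sequence<String> seq = Sequences.withEffect(
    Sequences.simple(Arrays.asList("a", "b")),
    new Runnable() {
        @Override
        public void run() {
            // Runs once, only after "a" and "b" have been accumulated.
            System.out.println("sequence fully consumed");
        }
    },
    MoreExecutors.sameThreadExecutor());
Sequences.toList(seq, Lists.<String>newArrayList()); // the effect runs after this call drains the sequence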

Example 13 with TimeseriesQuery

Use of io.druid.query.timeseries.TimeseriesQuery in project druid by druid-io.

From class TopNQueryRunnerTest, method testTopNDimExtractionToOne.

@Test
public void testTopNDimExtractionToOne() throws IOException {
    TopNQuery query = new TopNQueryBuilder()
        .dataSource(QueryRunnerTestHelper.dataSource)
        .granularity(QueryRunnerTestHelper.allGran)
        .dimension(new ExtractionDimensionSpec(
            QueryRunnerTestHelper.marketDimension,
            QueryRunnerTestHelper.marketDimension,
            new JavaScriptExtractionFn("function(f) { return \"POTATO\"; }", false, JavaScriptConfig.getEnabledInstance())))
        .metric("rows")
        .threshold(10)
        .intervals(QueryRunnerTestHelper.fullOnInterval)
        .aggregators(QueryRunnerTestHelper.commonAggregators)
        .postAggregators(Arrays.<PostAggregator>asList(QueryRunnerTestHelper.addRowsIndexConstant))
        .build();
    Granularity gran = Granularities.DAY;
    // Note: tsQuery is constructed here but never run in this test.
    TimeseriesQuery tsQuery = Druids.newTimeseriesQueryBuilder()
                                    .dataSource(QueryRunnerTestHelper.dataSource)
                                    .granularity(gran)
                                    .intervals(QueryRunnerTestHelper.fullOnInterval)
                                    .aggregators(Arrays.asList(
                                        QueryRunnerTestHelper.rowsCount,
                                        QueryRunnerTestHelper.indexDoubleSum,
                                        QueryRunnerTestHelper.qualityUniques))
                                    .postAggregators(Arrays.<PostAggregator>asList(QueryRunnerTestHelper.addRowsIndexConstant))
                                    .build();
    List<Result<TopNResultValue>> expectedResults = Arrays.asList(
        new Result<>(
            new DateTime("2011-01-12T00:00:00.000Z"),
            new TopNResultValue(Arrays.<Map<String, Object>>asList(
                ImmutableMap.<String, Object>of(
                    "addRowsIndexConstant", 504542.5071372986D,
                    "index", 503332.5071372986D,
                    QueryRunnerTestHelper.marketDimension, "POTATO",
                    "uniques", QueryRunnerTestHelper.UNIQUES_9,
                    "rows", 1209L)))));
    List<Result<TopNResultValue>> list = Sequences.toList(runWithMerge(query), new ArrayList<Result<TopNResultValue>>());
    Assert.assertEquals(1, list.size());
    Assert.assertEquals("Didn't merge results", 1, list.get(0).getValue().getValue().size());
    TestHelper.assertExpectedResults(expectedResults, list, "Failed to match");
}
Also used: HyperUniqueFinalizingPostAggregator(io.druid.query.aggregation.hyperloglog.HyperUniqueFinalizingPostAggregator) PostAggregator(io.druid.query.aggregation.PostAggregator) TimeseriesQuery(io.druid.query.timeseries.TimeseriesQuery) JavaScriptExtractionFn(io.druid.query.extraction.JavaScriptExtractionFn) Granularity(io.druid.java.util.common.granularity.Granularity) DateTime(org.joda.time.DateTime) Result(io.druid.query.Result) ExtractionDimensionSpec(io.druid.query.dimension.ExtractionDimensionSpec) Test(org.junit.Test)
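
The JavaScript extraction function maps every market value to the single bucket "POTATO", so the merged TopN can only ever contain one row. A minimal sketch of that mapping in isolation (illustrative only; ExtractionFn here is io.druid.query.extraction.ExtractionFn):

ExtractionFn fn = new JavaScriptExtractionFn(
    "function(f) { return \"POTATO\"; }",  // every input collapses to the same value
    false,                                  // injective = false: a many-to-one mapping
    JavaScriptConfig.getEnabledInstance());
fn.apply("spot");         // => "POTATO"
fn.apply("total_market"); // => "POTATO"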

Example 14 with TimeseriesQuery

Use of io.druid.query.timeseries.TimeseriesQuery in project druid by druid-io.

From class AppendTest, method testTimeSeries2.

@Test
public void testTimeSeries2() {
    List<Result<TimeseriesResultValue>> expectedResults = Arrays.asList(
        new Result<TimeseriesResultValue>(
            new DateTime("2011-01-12T00:00:00.000Z"),
            new TimeseriesResultValue(ImmutableMap.<String, Object>builder()
                                                  .put("rows", 7L)
                                                  .put("index", 500.0D)
                                                  .put("addRowsIndexConstant", 508.0D)
                                                  .put("uniques", 0.0D)
                                                  .put("maxIndex", 100.0D)
                                                  .put("minIndex", 0.0D)
                                                  .build())));
    TimeseriesQuery query = makeTimeseriesQuery();
    QueryRunner runner = TestQueryRunners.makeTimeSeriesQueryRunner(segment2);
    HashMap<String, Object> context = new HashMap<String, Object>();
    TestHelper.assertExpectedResults(expectedResults, runner.run(query, context));
}
Also used: TimeseriesResultValue(io.druid.query.timeseries.TimeseriesResultValue) TimeseriesQuery(io.druid.query.timeseries.TimeseriesQuery) HashMap(java.util.HashMap) DateTime(org.joda.time.DateTime) QueryRunner(io.druid.query.QueryRunner) Result(io.druid.query.Result) Test(org.junit.Test)
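
The makeTimeseriesQuery() helper is not included in this snippet. Judging from the metric names asserted above, it presumably builds something like the following hedged reconstruction (the data source name, interval, and aggregator fields are assumptions, not taken from the test):

private TimeseriesQuery makeTimeseriesQuery() {
    return Druids.newTimeseriesQueryBuilder()
                 .dataSource("testing")  // assumed name, not visible in the snippet
                 .granularity(Granularities.ALL)
                 .intervals(ImmutableList.of(new Interval("2011-01-12/2011-01-14")))  // assumed interval
                 .aggregators(Arrays.<AggregatorFactory>asList(
                     new CountAggregatorFactory("rows"),
                     new DoubleSumAggregatorFactory("index", "index"),
                     new HyperUniquesAggregatorFactory("uniques", "quality_uniques"),
                     new DoubleMaxAggregatorFactory("maxIndex", "index"),
                     new DoubleMinAggregatorFactory("minIndex", "index")))
                 .postAggregators(Arrays.<PostAggregator>asList(addRowsIndexConstant))  // assumed post-aggregator for addRowsIndexConstant
                 .build();
}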

Example 15 with TimeseriesQuery

Use of io.druid.query.timeseries.TimeseriesQuery in project druid by druid-io.

From class AppendTest, method testFilteredTimeSeries.

@Test
public void testFilteredTimeSeries() {
    List<Result<TimeseriesResultValue>> expectedResults = Arrays.asList(
        new Result<TimeseriesResultValue>(
            new DateTime("2011-01-12T00:00:00.000Z"),
            new TimeseriesResultValue(ImmutableMap.<String, Object>builder()
                                                  .put("rows", 5L)
                                                  .put("index", 500.0D)
                                                  .put("addRowsIndexConstant", 506.0D)
                                                  .put("uniques", 1.0002442201269182D)
                                                  .put("maxIndex", 100.0D)
                                                  .put("minIndex", 100.0D)
                                                  .build())));
    TimeseriesQuery query = makeFilteredTimeseriesQuery();
    QueryRunner runner = TestQueryRunners.makeTimeSeriesQueryRunner(segment);
    HashMap<String, Object> context = new HashMap<String, Object>();
    TestHelper.assertExpectedResults(expectedResults, runner.run(query, context));
}
Also used: TimeseriesResultValue(io.druid.query.timeseries.TimeseriesResultValue) TimeseriesQuery(io.druid.query.timeseries.TimeseriesQuery) HashMap(java.util.HashMap) DateTime(org.joda.time.DateTime) QueryRunner(io.druid.query.QueryRunner) Result(io.druid.query.Result) Test(org.junit.Test)
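
Likewise, makeFilteredTimeseriesQuery() is not shown. A hedged sketch of what a filtered variant could look like, using the builder's filters(dimension, value) shorthand (the dimension, value, and other fields are illustrative assumptions, not taken from the test):

private TimeseriesQuery makeFilteredTimeseriesQuery() {
    return Druids.newTimeseriesQueryBuilder()
                 .dataSource("testing")      // assumed name
                 .granularity(Granularities.ALL)
                 .filters("market", "spot")  // illustrative filter only
                 .intervals(ImmutableList.of(new Interval("2011-01-12/2011-01-14")))
                 .aggregators(Arrays.<AggregatorFactory>asList(
                     new CountAggregatorFactory("rows"),
                     new DoubleSumAggregatorFactory("index", "index")))
                 .build();
}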

Aggregations

TimeseriesQuery (io.druid.query.timeseries.TimeseriesQuery): 43 uses
Result (io.druid.query.Result): 33 uses
TimeseriesResultValue (io.druid.query.timeseries.TimeseriesResultValue): 32 uses
Test (org.junit.Test): 32 uses
DateTime (org.joda.time.DateTime): 23 uses
QueryRunner (io.druid.query.QueryRunner): 21 uses
LongSumAggregatorFactory (io.druid.query.aggregation.LongSumAggregatorFactory): 21 uses
Interval (org.joda.time.Interval): 21 uses
CountAggregatorFactory (io.druid.query.aggregation.CountAggregatorFactory): 19 uses
HashMap (java.util.HashMap): 19 uses
AggregatorFactory (io.druid.query.aggregation.AggregatorFactory): 17 uses
FinalizeResultsQueryRunner (io.druid.query.FinalizeResultsQueryRunner): 16 uses
TimeseriesQueryRunnerFactory (io.druid.query.timeseries.TimeseriesQueryRunnerFactory): 15 uses
TimeseriesQueryQueryToolChest (io.druid.query.timeseries.TimeseriesQueryQueryToolChest): 14 uses
TimeseriesQueryEngine (io.druid.query.timeseries.TimeseriesQueryEngine): 13 uses
IOException (java.io.IOException): 10 uses
SpatialDimFilter (io.druid.query.filter.SpatialDimFilter): 9 uses
ArrayList (java.util.ArrayList): 8 uses
DoubleSumAggregatorFactory (io.druid.query.aggregation.DoubleSumAggregatorFactory): 7 uses
FilteredAggregatorFactory (io.druid.query.aggregation.FilteredAggregatorFactory): 6 uses