
Example 36 with PeriodGranularity

Use of io.druid.java.util.common.granularity.PeriodGranularity in project druid by druid-io.

From the class GroupByQueryRunnerTest, method doTestMergeResultsWithOrderBy.

private void doTestMergeResultsWithOrderBy(LimitSpec orderBySpec, List<Row> expectedResults) {
    GroupByQuery.Builder builder = GroupByQuery.builder()
            .setDataSource(QueryRunnerTestHelper.dataSource)
            .setInterval("2011-04-02/2011-04-04")
            .setDimensions(Lists.<DimensionSpec>newArrayList(new DefaultDimensionSpec("quality", "alias")))
            .setAggregatorSpecs(Arrays.asList(QueryRunnerTestHelper.rowsCount, new LongSumAggregatorFactory("idx", "index")))
            .setGranularity(new PeriodGranularity(new Period("P1M"), null, null))
            .setLimitSpec(orderBySpec);
    final GroupByQuery fullQuery = builder.build();
    QueryRunner mergedRunner = factory.getToolchest().mergeResults(new QueryRunner<Row>() {

        @Override
        public Sequence<Row> run(Query<Row> query, Map<String, Object> responseContext) {
            // simulate two daily segments
            final Query query1 = query.withQuerySegmentSpec(new MultipleIntervalSegmentSpec(Lists.newArrayList(new Interval("2011-04-02/2011-04-03"))));
            final Query query2 = query.withQuerySegmentSpec(new MultipleIntervalSegmentSpec(Lists.newArrayList(new Interval("2011-04-03/2011-04-04"))));
            return new MergeSequence(query.getResultOrdering(), Sequences.simple(Arrays.asList(runner.run(query1, responseContext), runner.run(query2, responseContext))));
        }
    });
    Map<String, Object> context = Maps.newHashMap();
    TestHelper.assertExpectedObjects(expectedResults, mergedRunner.run(fullQuery, context), "merged");
}
Also used : DefaultDimensionSpec(io.druid.query.dimension.DefaultDimensionSpec) RegexFilteredDimensionSpec(io.druid.query.dimension.RegexFilteredDimensionSpec) ExtractionDimensionSpec(io.druid.query.dimension.ExtractionDimensionSpec) ListFilteredDimensionSpec(io.druid.query.dimension.ListFilteredDimensionSpec) DimensionSpec(io.druid.query.dimension.DimensionSpec) Query(io.druid.query.Query) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) PeriodGranularity(io.druid.java.util.common.granularity.PeriodGranularity) Period(org.joda.time.Period) MultipleIntervalSegmentSpec(io.druid.query.spec.MultipleIntervalSegmentSpec) Sequence(io.druid.java.util.common.guava.Sequence) MergeSequence(io.druid.java.util.common.guava.MergeSequence) DefaultDimensionSpec(io.druid.query.dimension.DefaultDimensionSpec) FinalizeResultsQueryRunner(io.druid.query.FinalizeResultsQueryRunner) QueryRunner(io.druid.query.QueryRunner) MergeSequence(io.druid.java.util.common.guava.MergeSequence) Row(io.druid.data.input.Row) Interval(org.joda.time.Interval)
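
The three arguments in the setGranularity call above are the period, the bucket origin, and the time zone; passing null for the last two is how these tests fall back to Druid's defaults (epoch-aligned buckets evaluated in UTC). A minimal sketch of both spellings, assuming the (Period, DateTime, DateTimeZone) reading of the constructor that the call above suggests:

import io.druid.java.util.common.granularity.PeriodGranularity;
import org.joda.time.DateTime;
import org.joda.time.DateTimeZone;
import org.joda.time.Period;

public class PeriodGranularityExample {

    public static void main(String[] args) {
        // Monthly buckets with default origin and time zone, exactly as in the test above.
        PeriodGranularity monthly = new PeriodGranularity(new Period("P1M"), null, null);

        // Daily buckets aligned to an explicit origin and evaluated in a named time zone
        // (the origin and zone values here are illustrative, not taken from the test).
        PeriodGranularity dailyPacific = new PeriodGranularity(
                new Period("P1D"),
                new DateTime("2011-01-01T00:00:00.000-08:00"),
                DateTimeZone.forID("America/Los_Angeles")
        );

        System.out.println(monthly);
        System.out.println(dailyPacific);
    }
}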

Example 37 with PeriodGranularity

Use of io.druid.java.util.common.granularity.PeriodGranularity in project druid by druid-io.

From the class GroupByQueryRunnerTest, method testGroupByWithRegEx.

@Test
public void testGroupByWithRegEx() throws Exception {
    GroupByQuery.Builder builder = GroupByQuery.builder()
            .setDataSource(QueryRunnerTestHelper.dataSource)
            .setInterval("2011-04-02/2011-04-04")
            .setDimFilter(new RegexDimFilter("quality", "auto.*", null))
            .setDimensions(Lists.<DimensionSpec>newArrayList(new DefaultDimensionSpec("quality", "quality")))
            .setAggregatorSpecs(Arrays.<AggregatorFactory>asList(QueryRunnerTestHelper.rowsCount))
            .setGranularity(new PeriodGranularity(new Period("P1M"), null, null));
    final GroupByQuery query = builder.build();
    List<Row> expectedResults = Arrays.asList(GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "quality", "automotive", "rows", 2L));
    QueryRunner<Row> mergeRunner = factory.getToolchest().mergeResults(runner);
    Map<String, Object> context = Maps.newHashMap();
    TestHelper.assertExpectedObjects(expectedResults, mergeRunner.run(query, context), "no-limit");
}
Also used : RegexDimFilter(io.druid.query.filter.RegexDimFilter) PeriodGranularity(io.druid.java.util.common.granularity.PeriodGranularity) Period(org.joda.time.Period) DoubleMaxAggregatorFactory(io.druid.query.aggregation.DoubleMaxAggregatorFactory) LongMaxAggregatorFactory(io.druid.query.aggregation.LongMaxAggregatorFactory) DoubleSumAggregatorFactory(io.druid.query.aggregation.DoubleSumAggregatorFactory) LongFirstAggregatorFactory(io.druid.query.aggregation.first.LongFirstAggregatorFactory) FilteredAggregatorFactory(io.druid.query.aggregation.FilteredAggregatorFactory) JavaScriptAggregatorFactory(io.druid.query.aggregation.JavaScriptAggregatorFactory) CardinalityAggregatorFactory(io.druid.query.aggregation.cardinality.CardinalityAggregatorFactory) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) CountAggregatorFactory(io.druid.query.aggregation.CountAggregatorFactory) HyperUniquesAggregatorFactory(io.druid.query.aggregation.hyperloglog.HyperUniquesAggregatorFactory) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) LongLastAggregatorFactory(io.druid.query.aggregation.last.LongLastAggregatorFactory) DefaultDimensionSpec(io.druid.query.dimension.DefaultDimensionSpec) Row(io.druid.data.input.Row) Test(org.junit.Test)
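
Note that the expected row above is stamped 2011-04-01 even though the query interval starts at 2011-04-02: with P1M granularity every April timestamp is truncated to the start of the month. A small sketch of that truncation, under the assumption that Granularity exposes a bucketStart(DateTime) method:

import io.druid.java.util.common.granularity.PeriodGranularity;
import org.joda.time.DateTime;
import org.joda.time.Period;

public class MonthBucketExample {

    public static void main(String[] args) {
        PeriodGranularity monthly = new PeriodGranularity(new Period("P1M"), null, null);
        // bucketStart(...) is assumed here; it should map any April instant to
        // 2011-04-01T00:00:00.000Z, which is why the expected row carries that timestamp.
        DateTime bucket = monthly.bucketStart(new DateTime("2011-04-02T00:00:00.000Z"));
        System.out.println(bucket);
    }
}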

Example 38 with PeriodGranularity

Use of io.druid.java.util.common.granularity.PeriodGranularity in project druid by druid-io.

From the class GroupByQueryRunnerTest, method testBySegmentResultsUnOptimizedDimextraction.

@Test
public void testBySegmentResultsUnOptimizedDimextraction() {
    int segmentCount = 32;
    Result<BySegmentResultValue> singleSegmentResult = new Result<BySegmentResultValue>(new DateTime("2011-01-12T00:00:00.000Z"), new BySegmentResultValueClass(Arrays.asList(GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "alias", "mezzanine0", "rows", 6L, "idx", 4420L)), "testSegment", new Interval("2011-04-02T00:00:00.000Z/2011-04-04T00:00:00.000Z")));
    List<Result> bySegmentResults = Lists.newArrayList();
    for (int i = 0; i < segmentCount; i++) {
        bySegmentResults.add(singleSegmentResult);
    }
    GroupByQuery.Builder builder = GroupByQuery.builder()
            .setDataSource(QueryRunnerTestHelper.dataSource)
            .setInterval("2011-04-02/2011-04-04")
            .setDimensions(Lists.<DimensionSpec>newArrayList(new ExtractionDimensionSpec("quality", "alias", new LookupExtractionFn(new MapLookupExtractor(ImmutableMap.of("mezzanine", "mezzanine0"), false), false, null, false, false))))
            .setAggregatorSpecs(Arrays.asList(QueryRunnerTestHelper.rowsCount, new LongSumAggregatorFactory("idx", "index")))
            .setGranularity(new PeriodGranularity(new Period("P1M"), null, null))
            .setDimFilter(new SelectorDimFilter("quality", "mezzanine", null))
            .setContext(ImmutableMap.<String, Object>of("bySegment", true));
    final GroupByQuery fullQuery = builder.build();
    QueryToolChest toolChest = factory.getToolchest();
    List<QueryRunner<Row>> singleSegmentRunners = Lists.newArrayList();
    for (int i = 0; i < segmentCount; i++) {
        singleSegmentRunners.add(toolChest.preMergeQueryDecoration(runner));
    }
    ExecutorService exec = Executors.newCachedThreadPool();
    // reuse the executor created above instead of leaking a second thread pool
    QueryRunner theRunner = toolChest.postMergeQueryDecoration(
            new FinalizeResultsQueryRunner<>(
                    toolChest.mergeResults(factory.mergeRunners(exec, singleSegmentRunners)),
                    toolChest
            )
    );
    TestHelper.assertExpectedObjects(bySegmentResults, theRunner.run(fullQuery, Maps.newHashMap()), "");
    exec.shutdownNow();
}
Also used : BySegmentResultValue(io.druid.query.BySegmentResultValue) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) PeriodGranularity(io.druid.java.util.common.granularity.PeriodGranularity) BySegmentResultValueClass(io.druid.query.BySegmentResultValueClass) Period(org.joda.time.Period) QueryToolChest(io.druid.query.QueryToolChest) DateTime(org.joda.time.DateTime) FinalizeResultsQueryRunner(io.druid.query.FinalizeResultsQueryRunner) QueryRunner(io.druid.query.QueryRunner) Result(io.druid.query.Result) LookupExtractionFn(io.druid.query.lookup.LookupExtractionFn) SelectorDimFilter(io.druid.query.filter.SelectorDimFilter) ExecutorService(java.util.concurrent.ExecutorService) MapLookupExtractor(io.druid.query.extraction.MapLookupExtractor) Interval(org.joda.time.Interval) ExtractionDimensionSpec(io.druid.query.dimension.ExtractionDimensionSpec) Test(org.junit.Test)
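
The lookup wiring inside the dimension spec above packs several booleans into the LookupExtractionFn constructor. Below is a hedged, annotated version of just that piece; the parameter names in the comments are my reading of the constructors, not something stated on this page:

import com.google.common.collect.ImmutableMap;
import io.druid.query.extraction.MapLookupExtractor;
import io.druid.query.lookup.LookupExtractionFn;

public class LookupExtractionFnExample {

    public static LookupExtractionFn mezzanineLookup() {
        MapLookupExtractor extractor = new MapLookupExtractor(
                ImmutableMap.of("mezzanine", "mezzanine0"),
                false  // assumed to mean "is one-to-one"; the test passes false
        );
        return new LookupExtractionFn(
                extractor,
                false, // assumed: retainMissingValue
                null,  // assumed: replaceMissingValueWith
                false, // assumed: injective; false here, which is the "unoptimized" variant in this test
                false  // assumed: optimize
        );
    }
}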

Example 39 with PeriodGranularity

Use of io.druid.java.util.common.granularity.PeriodGranularity in project druid by druid-io.

From the class GroupByQueryRunnerTest, method testBySegmentResultsOptimizedDimextraction.

@Test
public void testBySegmentResultsOptimizedDimextraction() {
    int segmentCount = 32;
    Result<BySegmentResultValue> singleSegmentResult = new Result<BySegmentResultValue>(new DateTime("2011-01-12T00:00:00.000Z"), new BySegmentResultValueClass(Arrays.asList(GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "alias", "mezzanine0", "rows", 6L, "idx", 4420L)), "testSegment", new Interval("2011-04-02T00:00:00.000Z/2011-04-04T00:00:00.000Z")));
    List<Result> bySegmentResults = Lists.newArrayList();
    for (int i = 0; i < segmentCount; i++) {
        bySegmentResults.add(singleSegmentResult);
    }
    GroupByQuery.Builder builder = GroupByQuery.builder()
            .setDataSource(QueryRunnerTestHelper.dataSource)
            .setInterval("2011-04-02/2011-04-04")
            .setDimensions(Lists.<DimensionSpec>newArrayList(new ExtractionDimensionSpec("quality", "alias", new LookupExtractionFn(new MapLookupExtractor(ImmutableMap.of("mezzanine", "mezzanine0"), false), false, null, true, false))))
            .setAggregatorSpecs(Arrays.asList(QueryRunnerTestHelper.rowsCount, new LongSumAggregatorFactory("idx", "index")))
            .setGranularity(new PeriodGranularity(new Period("P1M"), null, null))
            .setDimFilter(new SelectorDimFilter("quality", "mezzanine", null))
            .setContext(ImmutableMap.<String, Object>of("bySegment", true));
    final GroupByQuery fullQuery = builder.build();
    QueryToolChest toolChest = factory.getToolchest();
    List<QueryRunner<Row>> singleSegmentRunners = Lists.newArrayList();
    for (int i = 0; i < segmentCount; i++) {
        singleSegmentRunners.add(toolChest.preMergeQueryDecoration(runner));
    }
    ExecutorService exec = Executors.newCachedThreadPool();
    // reuse the executor created above instead of leaking a second thread pool
    QueryRunner theRunner = toolChest.postMergeQueryDecoration(
            new FinalizeResultsQueryRunner<>(
                    toolChest.mergeResults(factory.mergeRunners(exec, singleSegmentRunners)),
                    toolChest
            )
    );
    TestHelper.assertExpectedObjects(bySegmentResults, theRunner.run(fullQuery, Maps.newHashMap()), "");
    exec.shutdownNow();
}
Also used : BySegmentResultValue(io.druid.query.BySegmentResultValue) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) PeriodGranularity(io.druid.java.util.common.granularity.PeriodGranularity) BySegmentResultValueClass(io.druid.query.BySegmentResultValueClass) Period(org.joda.time.Period) QueryToolChest(io.druid.query.QueryToolChest) DateTime(org.joda.time.DateTime) FinalizeResultsQueryRunner(io.druid.query.FinalizeResultsQueryRunner) QueryRunner(io.druid.query.QueryRunner) Result(io.druid.query.Result) LookupExtractionFn(io.druid.query.lookup.LookupExtractionFn) SelectorDimFilter(io.druid.query.filter.SelectorDimFilter) ExecutorService(java.util.concurrent.ExecutorService) MapLookupExtractor(io.druid.query.extraction.MapLookupExtractor) Interval(org.joda.time.Interval) ExtractionDimensionSpec(io.druid.query.dimension.ExtractionDimensionSpec) Test(org.junit.Test)
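
This test is identical to Example 38 except for one boolean in the lookup: the fourth argument to LookupExtractionFn is true here, which is what makes this the "optimized" dim-extraction case. A small helper isolating that single difference; the parameter name "injective" is my assumption about the constructor, not taken from this page:

import io.druid.query.extraction.MapLookupExtractor;
import io.druid.query.lookup.LookupExtractionFn;

public class LookupOptimizationFlagExample {

    // Example 38 passes false for the fourth argument, Example 39 passes true; everything else is identical.
    public static LookupExtractionFn withInjectiveFlag(MapLookupExtractor extractor, boolean injective) {
        return new LookupExtractionFn(extractor, false, null, injective, false);
    }
}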

Example 40 with PeriodGranularity

Use of io.druid.java.util.common.granularity.PeriodGranularity in project druid by druid-io.

From the class TimeseriesQueryRunnerTest, method testTimeseriesWithVaryingGran.

@Test
public void testTimeseriesWithVaryingGran() {
    TimeseriesQuery query1 = Druids.newTimeseriesQueryBuilder()
            .dataSource(QueryRunnerTestHelper.dataSource)
            .granularity(new PeriodGranularity(new Period("P1M"), null, null))
            .intervals(Arrays.asList(new Interval("2011-04-02T00:00:00.000Z/2011-04-03T00:00:00.000Z")))
            .aggregators(Arrays.<AggregatorFactory>asList(QueryRunnerTestHelper.rowsCount, new LongSumAggregatorFactory("idx", "index"), QueryRunnerTestHelper.qualityUniques))
            .descending(descending)
            .build();
    List<Result<TimeseriesResultValue>> expectedResults1 = Arrays.asList(new Result<>(new DateTime("2011-04-01"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 13L, "idx", 5827L, "uniques", QueryRunnerTestHelper.UNIQUES_9))));
    Iterable<Result<TimeseriesResultValue>> results1 = Sequences.toList(runner.run(query1, CONTEXT), Lists.<Result<TimeseriesResultValue>>newArrayList());
    assertExpectedResults(expectedResults1, results1);
    TimeseriesQuery query2 = Druids.newTimeseriesQueryBuilder()
            .dataSource(QueryRunnerTestHelper.dataSource)
            .granularity("DAY")
            .intervals(Arrays.asList(new Interval("2011-04-02T00:00:00.000Z/2011-04-03T00:00:00.000Z")))
            .aggregators(Arrays.<AggregatorFactory>asList(QueryRunnerTestHelper.rowsCount, new LongSumAggregatorFactory("idx", "index"), QueryRunnerTestHelper.qualityUniques))
            .build();
    List<Result<TimeseriesResultValue>> expectedResults2 = Arrays.asList(new Result<>(new DateTime("2011-04-02"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 13L, "idx", 5827L, "uniques", QueryRunnerTestHelper.UNIQUES_9))));
    Iterable<Result<TimeseriesResultValue>> results2 = Sequences.toList(runner.run(query2, CONTEXT), Lists.<Result<TimeseriesResultValue>>newArrayList());
    assertExpectedResults(expectedResults2, results2);
}
Also used : PeriodGranularity(io.druid.java.util.common.granularity.PeriodGranularity) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) Period(org.joda.time.Period) AggregatorFactory(io.druid.query.aggregation.AggregatorFactory) CountAggregatorFactory(io.druid.query.aggregation.CountAggregatorFactory) DoubleFirstAggregatorFactory(io.druid.query.aggregation.first.DoubleFirstAggregatorFactory) DoubleMaxAggregatorFactory(io.druid.query.aggregation.DoubleMaxAggregatorFactory) FilteredAggregatorFactory(io.druid.query.aggregation.FilteredAggregatorFactory) DoubleMinAggregatorFactory(io.druid.query.aggregation.DoubleMinAggregatorFactory) DoubleLastAggregatorFactory(io.druid.query.aggregation.last.DoubleLastAggregatorFactory) LongSumAggregatorFactory(io.druid.query.aggregation.LongSumAggregatorFactory) DateTime(org.joda.time.DateTime) Interval(org.joda.time.Interval) Result(io.druid.query.Result) Test(org.junit.Test)
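
The point of this test is that the builder accepts a granularity either as a Granularity object or as a string: query1 uses a P1M PeriodGranularity, so its result is stamped at the start of the month (2011-04-01), while query2 uses the string "DAY" and is stamped at the start of the day (2011-04-02). A trimmed sketch of the two spellings; the "testing" data source name and the single count aggregator are placeholders I introduced, not values from the test:

import io.druid.java.util.common.granularity.PeriodGranularity;
import io.druid.query.Druids;
import io.druid.query.aggregation.AggregatorFactory;
import io.druid.query.aggregation.CountAggregatorFactory;
import io.druid.query.timeseries.TimeseriesQuery;
import java.util.Arrays;
import org.joda.time.Interval;
import org.joda.time.Period;

public class VaryingGranularityExample {

    // P1M period granularity: results are stamped at the start of the month (2011-04-01).
    public static TimeseriesQuery monthly() {
        return Druids.newTimeseriesQueryBuilder()
                .dataSource("testing")
                .granularity(new PeriodGranularity(new Period("P1M"), null, null))
                .intervals(Arrays.asList(new Interval("2011-04-02T00:00:00.000Z/2011-04-03T00:00:00.000Z")))
                .aggregators(Arrays.<AggregatorFactory>asList(new CountAggregatorFactory("rows")))
                .build();
    }

    // The string "DAY" spelling: results are stamped at the start of the day (2011-04-02).
    public static TimeseriesQuery daily() {
        return Druids.newTimeseriesQueryBuilder()
                .dataSource("testing")
                .granularity("DAY")
                .intervals(Arrays.asList(new Interval("2011-04-02T00:00:00.000Z/2011-04-03T00:00:00.000Z")))
                .aggregators(Arrays.<AggregatorFactory>asList(new CountAggregatorFactory("rows")))
                .build();
    }
}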

Aggregations

PeriodGranularity (io.druid.java.util.common.granularity.PeriodGranularity): 41
Period (org.joda.time.Period): 39
Test (org.junit.Test): 36
LongSumAggregatorFactory (io.druid.query.aggregation.LongSumAggregatorFactory): 26
DefaultDimensionSpec (io.druid.query.dimension.DefaultDimensionSpec): 20
DateTime (org.joda.time.DateTime): 19
Row (io.druid.data.input.Row): 18
Interval (org.joda.time.Interval): 18
FinalizeResultsQueryRunner (io.druid.query.FinalizeResultsQueryRunner): 9
QueryRunner (io.druid.query.QueryRunner): 9
Result (io.druid.query.Result): 9
ExtractionDimensionSpec (io.druid.query.dimension.ExtractionDimensionSpec): 9
DimensionSpec (io.druid.query.dimension.DimensionSpec): 8
ListFilteredDimensionSpec (io.druid.query.dimension.ListFilteredDimensionSpec): 7
RegexFilteredDimensionSpec (io.druid.query.dimension.RegexFilteredDimensionSpec): 7
DimFilterHavingSpec (io.druid.query.groupby.having.DimFilterHavingSpec): 7
SelectorDimFilter (io.druid.query.filter.SelectorDimFilter): 6
GreaterThanHavingSpec (io.druid.query.groupby.having.GreaterThanHavingSpec): 6
OrHavingSpec (io.druid.query.groupby.having.OrHavingSpec): 6
MergeSequence (io.druid.java.util.common.guava.MergeSequence): 5