Use of org.apache.druid.query.groupby.having.GreaterThanHavingSpec in project druid by druid-io.
From the class GroupByQueryRunnerTest, method testRejectForceLimitPushDownWithHaving, which verifies that forcing limit push-down is rejected with an IAE when a having spec is present.
@Test
public void testRejectForceLimitPushDownWithHaving() {
  expectedException.expect(IAE.class);
  expectedException.expectMessage("Cannot force limit push down when a having spec is present.");
  final GroupByQuery query = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setGranularity(QueryRunnerTestHelper.ALL_GRAN)
      .setDimensions(new DefaultDimensionSpec(QueryRunnerTestHelper.MARKET_DIMENSION, "marketalias"))
      .setInterval(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .setLimitSpec(new DefaultLimitSpec(Collections.singletonList(new OrderByColumnSpec("marketalias", OrderByColumnSpec.Direction.DESCENDING)), 2))
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT)
      .overrideContext(ImmutableMap.of(GroupByQueryConfig.CTX_KEY_FORCE_LIMIT_PUSH_DOWN, true))
      .setHavingSpec(new GreaterThanHavingSpec("rows", 10))
      .build();
  query.isApplyLimitPushDown();
}
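For reference, the having spec used above can also be constructed on its own, outside the test harness. This is a minimal sketch, not part of the test suite; it assumes only the constructor already shown in the snippet above (the class name GreaterThanHavingSpecSketch is hypothetical).
// Minimal standalone sketch; constructor arguments mirror the snippet above.
import org.apache.druid.query.groupby.having.GreaterThanHavingSpec;
import org.apache.druid.query.groupby.having.HavingSpec;

public class GreaterThanHavingSpecSketch {
  public static void main(String[] args) {
    // Keep only groups whose "rows" aggregate is strictly greater than 10,
    // the same condition attached to the query in testRejectForceLimitPushDownWithHaving.
    HavingSpec havingSpec = new GreaterThanHavingSpec("rows", 10);
    System.out.println(havingSpec);
  }
}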
Use of org.apache.druid.query.groupby.having.GreaterThanHavingSpec in project druid by druid-io.
From the class GroupByQueryRunnerTest, method testPostAggMergedHavingSpec, which applies a post-aggregator-based having spec to results merged from two simulated daily segments.
@Test
public void testPostAggMergedHavingSpec() {
  GroupByQuery.Builder builder = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setInterval("2011-04-02/2011-04-04")
      .setDimensions(new DefaultDimensionSpec("quality", "alias"))
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("index", "index"))
      .setPostAggregatorSpecs(ImmutableList.of(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT))
      .setGranularity(new PeriodGranularity(new Period("P1M"), null, null))
      .setHavingSpec(new OrHavingSpec(ImmutableList.of(new GreaterThanHavingSpec(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT_METRIC, 1000L))));
  final GroupByQuery fullQuery = builder.build();
  List<ResultRow> expectedResults = Arrays.asList(
      makeRow(fullQuery, "2011-04-01", "alias", "mezzanine", "rows", 6L, "index", 4420L, QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT_METRIC, (double) (6L + 4420L + 1L)),
      makeRow(fullQuery, "2011-04-01", "alias", "premium", "rows", 6L, "index", 4416L, QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT_METRIC, (double) (6L + 4416L + 1L))
  );
  QueryRunner mergedRunner = factory.getToolchest().mergeResults(new QueryRunner<ResultRow>() {
    @Override
    public Sequence<ResultRow> run(QueryPlus<ResultRow> queryPlus, ResponseContext responseContext) {
      // simulate two daily segments
      final QueryPlus queryPlus1 = queryPlus.withQuery(
          queryPlus.getQuery().withQuerySegmentSpec(new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.of("2011-04-02/2011-04-03"))))
      );
      final QueryPlus queryPlus2 = queryPlus.withQuery(
          queryPlus.getQuery().withQuerySegmentSpec(new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.of("2011-04-03/2011-04-04"))))
      );
      return new MergeSequence(
          queryPlus.getQuery().getResultOrdering(),
          Sequences.simple(Arrays.asList(runner.run(queryPlus1, responseContext), runner.run(queryPlus2, responseContext)))
      );
    }
  });
  TestHelper.assertExpectedObjects(expectedResults, mergedRunner.run(QueryPlus.wrap(fullQuery)), "merged");
}
Use of org.apache.druid.query.groupby.having.GreaterThanHavingSpec in project druid by druid-io.
From the class GroupByMultiSegmentTest, method testHavingAndNoLimitPushDown, which runs a having spec together with an ordered limit spec across multiple segments without limit push-down.
@Test
public void testHavingAndNoLimitPushDown() {
  QueryToolChest<ResultRow, GroupByQuery> toolChest = groupByFactory.getToolchest();
  QueryRunner<ResultRow> theRunner = new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(groupByFactory.mergeRunners(executorService, makeGroupByMultiRunners())),
      (QueryToolChest) toolChest
  );
  QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.utc(0, 1000000)));
  GroupByQuery query = GroupByQuery.builder()
      .setDataSource("blah")
      .setQuerySegmentSpec(intervalSpec)
      .setDimensions(new DefaultDimensionSpec("dimA", null))
      .setAggregatorSpecs(new LongSumAggregatorFactory("metA", "metA"))
      .setLimitSpec(new DefaultLimitSpec(Collections.singletonList(new OrderByColumnSpec("dimA", OrderByColumnSpec.Direction.ASCENDING)), 1))
      .setHavingSpec(new GreaterThanHavingSpec("metA", 110))
      .setGranularity(Granularities.ALL)
      .build();
  Sequence<ResultRow> queryResult = theRunner.run(QueryPlus.wrap(query), ResponseContext.createEmpty());
  List<ResultRow> results = queryResult.toList();
  ResultRow expectedRow = GroupByQueryRunnerTestHelper.createExpectedRow(query, "1970-01-01T00:00:00.000Z", "dimA", "world", "metA", 150L);
  Assert.assertEquals(1, results.size());
  Assert.assertEquals(expectedRow, results.get(0));
}
Use of org.apache.druid.query.groupby.having.GreaterThanHavingSpec in project druid by druid-io.
From the class GroupByQueryRunnerTest, method testGroupByWithOrderLimitHavingSpec, which combines a having spec on the "index" metric with an ordered limit spec.
@Test
public void testGroupByWithOrderLimitHavingSpec() {
  GroupByQuery.Builder builder = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setInterval("2011-01-25/2011-01-28")
      .setDimensions(new DefaultDimensionSpec("quality", "alias"))
      .setAggregatorSpecs(
          QueryRunnerTestHelper.ROWS_COUNT,
          new DoubleSumAggregatorFactory("index", "index"),
          QueryRunnerTestHelper.INDEX_LONG_MIN,
          QueryRunnerTestHelper.INDEX_LONG_MAX,
          QueryRunnerTestHelper.INDEX_DOUBLE_MIN,
          QueryRunnerTestHelper.INDEX_DOUBLE_MAX,
          QueryRunnerTestHelper.INDEX_FLOAT_MIN,
          QueryRunnerTestHelper.INDEX_FLOAT_MAX
      )
      .setGranularity(Granularities.ALL)
      .setHavingSpec(new GreaterThanHavingSpec("index", 310L))
      .setLimitSpec(new DefaultLimitSpec(Collections.singletonList(new OrderByColumnSpec("index", OrderByColumnSpec.Direction.ASCENDING)), 5));
  GroupByQuery fullQuery = builder.build();
  List<ResultRow> expectedResults = Arrays.asList(
      makeRow(fullQuery, "2011-01-25", "alias", "business", "rows", 3L, "index", 312.38165283203125,
              QueryRunnerTestHelper.LONG_MIN_INDEX_METRIC, 101L, QueryRunnerTestHelper.LONG_MAX_INDEX_METRIC, 105L,
              QueryRunnerTestHelper.DOUBLE_MIN_INDEX_METRIC, 101.624789D, QueryRunnerTestHelper.DOUBLE_MAX_INDEX_METRIC, 105.873942D,
              QueryRunnerTestHelper.FLOAT_MIN_INDEX_METRIC, 101.62479F, QueryRunnerTestHelper.FLOAT_MAX_INDEX_METRIC, 105.87394F),
      makeRow(fullQuery, "2011-01-25", "alias", "news", "rows", 3L, "index", 312.7834167480469,
              QueryRunnerTestHelper.LONG_MIN_INDEX_METRIC, 102L, QueryRunnerTestHelper.LONG_MAX_INDEX_METRIC, 105L,
              QueryRunnerTestHelper.DOUBLE_MIN_INDEX_METRIC, 102.907866D, QueryRunnerTestHelper.DOUBLE_MAX_INDEX_METRIC, 105.266058D,
              QueryRunnerTestHelper.FLOAT_MIN_INDEX_METRIC, 102.90787F, QueryRunnerTestHelper.FLOAT_MAX_INDEX_METRIC, 105.26606F),
      makeRow(fullQuery, "2011-01-25", "alias", "technology", "rows", 3L, "index", 324.6412353515625,
              QueryRunnerTestHelper.LONG_MIN_INDEX_METRIC, 102L, QueryRunnerTestHelper.LONG_MAX_INDEX_METRIC, 116L,
              QueryRunnerTestHelper.DOUBLE_MIN_INDEX_METRIC, 102.044542D, QueryRunnerTestHelper.DOUBLE_MAX_INDEX_METRIC, 116.979005D,
              QueryRunnerTestHelper.FLOAT_MIN_INDEX_METRIC, 102.04454F, QueryRunnerTestHelper.FLOAT_MAX_INDEX_METRIC, 116.979004F),
      makeRow(fullQuery, "2011-01-25", "alias", "travel", "rows", 3L, "index", 393.36322021484375,
              QueryRunnerTestHelper.LONG_MIN_INDEX_METRIC, 122L, QueryRunnerTestHelper.LONG_MAX_INDEX_METRIC, 149L,
              QueryRunnerTestHelper.DOUBLE_MIN_INDEX_METRIC, 122.077247D, QueryRunnerTestHelper.DOUBLE_MAX_INDEX_METRIC, 149.125271D,
              QueryRunnerTestHelper.FLOAT_MIN_INDEX_METRIC, 122.07725F, QueryRunnerTestHelper.FLOAT_MAX_INDEX_METRIC, 149.12527F),
      makeRow(fullQuery, "2011-01-25", "alias", "health", "rows", 3L, "index", 511.2996826171875,
              QueryRunnerTestHelper.LONG_MIN_INDEX_METRIC, 159L, QueryRunnerTestHelper.LONG_MAX_INDEX_METRIC, 180L,
              QueryRunnerTestHelper.DOUBLE_MIN_INDEX_METRIC, 159.988606D, QueryRunnerTestHelper.DOUBLE_MAX_INDEX_METRIC, 180.575246D,
              QueryRunnerTestHelper.FLOAT_MIN_INDEX_METRIC, 159.9886F, QueryRunnerTestHelper.FLOAT_MAX_INDEX_METRIC, 180.57524F)
  );
  Iterable<ResultRow> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, fullQuery);
  TestHelper.assertExpectedObjects(expectedResults, results, "order-limit-havingspec");
}
Use of org.apache.druid.query.groupby.having.GreaterThanHavingSpec in project druid by druid-io.
From the class GroupByQueryRunnerTest, method testMergedPostAggHavingSpec, which evaluates an OrHavingSpec over post-aggregated values on results merged from two simulated daily segments.
@Test
public void testMergedPostAggHavingSpec() {
  GroupByQuery.Builder builder = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setInterval("2011-04-02/2011-04-04")
      .setDimensions(new DefaultDimensionSpec("quality", "alias"))
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
      .setPostAggregatorSpecs(Collections.singletonList(new ArithmeticPostAggregator("rows_times_10", "*", Arrays.asList(new FieldAccessPostAggregator("rows", "rows"), new ConstantPostAggregator("const", 10L)))))
      .setGranularity(new PeriodGranularity(new Period("P1M"), null, null))
      .setHavingSpec(new OrHavingSpec(ImmutableList.of(new GreaterThanHavingSpec("rows_times_10", 20L), new EqualToHavingSpec("idx", 217L))));
  GroupByQuery query = builder.build();
  // Same query, but with expressions instead of arithmetic.
  final GroupByQuery expressionQuery = query.withPostAggregatorSpecs(
      Collections.singletonList(new ExpressionPostAggregator("rows_times_10", "rows * 10.0", null, TestExprMacroTable.INSTANCE))
  );
  List<ResultRow> expectedResults = Arrays.asList(
      makeRow(query, "2011-04-01", "alias", "business", "rows", 2L, "idx", 217L, "rows_times_10", 20.0),
      makeRow(query, "2011-04-01", "alias", "mezzanine", "rows", 6L, "idx", 4420L, "rows_times_10", 60.0),
      makeRow(query, "2011-04-01", "alias", "premium", "rows", 6L, "idx", 4416L, "rows_times_10", 60.0)
  );
  QueryRunner mergedRunner = factory.getToolchest().mergeResults(new QueryRunner<ResultRow>() {
    @Override
    public Sequence<ResultRow> run(QueryPlus<ResultRow> queryPlus, ResponseContext responseContext) {
      // simulate two daily segments
      final QueryPlus queryPlus1 = queryPlus.withQuery(
          queryPlus.getQuery().withQuerySegmentSpec(new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.of("2011-04-02/2011-04-03"))))
      );
      final QueryPlus queryPlus2 = queryPlus.withQuery(
          queryPlus.getQuery().withQuerySegmentSpec(new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.of("2011-04-03/2011-04-04"))))
      );
      return new MergeSequence(
          queryPlus.getQuery().getResultOrdering(),
          Sequences.simple(Arrays.asList(runner.run(queryPlus1, responseContext), runner.run(queryPlus2, responseContext)))
      );
    }
  });
  ResponseContext context = ResponseContext.createEmpty();
  // add an extra layer of merging, simulate broker forwarding query to historical
  TestHelper.assertExpectedObjects(
      expectedResults,
      factory.getToolchest().postMergeQueryDecoration(factory.getToolchest().mergeResults(factory.getToolchest().preMergeQueryDecoration(mergedRunner))).run(QueryPlus.wrap(query)),
      "merged"
  );
  TestHelper.assertExpectedObjects(
      expectedResults,
      factory.getToolchest().postMergeQueryDecoration(factory.getToolchest().mergeResults(factory.getToolchest().preMergeQueryDecoration(mergedRunner))).run(QueryPlus.wrap(expressionQuery)),
      "merged"
  );
}
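The composite having spec used in testMergedPostAggHavingSpec can also be built in isolation. This is a minimal sketch under the assumption that only the constructors already shown above are needed; the class name OrHavingSpecSketch is hypothetical and not part of the Druid test suite.
// Minimal standalone sketch; constructors mirror those used in testMergedPostAggHavingSpec above.
import com.google.common.collect.ImmutableList;
import org.apache.druid.query.groupby.having.EqualToHavingSpec;
import org.apache.druid.query.groupby.having.GreaterThanHavingSpec;
import org.apache.druid.query.groupby.having.HavingSpec;
import org.apache.druid.query.groupby.having.OrHavingSpec;

public class OrHavingSpecSketch {
  public static void main(String[] args) {
    // Pass a group through if rows_times_10 > 20 OR idx == 217,
    // the same disjunction attached to the query in the test above.
    HavingSpec havingSpec = new OrHavingSpec(
        ImmutableList.of(
            new GreaterThanHavingSpec("rows_times_10", 20L),
            new EqualToHavingSpec("idx", 217L)
        )
    );
    System.out.println(havingSpec);
  }
}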