Use of org.apache.druid.query.groupby.orderby.OrderByColumnSpec in project druid by druid-io.
From the class GroupByQueryRunnerTest, method testRejectForceLimitPushDownWithHaving.
@Test
public void testRejectForceLimitPushDownWithHaving()
{
  expectedException.expect(IAE.class);
  expectedException.expectMessage("Cannot force limit push down when a having spec is present.");
  final GroupByQuery query = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setGranularity(QueryRunnerTestHelper.ALL_GRAN)
      .setDimensions(new DefaultDimensionSpec(QueryRunnerTestHelper.MARKET_DIMENSION, "marketalias"))
      .setInterval(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .setLimitSpec(
          new DefaultLimitSpec(
              Collections.singletonList(new OrderByColumnSpec("marketalias", OrderByColumnSpec.Direction.DESCENDING)),
              2
          )
      )
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT)
      .overrideContext(ImmutableMap.of(GroupByQueryConfig.CTX_KEY_FORCE_LIMIT_PUSH_DOWN, true))
      .setHavingSpec(new GreaterThanHavingSpec("rows", 10))
      .build();
  // Reading the computed push-down flag triggers validation, which should throw the expected IAE.
  query.isApplyLimitPushDown();
}
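For reference, a minimal standalone sketch of the limit spec this test builds, assuming only druid-processing on the classpath; the class name LimitSpecSketch and the println are illustrative, not part of the test suite:

import java.util.Collections;
import org.apache.druid.query.groupby.orderby.DefaultLimitSpec;
import org.apache.druid.query.groupby.orderby.OrderByColumnSpec;

public class LimitSpecSketch
{
  public static void main(String[] args)
  {
    // Order by "marketalias" descending and keep at most 2 rows,
    // mirroring the limit spec built inside the test above.
    DefaultLimitSpec limitSpec = new DefaultLimitSpec(
        Collections.singletonList(
            new OrderByColumnSpec("marketalias", OrderByColumnSpec.Direction.DESCENDING)
        ),
        2
    );
    System.out.println(limitSpec);
  }
}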
Use of org.apache.druid.query.groupby.orderby.OrderByColumnSpec in project druid by druid-io.
From the class GroupByQueryRunnerTest, method testSubqueryWithOuterCountAggregator.
@Test
public void testSubqueryWithOuterCountAggregator()
{
  final GroupByQuery subquery = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .setDimensions(new DefaultDimensionSpec("quality", "alias"))
      .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
      .setLimitSpec(
          new DefaultLimitSpec(
              ImmutableList.of(new OrderByColumnSpec("alias", OrderByColumnSpec.Direction.ASCENDING)),
              null
          )
      )
      .build();
  final GroupByQuery query = makeQueryBuilder()
      .setDataSource(subquery)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(new ArrayList<>())
      .setAggregatorSpecs(new CountAggregatorFactory("count"))
      .setGranularity(QueryRunnerTestHelper.ALL_GRAN)
      .build();
  if (config.getDefaultStrategy().equals(GroupByStrategySelector.STRATEGY_V1)) {
    // The v1 strategy cannot resolve the subquery's order-by column and fails.
    expectedException.expect(ISE.class);
    expectedException.expectMessage("Unknown column in order clause");
    GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  } else {
    List<ResultRow> expectedResults = Collections.singletonList(makeRow(query, "2011-04-01", "count", 18L));
    Iterable<ResultRow> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
    TestHelper.assertExpectedObjects(expectedResults, results, "subquery-count-agg");
  }
}
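The subquery's DefaultLimitSpec passes null as its limit, which sorts rows without truncating them. A minimal sketch of that order-only shape, assuming druid-processing and its static OrderByColumnSpec.ascending(...) helper; the class name OrderOnlySketch is illustrative:

import org.apache.druid.query.groupby.orderby.DefaultLimitSpec;
import org.apache.druid.query.groupby.orderby.OrderByColumnSpec;

public class OrderOnlySketch
{
  public static void main(String[] args)
  {
    // A null limit means "sort by alias ascending, keep every row",
    // the same shape as the subquery's limit spec above.
    DefaultLimitSpec orderOnly = new DefaultLimitSpec(
        OrderByColumnSpec.ascending("alias"),
        null
    );
    System.out.println(orderOnly);
  }
}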
Use of org.apache.druid.query.groupby.orderby.OrderByColumnSpec in project druid by druid-io.
From the class GroupByQueryRunnerTest, method testSubqueryWithOuterMaxOnDiskStorageContextOverride.
@Test
public void testSubqueryWithOuterMaxOnDiskStorageContextOverride()
{
  final GroupByQuery subquery = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .setDimensions(new DefaultDimensionSpec("quality", "alias"))
      .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
      .setLimitSpec(
          new DefaultLimitSpec(
              ImmutableList.of(new OrderByColumnSpec("alias", OrderByColumnSpec.Direction.ASCENDING)),
              null
          )
      )
      .overrideContext(ImmutableMap.of("maxOnDiskStorage", Integer.MAX_VALUE, "bufferGrouperMaxSize", Integer.MAX_VALUE))
      .build();
  final GroupByQuery query = makeQueryBuilder()
      .setDataSource(subquery)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(new ArrayList<>())
      .setAggregatorSpecs(new CountAggregatorFactory("count"))
      .setGranularity(QueryRunnerTestHelper.ALL_GRAN)
      .overrideContext(ImmutableMap.of("maxOnDiskStorage", 0, "bufferGrouperMaxSize", 0))
      .build();
  if (config.getDefaultStrategy().equals(GroupByStrategySelector.STRATEGY_V1)) {
    expectedException.expect(ISE.class);
    expectedException.expectMessage("Unknown column in order clause");
    GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  } else {
    // With zero disk spill and zero buffer headroom, the v2 merge runs out of resources.
    expectedException.expect(ResourceLimitExceededException.class);
    expectedException.expectMessage("Not enough aggregation buffer space to execute this query");
    GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  }
}
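Both branches hinge on the two context keys: the subquery is given effectively unlimited spill and grouper room, while the outer query gets none, so the v2 strategy exhausts its aggregation buffer. A minimal sketch of the two context maps, assuming only Guava on the classpath; the class name and println are illustrative:

import com.google.common.collect.ImmutableMap;
import java.util.Map;

public class GroupByContextSketch
{
  public static void main(String[] args)
  {
    // Subquery context: effectively unlimited disk spill and grouper size.
    Map<String, Object> roomy = ImmutableMap.of(
        "maxOnDiskStorage", Integer.MAX_VALUE,
        "bufferGrouperMaxSize", Integer.MAX_VALUE
    );
    // Outer query context: no spill and no buffer headroom, which is what
    // provokes the ResourceLimitExceededException in the v2 branch above.
    Map<String, Object> strict = ImmutableMap.of(
        "maxOnDiskStorage", 0,
        "bufferGrouperMaxSize", 0
    );
    System.out.println(roomy + " vs " + strict);
  }
}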
Use of org.apache.druid.query.groupby.orderby.OrderByColumnSpec in project druid by druid-io.
From the class GroupByLimitPushDownInsufficientBufferTest, method testPartialLimitPushDownMerge.
@Test
public void testPartialLimitPushDownMerge()
{
  // One segment's results use limit push down; the other's don't, because of insufficient buffer capacity.
  QueryToolChest<ResultRow, GroupByQuery> toolChest = groupByFactory.getToolchest();
  QueryRunner<ResultRow> theRunner = new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(groupByFactory.mergeRunners(executorService, getRunner1())),
      (QueryToolChest) toolChest
  );
  QueryRunner<ResultRow> theRunner2 = new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(tooSmallGroupByFactory.mergeRunners(executorService, getRunner2())),
      (QueryToolChest) toolChest
  );
  QueryRunner<ResultRow> theRunner3 = new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(
          new QueryRunner<ResultRow>()
          {
            @Override
            public Sequence<ResultRow> run(QueryPlus<ResultRow> queryPlus, ResponseContext responseContext)
            {
              return Sequences
                  .simple(ImmutableList.of(
                      theRunner.run(queryPlus, responseContext),
                      theRunner2.run(queryPlus, responseContext)
                  ))
                  .flatMerge(Function.identity(), queryPlus.getQuery().getResultOrdering());
            }
          }
      ),
      (QueryToolChest) toolChest
  );
  QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.utc(0, 1000000)));
  GroupByQuery query = GroupByQuery.builder()
      .setDataSource("blah")
      .setQuerySegmentSpec(intervalSpec)
      .setDimensions(new DefaultDimensionSpec("dimA", null))
      .setAggregatorSpecs(new LongSumAggregatorFactory("metA", "metA"))
      .setLimitSpec(
          new DefaultLimitSpec(
              Collections.singletonList(new OrderByColumnSpec("dimA", OrderByColumnSpec.Direction.DESCENDING)),
              3
          )
      )
      .setGranularity(Granularities.ALL)
      .build();
  Sequence<ResultRow> queryResult = theRunner3.run(QueryPlus.wrap(query), ResponseContext.createEmpty());
  List<ResultRow> results = queryResult.toList();
  ResultRow expectedRow0 = GroupByQueryRunnerTestHelper.createExpectedRow(query, "1970-01-01T00:00:00.000Z", "dimA", "zortaxx", "metA", 999L);
  ResultRow expectedRow1 = GroupByQueryRunnerTestHelper.createExpectedRow(query, "1970-01-01T00:00:00.000Z", "dimA", "zebra", "metA", 180L);
  ResultRow expectedRow2 = GroupByQueryRunnerTestHelper.createExpectedRow(query, "1970-01-01T00:00:00.000Z", "dimA", "world", "metA", 150L);
  Assert.assertEquals(3, results.size());
  Assert.assertEquals(expectedRow0, results.get(0));
  Assert.assertEquals(expectedRow1, results.get(1));
  Assert.assertEquals(expectedRow2, results.get(2));
}
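The anonymous QueryRunner combines the two partial results with Sequence.flatMerge, which interleaves already-sorted sequences by an ordering. A minimal sketch of that pattern on plain integers, assuming druid-core's Sequence/Sequences and Guava's Ordering; the inputs and class name are illustrative:

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Ordering;
import java.util.function.Function;
import org.apache.druid.java.util.common.guava.Sequence;
import org.apache.druid.java.util.common.guava.Sequences;

public class FlatMergeSketch
{
  public static void main(String[] args)
  {
    // Two pre-sorted inputs stand in for the per-runner result sequences.
    Sequence<Integer> left = Sequences.simple(ImmutableList.of(1, 3, 5));
    Sequence<Integer> right = Sequences.simple(ImmutableList.of(2, 4, 6));
    // flatMerge interleaves them by the given ordering, the same pattern the
    // anonymous QueryRunner above applies with the query's result ordering.
    Sequence<Integer> merged = Sequences
        .simple(ImmutableList.of(left, right))
        .flatMerge(Function.identity(), Ordering.natural());
    // Prints [1, 2, 3, 4, 5, 6].
    System.out.println(merged.toList());
  }
}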
Use of org.apache.druid.query.groupby.orderby.OrderByColumnSpec in project druid by druid-io.
From the class GroupByMultiSegmentTest, method testHavingAndNoLimitPushDown.
@Test
public void testHavingAndNoLimitPushDown()
{
  QueryToolChest<ResultRow, GroupByQuery> toolChest = groupByFactory.getToolchest();
  QueryRunner<ResultRow> theRunner = new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(groupByFactory.mergeRunners(executorService, makeGroupByMultiRunners())),
      (QueryToolChest) toolChest
  );
  QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(Collections.singletonList(Intervals.utc(0, 1000000)));
  GroupByQuery query = GroupByQuery.builder()
      .setDataSource("blah")
      .setQuerySegmentSpec(intervalSpec)
      .setDimensions(new DefaultDimensionSpec("dimA", null))
      .setAggregatorSpecs(new LongSumAggregatorFactory("metA", "metA"))
      .setLimitSpec(
          new DefaultLimitSpec(
              Collections.singletonList(new OrderByColumnSpec("dimA", OrderByColumnSpec.Direction.ASCENDING)),
              1
          )
      )
      .setHavingSpec(new GreaterThanHavingSpec("metA", 110))
      .setGranularity(Granularities.ALL)
      .build();
  Sequence<ResultRow> queryResult = theRunner.run(QueryPlus.wrap(query), ResponseContext.createEmpty());
  List<ResultRow> results = queryResult.toList();
  ResultRow expectedRow = GroupByQueryRunnerTestHelper.createExpectedRow(query, "1970-01-01T00:00:00.000Z", "dimA", "world", "metA", 150L);
  Assert.assertEquals(1, results.size());
  Assert.assertEquals(expectedRow, results.get(0));
}
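A having spec filters aggregated rows before the limit spec is applied, which is also why the first test above rejects forced limit push down when a having spec is present. A minimal sketch of the filter-then-limit pair, assuming druid-processing on the classpath; the class name and println are illustrative:

import java.util.Collections;
import org.apache.druid.query.groupby.having.GreaterThanHavingSpec;
import org.apache.druid.query.groupby.orderby.DefaultLimitSpec;
import org.apache.druid.query.groupby.orderby.OrderByColumnSpec;

public class HavingThenLimitSketch
{
  public static void main(String[] args)
  {
    // Keep only rows whose aggregated "metA" exceeds 110 ...
    GreaterThanHavingSpec having = new GreaterThanHavingSpec("metA", 110);
    // ... then sort the survivors by "dimA" ascending and keep one row,
    // matching the query in the test above.
    DefaultLimitSpec limit = new DefaultLimitSpec(
        Collections.singletonList(new OrderByColumnSpec("dimA", OrderByColumnSpec.Direction.ASCENDING)),
        1
    );
    System.out.println(having + " then " + limit);
  }
}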