Use of org.apache.druid.query.dimension.DefaultDimensionSpec in project druid by druid-io.
From class GroupByLimitPushDownInsufficientBufferTest, method testPartialLimitPushDownMerge:
@Test
public void testPartialLimitPushDownMerge()
{
  // one segment's results use limit push down, the other doesn't because of insufficient buffer capacity
  QueryToolChest<ResultRow, GroupByQuery> toolChest = groupByFactory.getToolchest();
  QueryRunner<ResultRow> theRunner = new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(groupByFactory.mergeRunners(executorService, getRunner1())),
      (QueryToolChest) toolChest
  );
  QueryRunner<ResultRow> theRunner2 = new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(tooSmallGroupByFactory.mergeRunners(executorService, getRunner2())),
      (QueryToolChest) toolChest
  );
  // theRunner3 combines the two runners' outputs into one sequence ordered by the query's comparator
  QueryRunner<ResultRow> theRunner3 = new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(
          new QueryRunner<ResultRow>()
          {
            @Override
            public Sequence<ResultRow> run(QueryPlus<ResultRow> queryPlus, ResponseContext responseContext)
            {
              return Sequences
                  .simple(ImmutableList.of(
                      theRunner.run(queryPlus, responseContext),
                      theRunner2.run(queryPlus, responseContext)
                  ))
                  .flatMerge(Function.identity(), queryPlus.getQuery().getResultOrdering());
            }
          }
      ),
      (QueryToolChest) toolChest
  );

  QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(
      Collections.singletonList(Intervals.utc(0, 1000000))
  );
  GroupByQuery query = GroupByQuery
      .builder()
      .setDataSource("blah")
      .setQuerySegmentSpec(intervalSpec)
      .setDimensions(new DefaultDimensionSpec("dimA", null))
      .setAggregatorSpecs(new LongSumAggregatorFactory("metA", "metA"))
      .setLimitSpec(
          new DefaultLimitSpec(
              Collections.singletonList(new OrderByColumnSpec("dimA", OrderByColumnSpec.Direction.DESCENDING)),
              3
          )
      )
      .setGranularity(Granularities.ALL)
      .build();

  Sequence<ResultRow> queryResult = theRunner3.run(QueryPlus.wrap(query), ResponseContext.createEmpty());
  List<ResultRow> results = queryResult.toList();

  ResultRow expectedRow0 = GroupByQueryRunnerTestHelper.createExpectedRow(
      query,
      "1970-01-01T00:00:00.000Z",
      "dimA", "zortaxx",
      "metA", 999L
  );
  ResultRow expectedRow1 = GroupByQueryRunnerTestHelper.createExpectedRow(
      query,
      "1970-01-01T00:00:00.000Z",
      "dimA", "zebra",
      "metA", 180L
  );
  ResultRow expectedRow2 = GroupByQueryRunnerTestHelper.createExpectedRow(
      query,
      "1970-01-01T00:00:00.000Z",
      "dimA", "world",
      "metA", 150L
  );

  Assert.assertEquals(3, results.size());
  Assert.assertEquals(expectedRow0, results.get(0));
  Assert.assertEquals(expectedRow1, results.get(1));
  Assert.assertEquals(expectedRow2, results.get(2));
}
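The interesting part above is the anonymous inner runner: theRunner and theRunner2 each emit rows already sorted by the query's result ordering (dimA descending, from the limit spec), and flatMerge interleaves the two sorted sequences by that same comparator before the outer mergeResults applies the final limit of 3. A minimal, hedged sketch of just that merge step, using only calls that appear in the test (the empty sequences are stand-ins so the sketch is self-contained):

// seqA/seqB stand in for theRunner.run(...) and theRunner2.run(...);
// flatMerge interleaves two already-ordered sequences by the query's comparator.
Sequence<ResultRow> seqA = Sequences.empty();
Sequence<ResultRow> seqB = Sequences.empty();
Sequence<ResultRow> merged = Sequences
    .simple(ImmutableList.of(seqA, seqB))
    .flatMerge(Function.identity(), query.getResultOrdering());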
Use of org.apache.druid.query.dimension.DefaultDimensionSpec in project druid by druid-io.
From class GroupByMultiSegmentTest, method testHavingAndNoLimitPushDown:
@Test
public void testHavingAndNoLimitPushDown()
{
  QueryToolChest<ResultRow, GroupByQuery> toolChest = groupByFactory.getToolchest();
  QueryRunner<ResultRow> theRunner = new FinalizeResultsQueryRunner<>(
      toolChest.mergeResults(groupByFactory.mergeRunners(executorService, makeGroupByMultiRunners())),
      (QueryToolChest) toolChest
  );

  QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(
      Collections.singletonList(Intervals.utc(0, 1000000))
  );
  GroupByQuery query = GroupByQuery
      .builder()
      .setDataSource("blah")
      .setQuerySegmentSpec(intervalSpec)
      .setDimensions(new DefaultDimensionSpec("dimA", null))
      .setAggregatorSpecs(new LongSumAggregatorFactory("metA", "metA"))
      .setLimitSpec(
          new DefaultLimitSpec(
              Collections.singletonList(new OrderByColumnSpec("dimA", OrderByColumnSpec.Direction.ASCENDING)),
              1
          )
      )
      // the having filter runs after merging, so the limit cannot be pushed down to segments
      .setHavingSpec(new GreaterThanHavingSpec("metA", 110))
      .setGranularity(Granularities.ALL)
      .build();

  Sequence<ResultRow> queryResult = theRunner.run(QueryPlus.wrap(query), ResponseContext.createEmpty());
  List<ResultRow> results = queryResult.toList();

  ResultRow expectedRow = GroupByQueryRunnerTestHelper.createExpectedRow(
      query,
      "1970-01-01T00:00:00.000Z",
      "dimA", "world",
      "metA", 150L
  );

  Assert.assertEquals(1, results.size());
  Assert.assertEquals(expectedRow, results.get(0));
}
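Why "NoLimitPushDown": applying the limit of 1 inside each segment could drop rows that only survive the having filter after merging, so the limit must be deferred. A hedged one-line check of that expectation (isApplyLimitPushDown() is GroupByQuery's accessor; treat the exact behavior as an assumption to confirm against your Druid version):

// Sketch: a having spec should disable limit push down for this query.
Assert.assertFalse(query.isApplyLimitPushDown());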
Use of org.apache.druid.query.dimension.DefaultDimensionSpec in project druid by druid-io.
From class GroupByQueryQueryToolChestTest, method testQueryCacheKeyWithLimitSpec:
@Test
public void testQueryCacheKeyWithLimitSpec()
{
  final GroupByQuery query1 = GroupByQuery
      .builder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(new DefaultDimensionSpec("quality", "alias"))
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
      .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
      .setLimitSpec(new DefaultLimitSpec(null, 100))
      .overrideContext(ImmutableMap.of(GroupByQueryConfig.CTX_KEY_APPLY_LIMIT_PUSH_DOWN, "true"))
      .build();
  final GroupByQuery query2 = GroupByQuery
      .builder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(new DefaultDimensionSpec("quality", "alias"))
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
      .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
      .setLimitSpec(new DefaultLimitSpec(null, 1000))
      .build();
  final GroupByQuery queryNoLimit = GroupByQuery
      .builder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(new DefaultDimensionSpec("quality", "alias"))
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
      .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
      .build();

  final CacheStrategy<ResultRow, Object, GroupByQuery> strategy1 =
      new GroupByQueryQueryToolChest(null).getCacheStrategy(query1);
  final CacheStrategy<ResultRow, Object, GroupByQuery> strategy2 =
      new GroupByQueryQueryToolChest(null).getCacheStrategy(query2);

  // a different limit value and the absence of a limit spec both change the per-segment cache key
  Assert.assertFalse(Arrays.equals(strategy1.computeCacheKey(query1), strategy2.computeCacheKey(query2)));
  Assert.assertFalse(Arrays.equals(strategy1.computeCacheKey(query1), strategy2.computeCacheKey(queryNoLimit)));
}
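As a baseline for the two assertFalse checks, a hedged sanity sketch: running the same query through the same strategy yields byte-identical keys, so the differences above are attributable to the limit spec alone:

// Same query, same strategy: the per-segment cache key is deterministic.
Assert.assertArrayEquals(strategy1.computeCacheKey(query1), strategy1.computeCacheKey(query1));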
Use of org.apache.druid.query.dimension.DefaultDimensionSpec in project druid by druid-io.
From class GroupByQueryQueryToolChestTest, method testResultsAsArraysDayGran:
@Test
public void testResultsAsArraysDayGran()
{
  final GroupByQuery query = new GroupByQuery.Builder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setGranularity(Granularities.DAY)
      .setDimensions(new DefaultDimensionSpec("col", "dim"))
      .setInterval(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .setAggregatorSpecs(QueryRunnerTestHelper.COMMON_DOUBLE_AGGREGATORS)
      .setPostAggregatorSpecs(ImmutableList.of(QueryRunnerTestHelper.CONSTANT))
      .build();

  QueryToolChestTestHelper.assertArrayResultsEquals(
      ImmutableList.of(
          new Object[]{DateTimes.of("2000-01-01").getMillis(), "foo", 1L, 2L, 3L, 1L},
          new Object[]{DateTimes.of("2000-01-02").getMillis(), "bar", 4L, 5L, 6L, 1L}
      ),
      new GroupByQueryQueryToolChest(null, null).resultsAsArrays(
          query,
          Sequences.simple(
              ImmutableList.of(
                  makeRow(query, "2000-01-01", "dim", "foo", "rows", 1L, "index", 2L, "uniques", 3L, "const", 1L),
                  makeRow(query, "2000-01-02", "dim", "bar", "rows", 4L, "index", 5L, "uniques", 6L, "const", 1L)
              )
          )
      )
  );
}
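The array rows are positional: because granularity is DAY (not ALL), slot 0 is the bucket timestamp, followed by dimensions, aggregators, and post-aggregators in query order. A minimal consumption sketch over the same expected data (the local rows list is illustrative, not part of the test):

// Unpacking one array-form result row by position.
List<Object[]> rows = ImmutableList.of(
    new Object[]{DateTimes.of("2000-01-01").getMillis(), "foo", 1L, 2L, 3L, 1L}
);
for (Object[] row : rows) {
  long bucketMillis = (long) row[0]; // DAY bucket start
  String dim = (String) row[1];      // "dim" output of DefaultDimensionSpec("col", "dim")
  long rowCount = (long) row[2];     // first aggregator ("rows")
}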
Use of org.apache.druid.query.dimension.DefaultDimensionSpec in project druid by druid-io.
From class GroupByQueryQueryToolChestTest, method testQueryCacheKeyWithLimitSpecPushDownUsingContext:
@Test
public void testQueryCacheKeyWithLimitSpecPushDownUsingContext()
{
  final GroupByQuery.Builder builder = GroupByQuery
      .builder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(new DefaultDimensionSpec("quality", "alias"))
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
      .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
      .setLimitSpec(new DefaultLimitSpec(null, 100));
  final GroupByQuery query1 = builder
      .overrideContext(ImmutableMap.of(GroupByQueryConfig.CTX_KEY_APPLY_LIMIT_PUSH_DOWN, "true"))
      .build();
  final GroupByQuery query2 = builder
      .overrideContext(ImmutableMap.of(GroupByQueryConfig.CTX_KEY_APPLY_LIMIT_PUSH_DOWN, "false"))
      .build();

  final CacheStrategy<ResultRow, Object, GroupByQuery> strategy1 =
      new GroupByQueryQueryToolChest(null).getCacheStrategy(query1);
  final CacheStrategy<ResultRow, Object, GroupByQuery> strategy2 =
      new GroupByQueryQueryToolChest(null).getCacheStrategy(query2);

  // the push-down context flag changes the per-segment cache key...
  Assert.assertFalse(Arrays.equals(strategy1.computeCacheKey(query1), strategy2.computeCacheKey(query2)));
  // ...but not the result-level cache key
  Assert.assertTrue(Arrays.equals(strategy1.computeResultLevelCacheKey(query1), strategy2.computeResultLevelCacheKey(query2)));
}
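The asymmetry in the final two assertions is the point: the flag changes how partial per-segment results are produced (hence a different per-segment key) but not the finalized result (hence an identical result-level key). A hedged extension of the same pattern, assuming Builder.overrideContext merges the new entry over the previous one so query3 matches query1:

// Sketch: flipping the flag back to "true" should reproduce query1's per-segment key.
final GroupByQuery query3 = builder
    .overrideContext(ImmutableMap.of(GroupByQueryConfig.CTX_KEY_APPLY_LIMIT_PUSH_DOWN, "true"))
    .build();
final CacheStrategy<ResultRow, Object, GroupByQuery> strategy3 =
    new GroupByQueryQueryToolChest(null).getCacheStrategy(query3);
Assert.assertArrayEquals(strategy1.computeCacheKey(query1), strategy3.computeCacheKey(query3));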