Use of io.druid.query.aggregation.post.ConstantPostAggregator in project druid by druid-io.
The class GroupByQueryRunnerTest, method testSubqueryWithMultiColumnAggregators.
@Test
public void testSubqueryWithMultiColumnAggregators() {
  final GroupByQuery subquery = GroupByQuery.builder()
      .setDataSource(QueryRunnerTestHelper.dataSource)
      .setQuerySegmentSpec(QueryRunnerTestHelper.firstToThird)
      .setDimensions(Lists.<DimensionSpec>newArrayList(new DefaultDimensionSpec("quality", "alias")))
      .setDimFilter(new JavaScriptDimFilter("market", "function(dim){ return true; }", null, JavaScriptConfig.getEnabledInstance()))
      .setAggregatorSpecs(Arrays.asList(
          QueryRunnerTestHelper.rowsCount,
          new DoubleSumAggregatorFactory("idx_subagg", "index"),
          new JavaScriptAggregatorFactory(
              "js_agg",
              Arrays.asList("index", "market"),
              "function(current, index, dim){return current + index + dim.length;}",
              "function(){return 0;}",
              "function(a,b){return a + b;}",
              JavaScriptConfig.getEnabledInstance()
          )
      ))
      .setPostAggregatorSpecs(Arrays.<PostAggregator>asList(
          new ArithmeticPostAggregator(
              "idx_subpostagg",
              "+",
              Arrays.asList(
                  new FieldAccessPostAggregator("the_idx_subagg", "idx_subagg"),
                  new ConstantPostAggregator("thousand", 1000)
              )
          )
      ))
      .setHavingSpec(new BaseHavingSpec() {
        @Override
        public boolean eval(Row row) {
          return (row.getFloatMetric("idx_subpostagg") < 3800);
        }

        @Override
        public byte[] getCacheKey() {
          return new byte[0];
        }
      })
      .addOrderByColumn("alias")
      .setGranularity(QueryRunnerTestHelper.dayGran)
      .build();
  final GroupByQuery query = GroupByQuery.builder()
      .setDataSource(subquery)
      .setQuerySegmentSpec(QueryRunnerTestHelper.firstToThird)
      .setDimensions(Lists.<DimensionSpec>newArrayList(new DefaultDimensionSpec("alias", "alias")))
      .setAggregatorSpecs(Arrays.asList(
          new LongSumAggregatorFactory("rows", "rows"),
          new LongSumAggregatorFactory("idx", "idx_subpostagg"),
          new DoubleSumAggregatorFactory("js_outer_agg", "js_agg")
      ))
      .setPostAggregatorSpecs(Arrays.<PostAggregator>asList(
          new ArithmeticPostAggregator(
              "idx_post",
              "+",
              Arrays.asList(
                  new FieldAccessPostAggregator("the_idx_agg", "idx"),
                  new ConstantPostAggregator("ten_thousand", 10000)
              )
          )
      ))
      .setLimitSpec(new DefaultLimitSpec(Arrays.asList(new OrderByColumnSpec("alias", OrderByColumnSpec.Direction.DESCENDING)), 5))
      .setGranularity(QueryRunnerTestHelper.dayGran)
      .build();
  List<Row> expectedResults = Arrays.asList(
      GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "alias", "travel", "rows", 1L, "idx_post", 11119.0, "idx", 1119L, "js_outer_agg", 123.92274475097656),
      GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "alias", "technology", "rows", 1L, "idx_post", 11078.0, "idx", 1078L, "js_outer_agg", 82.62254333496094),
      GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "alias", "news", "rows", 1L, "idx_post", 11121.0, "idx", 1121L, "js_outer_agg", 125.58358001708984),
      GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "alias", "health", "rows", 1L, "idx_post", 11120.0, "idx", 1120L, "js_outer_agg", 124.13470458984375),
      GroupByQueryRunnerTestHelper.createExpectedRow("2011-04-01", "alias", "entertainment", "rows", 1L, "idx_post", 11158.0, "idx", 1158L, "js_outer_agg", 162.74722290039062)
  );
  // Subqueries are handled by the ToolChest
  Iterable<Row> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  TestHelper.assertExpectedObjects(expectedResults, results, "");
}
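The subquery above uses ConstantPostAggregator only as one operand of an ArithmeticPostAggregator ("the_idx_subagg" + 1000), and the outer query does the same with 10000. Below is a minimal sketch, not taken from the Druid test suite, of how such a post-aggregator pair evaluates against a single aggregated row via PostAggregator.compute(Map); the class name, the row contents, and the printed values are illustrative assumptions.

import java.util.Map;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;

import io.druid.query.aggregation.PostAggregator;
import io.druid.query.aggregation.post.ArithmeticPostAggregator;
import io.druid.query.aggregation.post.ConstantPostAggregator;
import io.druid.query.aggregation.post.FieldAccessPostAggregator;

public class ConstantPostAggregatorSketch {
  public static void main(String[] args) {
    // A ConstantPostAggregator ignores the row and always returns its constant value.
    ConstantPostAggregator thousand = new ConstantPostAggregator("thousand", 1000);

    // Hypothetical aggregated row, shaped like the subquery output ("idx_subagg" is the summed index).
    Map<String, Object> row = ImmutableMap.<String, Object>of("idx_subagg", 2800.0);
    System.out.println(thousand.compute(row)); // 1000, regardless of the row contents

    // Mirrors the "idx_subpostagg" post-aggregator above: the_idx_subagg + 1000.
    PostAggregator idxSubPostAgg = new ArithmeticPostAggregator(
        "idx_subpostagg",
        "+",
        ImmutableList.<PostAggregator>of(
            new FieldAccessPostAggregator("the_idx_subagg", "idx_subagg"),
            thousand
        )
    );
    System.out.println(idxSubPostAgg.compute(row)); // 3800.0; the having spec above would reject this row (not < 3800)
  }
}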
Use of io.druid.query.aggregation.post.ConstantPostAggregator in project druid by druid-io.
The class DefaultLimitSpecTest, method testBuildWithExplicitOrder.
@Test
public void testBuildWithExplicitOrder() {
  DefaultLimitSpec limitSpec = new DefaultLimitSpec(ImmutableList.of(new OrderByColumnSpec("k1", OrderByColumnSpec.Direction.ASCENDING)), 2);
  Function<Sequence<Row>, Sequence<Row>> limitFn = limitSpec.build(
      ImmutableList.<DimensionSpec>of(new DefaultDimensionSpec("k1", "k1")),
      ImmutableList.<AggregatorFactory>of(new LongSumAggregatorFactory("k2", "k2")),
      ImmutableList.<PostAggregator>of(new ConstantPostAggregator("k3", 1L))
  );
  Assert.assertEquals(ImmutableList.of(testRowsList.get(0), testRowsList.get(1)), Sequences.toList(limitFn.apply(testRowsSequence), new ArrayList<Row>()));

  // if there is an aggregator with the same name, it is used to build the ordering
  limitFn = limitSpec.build(
      ImmutableList.<DimensionSpec>of(new DefaultDimensionSpec("k1", "k1")),
      ImmutableList.<AggregatorFactory>of(new LongSumAggregatorFactory("k1", "k1")),
      ImmutableList.<PostAggregator>of(new ConstantPostAggregator("k3", 1L))
  );
  Assert.assertEquals(ImmutableList.of(testRowsList.get(2), testRowsList.get(0)), Sequences.toList(limitFn.apply(testRowsSequence), new ArrayList<Row>()));

  // if there is a post-aggregator with the same name, it is used to build the ordering
  limitFn = limitSpec.build(
      ImmutableList.<DimensionSpec>of(new DefaultDimensionSpec("k1", "k1")),
      ImmutableList.<AggregatorFactory>of(new LongSumAggregatorFactory("k2", "k2")),
      ImmutableList.<PostAggregator>of(new ArithmeticPostAggregator("k1", "+", ImmutableList.<PostAggregator>of(new ConstantPostAggregator("x", 1), new ConstantPostAggregator("y", 1))))
  );
  Assert.assertEquals((List) ImmutableList.of(testRowsList.get(2), testRowsList.get(0)), (List) Sequences.toList(limitFn.apply(testRowsSequence), new ArrayList<Row>()));

  // an ExpressionPostAggregator with the same name produces the same result
  limitFn = limitSpec.build(
      ImmutableList.<DimensionSpec>of(new DefaultDimensionSpec("k1", "k1")),
      ImmutableList.<AggregatorFactory>of(new LongSumAggregatorFactory("k2", "k2")),
      ImmutableList.<PostAggregator>of(new ExpressionPostAggregator("k1", "1 + 1"))
  );
  Assert.assertEquals((List) ImmutableList.of(testRowsList.get(2), testRowsList.get(0)), (List) Sequences.toList(limitFn.apply(testRowsSequence), new ArrayList<Row>()));
}
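The last two cases above order by a post-aggregator named "k1" rather than by the dimension. As a hedged sketch (not part of the test), the two "k1" post-aggregators can be evaluated directly to see why the comment says they produce the same result: neither reads a field, and both compute to 2 for any row. The class name and the empty row are illustrative, and the exact numeric type returned by each compute() call may differ.

import java.util.Map;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;

import io.druid.query.aggregation.PostAggregator;
import io.druid.query.aggregation.post.ArithmeticPostAggregator;
import io.druid.query.aggregation.post.ConstantPostAggregator;
import io.druid.query.aggregation.post.ExpressionPostAggregator;

public class PostAggregatorOrderingSketch {
  public static void main(String[] args) {
    // "k1" as an arithmetic post-aggregator: two constants added together.
    PostAggregator arithmeticK1 = new ArithmeticPostAggregator(
        "k1",
        "+",
        ImmutableList.<PostAggregator>of(new ConstantPostAggregator("x", 1), new ConstantPostAggregator("y", 1))
    );
    // "k1" as an expression post-aggregator computing the same value.
    PostAggregator expressionK1 = new ExpressionPostAggregator("k1", "1 + 1");

    // Neither post-aggregator reads a field, so an empty row is enough here.
    Map<String, Object> anyRow = ImmutableMap.of();
    System.out.println(arithmeticK1.compute(anyRow)); // 2.0
    System.out.println(expressionK1.compute(anyRow)); // 2 (numeric result; exact type may differ)
  }
}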
Use of io.druid.query.aggregation.post.ConstantPostAggregator in project druid by druid-io.
The class TopNBinaryFnBenchmark, method setUp.
@Override
protected void setUp() throws Exception {
  final ConstantPostAggregator constant = new ConstantPostAggregator("const", 1L);
  final FieldAccessPostAggregator rowsPostAgg = new FieldAccessPostAggregator("rows", "rows");
  final FieldAccessPostAggregator indexPostAgg = new FieldAccessPostAggregator("index", "index");
  final List<AggregatorFactory> aggregatorFactories = new ArrayList<>();
  aggregatorFactories.add(new CountAggregatorFactory("rows"));
  aggregatorFactories.add(new LongSumAggregatorFactory("index", "index"));
  for (int i = 1; i < aggCount; i++) {
    aggregatorFactories.add(new CountAggregatorFactory("rows" + i));
  }
  final List<PostAggregator> postAggregators = new ArrayList<>();
  for (int i = 0; i < postAggCount; i++) {
    postAggregators.add(new ArithmeticPostAggregator("addrowsindexconstant" + i, "+", Lists.newArrayList(constant, rowsPostAgg, indexPostAgg)));
  }
  final DateTime currTime = new DateTime();
  List<Map<String, Object>> list = new ArrayList<>();
  for (int i = 0; i < threshold; i++) {
    Map<String, Object> res = new HashMap<>();
    res.put("testdim", "" + i);
    res.put("rows", 1L);
    for (int j = 0; j < aggCount; j++) {
      res.put("rows" + j, 1L);
    }
    res.put("index", 1L);
    list.add(res);
  }
  result1 = new Result<>(currTime, new TopNResultValue(list));
  List<Map<String, Object>> list2 = new ArrayList<>();
  for (int i = 0; i < threshold; i++) {
    Map<String, Object> res = new HashMap<>();
    res.put("testdim", "" + i);
    res.put("rows", 2L);
    for (int j = 0; j < aggCount; j++) {
      res.put("rows" + j, 2L);
    }
    res.put("index", 2L);
    list2.add(res);
  }
  result2 = new Result<>(currTime, new TopNResultValue(list2));
  fn = new TopNBinaryFn(
      TopNResultMerger.identity,
      Granularities.ALL,
      new DefaultDimensionSpec("testdim", null),
      new NumericTopNMetricSpec("index"),
      100,
      aggregatorFactories,
      postAggregators
  );
}
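Each "addrowsindexconstantN" post-aggregator built in setUp() adds the shared constant 1 to the "rows" and "index" fields of a merged row. Below is a minimal sketch, not part of the benchmark, of what one of them computes against a row shaped like those placed in result2 above; the class name is illustrative, and the printed value assumes ArithmeticPostAggregator combines its operands as doubles.

import java.util.HashMap;
import java.util.Map;

import com.google.common.collect.Lists;

import io.druid.query.aggregation.PostAggregator;
import io.druid.query.aggregation.post.ArithmeticPostAggregator;
import io.druid.query.aggregation.post.ConstantPostAggregator;
import io.druid.query.aggregation.post.FieldAccessPostAggregator;

public class TopNPostAggregatorSketch {
  public static void main(String[] args) {
    ConstantPostAggregator constant = new ConstantPostAggregator("const", 1L);
    FieldAccessPostAggregator rowsPostAgg = new FieldAccessPostAggregator("rows", "rows");
    FieldAccessPostAggregator indexPostAgg = new FieldAccessPostAggregator("index", "index");

    // Same shape as the "addrowsindexconstant" post-aggregators in setUp().
    PostAggregator addRowsIndexConstant = new ArithmeticPostAggregator(
        "addrowsindexconstant0", "+", Lists.<PostAggregator>newArrayList(constant, rowsPostAgg, indexPostAgg));

    // A row like those in result2: rows = 2, index = 2.
    Map<String, Object> row = new HashMap<>();
    row.put("rows", 2L);
    row.put("index", 2L);

    System.out.println(addRowsIndexConstant.compute(row)); // 1 + 2 + 2 = 5.0
  }
}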
Use of io.druid.query.aggregation.post.ConstantPostAggregator in project druid by druid-io.
The class TopNQueryQueryToolChestTest, method testComputeCacheKeyWithDifferentPostAgg.
@Test
public void testComputeCacheKeyWithDifferentPostAgg() throws Exception {
  final TopNQuery query1 = new TopNQuery(
      new TableDataSource("dummy"),
      VirtualColumns.EMPTY,
      new DefaultDimensionSpec("test", "test"),
      new NumericTopNMetricSpec("post"),
      3,
      new MultipleIntervalSegmentSpec(ImmutableList.of(new Interval("2015-01-01/2015-01-02"))),
      null,
      Granularities.ALL,
      ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("metric1")),
      ImmutableList.<PostAggregator>of(new ConstantPostAggregator("post", 10)),
      null
  );
  final TopNQuery query2 = new TopNQuery(
      new TableDataSource("dummy"),
      VirtualColumns.EMPTY,
      new DefaultDimensionSpec("test", "test"),
      new NumericTopNMetricSpec("post"),
      3,
      new MultipleIntervalSegmentSpec(ImmutableList.of(new Interval("2015-01-01/2015-01-02"))),
      null,
      Granularities.ALL,
      ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("metric1")),
      ImmutableList.<PostAggregator>of(
          new ArithmeticPostAggregator(
              "post",
              "+",
              ImmutableList.<PostAggregator>of(new FieldAccessPostAggregator(null, "metric1"), new FieldAccessPostAggregator(null, "metric1"))
          )
      ),
      null
  );
  final CacheStrategy<Result<TopNResultValue>, Object, TopNQuery> strategy1 = new TopNQueryQueryToolChest(null, null).getCacheStrategy(query1);
  final CacheStrategy<Result<TopNResultValue>, Object, TopNQuery> strategy2 = new TopNQueryQueryToolChest(null, null).getCacheStrategy(query2);
  Assert.assertFalse(Arrays.equals(strategy1.computeCacheKey(query1), strategy2.computeCacheKey(query2)));
}
Use of io.druid.query.aggregation.post.ConstantPostAggregator in project druid by druid-io.
The class TopNQueryQueryToolChestTest, method testCacheStrategy.
@Test
public void testCacheStrategy() throws Exception {
  CacheStrategy<Result<TopNResultValue>, Object, TopNQuery> strategy = new TopNQueryQueryToolChest(null, null).getCacheStrategy(
      new TopNQuery(
          new TableDataSource("dummy"),
          VirtualColumns.EMPTY,
          new DefaultDimensionSpec("test", "test"),
          new NumericTopNMetricSpec("metric1"),
          3,
          new MultipleIntervalSegmentSpec(ImmutableList.of(new Interval("2015-01-01/2015-01-02"))),
          null,
          Granularities.ALL,
          ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("metric1")),
          ImmutableList.<PostAggregator>of(new ConstantPostAggregator("post", 10)),
          null
      )
  );
  final Result<TopNResultValue> result = new Result<>(
      // test timestamps that result in integer size millis
      new DateTime(123L),
      new TopNResultValue(Arrays.asList(ImmutableMap.<String, Object>of("test", "val1", "metric1", 2)))
  );
  Object preparedValue = strategy.prepareForCache().apply(result);
  ObjectMapper objectMapper = new DefaultObjectMapper();
  Object fromCacheValue = objectMapper.readValue(objectMapper.writeValueAsBytes(preparedValue), strategy.getCacheObjectClazz());
  Result<TopNResultValue> fromCacheResult = strategy.pullFromCache().apply(fromCacheValue);
  Assert.assertEquals(result, fromCacheResult);
}