Use of io.druid.query.aggregation.PostAggregator in project druid by druid-io.
The class QuantileSqlAggregatorTest, method testQuantileOnComplexColumn.
@Test
public void testQuantileOnComplexColumn() throws Exception {
  try (final DruidPlanner planner = plannerFactory.createPlanner(null)) {
    final String sql = "SELECT\n"
        + "APPROX_QUANTILE(hist_m1, 0.01),\n"
        + "APPROX_QUANTILE(hist_m1, 0.5, 50),\n"
        + "APPROX_QUANTILE(hist_m1, 0.98, 200),\n"
        + "APPROX_QUANTILE(hist_m1, 0.99),\n"
        + "APPROX_QUANTILE(hist_m1, 0.99) FILTER(WHERE dim1 = 'abc'),\n"
        + "APPROX_QUANTILE(hist_m1, 0.999) FILTER(WHERE dim1 <> 'abc'),\n"
        + "APPROX_QUANTILE(hist_m1, 0.999) FILTER(WHERE dim1 = 'abc')\n"
        + "FROM foo";
    final PlannerResult plannerResult = planner.plan(sql);

    // Verify results
    final List<Object[]> results = Sequences.toList(plannerResult.run(), new ArrayList<Object[]>());
    final List<Object[]> expectedResults = ImmutableList.of(
        new Object[]{1.0, 3.0, 5.880000114440918, 5.940000057220459, 6.0, 4.994999885559082, 6.0}
    );
    Assert.assertEquals(expectedResults.size(), results.size());
    for (int i = 0; i < expectedResults.size(); i++) {
      Assert.assertArrayEquals(expectedResults.get(i), results.get(i));
    }

    // Verify the planned native query: seven APPROX_QUANTILE calls collapse into
    // four histogram aggregators, each read by one or more QuantilePostAggregators.
    Assert.assertEquals(
        Druids.newTimeseriesQueryBuilder()
              .dataSource(CalciteTests.DATASOURCE1)
              .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(Filtration.eternity())))
              .granularity(Granularities.ALL)
              .aggregators(ImmutableList.of(
                  new ApproximateHistogramFoldingAggregatorFactory("a0:agg", "hist_m1", null, null, null, null),
                  new ApproximateHistogramFoldingAggregatorFactory("a2:agg", "hist_m1", 200, null, null, null),
                  new FilteredAggregatorFactory(
                      new ApproximateHistogramFoldingAggregatorFactory("a4:agg", "hist_m1", null, null, null, null),
                      new SelectorDimFilter("dim1", "abc", null)
                  ),
                  new FilteredAggregatorFactory(
                      new ApproximateHistogramFoldingAggregatorFactory("a5:agg", "hist_m1", null, null, null, null),
                      new NotDimFilter(new SelectorDimFilter("dim1", "abc", null))
                  )
              ))
              .postAggregators(ImmutableList.<PostAggregator>of(
                  new QuantilePostAggregator("a0", "a0:agg", 0.01f),
                  new QuantilePostAggregator("a1", "a0:agg", 0.50f),
                  new QuantilePostAggregator("a2", "a2:agg", 0.98f),
                  new QuantilePostAggregator("a3", "a0:agg", 0.99f),
                  new QuantilePostAggregator("a4", "a4:agg", 0.99f),
                  new QuantilePostAggregator("a5", "a5:agg", 0.999f),
                  new QuantilePostAggregator("a6", "a4:agg", 0.999f)
              ))
              .context(ImmutableMap.<String, Object>of("skipEmptyBuckets", true))
              .build(),
        Iterables.getOnlyElement(queryLogHook.getRecordedQueries())
    );
  }
}
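The notable feature of the expected native query above is aggregator reuse: seven APPROX_QUANTILE calls plan into only four ApproximateHistogramFoldingAggregatorFactory instances, because quantiles that agree on column, resolution, and filter can all read from one folded histogram. A minimal sketch of that read-out side, using only the QuantilePostAggregator constructor already shown in the test:

import io.druid.query.aggregation.PostAggregator;
import io.druid.query.aggregation.histogram.QuantilePostAggregator;
import java.util.Arrays;
import java.util.List;

public class SharedHistogramSketch {
  public static void main(String[] args) {
    // Three different probabilities, all reading the single histogram "a0:agg".
    List<PostAggregator> postAggs = Arrays.<PostAggregator>asList(
        new QuantilePostAggregator("a0", "a0:agg", 0.01f),
        new QuantilePostAggregator("a1", "a0:agg", 0.50f),
        new QuantilePostAggregator("a3", "a0:agg", 0.99f)
    );
    for (PostAggregator postAgg : postAggs) {
      System.out.println(postAgg.getName() + " reads a0:agg");
    }
  }
}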
Use of io.druid.query.aggregation.PostAggregator in project druid by druid-io.
The class ApproximateHistogramGroupByQueryTest, method testGroupByWithApproximateHistogramAgg.
@Test
public void testGroupByWithApproximateHistogramAgg() {
  ApproximateHistogramAggregatorFactory aggFactory = new ApproximateHistogramAggregatorFactory(
      "apphisto", "index", 10, 5, Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY
  );
  GroupByQuery query = new GroupByQuery.Builder()
      .setDataSource(QueryRunnerTestHelper.dataSource)
      .setGranularity(QueryRunnerTestHelper.allGran)
      .setDimensions(Arrays.<DimensionSpec>asList(
          new DefaultDimensionSpec(QueryRunnerTestHelper.marketDimension, "marketalias")))
      .setInterval(QueryRunnerTestHelper.fullOnInterval)
      .setLimitSpec(new DefaultLimitSpec(
          Lists.newArrayList(new OrderByColumnSpec("marketalias", OrderByColumnSpec.Direction.DESCENDING)), 1))
      .setAggregatorSpecs(Lists.newArrayList(QueryRunnerTestHelper.rowsCount, aggFactory))
      .setPostAggregatorSpecs(Arrays.<PostAggregator>asList(new QuantilePostAggregator("quantile", "apphisto", 0.5f)))
      .build();
  List<Row> expectedResults = Arrays.asList(
      GroupByQueryRunnerTestHelper.createExpectedRow(
          "1970-01-01T00:00:00.000Z",
          "marketalias", "upfront",
          "rows", 186L,
          "quantile", 880.9881f,
          "apphisto", new Histogram(
              new float[]{214.97299194335938f, 545.9906005859375f, 877.0081787109375f, 1208.0257568359375f, 1539.0433349609375f, 1870.06103515625f},
              new double[]{0.0, 67.53287506103516, 72.22068786621094, 31.984678268432617, 14.261756896972656}
          )
      )
  );
  Iterable<Row> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  TestHelper.assertExpectedObjects(expectedResults, results, "approx-histo");
}
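For context on what the "apphisto" aggregator accumulates and what QuantilePostAggregator("quantile", "apphisto", 0.5f) reads out of it, here is a minimal sketch. It assumes the druid-histogram extension's ApproximateHistogram exposes offer(float) and getQuantiles(float[]) roughly as the quantile post-aggregator uses them internally; treat the exact signatures as an assumption rather than a documented API.

import io.druid.query.aggregation.histogram.ApproximateHistogram;

public class ApproxQuantileSketch {
  public static void main(String[] args) {
    // Resolution 10, matching the aggFactory in the test above.
    ApproximateHistogram histo = new ApproximateHistogram(10);
    for (float value : new float[]{100f, 400f, 800f, 1200f, 1600f}) {
      histo.offer(value); // assumed ingestion method
    }
    // probability 0.5f mirrors QuantilePostAggregator("quantile", "apphisto", 0.5f)
    float median = histo.getQuantiles(new float[]{0.5f})[0]; // assumed read-out method
    System.out.println("median ~ " + median);
  }
}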
Use of io.druid.query.aggregation.PostAggregator in project druid by druid-io.
The class ApproximateHistogramGroupByQueryTest, method testGroupByWithSameNameComplexPostAgg.
@Test(expected = IllegalArgumentException.class)
public void testGroupByWithSameNameComplexPostAgg() {
  // The post-aggregator's output name "quantile" collides with the aggregator's
  // name, which the query builder must reject with an IllegalArgumentException.
  ApproximateHistogramAggregatorFactory aggFactory = new ApproximateHistogramAggregatorFactory(
      "quantile", "index", 10, 5, Float.NEGATIVE_INFINITY, Float.POSITIVE_INFINITY
  );
  GroupByQuery query = new GroupByQuery.Builder()
      .setDataSource(QueryRunnerTestHelper.dataSource)
      .setGranularity(QueryRunnerTestHelper.allGran)
      .setDimensions(Arrays.<DimensionSpec>asList(
          new DefaultDimensionSpec(QueryRunnerTestHelper.marketDimension, "marketalias")))
      .setInterval(QueryRunnerTestHelper.fullOnInterval)
      .setLimitSpec(new DefaultLimitSpec(
          Lists.newArrayList(new OrderByColumnSpec("marketalias", OrderByColumnSpec.Direction.DESCENDING)), 1))
      .setAggregatorSpecs(Lists.newArrayList(QueryRunnerTestHelper.rowsCount, aggFactory))
      .setPostAggregatorSpecs(Arrays.<PostAggregator>asList(new QuantilePostAggregator("quantile", "quantile", 0.5f)))
      .build();
  List<Row> expectedResults = Arrays.asList(
      GroupByQueryRunnerTestHelper.createExpectedRow(
          "1970-01-01T00:00:00.000Z", "marketalias", "upfront", "rows", 186L, "quantile", 880.9881f
      )
  );
  Iterable<Row> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  TestHelper.assertExpectedObjects(expectedResults, results, "approx-histo");
}
Use of io.druid.query.aggregation.PostAggregator in project druid by druid-io.
The class VarianceGroupByQueryTest, method testPostAggHavingSpec.
@Test
public void testPostAggHavingSpec() {
  VarianceTestHelper.RowBuilder expect = new VarianceTestHelper.RowBuilder(
      new String[]{"alias", "rows", "index", "index_var", "index_stddev"}
  );
  List<Row> expectedResults = expect
      .add("2011-04-01", "automotive", 2L, 269L, 299.0009819048282, 17.29164485827847)
      .add("2011-04-01", "mezzanine", 6L, 4420L, 254083.76447001836, 504.06722217380724)
      .add("2011-04-01", "premium", 6L, 4416L, 252279.2020389339, 502.27403082275106)
      .build();
  GroupByQuery query = GroupByQuery.builder()
      .setDataSource(VarianceTestHelper.dataSource)
      .setInterval("2011-04-02/2011-04-04")
      .setDimensions(Lists.<DimensionSpec>newArrayList(new DefaultDimensionSpec("quality", "alias")))
      .setAggregatorSpecs(Arrays.asList(
          VarianceTestHelper.rowsCount,
          VarianceTestHelper.indexLongSum,
          VarianceTestHelper.indexVarianceAggr))
      .setPostAggregatorSpecs(ImmutableList.<PostAggregator>of(VarianceTestHelper.stddevOfIndexPostAggr))
      .setGranularity(new PeriodGranularity(new Period("P1M"), null, null))
      .setHavingSpec(new OrHavingSpec(ImmutableList.<HavingSpec>of(
          // passes all 3 expected rows (stddev > 15)
          new GreaterThanHavingSpec(VarianceTestHelper.stddevOfIndexMetric, 15L))))
      .build();
  Iterable<Row> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  TestHelper.assertExpectedObjects(expectedResults, results, "");

  // Re-run with a limit spec ordering by stddev ascending, keeping 2 rows.
  query = query.withLimitSpec(
      new DefaultLimitSpec(
          Arrays.<OrderByColumnSpec>asList(OrderByColumnSpec.asc(VarianceTestHelper.stddevOfIndexMetric)), 2));
  expectedResults = expect
      .add("2011-04-01", "automotive", 2L, 269L, 299.0009819048282, 17.29164485827847)
      .add("2011-04-01", "premium", 6L, 4416L, 252279.2020389339, 502.27403082275106)
      .build();
  results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  TestHelper.assertExpectedObjects(expectedResults, results, "");
}
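As a quick sanity check on the expected rows above, each index_stddev is simply the square root of the corresponding index_var. The following pure-JDK snippet (no Druid classes) verifies that relationship for the three rows:

public class StddevSanityCheck {
  public static void main(String[] args) {
    // { index_var, index_stddev } for automotive, mezzanine, premium
    double[][] pairs = {
        {299.0009819048282, 17.29164485827847},
        {254083.76447001836, 504.06722217380724},
        {252279.2020389339, 502.27403082275106}
    };
    for (double[] pair : pairs) {
      System.out.printf("sqrt(%s) = %s, expected %s%n", pair[0], Math.sqrt(pair[0]), pair[1]);
    }
  }
}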
Use of io.druid.query.aggregation.PostAggregator in project druid by druid-io.
The class DefaultLimitSpec, method makeComparator.
private Ordering<Row> makeComparator(List<DimensionSpec> dimensions, List<AggregatorFactory> aggs, List<PostAggregator> postAggs) {
  // Base ordering: rows compare by timestamp first.
  Ordering<Row> ordering = new Ordering<Row>() {
    @Override
    public int compare(Row left, Row right) {
      return Longs.compare(left.getTimestampFromEpoch(), right.getTimestampFromEpoch());
    }
  };

  // Index the output columns by name for lookup below.
  Map<String, DimensionSpec> dimensionsMap = Maps.newHashMap();
  for (DimensionSpec spec : dimensions) {
    dimensionsMap.put(spec.getOutputName(), spec);
  }
  Map<String, AggregatorFactory> aggregatorsMap = Maps.newHashMap();
  for (final AggregatorFactory agg : aggs) {
    aggregatorsMap.put(agg.getName(), agg);
  }
  Map<String, PostAggregator> postAggregatorsMap = Maps.newHashMap();
  for (PostAggregator postAgg : postAggs) {
    postAggregatorsMap.put(postAgg.getName(), postAgg);
  }

  for (OrderByColumnSpec columnSpec : columns) {
    String columnName = columnSpec.getDimension();
    Ordering<Row> nextOrdering = null;
    // Resolution order: post-aggregators shadow aggregators, which shadow dimensions.
    if (postAggregatorsMap.containsKey(columnName)) {
      nextOrdering = metricOrdering(columnName, postAggregatorsMap.get(columnName).getComparator());
    } else if (aggregatorsMap.containsKey(columnName)) {
      nextOrdering = metricOrdering(columnName, aggregatorsMap.get(columnName).getComparator());
    } else if (dimensionsMap.containsKey(columnName)) {
      nextOrdering = dimensionOrdering(columnName, columnSpec.getDimensionComparator());
    }
    if (nextOrdering == null) {
      throw new ISE("Unknown column in order clause[%s]", columnSpec);
    }
    switch (columnSpec.getDirection()) {
      case DESCENDING:
        nextOrdering = nextOrdering.reverse();
    }
    ordering = ordering.compound(nextOrdering);
  }
  return ordering;
}
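The method builds the final comparator by compounding one ordering per ORDER BY column onto the base timestamp ordering, reversing any DESCENDING column. The same pattern in a self-contained form, using plain Guava; the Row class here is a hypothetical stand-in for Druid's, just to make the sketch runnable:

import com.google.common.collect.Ordering;
import java.util.Arrays;
import java.util.List;

public class CompoundOrderingSketch {
  // Hypothetical stand-in for Druid's Row, for illustration only.
  static final class Row {
    final long timestamp;
    final double metric;
    Row(long timestamp, double metric) {
      this.timestamp = timestamp;
      this.metric = metric;
    }
  }

  public static void main(String[] args) {
    // Base ordering: by timestamp, as in makeComparator above.
    Ordering<Row> ordering = new Ordering<Row>() {
      @Override
      public int compare(Row left, Row right) {
        return Long.compare(left.timestamp, right.timestamp);
      }
    };
    // Compound a metric ordering, reversed to emulate Direction.DESCENDING.
    Ordering<Row> metricDesc = new Ordering<Row>() {
      @Override
      public int compare(Row left, Row right) {
        return Double.compare(left.metric, right.metric);
      }
    }.reverse();
    ordering = ordering.compound(metricDesc);

    // Ties on timestamp (the first two rows) are broken by metric, descending.
    List<Row> rows = Arrays.asList(new Row(1L, 2.0), new Row(1L, 5.0), new Row(0L, 9.0));
    for (Row row : ordering.sortedCopy(rows)) {
      System.out.println(row.timestamp + " -> " + row.metric);
    }
  }
}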