Use of org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory in project druid by druid-io.
From class GroupByQueryRunnerTest, method testDifferentGroupingSubquery.
@Test
public void testDifferentGroupingSubquery() {
  GroupByQuery subquery = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(new DefaultDimensionSpec("quality", "alias"))
      .setAggregatorSpecs(
          QueryRunnerTestHelper.ROWS_COUNT,
          new LongSumAggregatorFactory("idx", "index"),
          new LongSumAggregatorFactory("indexMaxPlusTen", "indexMaxPlusTen"))
      .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
      .build();
  GroupByQuery query = makeQueryBuilder()
      .setDataSource(subquery)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setAggregatorSpecs(
          QueryRunnerTestHelper.ROWS_COUNT,
          new DoubleMaxAggregatorFactory("idx", "idx"),
          new DoubleMaxAggregatorFactory("indexMaxPlusTen", "indexMaxPlusTen"))
      .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
      .build();
  List<ResultRow> expectedResults = makeRows(
      query,
      new String[] { "__time", "rows", "idx", "indexMaxPlusTen" },
      new Object[] { "2011-04-01", 9L, 2900.0, 2930.0 },
      new Object[] { "2011-04-02", 9L, 2505.0, 2535.0 });
  TestHelper.assertExpectedObjects(expectedResults, GroupByQueryRunnerTestHelper.runQuery(factory, runner, query), "subquery");
  subquery = makeQueryBuilder(subquery)
      .setVirtualColumns(new ExpressionVirtualColumn("expr", "-index + 100", ColumnType.FLOAT, TestExprMacroTable.INSTANCE))
      .setAggregatorSpecs(
          QueryRunnerTestHelper.ROWS_COUNT,
          new LongSumAggregatorFactory("idx", "expr"),
          new LongSumAggregatorFactory("indexMaxPlusTen", "indexMaxPlusTen"))
      .build();
  query = (GroupByQuery) query.withDataSource(new QueryDataSource(subquery));
  expectedResults = makeRows(
      query,
      new String[] { "__time", "rows", "idx", "indexMaxPlusTen" },
      new Object[] { "2011-04-01", 9L, 21.0, 2930.0 },
      new Object[] { "2011-04-02", 9L, 2.0, 2535.0 });
  TestHelper.assertExpectedObjects(expectedResults, GroupByQueryRunnerTestHelper.runQuery(factory, runner, query), "subquery");
}
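Both constructor arguments of DoubleMaxAggregatorFactory matter in the subquery setup above: the first is the name of the output column, the second the field read from the input rows. A minimal sketch of the pairing the outer query relies on (the identifiers come from the test; the comments are explanatory):

// Inner query: sums the raw "index" column into a result column named "idx".
AggregatorFactory inner = new LongSumAggregatorFactory("idx", "index");
// Outer query: reads the subquery's "idx" column and re-aggregates it as a
// double max, publishing the result under the same name.
AggregatorFactory outer = new DoubleMaxAggregatorFactory("idx", "idx");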
Use of org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory in project druid by druid-io.
From class GroupByQueryRunnerTest, method testDifferentGroupingSubqueryWithFilter.
@Test
public void testDifferentGroupingSubqueryWithFilter() {
  GroupByQuery subquery = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(new DefaultDimensionSpec("quality", "quality"))
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT, new LongSumAggregatorFactory("idx", "index"))
      .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
      .build();
  GroupByQuery query = makeQueryBuilder()
      .setDataSource(subquery)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setAggregatorSpecs(new DoubleMaxAggregatorFactory("idx", "idx"))
      .setDimFilter(new OrDimFilter(Lists.newArrayList(
          new SelectorDimFilter("quality", "automotive", null),
          new SelectorDimFilter("quality", "premium", null),
          new SelectorDimFilter("quality", "mezzanine", null),
          new SelectorDimFilter("quality", "business", null),
          new SelectorDimFilter("quality", "entertainment", null),
          new SelectorDimFilter("quality", "health", null),
          new SelectorDimFilter("quality", "news", null),
          new SelectorDimFilter("quality", "technology", null),
          new SelectorDimFilter("quality", "travel", null))))
      .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
      .build();
  List<ResultRow> expectedResults = Arrays.asList(
      makeRow(query, "2011-04-01", "idx", 2900.0),
      makeRow(query, "2011-04-02", "idx", 2505.0));
  Iterable<ResultRow> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  TestHelper.assertExpectedObjects(expectedResults, results, "subquery-filter");
}
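The nine selector filters OR-ed together above amount to a set-membership test on "quality". Depending on the Druid version, the same predicate can usually be written more compactly with an InDimFilter; a sketch, assuming the (dimension, values, extractionFn) constructor form:

// Hedged equivalent of the OrDimFilter above; InDimFilter constructor
// signatures vary across Druid versions, so treat this as illustrative.
DimFilter qualityIn = new InDimFilter(
    "quality",
    Arrays.asList(
        "automotive", "premium", "mezzanine", "business", "entertainment",
        "health", "news", "technology", "travel"),
    null);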
Use of org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory in project druid by druid-io.
From class SchemalessTestSimpleTest, method testFullOnTimeseries.
@Test
public void testFullOnTimeseries() {
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource(dataSource)
      .granularity(ALL_GRAN)
      .intervals(fullOnInterval)
      .aggregators(Lists.newArrayList(Iterables.concat(
          commonAggregators,
          Lists.newArrayList(
              new DoubleMaxAggregatorFactory("maxIndex", "index"),
              new DoubleMinAggregatorFactory("minIndex", "index")))))
      .postAggregators(addRowsIndexConstant)
      .build();
  List<Result<TimeseriesResultValue>> expectedResults = Collections.singletonList(
      new Result(
          DateTimes.of("2011-01-12T00:00:00.000Z"),
          new TimeseriesResultValue(
              ImmutableMap.<String, Object>builder()
                  .put("rows", coalesceAbsentAndEmptyDims ? 10L : 11L)
                  .put("index", 900.0)
                  .put("addRowsIndexConstant", coalesceAbsentAndEmptyDims ? 911.0 : 912.0)
                  .put("uniques", 2.000977198748901D)
                  .put("maxIndex", 100.0)
                  .put("minIndex", NullHandling.replaceWithDefault() ? 0.0 : 100.0)
                  .build())));
  QueryRunner runner = TestQueryRunners.makeTimeSeriesQueryRunner(segment);
  TestHelper.assertExpectedResults(expectedResults, runner.run(QueryPlus.wrap(query)));
}
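The only branch in the expectations is minIndex, and it follows from Druid's null-handling mode: schemaless rows that lack an "index" value are coerced to 0.0 in default-value mode, but skipped by the min aggregator in SQL-compatible mode. Restated as a sketch:

// Why "minIndex" differs: NullHandling.replaceWithDefault() is true in
// default-value mode, where missing doubles count as 0.0; in SQL-compatible
// mode they are ignored, and 100.0 is the smallest value actually present.
double expectedMinIndex = NullHandling.replaceWithDefault() ? 0.0 : 100.0;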
Use of org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory in project druid by druid-io.
From class SchemalessTestSimpleTest, method testFullOnTopN.
// @Test TODO: Handling of null values is inconsistent right now, need to make it all consistent and re-enable test
// TODO: Complain to Eric when you see this. It shouldn't be like this...
@Ignore
@SuppressWarnings("unused")
public void testFullOnTopN() {
  TopNQuery query = new TopNQueryBuilder()
      .dataSource(dataSource)
      .granularity(ALL_GRAN)
      .dimension(marketDimension)
      .metric(indexMetric)
      .threshold(3)
      .intervals(fullOnInterval)
      .aggregators(Lists.newArrayList(Iterables.concat(
          commonAggregators,
          Lists.newArrayList(
              new DoubleMaxAggregatorFactory("maxIndex", "index"),
              new DoubleMinAggregatorFactory("minIndex", "index")))))
      .postAggregators(addRowsIndexConstant)
      .build();
  List<Result<TopNResultValue>> expectedResults = Collections.singletonList(
      new Result<>(
          DateTimes.of("2011-01-12T00:00:00.000Z"),
          new TopNResultValue(Arrays.asList(
              new DimensionAndMetricValueExtractor(
                  ImmutableMap.<String, Object>builder()
                      .put("market", "spot")
                      .put("rows", 4L)
                      .put("index", 400.0D)
                      .put("addRowsIndexConstant", 405.0D)
                      .put("uniques", 1.0002442201269182D)
                      .put("maxIndex", 100.0)
                      .put("minIndex", 100.0)
                      .build()),
              new DimensionAndMetricValueExtractor(
                  ImmutableMap.<String, Object>builder()
                      .put("market", "")
                      .put("rows", 2L)
                      .put("index", 200.0D)
                      .put("addRowsIndexConstant", 203.0D)
                      .put("uniques", 0.0)
                      .put("maxIndex", 100.0D)
                      .put("minIndex", 100.0D)
                      .build()),
              new DimensionAndMetricValueExtractor(
                  ImmutableMap.<String, Object>builder()
                      .put("market", "total_market")
                      .put("rows", 2L)
                      .put("index", 200.0D)
                      .put("addRowsIndexConstant", 203.0D)
                      .put("uniques", 1.0002442201269182D)
                      .put("maxIndex", 100.0D)
                      .put("minIndex", 100.0D)
                      .build())))));
  try (CloseableStupidPool<ByteBuffer> pool = TestQueryRunners.createDefaultNonBlockingPool()) {
    QueryRunner runner = TestQueryRunners.makeTopNQueryRunner(segment, pool);
    TestHelper.assertExpectedResults(expectedResults, runner.run(QueryPlus.wrap(query)));
  }
}
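TopN aggregation in these tests runs over pooled off-heap ByteBuffers, which is why the runner is constructed with a pool and why the pool is scoped with try-with-resources: closing it releases the buffers even if the assertion throws. A sketch of that resource pattern, reusing the helpers from the snippet above:

// The pool owns the direct ByteBuffers the TopN engine aggregates into;
// try-with-resources guarantees they are freed even when
// assertExpectedResults throws.
try (CloseableStupidPool<ByteBuffer> pool = TestQueryRunners.createDefaultNonBlockingPool()) {
  QueryRunner runner = TestQueryRunners.makeTopNQueryRunner(segment, pool);
  // ... run TopN queries that draw aggregation buffers from the pool ...
}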
Use of org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory in project druid by druid-io.
From class SchemalessTestFullTest, method testFullOnTimeseries.
private void testFullOnTimeseries(QueryRunner runner, List<Result<TimeseriesResultValue>> expectedResults, String failMsg) {
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource(dataSource)
      .granularity(ALL_GRAN)
      .intervals(fullOnInterval)
      .aggregators(Lists.newArrayList(Iterables.concat(
          commonAggregators,
          Lists.newArrayList(
              new DoubleMaxAggregatorFactory("maxIndex", "index"),
              new DoubleMinAggregatorFactory("minIndex", "index")))))
      .postAggregators(addRowsIndexConstant)
      .build();
  failMsg += " timeseries ";
  Iterable<Result<TimeseriesResultValue>> actualResults = runner.run(QueryPlus.wrap(query)).toList();
  TestHelper.assertExpectedResults(expectedResults, actualResults, failMsg);
}
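Unlike the earlier @Test methods, this is a parameterized helper: SchemalessTestFullTest invokes it once per schemaless ingestion permutation, passing a runner and the expectations for that case. A hypothetical call site, for illustration only:

// Hypothetical invocation; "expectedTimeseriesResults" is a placeholder for
// the per-permutation expectations the real test class builds up.
testFullOnTimeseries(
    TestQueryRunners.makeTimeSeriesQueryRunner(segment),
    expectedTimeseriesResults,
    "failed on full schemaless scan");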