Use of org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory in project druid by druid-io.
From class CalciteQueryTest, method testExpressionAggregations.
@Test
public void testExpressionAggregations() throws Exception {
  // Cannot vectorize due to expressions.
  cannotVectorize();
  final ExprMacroTable macroTable = CalciteTests.createExprMacroTable();
  testQuery(
      "SELECT\n"
      + " SUM(cnt * 3),\n"
      + " LN(SUM(cnt) + SUM(m1)),\n"
      + " MOD(SUM(cnt), 4),\n"
      + " SUM(CHARACTER_LENGTH(CAST(cnt * 10 AS VARCHAR))),\n"
      + " MAX(CHARACTER_LENGTH(dim2) + LN(m1))\n"
      + "FROM druid.foo",
      ImmutableList.of(
          Druids.newTimeseriesQueryBuilder()
                .dataSource(CalciteTests.DATASOURCE1)
                .intervals(querySegmentSpec(Filtration.eternity()))
                .granularity(Granularities.ALL)
                .aggregators(aggregators(
                    new LongSumAggregatorFactory("a0", null, "(\"cnt\" * 3)", macroTable),
                    new LongSumAggregatorFactory("a1", "cnt"),
                    new DoubleSumAggregatorFactory("a2", "m1"),
                    new LongSumAggregatorFactory("a3", null, "strlen(CAST((\"cnt\" * 10), 'STRING'))", macroTable),
                    new DoubleMaxAggregatorFactory("a4", null, "(strlen(\"dim2\") + log(\"m1\"))", macroTable)
                ))
                .postAggregators(
                    expressionPostAgg("p0", "log((\"a1\" + \"a2\"))"),
                    expressionPostAgg("p1", "(\"a1\" % 4)")
                )
                .context(QUERY_CONTEXT_DEFAULT)
                .build()
      ),
      ImmutableList.of(new Object[]{18L, 3.295836866004329, 2, 12L, 3f + (Math.log(5.0))})
  );
}
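For reference, a minimal sketch of the two DoubleMaxAggregatorFactory constructors this test exercises. It reuses only constructors and column names that appear in the test above; the output name "m1Max" is illustrative, and the comments describe how the test uses the constructors rather than a definitive API reference.

  final ExprMacroTable macros = CalciteTests.createExprMacroTable();

  // Two-argument form: take the max of a physical double column ("m1").
  DoubleMaxAggregatorFactory fieldMax = new DoubleMaxAggregatorFactory("m1Max", "m1");

  // Four-argument form: evaluate a Druid expression per row, then take the max.
  // As in the test, fieldName is passed as null when an expression is supplied.
  DoubleMaxAggregatorFactory exprMax =
      new DoubleMaxAggregatorFactory("a4", null, "(strlen(\"dim2\") + log(\"m1\"))", macros);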
Use of org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory in project druid by druid-io.
From class GroupByQueryRunnerTest, method testGroupByNestedWithInnerQueryNumericsWithLongTime.
@Test
public void testGroupByNestedWithInnerQueryNumericsWithLongTime() {
  if (config.getDefaultStrategy().equals(GroupByStrategySelector.STRATEGY_V1)) {
    expectedException.expect(UnsupportedOperationException.class);
    expectedException.expectMessage("GroupBy v1 only supports dimensions with an outputType of STRING.");
  }
  GroupByQuery subQuery = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(
          new DefaultDimensionSpec("market", "alias"),
          new DefaultDimensionSpec("__time", "time_alias", ColumnType.LONG),
          new DefaultDimensionSpec("index", "index_alias", ColumnType.FLOAT)
      )
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT)
      .setGranularity(QueryRunnerTestHelper.ALL_GRAN)
      .build();
  GroupByQuery outerQuery = makeQueryBuilder()
      .setDataSource(subQuery)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(
          new DefaultDimensionSpec("alias", "market"),
          new DefaultDimensionSpec("time_alias", "time_alias2", ColumnType.LONG)
      )
      .setAggregatorSpecs(
          new LongMaxAggregatorFactory("time_alias_max", "time_alias"),
          new DoubleMaxAggregatorFactory("index_alias_max", "index_alias")
      )
      .setGranularity(QueryRunnerTestHelper.ALL_GRAN)
      .build();
  List<ResultRow> expectedResults = Arrays.asList(
      makeRow(outerQuery, "2011-04-01", "market", "spot", "time_alias2", 1301616000000L, "time_alias_max", 1301616000000L, "index_alias_max", 158.74722290039062),
      makeRow(outerQuery, "2011-04-01", "market", "spot", "time_alias2", 1301702400000L, "time_alias_max", 1301702400000L, "index_alias_max", 166.01605224609375),
      makeRow(outerQuery, "2011-04-01", "market", "total_market", "time_alias2", 1301616000000L, "time_alias_max", 1301616000000L, "index_alias_max", 1522.043701171875),
      makeRow(outerQuery, "2011-04-01", "market", "total_market", "time_alias2", 1301702400000L, "time_alias_max", 1301702400000L, "index_alias_max", 1321.375),
      makeRow(outerQuery, "2011-04-01", "market", "upfront", "time_alias2", 1301616000000L, "time_alias_max", 1301616000000L, "index_alias_max", 1447.3411865234375),
      makeRow(outerQuery, "2011-04-01", "market", "upfront", "time_alias2", 1301702400000L, "time_alias_max", 1301702400000L, "index_alias_max", 1144.3424072265625)
  );
  Iterable<ResultRow> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, outerQuery);
  TestHelper.assertExpectedObjects(expectedResults, results, "numerics");
}
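The detail worth isolating here: the outer query's DoubleMaxAggregatorFactory reads "index_alias", which is not a physical column but the FLOAT-typed dimension output of the inner query. A minimal sketch of that wiring, using only the classes and names from the test above:

  // The inner query exposes "index" as a FLOAT dimension named "index_alias"...
  DefaultDimensionSpec innerDim = new DefaultDimensionSpec("index", "index_alias", ColumnType.FLOAT);
  // ...which the outer query then aggregates like any other numeric column.
  DoubleMaxAggregatorFactory outerMax = new DoubleMaxAggregatorFactory("index_alias_max", "index_alias");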
Use of org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory in project druid by druid-io.
From class TopNQueryRunnerTest, method testFullOnTopN.
@Test
public void testFullOnTopN() {
  TopNQuery query = new TopNQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(QueryRunnerTestHelper.ALL_GRAN)
      .dimension(QueryRunnerTestHelper.MARKET_DIMENSION)
      .metric(QueryRunnerTestHelper.INDEX_METRIC)
      .threshold(4)
      .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .aggregators(Lists.newArrayList(Iterables.concat(
          commonAggregators,
          Lists.newArrayList(
              new DoubleMaxAggregatorFactory("maxIndex", "index"),
              new DoubleMinAggregatorFactory("minIndex", "index")
          )
      )))
      .postAggregators(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT)
      .build();
  List<Result<TopNResultValue>> expectedResults = Collections.singletonList(
      new Result<>(
          DateTimes.of("2011-01-12T00:00:00.000Z"),
          new TopNResultValue(Arrays.<Map<String, Object>>asList(
              ImmutableMap.<String, Object>builder()
                          .put(QueryRunnerTestHelper.MARKET_DIMENSION, "total_market")
                          .put("rows", 186L)
                          .put("index", 215679.82879638672D)
                          .put("addRowsIndexConstant", 215866.82879638672D)
                          .put("uniques", QueryRunnerTestHelper.UNIQUES_2)
                          .put("maxIndex", 1743.92175D)
                          .put("minIndex", 792.3260498046875D)
                          .build(),
              ImmutableMap.<String, Object>builder()
                          .put(QueryRunnerTestHelper.MARKET_DIMENSION, "upfront")
                          .put("rows", 186L)
                          .put("index", 192046.1060180664D)
                          .put("addRowsIndexConstant", 192233.1060180664D)
                          .put("uniques", QueryRunnerTestHelper.UNIQUES_2)
                          .put("maxIndex", 1870.061029D)
                          .put("minIndex", 545.9906005859375D)
                          .build(),
              ImmutableMap.<String, Object>builder()
                          .put(QueryRunnerTestHelper.MARKET_DIMENSION, "spot")
                          .put("rows", 837L)
                          .put("index", 95606.57232284546D)
                          .put("addRowsIndexConstant", 96444.57232284546D)
                          .put("uniques", QueryRunnerTestHelper.UNIQUES_9)
                          .put("maxIndex", 277.273533D)
                          .put("minIndex", 59.02102279663086D)
                          .build()
          ))
      )
  );
  assertExpectedResults(expectedResults, query);
  assertExpectedResults(
      expectedResults,
      query.withAggregatorSpecs(Lists.newArrayList(Iterables.concat(
          QueryRunnerTestHelper.COMMON_FLOAT_AGGREGATORS,
          Lists.newArrayList(
              new FloatMaxAggregatorFactory("maxIndex", "indexFloat"),
              new FloatMinAggregatorFactory("minIndex", "indexFloat")
          )
      )))
  );
}
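The second assertion reuses the same expected results after swapping in float-typed aggregators over the parallel "indexFloat" column; withAggregatorSpecs returns a copy of the query with only the aggregator list replaced. A minimal sketch of the two interchangeable pairs, assuming (as the shared expected results imply) that "indexFloat" mirrors the values in "index":

  // Double-typed pair over the double column...
  AggregatorFactory dMax = new DoubleMaxAggregatorFactory("maxIndex", "index");
  AggregatorFactory dMin = new DoubleMinAggregatorFactory("minIndex", "index");
  // ...and the float-typed pair over the float column, keeping the same output names
  // so one set of expected results can cover both query variants.
  AggregatorFactory fMax = new FloatMaxAggregatorFactory("maxIndex", "indexFloat");
  AggregatorFactory fMin = new FloatMinAggregatorFactory("minIndex", "indexFloat");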
Use of org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory in project druid by druid-io.
From class TopNQueryRunnerTest, method testFullOnTopNDimExtractionAllNulls.
@Test
public void testFullOnTopNDimExtractionAllNulls() {
  String jsFn = "function(str) { return null; }";
  ExtractionFn jsExtractionFn = new JavaScriptExtractionFn(jsFn, false, JavaScriptConfig.getEnabledInstance());
  TopNQuery query = new TopNQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(QueryRunnerTestHelper.ALL_GRAN)
      .dimension(new ExtractionDimensionSpec(QueryRunnerTestHelper.MARKET_DIMENSION, QueryRunnerTestHelper.MARKET_DIMENSION, jsExtractionFn))
      .metric(QueryRunnerTestHelper.INDEX_METRIC)
      .threshold(4)
      .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .aggregators(Lists.newArrayList(Iterables.concat(
          commonAggregators,
          Lists.newArrayList(
              new DoubleMaxAggregatorFactory("maxIndex", "index"),
              new DoubleMinAggregatorFactory("minIndex", "index")
          )
      )))
      .postAggregators(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT)
      .build();
  Map<String, Object> expectedMap = new HashMap<>();
  expectedMap.put(QueryRunnerTestHelper.MARKET_DIMENSION, null);
  expectedMap.put("rows", 1209L);
  expectedMap.put("index", 503332.5071372986D);
  expectedMap.put("addRowsIndexConstant", 504542.5071372986D);
  expectedMap.put("uniques", 9.019833517963864);
  expectedMap.put("maxIndex", 1870.061029D);
  expectedMap.put("minIndex", 59.02102279663086D);
  List<Result<TopNResultValue>> expectedResults = Collections.singletonList(
      new Result<>(
          DateTimes.of("2011-01-12T00:00:00.000Z"),
          new TopNResultValue(Collections.singletonList(expectedMap))
      )
  );
  assertExpectedResults(expectedResults, query);
}
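Because the JavaScript extraction function returns null for every input, all 1209 rows collapse into a single bucket keyed by a null dimension value, and the aggregates span the full datasource. A minimal sketch of such an extraction function, following the constructor call used above; it requires JavaScript to be enabled, which the test forces via JavaScriptConfig.getEnabledInstance():

  // Maps every dimension value to null. The boolean flags the function as
  // non-injective (many inputs can map to one output), matching the test above.
  ExtractionFn allNulls = new JavaScriptExtractionFn(
      "function(str) { return null; }",
      false,
      JavaScriptConfig.getEnabledInstance()
  );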
Use of org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory in project druid by druid-io.
From class TopNQueryRunnerTest, method testTopNWithExtractionFilterNoExistingValue.
@Test
public void testTopNWithExtractionFilterNoExistingValue() {
  Map<String, String> extractionMap = new HashMap<>();
  MapLookupExtractor mapLookupExtractor = new MapLookupExtractor(extractionMap, false);
  LookupExtractionFn lookupExtractionFn;
  if (NullHandling.replaceWithDefault()) {
    lookupExtractionFn = new LookupExtractionFn(mapLookupExtractor, false, null, true, true);
    extractionMap.put("", "NULL");
  } else {
    extractionMap.put("", "NOT_USED");
    lookupExtractionFn = new LookupExtractionFn(mapLookupExtractor, false, "NULL", true, true);
  }
  DimFilter extractionFilter = new ExtractionDimFilter("null_column", "NULL", lookupExtractionFn, null);
  TopNQueryBuilder topNQueryBuilder = new TopNQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(QueryRunnerTestHelper.ALL_GRAN)
      .dimension("null_column")
      .metric(QueryRunnerTestHelper.INDEX_METRIC)
      .threshold(4)
      .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .aggregators(Lists.newArrayList(Iterables.concat(
          commonAggregators,
          Lists.newArrayList(
              new FilteredAggregatorFactory(new DoubleMaxAggregatorFactory("maxIndex", "index"), extractionFilter),
              new DoubleMinAggregatorFactory("minIndex", "index")
          )
      )))
      .postAggregators(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT);
  TopNQuery topNQueryWithNULLValueExtraction = topNQueryBuilder.filters(extractionFilter).build();
  Map<String, Object> map = new HashMap<>();
  map.put("null_column", null);
  map.put("rows", 1209L);
  map.put("index", 503332.5071372986D);
  map.put("addRowsIndexConstant", 504542.5071372986D);
  map.put("uniques", QueryRunnerTestHelper.UNIQUES_9);
  map.put("maxIndex", 1870.061029D);
  map.put("minIndex", 59.02102279663086D);
  List<Result<TopNResultValue>> expectedResults = Collections.singletonList(
      new Result<>(
          DateTimes.of("2011-01-12T00:00:00.000Z"),
          new TopNResultValue(Collections.singletonList(map))
      )
  );
  assertExpectedResults(expectedResults, topNQueryWithNULLValueExtraction);
  // Assert the optimization path as well
  final Sequence<Result<TopNResultValue>> retval = runWithPreMergeAndMerge(topNQueryWithNULLValueExtraction);
  TestHelper.assertExpectedResults(expectedResults, retval);
}
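One asymmetry worth noting: "maxIndex" is a FilteredAggregatorFactory wrapping the DoubleMaxAggregatorFactory, so it aggregates only rows matching the extraction filter, while "minIndex" is left unfiltered (here the query-level filter already restricts the input rows). A minimal sketch of the wrapper, assuming extractionFilter is a DimFilter built as in the test above:

  // The wrapper keeps the delegate's output name ("maxIndex") and feeds it only
  // rows that match the supplied filter.
  AggregatorFactory filteredMax = new FilteredAggregatorFactory(
      new DoubleMaxAggregatorFactory("maxIndex", "index"),
      extractionFilter
  );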