Use of org.apache.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
The class CalciteArraysQueryTest, method testGroupByArrayFromCase.
@Test
public void testGroupByArrayFromCase() throws Exception {
cannotVectorize();
testQuery("SELECT CASE WHEN dim4 = 'a' THEN ARRAY['foo','bar','baz'] END as mv_value, count(1) from numfoo GROUP BY 1", QUERY_CONTEXT_NO_STRINGIFY_ARRAY, ImmutableList.of(GroupByQuery.builder().setDataSource(CalciteTests.DATASOURCE3).setInterval(querySegmentSpec(Filtration.eternity())).setVirtualColumns(expressionVirtualColumn("v0", "case_searched((\"dim4\" == 'a'),array('foo','bar','baz'),null)", ColumnType.STRING_ARRAY)).setDimensions(new DefaultDimensionSpec("v0", "_d0", ColumnType.STRING_ARRAY)).setGranularity(Granularities.ALL).setAggregatorSpecs(new CountAggregatorFactory("a0")).setContext(QUERY_CONTEXT_DEFAULT).build()), ImmutableList.of(new Object[] { null, 3L }, new Object[] { ImmutableList.of("foo", "bar", "baz"), 3L }));
}
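CountAggregatorFactory itself only takes the name of the output column. As a point of reference, a minimal sketch of the same count aggregator attached directly to a native GroupByQuery, grouping on the raw dim4 column instead of the CASE-derived array, could look like the following; it reuses the test helpers querySegmentSpec, Filtration.eternity() and QUERY_CONTEXT_DEFAULT shown above, and the dimension and output names are illustrative.
// Hypothetical sketch: a plain row count per value of "dim4".
// Same builder calls as the test above; names are illustrative.
GroupByQuery countPerDim4 = GroupByQuery.builder()
    .setDataSource(CalciteTests.DATASOURCE3)
    .setInterval(querySegmentSpec(Filtration.eternity()))
    .setDimensions(new DefaultDimensionSpec("dim4", "d0"))
    .setGranularity(Granularities.ALL)
    .setAggregatorSpecs(new CountAggregatorFactory("a0"))
    .setContext(QUERY_CONTEXT_DEFAULT)
    .build();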
Use of org.apache.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
The class TimeseriesQueryQueryToolChestTest, method testCacheStrategy.
@Test
public void testCacheStrategy() throws Exception {
CacheStrategy<Result<TimeseriesResultValue>, Object, TimeseriesQuery> strategy = TOOL_CHEST.getCacheStrategy(
    new TimeseriesQuery(
        new TableDataSource("dummy"),
        new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.of("2015-01-01/2015-01-02"))),
        descending,
        VirtualColumns.EMPTY,
        null,
        Granularities.ALL,
        ImmutableList.of(
            new CountAggregatorFactory("metric1"),
            new LongSumAggregatorFactory("metric0", "metric0"),
            new StringLastAggregatorFactory("complexMetric", "test", null, null)
        ),
        ImmutableList.of(new ConstantPostAggregator("post", 10)),
        0,
        null
    )
);
final Result<TimeseriesResultValue> result1 = new Result<>(
    // test timestamps that result in integer size millis
    DateTimes.utc(123L),
    new TimeseriesResultValue(
        ImmutableMap.of("metric1", 2, "metric0", 3, "complexMetric", new SerializablePairLongString(123L, "val1"))
    )
);
Object preparedValue = strategy.prepareForSegmentLevelCache().apply(result1);
ObjectMapper objectMapper = TestHelper.makeJsonMapper();
Object fromCacheValue = objectMapper.readValue(objectMapper.writeValueAsBytes(preparedValue), strategy.getCacheObjectClazz());
Result<TimeseriesResultValue> fromCacheResult = strategy.pullFromSegmentLevelCache().apply(fromCacheValue);
Assert.assertEquals(result1, fromCacheResult);
final Result<TimeseriesResultValue> result2 = new Result<>(
    // test timestamps that result in integer size millis
    DateTimes.utc(123L),
    new TimeseriesResultValue(ImmutableMap.of("metric1", 2, "metric0", 3, "complexMetric", "val1", "post", 10))
);
Object preparedResultLevelCacheValue = strategy.prepareForCache(true).apply(result2);
Object fromResultLevelCacheValue = objectMapper.readValue(objectMapper.writeValueAsBytes(preparedResultLevelCacheValue), strategy.getCacheObjectClazz());
Result<TimeseriesResultValue> fromResultLevelCacheRes = strategy.pullFromCache(true).apply(fromResultLevelCacheValue);
Assert.assertEquals(result2, fromResultLevelCacheRes);
final Result<TimeseriesResultValue> result3 = new Result<>(
    // null timestamp similar to grandTotal
    null,
    new TimeseriesResultValue(ImmutableMap.of("metric1", 2, "metric0", 3, "complexMetric", "val1", "post", 10))
);
preparedResultLevelCacheValue = strategy.prepareForCache(true).apply(result3);
fromResultLevelCacheValue = objectMapper.readValue(objectMapper.writeValueAsBytes(preparedResultLevelCacheValue), strategy.getCacheObjectClazz());
fromResultLevelCacheRes = strategy.pullFromCache(true).apply(fromResultLevelCacheValue);
Assert.assertEquals(result3, fromResultLevelCacheRes);
}
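Note how the segment-level value prepared from result1 keeps the stringLast aggregation in its intermediate SerializablePairLongString form, while the result-level results (result2 and result3) hold the finalized string together with the post-aggregated "post" entry. The round trip itself is the same pattern in both cases; condensed, and assuming strategy, result1 and objectMapper exactly as defined in the test above, the segment-level path is:
// Condensed view of the segment-level round trip exercised above
// (strategy, result1 and objectMapper as defined in testCacheStrategy).
Object prepared = strategy.prepareForSegmentLevelCache().apply(result1);
byte[] bytes = objectMapper.writeValueAsBytes(prepared);
Object restored = objectMapper.readValue(bytes, strategy.getCacheObjectClazz());
Result<TimeseriesResultValue> roundTripped = strategy.pullFromSegmentLevelCache().apply(restored);
Assert.assertEquals(result1, roundTripped);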
Use of org.apache.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
The class TimeseriesQueryQueryToolChestTest, method testResultLevelCacheKey.
@Test
public void testResultLevelCacheKey() {
final TimeseriesQuery query1 = Druids.newTimeseriesQueryBuilder()
    .dataSource("dummy")
    .intervals("2015-01-01/2015-01-02")
    .descending(descending)
    .granularity(Granularities.ALL)
    .aggregators(ImmutableList.of(new LongSumAggregatorFactory("metric0", "metric0"), new CountAggregatorFactory("metric1")))
    .postAggregators(ImmutableList.of(
        new ArithmeticPostAggregator("post", "+", ImmutableList.of(
            new FieldAccessPostAggregator(null, "metric1"),
            new FieldAccessPostAggregator(null, "metric0")
        ))
    ))
    .build();
final TimeseriesQuery query2 = Druids.newTimeseriesQueryBuilder()
    .dataSource("dummy")
    .intervals("2015-01-01/2015-01-02")
    .descending(descending)
    .granularity(Granularities.ALL)
    .aggregators(ImmutableList.of(new LongSumAggregatorFactory("metric0", "metric0"), new CountAggregatorFactory("metric1")))
    .postAggregators(ImmutableList.of(
        new ArithmeticPostAggregator("post", "/", ImmutableList.of(
            new FieldAccessPostAggregator(null, "metric1"),
            new FieldAccessPostAggregator(null, "metric0")
        ))
    ))
    .build();
Assert.assertTrue(Arrays.equals(
    TOOL_CHEST.getCacheStrategy(query1).computeCacheKey(query1),
    TOOL_CHEST.getCacheStrategy(query2).computeCacheKey(query2)
));
Assert.assertFalse(Arrays.equals(
    TOOL_CHEST.getCacheStrategy(query1).computeResultLevelCacheKey(query1),
    TOOL_CHEST.getCacheStrategy(query2).computeResultLevelCacheKey(query2)
));
}
Use of org.apache.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
The class TimeseriesQueryQueryToolChestTest, method testResultLevelCacheKeyWithGrandTotal.
@Test
public void testResultLevelCacheKeyWithGrandTotal() {
final TimeseriesQuery query1 = Druids.newTimeseriesQueryBuilder()
    .dataSource("dummy")
    .intervals("2015-01-01/2015-01-02")
    .descending(descending)
    .granularity(Granularities.ALL)
    .aggregators(ImmutableList.of(new LongSumAggregatorFactory("metric0", "metric0"), new CountAggregatorFactory("metric1")))
    .postAggregators(ImmutableList.of(
        new ArithmeticPostAggregator("post", "+", ImmutableList.of(
            new FieldAccessPostAggregator(null, "metric1"),
            new FieldAccessPostAggregator(null, "metric0")
        ))
    ))
    .context(ImmutableMap.of(TimeseriesQuery.CTX_GRAND_TOTAL, true))
    .build();
final TimeseriesQuery query2 = Druids.newTimeseriesQueryBuilder()
    .dataSource("dummy")
    .intervals("2015-01-01/2015-01-02")
    .descending(descending)
    .granularity(Granularities.ALL)
    .aggregators(ImmutableList.of(new LongSumAggregatorFactory("metric0", "metric0"), new CountAggregatorFactory("metric1")))
    .postAggregators(ImmutableList.of(
        new ArithmeticPostAggregator("post", "/", ImmutableList.of(
            new FieldAccessPostAggregator(null, "metric1"),
            new FieldAccessPostAggregator(null, "metric0")
        ))
    ))
    .context(ImmutableMap.of(TimeseriesQuery.CTX_GRAND_TOTAL, true))
    .build();
Assert.assertTrue(Arrays.equals(
    TOOL_CHEST.getCacheStrategy(query1).computeCacheKey(query1),
    TOOL_CHEST.getCacheStrategy(query2).computeCacheKey(query2)
));
Assert.assertFalse(Arrays.equals(
    TOOL_CHEST.getCacheStrategy(query1).computeResultLevelCacheKey(query1),
    TOOL_CHEST.getCacheStrategy(query2).computeResultLevelCacheKey(query2)
));
}
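Both of these tests make the same point: query1 and query2 share every component except the arithmetic post-aggregator, so their segment-level cache keys are equal while their result-level cache keys differ, with or without the grandTotal context flag. Spelled out with hypothetical local variables (the calls are identical to those in the assertions above):
// Hypothetical local variables for the keys compared in the assertions.
byte[] segmentKey1 = TOOL_CHEST.getCacheStrategy(query1).computeCacheKey(query1);
byte[] segmentKey2 = TOOL_CHEST.getCacheStrategy(query2).computeCacheKey(query2);
byte[] resultKey1 = TOOL_CHEST.getCacheStrategy(query1).computeResultLevelCacheKey(query1);
byte[] resultKey2 = TOOL_CHEST.getCacheStrategy(query2).computeResultLevelCacheKey(query2);
// segmentKey1 equals segmentKey2, resultKey1 does not equal resultKey2,
// consistent with the post-aggregators being folded only into the result-level key.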
Use of org.apache.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
The class TimeseriesQueryRunnerTest, method testTimeSeriesWithFilteredAggInvertedNullValue.
@Test
public void testTimeSeriesWithFilteredAggInvertedNullValue() {
TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
    .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
    .granularity(QueryRunnerTestHelper.ALL_GRAN)
    .intervals(QueryRunnerTestHelper.FIRST_TO_THIRD)
    .aggregators(Lists.newArrayList(Iterables.concat(
        aggregatorFactoryList,
        Collections.singletonList(
            new FilteredAggregatorFactory(
                new CountAggregatorFactory("filteredAgg"),
                new NotDimFilter(new SelectorDimFilter(QueryRunnerTestHelper.MARKET_DIMENSION, null, null))
            )
        )
    )))
    .postAggregators(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT)
    .descending(descending)
    .context(makeContext())
    .build();
Iterable<Result<TimeseriesResultValue>> actualResults = runner.run(QueryPlus.wrap(query)).toList();
List<Result<TimeseriesResultValue>> expectedResults = Collections.singletonList(
    new Result<>(
        DateTimes.of("2011-04-01"),
        new TimeseriesResultValue(ImmutableMap.of(
            "filteredAgg", 26L,
            "addRowsIndexConstant", 12486.361190795898d,
            "index", 12459.361190795898d,
            "uniques", 9.019833517963864d,
            "rows", 26L
        ))
    )
);
assertExpectedResults(expectedResults, actualResults);
}
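The FilteredAggregatorFactory wrapper used here is how a CountAggregatorFactory is made to count only a subset of rows; the test negates a null-value selector so that every row with a non-null market value is counted. As a simpler point of comparison, a hypothetical filtered count without the negation, counting only rows whose market dimension equals a given value, would be built the same way (the "spot" value is illustrative):
// Hypothetical sketch: count only rows where the market dimension equals "spot".
// Same constructors as in the test above; the dimension value is illustrative.
AggregatorFactory spotRows = new FilteredAggregatorFactory(
    new CountAggregatorFactory("spotRows"),
    new SelectorDimFilter(QueryRunnerTestHelper.MARKET_DIMENSION, "spot", null)
);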