Use of org.apache.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
From the class TimeseriesQueryRunnerTest, method testTimeSeriesWithFilteredAggDimensionNotPresentNullValue.
@Test
public void testTimeSeriesWithFilteredAggDimensionNotPresentNullValue() {
  // Filtered count on a dimension that is not present in the data: a selector filter with a
  // null value matches every row, so "filteredAgg" equals "rows" in the expected results.
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(QueryRunnerTestHelper.ALL_GRAN)
      .intervals(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .aggregators(
          Lists.newArrayList(
              Iterables.concat(
                  aggregatorFactoryList,
                  Collections.singletonList(
                      new FilteredAggregatorFactory(
                          new CountAggregatorFactory("filteredAgg"),
                          new SelectorDimFilter("abraKaDabra", null, null)
                      )
                  )
              )
          )
      )
      .postAggregators(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT)
      .descending(descending)
      .context(makeContext())
      .build();
  Iterable<Result<TimeseriesResultValue>> actualResults = runner.run(QueryPlus.wrap(query)).toList();
  List<Result<TimeseriesResultValue>> expectedResults = Collections.singletonList(
      new Result<>(
          DateTimes.of("2011-04-01"),
          new TimeseriesResultValue(
              ImmutableMap.of(
                  "filteredAgg", 26L,
                  "addRowsIndexConstant", 12486.361190795898d,
                  "index", 12459.361190795898d,
                  "uniques", 9.019833517963864d,
                  "rows", 26L
              )
          )
      )
  );
  assertExpectedResults(expectedResults, actualResults);
}
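For reference, the filtered-aggregator piece can be built on its own. This is a minimal sketch, not taken from the test above; "missingDim" and the class name are illustrative.

import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.CountAggregatorFactory;
import org.apache.druid.query.aggregation.FilteredAggregatorFactory;
import org.apache.druid.query.filter.SelectorDimFilter;

public class FilteredCountSketch {
  // Illustrative: count only rows where "missingDim" is null. If "missingDim" does not
  // exist in the data, every row reads as null and the count equals the total row count.
  static AggregatorFactory filteredCount() {
    return new FilteredAggregatorFactory(
        new CountAggregatorFactory("filteredAgg"),
        new SelectorDimFilter("missingDim", null, null)
    );
  }
}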
Use of org.apache.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
From the class TimeseriesQueryRunnerTest, method testTimeSeriesWithFilteredAggValueNotPresent.
@Test
public void testTimeSeriesWithFilteredAggValueNotPresent() {
  // Filtered count with a negated selector on a market value that never occurs: the NOT
  // filter matches every row, so "filteredAgg" again equals "rows".
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(QueryRunnerTestHelper.ALL_GRAN)
      .intervals(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .aggregators(
          Lists.newArrayList(
              Iterables.concat(
                  aggregatorFactoryList,
                  Collections.singletonList(
                      new FilteredAggregatorFactory(
                          new CountAggregatorFactory("filteredAgg"),
                          new NotDimFilter(
                              new SelectorDimFilter(QueryRunnerTestHelper.MARKET_DIMENSION, "LolLol", null)
                          )
                      )
                  )
              )
          )
      )
      .postAggregators(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT)
      .descending(descending)
      .context(makeContext())
      .build();
  Iterable<Result<TimeseriesResultValue>> actualResults = runner.run(QueryPlus.wrap(query)).toList();
  List<Result<TimeseriesResultValue>> expectedResults = Collections.singletonList(
      new Result<>(
          DateTimes.of("2011-04-01"),
          new TimeseriesResultValue(
              ImmutableMap.of(
                  "filteredAgg", 26L,
                  "addRowsIndexConstant", 12486.361190795898d,
                  "index", 12459.361190795898d,
                  "uniques", 9.019833517963864d,
                  "rows", 26L
              )
          )
      )
  );
  assertExpectedResults(expectedResults, actualResults);
}
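The inverted variant looks like this in isolation. A minimal sketch, not from the test; "market", "LolLol", and the class name are illustrative placeholders.

import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.CountAggregatorFactory;
import org.apache.druid.query.aggregation.FilteredAggregatorFactory;
import org.apache.druid.query.filter.NotDimFilter;
import org.apache.druid.query.filter.SelectorDimFilter;

public class NotFilteredCountSketch {
  // Illustrative: count rows whose "market" value is anything other than "LolLol".
  // If no row carries that value, the negated filter matches everything.
  static AggregatorFactory notFilteredCount() {
    return new FilteredAggregatorFactory(
        new CountAggregatorFactory("filteredAgg"),
        new NotDimFilter(new SelectorDimFilter("market", "LolLol", null))
    );
  }
}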
Use of org.apache.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
From the class TopNQueryRunnerTest, method testTopNOnMissingColumnWithExtractionFn.
@Test
public void testTopNOnMissingColumnWithExtractionFn() {
  // The extraction fn turns every (null) value of the nonexistent column into the constant
  // "theValue", so all rows land in a single topN bucket.
  TopNQuery query = new TopNQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(QueryRunnerTestHelper.ALL_GRAN)
      .dimension(new ExtractionDimensionSpec("nonexistentColumn", "alias", new StringFormatExtractionFn("theValue")))
      .metric("rows")
      .threshold(4)
      .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .aggregators(new CountAggregatorFactory("rows"))
      .build();
  List<Result<TopNResultValue>> expectedResults = Collections.singletonList(
      new Result<>(
          DateTimes.of("2011-01-12T00:00:00.000Z"),
          new TopNResultValue(
              Collections.<Map<String, Object>>singletonList(
                  ImmutableMap.<String, Object>builder()
                      .put("alias", "theValue")
                      .put("rows", 1209L)
                      .build()
              )
          )
      )
  );
  assertExpectedResults(expectedResults, query);
}
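The part that makes this work is the extraction dimension spec. A minimal standalone sketch of just that piece, reusing the names from the test; the wrapper class is illustrative.

import org.apache.druid.query.dimension.DimensionSpec;
import org.apache.druid.query.dimension.ExtractionDimensionSpec;
import org.apache.druid.query.extraction.StringFormatExtractionFn;

public class ConstantDimensionSketch {
  // StringFormatExtractionFn applies String.format() to each dimension value; a format
  // string with no placeholder ("theValue") maps every input, including the nulls read
  // from a missing column, to the same constant output.
  static DimensionSpec constantDim() {
    return new ExtractionDimensionSpec("nonexistentColumn", "alias", new StringFormatExtractionFn("theValue"));
  }
}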
Use of org.apache.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
From the class TopNQueryRunnerTest, method testAggregateOnDoubleNumericNull.
@Test
public void testAggregateOnDoubleNumericNull() {
  TopNQuery query = new TopNQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(QueryRunnerTestHelper.ALL_GRAN)
      .dimension(new DefaultDimensionSpec("doubleNumericNull", "dim", ColumnType.DOUBLE))
      .metric(new DimensionTopNMetricSpec(null, StringComparators.NUMERIC))
      .threshold(10000)
      .aggregators(new CountAggregatorFactory("count"))
      .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .build();
  List<Result<TopNResultValue>> expectedResults = Collections.singletonList(
      new Result<>(
          DateTimes.of("2011-01-12T00:00:00.000Z"),
          new TopNResultValue(
              Arrays.asList(
                  makeRowWithNulls("dim", NullHandling.defaultDoubleValue(), "count", 279L),
                  makeRowWithNulls("dim", 10.0, "count", 93L),
                  makeRowWithNulls("dim", 20.0, "count", 93L),
                  makeRowWithNulls("dim", 40.0, "count", 93L),
                  makeRowWithNulls("dim", 50.0, "count", 279L),
                  makeRowWithNulls("dim", 70.0, "count", 279L),
                  makeRowWithNulls("dim", 80.0, "count", 93L)
              )
          )
      )
  );
  assertExpectedResults(expectedResults, query);
}
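Ordering a topN by the dimension value itself, rather than by an aggregator, is what DimensionTopNMetricSpec is used for above. A minimal sketch of the two relevant pieces, reusing the names from the test; the wrapper class is illustrative.

import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.dimension.DimensionSpec;
import org.apache.druid.query.ordering.StringComparators;
import org.apache.druid.query.topn.DimensionTopNMetricSpec;
import org.apache.druid.query.topn.TopNMetricSpec;
import org.apache.druid.segment.column.ColumnType;

public class NumericDimensionOrderingSketch {
  // Read "doubleNumericNull" as a DOUBLE-typed dimension named "dim" in the output rows.
  static DimensionSpec dim() {
    return new DefaultDimensionSpec("doubleNumericNull", "dim", ColumnType.DOUBLE);
  }

  // Order the topN buckets numerically by the dimension value; the null previousStop means
  // the ordering starts from the beginning rather than resuming after a prior value.
  static TopNMetricSpec ordering() {
    return new DimensionTopNMetricSpec(null, StringComparators.NUMERIC);
  }
}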
Use of org.apache.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
From the class IndexMergerV9WithSpatialIndexTest, method testSpatialQueryWithOtherSpatialDim.
@Test
public void testSpatialQueryWithOtherSpatialDim() {
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource("test")
      .granularity(Granularities.ALL)
      .intervals(Collections.singletonList(Intervals.of("2013-01-01/2013-01-07")))
      .filters(new SpatialDimFilter("spatialIsRad", new RadiusBound(new float[]{0.0f, 0.0f}, 5)))
      .aggregators(Arrays.asList(new CountAggregatorFactory("rows"), new LongSumAggregatorFactory("val", "val")))
      .build();
  List<Result<TimeseriesResultValue>> expectedResults = Collections.singletonList(
      new Result<>(
          DateTimes.of("2013-01-01T00:00:00.000Z"),
          new TimeseriesResultValue(
              ImmutableMap.<String, Object>builder()
                  .put("rows", 1L)
                  .put("val", 13L)
                  .build()
          )
      )
  );
  try {
    TimeseriesQueryRunnerFactory factory = new TimeseriesQueryRunnerFactory(
        new TimeseriesQueryQueryToolChest(),
        new TimeseriesQueryEngine(),
        QueryRunnerTestHelper.NOOP_QUERYWATCHER
    );
    QueryRunner runner = new FinalizeResultsQueryRunner(factory.createRunner(segment), factory.getToolchest());
    TestHelper.assertExpectedResults(expectedResults, runner.run(QueryPlus.wrap(query)));
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
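The spatial filter is the piece under test here: it keeps only rows whose spatial dimension falls within a radius around a point. A minimal standalone sketch, reusing the names from the test; the wrapper class is illustrative and the import paths are an assumption about where these classes live.

import org.apache.druid.collections.spatial.search.RadiusBound;
import org.apache.druid.query.filter.DimFilter;
import org.apache.druid.query.filter.SpatialDimFilter;

public class SpatialFilterSketch {
  // Match rows whose "spatialIsRad" coordinates fall within a radius of 5 around the origin.
  static DimFilter withinRadius() {
    return new SpatialDimFilter("spatialIsRad", new RadiusBound(new float[]{0.0f, 0.0f}, 5));
  }
}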