Example usage of org.apache.druid.query.Result in the druid-io/druid project,
taken from class TimeseriesQueryRunnerTest, method testTimeseriesGranularityNotAlignedOnSegmentBoundariesWithFilter.
@Test
public void testTimeseriesGranularityNotAlignedOnSegmentBoundariesWithFilter() {
  // Weekly (P7D) buckets anchored in America/Los_Angeles deliberately do not line
  // up with segment boundaries; the filter limits rows to the three market values.
  final TimeseriesQuery weeklyQuery = Druids.newTimeseriesQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .filters(QueryRunnerTestHelper.MARKET_DIMENSION, "spot", "upfront", "total_market")
      .granularity(new PeriodGranularity(
          new Period("P7D"),
          null,
          DateTimes.inferTzFromString("America/Los_Angeles")))
      .intervals(Collections.singletonList(
          Intervals.of("2011-01-12T00:00:00.000-08:00/2011-01-20T00:00:00.000-08:00")))
      .aggregators(Arrays.asList(
          QueryRunnerTestHelper.ROWS_COUNT,
          new LongSumAggregatorFactory("idx", "index")))
      .descending(descending)
      .context(makeContext())
      .build();

  // Two weekly buckets are expected: a partial first week and a full second week.
  final List<Result<TimeseriesResultValue>> expected = Arrays.asList(
      new Result<>(
          new DateTime("2011-01-06T00:00:00.000-08:00", DateTimes.inferTzFromString("America/Los_Angeles")),
          new TimeseriesResultValue(ImmutableMap.of("rows", 13L, "idx", 6071L))),
      new Result<>(
          new DateTime("2011-01-13T00:00:00.000-08:00", DateTimes.inferTzFromString("America/Los_Angeles")),
          new TimeseriesResultValue(ImmutableMap.of("rows", 91L, "idx", 33382L))));

  final Iterable<Result<TimeseriesResultValue>> actual =
      runner.run(QueryPlus.wrap(weeklyQuery)).toList();
  assertExpectedResults(expected, actual);
}
Example usage of org.apache.druid.query.Result in the druid-io/druid project,
taken from class TimeseriesQueryRunnerTest, method testTimeseriesWithNonExistentFilterAndMultiDim.
@Test
public void testTimeseriesWithNonExistentFilterAndMultiDim() {
  // AND of two selectors where "billy" matches no market row, so every
  // day-grain bucket in the interval must come back empty.
  final AndDimFilter noMatchFilter = new AndDimFilter(
      new SelectorDimFilter(QueryRunnerTestHelper.MARKET_DIMENSION, "billy", null),
      new SelectorDimFilter(QueryRunnerTestHelper.QUALITY_DIMENSION, "business", null));

  final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(QueryRunnerTestHelper.DAY_GRAN)
      .filters(noMatchFilter)
      .intervals(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .aggregators(aggregatorFactoryList)
      .postAggregators(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT)
      .descending(descending)
      .context(makeContext())
      .build();

  // Per-bucket values for an empty match; "index" and "addRowsIndexConstant"
  // depend on the active null-handling mode.
  final Map<String, Object> emptyBucket = new HashMap<>();
  emptyBucket.put("rows", 0L);
  emptyBucket.put("index", NullHandling.defaultDoubleValue());
  emptyBucket.put("addRowsIndexConstant", NullHandling.replaceWithDefault() ? 1.0 : null);
  emptyBucket.put("uniques", 0.0);

  final List<Result<TimeseriesResultValue>> expected = Arrays.asList(
      new Result<>(DateTimes.of("2011-04-01"), new TimeseriesResultValue(emptyBucket)),
      new Result<>(DateTimes.of("2011-04-02"), new TimeseriesResultValue(emptyBucket)));

  final Iterable<Result<TimeseriesResultValue>> actual =
      runner.run(QueryPlus.wrap(query)).toList();
  assertExpectedResults(expected, actual);
}
Example usage of org.apache.druid.query.Result in the druid-io/druid project,
taken from class TimeseriesQueryRunnerTest, method testTimeSeriesWithFilteredAggDimensionNotPresentNullValue.
@Test
public void testTimeSeriesWithFilteredAggDimensionNotPresentNullValue() {
  // A FilteredAggregatorFactory whose selector targets a dimension that does not
  // exist ("abraKaDabra") with a null value; such a filter matches every row, so
  // "filteredAgg" should equal the total row count.
  final FilteredAggregatorFactory countMissingDim = new FilteredAggregatorFactory(
      new CountAggregatorFactory("filteredAgg"),
      new SelectorDimFilter("abraKaDabra", null, null));

  final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(QueryRunnerTestHelper.ALL_GRAN)
      .intervals(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .aggregators(Lists.newArrayList(Iterables.concat(
          aggregatorFactoryList,
          Collections.singletonList(countMissingDim))))
      .postAggregators(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT)
      .descending(descending)
      .context(makeContext())
      .build();

  final Iterable<Result<TimeseriesResultValue>> actual =
      runner.run(QueryPlus.wrap(query)).toList();

  final List<Result<TimeseriesResultValue>> expected = Collections.singletonList(
      new Result<>(
          DateTimes.of("2011-04-01"),
          new TimeseriesResultValue(ImmutableMap.of(
              "filteredAgg", 26L,
              "addRowsIndexConstant", 12486.361190795898d,
              "index", 12459.361190795898d,
              "uniques", 9.019833517963864d,
              "rows", 26L))));

  assertExpectedResults(expected, actual);
}
Example usage of org.apache.druid.query.Result in the druid-io/druid project,
taken from class TimeseriesQueryRunnerTest, method testTimeseriesQueryZeroFilling.
@Test
public void testTimeseriesQueryZeroFilling() {
  // Hour-grain query over a long interval in which only two hours contain data;
  // verifies the engine zero-fills every empty hourly bucket between them.
  TimeseriesQuery query1 = Druids.newTimeseriesQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .filters(QueryRunnerTestHelper.MARKET_DIMENSION, "spot", "upfront", "total_market")
      .granularity(Granularities.HOUR)
      .intervals(Collections.singletonList(
          Intervals.of("2011-04-14T00:00:00.000Z/2011-05-01T00:00:00.000Z")))
      .aggregators(Arrays.asList(
          QueryRunnerTestHelper.ROWS_COUNT,
          new LongSumAggregatorFactory("idx", "index")))
      .descending(descending)
      .context(makeContext())
      .build();

  List<Result<TimeseriesResultValue>> lotsOfZeroes = new ArrayList<>();
  final Iterable<Interval> iterable = Granularities.HOUR.getIterable(
      new Interval(DateTimes.of("2011-04-14T01"), DateTimes.of("2011-04-15")));
  // FIX: was a raw `Map`, which compiles only with an unchecked warning and loses
  // type safety; use the parameterized type the surrounding code expects.
  // "idx" depends on the active null-handling mode (0 vs null for empty buckets).
  Map<String, Object> noRowsResult = new HashMap<>();
  noRowsResult.put("rows", 0L);
  noRowsResult.put("idx", NullHandling.defaultLongValue());
  for (Interval interval : iterable) {
    lotsOfZeroes.add(new Result<>(interval.getStart(), new TimeseriesResultValue(noRowsResult)));
  }

  // Expected: one populated bucket, the run of zero-filled hours, then the
  // second populated bucket.
  List<Result<TimeseriesResultValue>> expectedResults1 = Lists.newArrayList(Iterables.concat(
      Collections.singletonList(new Result<>(
          DateTimes.of("2011-04-14T00"),
          new TimeseriesResultValue(ImmutableMap.of("rows", 13L, "idx", 4907L)))),
      lotsOfZeroes,
      Collections.singletonList(new Result<>(
          DateTimes.of("2011-04-15T00"),
          new TimeseriesResultValue(ImmutableMap.of("rows", 13L, "idx", 4717L))))));

  Iterable<Result<TimeseriesResultValue>> results1 = runner.run(QueryPlus.wrap(query1)).toList();
  assertExpectedResults(expectedResults1, results1);
}
Example usage of org.apache.druid.query.Result in the druid-io/druid project,
taken from class TimeseriesQueryRunnerTest, method testTimeseriesWithMultiValueDimFilterAndOr1.
@Test
public void testTimeseriesWithMultiValueDimFilterAndOr1() {
  // Filtering the multi-value "placementish" dimension on "a" should produce the
  // same rows as filtering "quality" on "automotive"; the two queries are run and
  // their results compared against each other.
  final AndDimFilter spotAndPlacementish = new AndDimFilter(
      new SelectorDimFilter(QueryRunnerTestHelper.MARKET_DIMENSION, "spot", null),
      new SelectorDimFilter(QueryRunnerTestHelper.PLACEMENTISH_DIMENSION, "a", null));
  final TimeseriesQuery multiValueQuery = Druids.newTimeseriesQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(QueryRunnerTestHelper.DAY_GRAN)
      .filters(spotAndPlacementish)
      .intervals(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .aggregators(aggregatorFactoryList)
      .postAggregators(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT)
      .descending(descending)
      .context(makeContext())
      .build();

  final AndDimFilter spotAndAutomotive = new AndDimFilter(
      new SelectorDimFilter(QueryRunnerTestHelper.MARKET_DIMENSION, "spot", null),
      new SelectorDimFilter(QueryRunnerTestHelper.QUALITY_DIMENSION, "automotive", null));
  final TimeseriesQuery singleValueQuery = Druids.newTimeseriesQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(QueryRunnerTestHelper.DAY_GRAN)
      .filters(spotAndAutomotive)
      .intervals(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .aggregators(aggregatorFactoryList)
      .postAggregators(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT)
      .descending(descending)
      .context(makeContext())
      .build();

  // Same run order as before: single-value query first (expected), then the
  // multi-value query (actual).
  final Iterable<Result<TimeseriesResultValue>> expected =
      runner.run(QueryPlus.wrap(singleValueQuery)).toList();
  final Iterable<Result<TimeseriesResultValue>> actual =
      runner.run(QueryPlus.wrap(multiValueQuery)).toList();
  TestHelper.assertExpectedResults(expected, actual);
}
Aggregations