Use of org.apache.druid.query.aggregation.DoubleMinAggregatorFactory in project druid by druid-io.
The class TopNQueryTest, method testQuerySerdeWithLookupExtractionFn.
@Test
public void testQuerySerdeWithLookupExtractionFn() throws IOException
{
  final TopNQuery expectedQuery = new TopNQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(QueryRunnerTestHelper.ALL_GRAN)
      .dimension(
          new ExtractionDimensionSpec(
              QueryRunnerTestHelper.MARKET_DIMENSION,
              QueryRunnerTestHelper.MARKET_DIMENSION,
              new LookupExtractionFn(
                  new MapLookupExtractor(ImmutableMap.of("foo", "bar"), false),
                  true, null, false, false
              )
          )
      )
      .metric(new NumericTopNMetricSpec(QueryRunnerTestHelper.INDEX_METRIC))
      .threshold(2)
      .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC.getIntervals())
      .aggregators(
          Lists.newArrayList(
              Iterables.concat(
                  QueryRunnerTestHelper.COMMON_DOUBLE_AGGREGATORS,
                  Lists.newArrayList(
                      new DoubleMaxAggregatorFactory("maxIndex", "index"),
                      new DoubleMinAggregatorFactory("minIndex", "index")
                  )
              )
          )
      )
      .build();
  final String str = JSON_MAPPER.writeValueAsString(expectedQuery);
  Assert.assertEquals(expectedQuery, JSON_MAPPER.readValue(str, TopNQuery.class));
}
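The round-trip above rests on the aggregators' JSON serde. As a minimal sketch, not part of the test, assuming Druid's DefaultObjectMapper (which registers the standard aggregator subtypes) and ordinary Jackson imports:

// A minimal sketch, not from the Druid test suite. Assumes:
//   com.fasterxml.jackson.databind.ObjectMapper
//   org.apache.druid.jackson.DefaultObjectMapper
//   org.apache.druid.query.aggregation.{AggregatorFactory, DoubleMinAggregatorFactory}
@Test
public void sketchDoubleMinSerde() throws IOException
{
  ObjectMapper mapper = new DefaultObjectMapper();
  AggregatorFactory minIndex = new DoubleMinAggregatorFactory("minIndex", "index");
  String json = mapper.writeValueAsString(minIndex);
  // json is approximately: {"type":"doubleMin","name":"minIndex","fieldName":"index"}
  Assert.assertEquals(minIndex, mapper.readValue(json, AggregatorFactory.class));
}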
Use of org.apache.druid.query.aggregation.DoubleMinAggregatorFactory in project druid by druid-io.
The class FixedBucketsHistogramTopNQueryTest, method testTopNWithFixedHistogramAgg.
@Test
public void testTopNWithFixedHistogramAgg()
{
  FixedBucketsHistogramAggregatorFactory factory = new FixedBucketsHistogramAggregatorFactory(
      "histo", "index", 10, 0, 2000, FixedBucketsHistogram.OutlierHandlingMode.OVERFLOW, false
  );
  TopNQuery query = new TopNQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(QueryRunnerTestHelper.ALL_GRAN)
      .dimension(QueryRunnerTestHelper.MARKET_DIMENSION)
      .metric(QueryRunnerTestHelper.dependentPostAggMetric)
      .threshold(4)
      .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .aggregators(
          Lists.newArrayList(
              Iterables.concat(
                  QueryRunnerTestHelper.COMMON_DOUBLE_AGGREGATORS,
                  Lists.newArrayList(
                      new DoubleMaxAggregatorFactory("maxIndex", "index"),
                      new DoubleMinAggregatorFactory("minIndex", "index"),
                      factory
                  )
              )
          )
      )
      .postAggregators(
          QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT,
          QueryRunnerTestHelper.DEPENDENT_POST_AGG,
          new QuantilePostAggregator("quantile", "histo", 0.5f)
      )
      .build();
  List<Result<TopNResultValue>> expectedResults = Collections.singletonList(
      new Result<TopNResultValue>(
          DateTimes.of("2011-01-12T00:00:00.000Z"),
          new TopNResultValue(
              Arrays.<Map<String, Object>>asList(
                  ImmutableMap.<String, Object>builder()
                      .put(QueryRunnerTestHelper.MARKET_DIMENSION, "total_market")
                      .put("rows", 186L)
                      .put("index", 215679.82879638672D)
                      .put("addRowsIndexConstant", 215866.82879638672D)
                      .put(QueryRunnerTestHelper.dependentPostAggMetric, 216053.82879638672D)
                      .put("uniques", QueryRunnerTestHelper.UNIQUES_2)
                      .put("maxIndex", 1743.9217529296875D)
                      .put("minIndex", 792.3260498046875D)
                      .put("quantile", 1135.238f)
                      .put("histo", new FixedBucketsHistogram(
                          0, 2000, 10, FixedBucketsHistogram.OutlierHandlingMode.OVERFLOW,
                          new long[]{0, 0, 0, 1, 21, 105, 42, 12, 5, 0},
                          186, 1743.92175, 792.326066, 0, 0, 0
                      ).toString())
                      .build(),
                  ImmutableMap.<String, Object>builder()
                      .put(QueryRunnerTestHelper.MARKET_DIMENSION, "upfront")
                      .put("rows", 186L)
                      .put("index", 192046.1060180664D)
                      .put("addRowsIndexConstant", 192233.1060180664D)
                      .put(QueryRunnerTestHelper.dependentPostAggMetric, 192420.1060180664D)
                      .put("uniques", QueryRunnerTestHelper.UNIQUES_2)
                      .put("maxIndex", 1870.06103515625D)
                      .put("minIndex", 545.9906005859375D)
                      .put("quantile", 969.69696f)
                      .put("histo", new FixedBucketsHistogram(
                          0, 2000, 10, FixedBucketsHistogram.OutlierHandlingMode.OVERFLOW,
                          new long[]{0, 0, 4, 33, 66, 35, 25, 11, 10, 2},
                          186, 1870.061029, 545.990623, 0, 0, 0
                      ).toString())
                      .build(),
                  ImmutableMap.<String, Object>builder()
                      .put(QueryRunnerTestHelper.MARKET_DIMENSION, "spot")
                      .put("rows", 837L)
                      .put("index", 95606.57232284546D)
                      .put("addRowsIndexConstant", 96444.57232284546D)
                      .put(QueryRunnerTestHelper.dependentPostAggMetric, 97282.57232284546D)
                      .put("uniques", QueryRunnerTestHelper.UNIQUES_9)
                      .put("maxIndex", 277.2735290527344D)
                      .put("minIndex", 59.02102279663086D)
                      .put("quantile", 100.23952f)
                      .put("histo", new FixedBucketsHistogram(
                          0, 2000, 10, FixedBucketsHistogram.OutlierHandlingMode.OVERFLOW,
                          new long[]{835, 2, 0, 0, 0, 0, 0, 0, 0, 0},
                          837, 277.273533, 59.021022, 0, 0, 0
                      ).toString())
                      .build()
              )
          )
      )
  );
  List<Result<TopNResultValue>> results = runner.run(QueryPlus.wrap(query)).toList();
  TestHelper.assertExpectedResults(expectedResults, results);
}
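A note on the expected bucket counts: with lowerLimit 0, upperLimit 2000, and 10 buckets, each bucket spans 200 units, which is why the "spot" rows (index roughly between 59 and 277) land almost entirely in the first bucket. A hypothetical illustration of that arithmetic (bucketIndexOf is not a FixedBucketsHistogram method, just the rule the counts imply):

// Hypothetical helper showing the fixed-bucket assignment rule.
static int bucketIndexOf(double value, double lowerLimit, double upperLimit, int numBuckets)
{
  double bucketWidth = (upperLimit - lowerLimit) / numBuckets; // 200.0 for the query above
  return (int) ((value - lowerLimit) / bucketWidth);
}
// bucketIndexOf(59.021022, 0, 2000, 10)  -> 0
// bucketIndexOf(277.273533, 0, 2000, 10) -> 1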
Use of org.apache.druid.query.aggregation.DoubleMinAggregatorFactory in project druid by druid-io.
The class GroupByTimeseriesQueryRunnerTest, method testFullOnTimeseriesMaxMin.
// GroupBy handles timestamps differently when granularity is ALL
@Override
@Test
public void testFullOnTimeseriesMaxMin()
{
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(Granularities.ALL)
      .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .aggregators(
          new DoubleMaxAggregatorFactory("maxIndex", "index"),
          new DoubleMinAggregatorFactory("minIndex", "index")
      )
      .descending(descending)
      .build();
  DateTime expectedEarliest = DateTimes.of("1970-01-01");
  DateTime expectedLast = DateTimes.of("2011-04-15");
  Iterable<Result<TimeseriesResultValue>> results = runner.run(QueryPlus.wrap(query)).toList();
  Result<TimeseriesResultValue> result = results.iterator().next();
  Assert.assertEquals(expectedEarliest, result.getTimestamp());
  Assert.assertFalse(
      StringUtils.format("Timestamp[%s] > expectedLast[%s]", result.getTimestamp(), expectedLast),
      result.getTimestamp().isAfter(expectedLast)
  );
  final TimeseriesResultValue value = result.getValue();
  Assert.assertEquals(result.toString(), 1870.061029, value.getDoubleMetric("maxIndex"), 1870.061029 * 1e-6);
  Assert.assertEquals(result.toString(), 59.021022, value.getDoubleMetric("minIndex"), 59.021022 * 1e-6);
}
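The deltas passed to assertEquals above (expected * 1e-6) implement a relative rather than absolute tolerance: the comparison allows a drift of one part per million of the expected value. A hypothetical helper, not part of Druid's TestHelper, that makes the intent explicit:

// Hypothetical helper: asserts equality within one part per million of the expected value.
static void assertRelativelyEqual(String message, double expected, double actual)
{
  Assert.assertEquals(message, expected, actual, Math.abs(expected) * 1e-6);
}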
Use of org.apache.druid.query.aggregation.DoubleMinAggregatorFactory in project druid by druid-io.
The class SchemalessTestFullTest, method testFilteredTimeseries.
private void testFilteredTimeseries(QueryRunner runner, List<Result<TimeseriesResultValue>> expectedResults, String failMsg)
{
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource(dataSource)
      .granularity(ALL_GRAN)
      .intervals(fullOnInterval)
      .filters(marketDimension, "spot")
      .aggregators(
          Lists.newArrayList(
              Iterables.concat(
                  commonAggregators,
                  Lists.newArrayList(
                      new DoubleMaxAggregatorFactory("maxIndex", "index"),
                      new DoubleMinAggregatorFactory("minIndex", "index")
                  )
              )
          )
      )
      .postAggregators(addRowsIndexConstant)
      .build();
  failMsg += " filtered timeseries ";
  Iterable<Result<TimeseriesResultValue>> actualResults = runner.run(QueryPlus.wrap(query)).toList();
  TestHelper.assertExpectedResults(expectedResults, actualResults, failMsg);
}
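The two-argument filters(dimension, value) call above is builder shorthand for a selector filter. As a sketch, assuming org.apache.druid.query.filter.SelectorDimFilter with its (dimension, value, extractionFn) constructor, the explicit equivalent would look like:

// Sketch of the same filter spelled out explicitly; the two-argument
// builder overload expands to a selector filter of this shape.
TimeseriesQuery explicitFilterQuery = Druids.newTimeseriesQueryBuilder()
    .dataSource(dataSource)
    .granularity(ALL_GRAN)
    .intervals(fullOnInterval)
    .filters(new SelectorDimFilter(marketDimension, "spot", null))
    .aggregators(Lists.newArrayList(new DoubleMinAggregatorFactory("minIndex", "index")))
    .build();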
Use of org.apache.druid.query.aggregation.DoubleMinAggregatorFactory in project druid by druid-io.
The class SchemalessTestFullTest, method testFilteredTopN.
/**
* See {@link #runTests}
*/
@SuppressWarnings("unused")
private void testFilteredTopN(QueryRunner runner, List<Result<TopNResultValue>> expectedResults, String failMsg)
{
  TopNQuery query = new TopNQueryBuilder()
      .dataSource(dataSource)
      .granularity(ALL_GRAN)
      .dimension(marketDimension)
      .filters(marketDimension, "spot")
      .metric(indexMetric)
      .threshold(3)
      .intervals(fullOnInterval)
      .aggregators(
          Lists.newArrayList(
              Iterables.concat(
                  commonAggregators,
                  Lists.newArrayList(
                      new DoubleMaxAggregatorFactory("maxIndex", "index"),
                      new DoubleMinAggregatorFactory("minIndex", "index")
                  )
              )
          )
      )
      .postAggregators(addRowsIndexConstant)
      .build();
  failMsg += " filtered topN ";
  Iterable<Result<TopNResultValue>> actualResults = runner.run(QueryPlus.wrap(query)).toList();
  TestHelper.assertExpectedResults(expectedResults, actualResults, failMsg);
}
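Across all of these tests the min/max pair behaves the same way: DoubleMinAggregatorFactory scans the "index" column, and when partial results from different segments are merged, the partial mins are combined by taking the smaller value. A minimal sketch of that merge step, assuming the classic Object-based AggregatorFactory.combine method:

// A minimal sketch, assuming AggregatorFactory.combine(Object, Object),
// which for doubleMin reduces to Math.min of the two partial aggregates.
AggregatorFactory minIndex = new DoubleMinAggregatorFactory("minIndex", "index");
Object merged = minIndex.combine(100.0D, 59.021022D);
// merged is 59.021022, the smaller of the two partials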