Use of io.druid.query.aggregation.DoubleMaxAggregatorFactory in project druid by druid-io.
From the class TopNQueryRunnerTest, method testTopNWithExtractionFilterNoExistingValue.
@Test
public void testTopNWithExtractionFilterNoExistingValue() {
  Map<String, String> extractionMap = new HashMap<>();
  extractionMap.put("", "NULL");
  MapLookupExtractor mapLookupExtractor = new MapLookupExtractor(extractionMap, false);
  LookupExtractionFn lookupExtractionFn = new LookupExtractionFn(mapLookupExtractor, false, null, true, true);
  DimFilter extractionFilter = new ExtractionDimFilter("null_column", "NULL", lookupExtractionFn, null);
  TopNQueryBuilder topNQueryBuilder = new TopNQueryBuilder()
      .dataSource(QueryRunnerTestHelper.dataSource)
      .granularity(QueryRunnerTestHelper.allGran)
      .dimension("null_column")
      .metric(QueryRunnerTestHelper.indexMetric)
      .threshold(4)
      .intervals(QueryRunnerTestHelper.fullOnInterval)
      .aggregators(Lists.newArrayList(Iterables.concat(
          QueryRunnerTestHelper.commonAggregators,
          Lists.newArrayList(
              new FilteredAggregatorFactory(new DoubleMaxAggregatorFactory("maxIndex", "index"), extractionFilter),
              //new DoubleMaxAggregatorFactory("maxIndex", "index"),
              new DoubleMinAggregatorFactory("minIndex", "index")))))
      .postAggregators(Arrays.<PostAggregator>asList(QueryRunnerTestHelper.addRowsIndexConstant));
  TopNQuery topNQueryWithNULLValueExtraction = topNQueryBuilder.filters(extractionFilter).build();
  Map<String, Object> map = Maps.newHashMap();
  map.put("null_column", null);
  map.put("rows", 1209L);
  map.put("index", 503332.5071372986D);
  map.put("addRowsIndexConstant", 504542.5071372986D);
  map.put("uniques", QueryRunnerTestHelper.UNIQUES_9);
  map.put("maxIndex", 1870.06103515625D);
  map.put("minIndex", 59.02102279663086D);
  List<Result<TopNResultValue>> expectedResults = Arrays.asList(
      new Result<>(new DateTime("2011-01-12T00:00:00.000Z"), new TopNResultValue(Arrays.asList(map))));
  assertExpectedResults(expectedResults, topNQueryWithNULLValueExtraction);
  // Assert the optimization path as well
  final Sequence<Result<TopNResultValue>> retval = runWithPreMergeAndMerge(topNQueryWithNULLValueExtraction);
  TestHelper.assertExpectedResults(expectedResults, retval);
}
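The construct worth noting above is the FilteredAggregatorFactory wrapper: the doubleMax aggregation only sees rows matching the wrapped filter, so "maxIndex" is computed over the filtered subset rather than the whole datasource. A minimal sketch of the same wrapper in isolation; the selector filter on market = "spot" is an illustrative stand-in for the lookup-based extraction filter the test wires in:

import io.druid.query.aggregation.AggregatorFactory;
import io.druid.query.aggregation.DoubleMaxAggregatorFactory;
import io.druid.query.aggregation.FilteredAggregatorFactory;
import io.druid.query.filter.SelectorDimFilter;

class FilteredMaxExample {
  // A doubleMax over "index" that only considers rows where market = "spot".
  // The dimension and value here are illustrative, not from the test above.
  static AggregatorFactory filteredMax() {
    return new FilteredAggregatorFactory(
        new DoubleMaxAggregatorFactory("maxIndex", "index"),
        new SelectorDimFilter("market", "spot", null));
  }
}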
Use of io.druid.query.aggregation.DoubleMaxAggregatorFactory in project druid by druid-io.
From the class TopNQueryRunnerTest, method testFullOnTopNNumericStringColumnWithDecoration.
@Test
public void testFullOnTopNNumericStringColumnWithDecoration() {
  ListFilteredDimensionSpec filteredSpec = new ListFilteredDimensionSpec(
      new DefaultDimensionSpec("qualityNumericString", "qns_alias", ValueType.LONG),
      Sets.newHashSet("120000", "140000", "160000"),
      true);
  TopNQuery query = new TopNQueryBuilder()
      .dataSource(QueryRunnerTestHelper.dataSource)
      .granularity(QueryRunnerTestHelper.allGran)
      .dimension(filteredSpec)
      .metric("maxIndex")
      .threshold(4)
      .intervals(QueryRunnerTestHelper.fullOnInterval)
      .aggregators(Lists.<AggregatorFactory>newArrayList(Iterables.concat(
          QueryRunnerTestHelper.commonAggregators,
          Lists.newArrayList(
              new DoubleMaxAggregatorFactory("maxIndex", "index"),
              new DoubleMinAggregatorFactory("minIndex", "index")))))
      .postAggregators(Arrays.<PostAggregator>asList(QueryRunnerTestHelper.addRowsIndexConstant))
      .build();
  List<Result<TopNResultValue>> expectedResults = Arrays.asList(
      new Result<TopNResultValue>(
          new DateTime("2011-01-12T00:00:00.000Z"),
          new TopNResultValue(Arrays.<Map<String, Object>>asList(
              ImmutableMap.<String, Object>builder()
                  .put("qns_alias", 140000L)
                  .put(QueryRunnerTestHelper.indexMetric, 217725.42022705078D)
                  .put("rows", 279L)
                  .put("addRowsIndexConstant", 218005.42022705078D)
                  .put("uniques", QueryRunnerTestHelper.UNIQUES_1)
                  .put("maxIndex", 1870.06103515625D)
                  .put("minIndex", 91.27055358886719D)
                  .build(),
              ImmutableMap.<String, Object>builder()
                  .put("qns_alias", 160000L)
                  .put(QueryRunnerTestHelper.indexMetric, 210865.67966461182D)
                  .put("rows", 279L)
                  .put("addRowsIndexConstant", 211145.67966461182D)
                  .put("uniques", QueryRunnerTestHelper.UNIQUES_1)
                  .put("maxIndex", 1862.7379150390625D)
                  .put("minIndex", 99.2845230102539D)
                  .build(),
              ImmutableMap.<String, Object>builder()
                  .put("qns_alias", 120000L)
                  .put(QueryRunnerTestHelper.indexMetric, 12086.472755432129D)
                  .put("rows", 93L)
                  .put("addRowsIndexConstant", 12180.472755432129D)
                  .put("uniques", QueryRunnerTestHelper.UNIQUES_1)
                  .put("maxIndex", 193.78756713867188D)
                  .put("minIndex", 84.71052551269531D)
                  .build()))));
  assertExpectedResults(expectedResults, query);
}
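The decoration is what this test exercises: ListFilteredDimensionSpec whitelists three values of the dimension before the TopN ranks anything, and the ValueType.LONG output type is why the expected "qns_alias" values come back as longs such as 140000L. A minimal sketch of the decorated spec on its own, reusing the names from the test:

import com.google.common.collect.Sets;
import io.druid.query.dimension.DefaultDimensionSpec;
import io.druid.query.dimension.DimensionSpec;
import io.druid.query.dimension.ListFilteredDimensionSpec;
import io.druid.segment.column.ValueType;

class WhitelistedDimensionExample {
  // Keep only three values of "qualityNumericString" (the final `true` means
  // the set is a whitelist) and expose the column as a LONG named "qns_alias".
  static DimensionSpec decorated() {
    return new ListFilteredDimensionSpec(
        new DefaultDimensionSpec("qualityNumericString", "qns_alias", ValueType.LONG),
        Sets.newHashSet("120000", "140000", "160000"),
        true);
  }
}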
Use of io.druid.query.aggregation.DoubleMaxAggregatorFactory in project druid by druid-io.
From the class TopNQueryTest, method testQuerySerialization.
@Test
public void testQuerySerialization() throws IOException {
  Query query = new TopNQueryBuilder()
      .dataSource(dataSource)
      .granularity(allGran)
      .dimension(marketDimension)
      .metric(indexMetric)
      .threshold(4)
      .intervals(fullOnInterval)
      .aggregators(Lists.<AggregatorFactory>newArrayList(Iterables.concat(
          commonAggregators,
          Lists.newArrayList(
              new DoubleMaxAggregatorFactory("maxIndex", "index"),
              new DoubleMinAggregatorFactory("minIndex", "index")))))
      .postAggregators(Arrays.<PostAggregator>asList(addRowsIndexConstant))
      .build();
  String json = jsonMapper.writeValueAsString(query);
  Query serdeQuery = jsonMapper.readValue(json, Query.class);
  Assert.assertEquals(query, serdeQuery);
}
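The round trip works because aggregator factories are polymorphically Jackson-serializable, so a whole query (including its DoubleMaxAggregatorFactory) survives writeValueAsString/readValue. A smaller sketch of the same idea for a single factory; io.druid.jackson.DefaultObjectMapper and the JSON shape in the comment are assumptions based on Druid's documented doubleMax aggregator format, not output captured from this test:

import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.IOException;
import io.druid.jackson.DefaultObjectMapper;
import io.druid.query.aggregation.AggregatorFactory;
import io.druid.query.aggregation.DoubleMaxAggregatorFactory;

class AggregatorSerdeExample {
  static void roundTrip() throws IOException {
    ObjectMapper mapper = new DefaultObjectMapper();
    AggregatorFactory maxIndex = new DoubleMaxAggregatorFactory("maxIndex", "index");
    // Expected shape (assumed): {"type":"doubleMax","name":"maxIndex","fieldName":"index"}
    String json = mapper.writeValueAsString(maxIndex);
    AggregatorFactory serde = mapper.readValue(json, AggregatorFactory.class);
    assert maxIndex.equals(serde);
  }
}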
Use of io.druid.query.aggregation.DoubleMaxAggregatorFactory in project druid by druid-io.
From the class AppendTest, method testRowFiltering.
@Test
public void testRowFiltering() {
  List<Result<TimeseriesResultValue>> expectedResults = Arrays.asList(
      new Result<TimeseriesResultValue>(
          new DateTime("2011-01-12T00:00:00.000Z"),
          new TimeseriesResultValue(ImmutableMap.<String, Object>builder()
              .put("rows", 5L)
              .put("index", 500.0D)
              .put("addRowsIndexConstant", 506.0D)
              .put("uniques", 0.0D)
              .put("maxIndex", 100.0D)
              .put("minIndex", 100.0D)
              .build())));
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource(dataSource)
      .granularity(allGran)
      .intervals(fullOnInterval)
      .filters(marketDimension, "breakstuff")
      .aggregators(Lists.<AggregatorFactory>newArrayList(Iterables.concat(
          commonAggregators,
          Lists.newArrayList(
              new DoubleMaxAggregatorFactory("maxIndex", "index"),
              new DoubleMinAggregatorFactory("minIndex", "index")))))
      .postAggregators(Arrays.<PostAggregator>asList(addRowsIndexConstant))
      .build();
  QueryRunner runner = TestQueryRunners.makeTimeSeriesQueryRunner(segment3);
  HashMap<String, Object> context = new HashMap<String, Object>();
  TestHelper.assertExpectedResults(expectedResults, runner.run(query, context));
}
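The two-argument filters(marketDimension, "breakstuff") call is builder shorthand for a selector filter on that dimension. A sketch of an equivalent query with the filter written out explicitly; the "testing" datasource, the interval string, and the bare "market" dimension name are illustrative placeholders, not values taken from this test:

import java.util.Arrays;
import io.druid.java.util.common.granularity.Granularities;
import io.druid.query.Druids;
import io.druid.query.aggregation.AggregatorFactory;
import io.druid.query.aggregation.DoubleMaxAggregatorFactory;
import io.druid.query.filter.SelectorDimFilter;
import io.druid.query.timeseries.TimeseriesQuery;

class ExplicitFilterExample {
  // Same effect as .filters(marketDimension, "breakstuff") above, but with the
  // selector DimFilter constructed by hand.
  static TimeseriesQuery query() {
    return Druids.newTimeseriesQueryBuilder()
        .dataSource("testing")
        .granularity(Granularities.ALL)
        .intervals("2011-01-12/2011-04-16")
        .filters(new SelectorDimFilter("market", "breakstuff", null))
        .aggregators(Arrays.<AggregatorFactory>asList(new DoubleMaxAggregatorFactory("maxIndex", "index")))
        .build();
  }
}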
Use of io.druid.query.aggregation.DoubleMaxAggregatorFactory in project druid by druid-io.
From the class TimeseriesQueryRunnerTest, method testFullOnTimeseriesMaxMin.
@Test
public void testFullOnTimeseriesMaxMin() {
  TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource(QueryRunnerTestHelper.dataSource)
      .granularity(Granularities.ALL)
      .intervals(QueryRunnerTestHelper.fullOnInterval)
      .aggregators(Arrays.asList(
          new DoubleMaxAggregatorFactory("maxIndex", "index"),
          new DoubleMinAggregatorFactory("minIndex", "index")))
      .descending(descending)
      .build();
  DateTime expectedEarliest = new DateTime("2011-01-12");
  DateTime expectedLast = new DateTime("2011-04-15");
  Iterable<Result<TimeseriesResultValue>> results = Sequences.toList(
      runner.run(query, CONTEXT),
      Lists.<Result<TimeseriesResultValue>>newArrayList());
  Result<TimeseriesResultValue> result = results.iterator().next();
  Assert.assertEquals(expectedEarliest, result.getTimestamp());
  Assert.assertFalse(
      String.format("Timestamp[%s] > expectedLast[%s]", result.getTimestamp(), expectedLast),
      result.getTimestamp().isAfter(expectedLast));
  final TimeseriesResultValue value = result.getValue();
  Assert.assertEquals(result.toString(), 1870.06103515625, value.getDoubleMetric("maxIndex"), 0.0);
  Assert.assertEquals(result.toString(), 59.02102279663086, value.getDoubleMetric("minIndex"), 0.0);
}
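getDoubleMetric is a small convenience on TimeseriesResultValue, which wraps the row's event map and returns the looked-up number as a double. A minimal sketch of that behavior in isolation, assuming the map-backed constructor that Druid's test code relies on:

import com.google.common.collect.ImmutableMap;
import io.druid.query.timeseries.TimeseriesResultValue;

class ResultValueExample {
  // TimeseriesResultValue is a thin wrapper over the event map; getDoubleMetric
  // looks up the key and converts the stored number to a double.
  static double readBack() {
    TimeseriesResultValue value = new TimeseriesResultValue(
        ImmutableMap.<String, Object>of("maxIndex", 1870.06103515625D));
    return value.getDoubleMetric("maxIndex");
  }
}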