Example usage of org.apache.druid.query.timeseries.TimeseriesQuery from the druid-io/druid project: class SchemaEvolutionTest, method testHyperUniqueEvolutionTimeseries.
@Test
@Parameters(method = "doVectorize")
public void testHyperUniqueEvolutionTimeseries(boolean doVectorize) {
  // Timeseries query over the whole interval aggregating the "uniques" HLL column,
  // optionally exercising the vectorized engine.
  final TimeseriesQueryRunnerFactory runnerFactory =
      QueryRunnerTestHelper.newTimeseriesQueryRunnerFactory();
  final TimeseriesQuery uniquesQuery = Druids
      .newTimeseriesQueryBuilder()
      .dataSource(DATA_SOURCE)
      .intervals("1000/3000")
      .aggregators(ImmutableList.of(new HyperUniquesAggregatorFactory("uniques", "uniques")))
      .context(ImmutableMap.of(QueryContexts.VECTORIZE_KEY, doVectorize))
      .build();

  // index1 has no "uniques" column
  Assert.assertEquals(
      timeseriesResult(ImmutableMap.of("uniques", 0d)),
      runQuery(uniquesQuery, runnerFactory, ImmutableList.of(index1))
  );

  // index1 (no uniques) + index2 and index3 (yes uniques); we should be able to combine
  Assert.assertEquals(
      timeseriesResult(ImmutableMap.of("uniques", 4.003911343725148d)),
      runQuery(uniquesQuery, runnerFactory, ImmutableList.of(index1, index2, index3))
  );
}
Example usage of org.apache.druid.query.timeseries.TimeseriesQuery from the druid-io/druid project: class SchemaEvolutionTest, method testNumericEvolutionFiltering.
@Test
@Parameters(method = "doVectorize")
public void testNumericEvolutionFiltering(boolean doVectorize) {
  final TimeseriesQueryRunnerFactory runnerFactory =
      QueryRunnerTestHelper.newTimeseriesQueryRunnerFactory();

  // "c1" changes from string(1) -> long(2) -> float(3) -> nonexistent(4)
  // test behavior of filtering
  final TimeseriesQuery filteredQuery = Druids
      .newTimeseriesQueryBuilder()
      .dataSource(DATA_SOURCE)
      .intervals("1000/3000")
      .filters(new BoundDimFilter("c1", "9", "11", false, false, null, null, StringComparators.NUMERIC))
      .aggregators(
          ImmutableList.of(
              new LongSumAggregatorFactory("a", "c1"),
              new DoubleSumAggregatorFactory("b", "c1"),
              new FloatSumAggregatorFactory("d", "c1"),
              new LongMinAggregatorFactory("e", "c1"),
              new CountAggregatorFactory("c")
          )
      )
      .context(ImmutableMap.of(QueryContexts.VECTORIZE_KEY, doVectorize))
      .build();

  // Only string(1) -- which we can filter but not aggregate
  Assert.assertEquals(
      timeseriesResult(ImmutableMap.of("a", 19L, "b", 19.1, "c", 2L, "d", 19.1f, "e", 9L)),
      runQuery(filteredQuery, runnerFactory, ImmutableList.of(index1))
  );

  // Only long(2) -- which we can filter and aggregate
  Assert.assertEquals(
      timeseriesResult(ImmutableMap.of("a", 19L, "b", 19.0, "c", 2L, "d", 19.0f, "e", 9L)),
      runQuery(filteredQuery, runnerFactory, ImmutableList.of(index2))
  );

  // Only float(3) -- which we can't filter, but can aggregate
  Assert.assertEquals(
      timeseriesResult(ImmutableMap.of("a", 19L, "b", 19.1, "c", 2L, "d", 19.1f, "e", 9L)),
      runQuery(filteredQuery, runnerFactory, ImmutableList.of(index3))
  );

  // Only nonexistent(4)
  Assert.assertEquals(
      timeseriesResult(
          TestHelper.createExpectedMap(
              "a", NullHandling.defaultLongValue(),
              "b", NullHandling.defaultDoubleValue(),
              "c", 0L,
              "d", NullHandling.defaultFloatValue(),
              "e", NullHandling.sqlCompatible() ? null : Long.MAX_VALUE
          )
      ),
      runQuery(filteredQuery, runnerFactory, ImmutableList.of(index4))
  );

  // string(1) + long(2) + float(3) + nonexistent(4)
  Assert.assertEquals(
      timeseriesResult(ImmutableMap.of("a", 57L, "b", 57.2, "c", 6L, "d", 57.20000076293945, "e", 9L)),
      runQuery(filteredQuery, runnerFactory, ImmutableList.of(index1, index2, index3, index4))
  );
}
Example usage of org.apache.druid.query.timeseries.TimeseriesQuery from the druid-io/druid project: class SchemaEvolutionTest, method testNumericEvolutionTimeseriesAggregation.
@Test
@Parameters(method = "doVectorize")
public void testNumericEvolutionTimeseriesAggregation(boolean doVectorize) {
  final TimeseriesQueryRunnerFactory runnerFactory =
      QueryRunnerTestHelper.newTimeseriesQueryRunnerFactory();

  // "c1" changes from string(1) -> long(2) -> float(3) -> nonexistent(4)
  // test behavior of longSum/doubleSum with/without expressions
  final TimeseriesQuery sumQuery = Druids
      .newTimeseriesQueryBuilder()
      .dataSource(DATA_SOURCE)
      .intervals("1000/3000")
      .aggregators(
          ImmutableList.of(
              new LongSumAggregatorFactory("a", "c1"),
              new DoubleSumAggregatorFactory("b", "c1"),
              new LongSumAggregatorFactory("c", null, "c1 * 1", TestExprMacroTable.INSTANCE),
              new DoubleSumAggregatorFactory("d", null, "c1 * 1", TestExprMacroTable.INSTANCE)
          )
      )
      .context(ImmutableMap.of(QueryContexts.VECTORIZE_KEY, doVectorize))
      .build();

  // Only string(1)
  // Note: Expressions implicitly cast strings to numbers, leading to the a/b vs c/d difference.
  Assert.assertEquals(
      timeseriesResult(
          ImmutableMap.of("a", 31L, "b", THIRTY_ONE_POINT_ONE, "c", 31L, "d", THIRTY_ONE_POINT_ONE)
      ),
      runQuery(sumQuery, runnerFactory, ImmutableList.of(index1))
  );

  // Only long(2)
  Assert.assertEquals(
      timeseriesResult(ImmutableMap.of("a", 31L, "b", 31.0, "c", 31L, "d", 31.0)),
      runQuery(sumQuery, runnerFactory, ImmutableList.of(index2))
  );

  // Only float(3)
  Assert.assertEquals(
      timeseriesResult(
          ImmutableMap.of("a", 31L, "b", THIRTY_ONE_POINT_ONE, "c", 31L, "d", THIRTY_ONE_POINT_ONE)
      ),
      runQuery(sumQuery, runnerFactory, ImmutableList.of(index3))
  );

  // Only nonexistent(4); a plain HashMap is used because ImmutableMap rejects null values.
  final Map<String, Object> expectedNulls = new HashMap<>();
  expectedNulls.put("a", NullHandling.defaultLongValue());
  expectedNulls.put("b", NullHandling.defaultDoubleValue());
  expectedNulls.put("c", NullHandling.defaultLongValue());
  expectedNulls.put("d", NullHandling.defaultDoubleValue());
  Assert.assertEquals(
      timeseriesResult(expectedNulls),
      runQuery(sumQuery, runnerFactory, ImmutableList.of(index4))
  );

  // string(1) + long(2) + float(3) + nonexistent(4)
  // Note: Expressions implicitly cast strings to numbers, leading to the a/b vs c/d difference.
  Assert.assertEquals(
      timeseriesResult(
          ImmutableMap.of(
              "a", 31L * 3,
              "b", THIRTY_ONE_POINT_ONE * 2 + 31,
              "c", 31L * 3,
              "d", THIRTY_ONE_POINT_ONE * 2 + 31
          )
      ),
      runQuery(sumQuery, runnerFactory, ImmutableList.of(index1, index2, index3, index4))
  );

  // long(2) + float(3) + nonexistent(4)
  Assert.assertEquals(
      timeseriesResult(
          ImmutableMap.of(
              "a", 31L * 2,
              "b", THIRTY_ONE_POINT_ONE + 31,
              "c", 31L * 2,
              "d", THIRTY_ONE_POINT_ONE + 31
          )
      ),
      runQuery(sumQuery, runnerFactory, ImmutableList.of(index2, index3, index4))
  );
}
Example usage of org.apache.druid.query.timeseries.TimeseriesQuery from the druid-io/druid project: class VarianceAggregatorFactoryTest, method testResultArraySignature.
@Test
public void testResultArraySignature() {
  // Query mixing plain field access (returns the intermediate variance type) with
  // finalizing access (returns the finalized DOUBLE) for both variance aggregators.
  final TimeseriesQuery signatureQuery = Druids
      .newTimeseriesQueryBuilder()
      .dataSource("dummy")
      .intervals("2000/3000")
      .granularity(Granularities.HOUR)
      .aggregators(
          new CountAggregatorFactory("count"),
          new VarianceAggregatorFactory("variance", "col"),
          new VarianceFoldingAggregatorFactory("varianceFold", "col", null)
      )
      .postAggregators(
          new FieldAccessPostAggregator("variance-access", "variance"),
          new FinalizingFieldAccessPostAggregator("variance-finalize", "variance"),
          new FieldAccessPostAggregator("varianceFold-access", "varianceFold"),
          new FinalizingFieldAccessPostAggregator("varianceFold-finalize", "varianceFold")
      )
      .build();

  Assert.assertEquals(
      RowSignature.builder()
                  .addTimeColumn()
                  .add("count", ColumnType.LONG)
                  .add("variance", null)
                  .add("varianceFold", null)
                  .add("variance-access", VarianceAggregatorFactory.TYPE)
                  .add("variance-finalize", ColumnType.DOUBLE)
                  .add("varianceFold-access", VarianceAggregatorFactory.TYPE)
                  .add("varianceFold-finalize", ColumnType.DOUBLE)
                  .build(),
      new TimeseriesQueryQueryToolChest().resultArraySignature(signatureQuery)
  );
}
Example usage of org.apache.druid.query.timeseries.TimeseriesQuery from the druid-io/druid project: class AppenderatorDriverRealtimeIndexTaskTest, method sumMetric.
@Nullable
public Long sumMetric(final Task task, final DimFilter filter, final String metric) {
  // Run an ALL-granularity longSum timeseries over the task's data and return the
  // aggregated value, or 0 when the query produced no rows.
  final TimeseriesQuery sumQuery = Druids
      .newTimeseriesQueryBuilder()
      .dataSource("test_ds")
      .filters(filter)
      .aggregators(ImmutableList.of(new LongSumAggregatorFactory(metric, metric)))
      .granularity(Granularities.ALL)
      .intervals("2000/3000")
      .build();

  final List<Result<TimeseriesResultValue>> rows =
      task.getQueryRunner(sumQuery).run(QueryPlus.wrap(sumQuery)).toList();
  if (rows.isEmpty()) {
    return 0L;
  }
  // Kept as if/return (not a ternary): getLongMetric may return null, and a mixed
  // Long/long ternary would auto-unbox and NPE.
  return rows.get(0).getValue().getLongMetric(metric);
}
Aggregations