Use of org.apache.druid.query.aggregation.FloatMinAggregatorFactory in project druid by druid-io.
The class CalciteJoinQueryTest, method testJoinOnTimeseriesWithFloorOnTime.
@Test
public void testJoinOnTimeseriesWithFloorOnTime() throws Exception
{
  // Cannot vectorize JOIN operator.
  cannotVectorize();
  testQuery(
      "SELECT CAST(__time AS BIGINT), m1, ANY_VALUE(dim3, 100) FROM foo WHERE (TIME_FLOOR(__time, 'PT1H'), m1) IN\n"
      + " (\n"
      + " SELECT TIME_FLOOR(__time, 'PT1H') AS t1, MIN(m1) AS t2 FROM foo WHERE dim3 = 'b'\n"
      + " AND __time BETWEEN '1994-04-29 00:00:00' AND '2020-01-11 00:00:00' GROUP BY 1\n"
      + " )\n"
      + "GROUP BY 1, 2\n",
      ImmutableList.of(
          GroupByQuery.builder()
                      .setDataSource(
                          join(
                              new TableDataSource(CalciteTests.DATASOURCE1),
                              new QueryDataSource(
                                  Druids.newTimeseriesQueryBuilder()
                                        .dataSource(CalciteTests.DATASOURCE1)
                                        .intervals(querySegmentSpec(Intervals.of("1994-04-29/2020-01-11T00:00:00.001Z")))
                                        .filters(selector("dim3", "b", null))
                                        .granularity(new PeriodGranularity(Period.hours(1), null, DateTimeZone.UTC))
                                        .aggregators(aggregators(new FloatMinAggregatorFactory("a0", "m1")))
                                        .context(getTimeseriesContextWithFloorTime(TIMESERIES_CONTEXT_BY_GRAN, "d0"))
                                        .build()
                              ),
                              "j0.",
                              "((timestamp_floor(\"__time\",'PT1H',null,'UTC') == \"j0.d0\") && (\"m1\" == \"j0.a0\"))",
                              JoinType.INNER
                          )
                      )
                      .setInterval(querySegmentSpec(Filtration.eternity()))
                      .setDimensions(
                          new DefaultDimensionSpec("__time", "d0", ColumnType.LONG),
                          new DefaultDimensionSpec("m1", "d1", ColumnType.FLOAT)
                      )
                      .setGranularity(Granularities.ALL)
                      .setAggregatorSpecs(aggregators(new StringAnyAggregatorFactory("a0", "dim3", 100)))
                      .setContext(QUERY_CONTEXT_DEFAULT)
                      .build()
      ),
      ImmutableList.of(
          new Object[]{946684800000L, 1.0f, "[a, b]"},
          new Object[]{946771200000L, 2.0f, "[b, c]"}
      )
  );
}
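Here the subquery is planned as a native timeseries query because TIME_FLOOR on __time lines up with the query granularity, and SQL MIN over the float column m1 becomes a FloatMinAggregatorFactory. The two-argument constructor used in the expected plan takes the output name first, then the source column. A minimal standalone sketch of that constructor (the class name FloatMinExample and the variable name are illustrative, not from the test):

import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.FloatMinAggregatorFactory;

public class FloatMinExample
{
  public static void main(String[] args)
  {
    // Tracks the minimum of the float column "m1" and exposes the result
    // under the output name "a0", mirroring the aggregator built inside
    // the timeseries subquery above.
    AggregatorFactory minM1 = new FloatMinAggregatorFactory("a0", "m1");
    System.out.println(minM1.getName()); // prints "a0"
  }
}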
Use of org.apache.druid.query.aggregation.FloatMinAggregatorFactory in project druid by druid-io.
The class TopNQueryRunnerTest, method testFullOnTopN.
@Test
public void testFullOnTopN()
{
  TopNQuery query = new TopNQueryBuilder()
      .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .granularity(QueryRunnerTestHelper.ALL_GRAN)
      .dimension(QueryRunnerTestHelper.MARKET_DIMENSION)
      .metric(QueryRunnerTestHelper.INDEX_METRIC)
      .threshold(4)
      .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .aggregators(
          Lists.newArrayList(
              Iterables.concat(
                  commonAggregators,
                  Lists.newArrayList(
                      new DoubleMaxAggregatorFactory("maxIndex", "index"),
                      new DoubleMinAggregatorFactory("minIndex", "index")
                  )
              )
          )
      )
      .postAggregators(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT)
      .build();
  List<Result<TopNResultValue>> expectedResults = Collections.singletonList(
      new Result<>(
          DateTimes.of("2011-01-12T00:00:00.000Z"),
          new TopNResultValue(
              Arrays.<Map<String, Object>>asList(
                  ImmutableMap.<String, Object>builder()
                              .put(QueryRunnerTestHelper.MARKET_DIMENSION, "total_market")
                              .put("rows", 186L)
                              .put("index", 215679.82879638672D)
                              .put("addRowsIndexConstant", 215866.82879638672D)
                              .put("uniques", QueryRunnerTestHelper.UNIQUES_2)
                              .put("maxIndex", 1743.92175D)
                              .put("minIndex", 792.3260498046875D)
                              .build(),
                  ImmutableMap.<String, Object>builder()
                              .put(QueryRunnerTestHelper.MARKET_DIMENSION, "upfront")
                              .put("rows", 186L)
                              .put("index", 192046.1060180664D)
                              .put("addRowsIndexConstant", 192233.1060180664D)
                              .put("uniques", QueryRunnerTestHelper.UNIQUES_2)
                              .put("maxIndex", 1870.061029D)
                              .put("minIndex", 545.9906005859375D)
                              .build(),
                  ImmutableMap.<String, Object>builder()
                              .put(QueryRunnerTestHelper.MARKET_DIMENSION, "spot")
                              .put("rows", 837L)
                              .put("index", 95606.57232284546D)
                              .put("addRowsIndexConstant", 96444.57232284546D)
                              .put("uniques", QueryRunnerTestHelper.UNIQUES_9)
                              .put("maxIndex", 277.273533D)
                              .put("minIndex", 59.02102279663086D)
                              .build()
              )
          )
      )
  );
  assertExpectedResults(expectedResults, query);
  // Re-run with float-typed aggregators over "indexFloat"; the same
  // expected results should hold.
  assertExpectedResults(
      expectedResults,
      query.withAggregatorSpecs(
          Lists.newArrayList(
              Iterables.concat(
                  QueryRunnerTestHelper.COMMON_FLOAT_AGGREGATORS,
                  Lists.newArrayList(
                      new FloatMaxAggregatorFactory("maxIndex", "indexFloat"),
                      new FloatMinAggregatorFactory("minIndex", "indexFloat")
                  )
              )
          )
      )
  );
}
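The second assertion works because withAggregatorSpecs returns a modified copy of the query rather than mutating it, so the same expectedResults can be checked against both the double- and float-typed variants. A hedged restatement of that pattern (only the local variable floatQuery is new; every other name already appears in the test):

// withAggregatorSpecs leaves `query` untouched and hands back a copy with
// only the aggregator list replaced.
TopNQuery floatQuery = query.withAggregatorSpecs(
    Lists.newArrayList(
        Iterables.concat(
            QueryRunnerTestHelper.COMMON_FLOAT_AGGREGATORS,
            Lists.newArrayList(
                new FloatMaxAggregatorFactory("maxIndex", "indexFloat"),
                new FloatMinAggregatorFactory("minIndex", "indexFloat")
            )
        )
    )
);
assertExpectedResults(expectedResults, floatQuery);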
Use of org.apache.druid.query.aggregation.FloatMinAggregatorFactory in project druid by druid-io.
The class CompactionTaskTest, method testCreateIngestionSchemaWithCustomMetricsSpec.
@Test
public void testCreateIngestionSchemaWithCustomMetricsSpec() throws IOException, SegmentLoadingException
{
  final AggregatorFactory[] customMetricsSpec = new AggregatorFactory[]{
      new CountAggregatorFactory("custom_count"),
      new LongSumAggregatorFactory("custom_long_sum", "agg_1"),
      new FloatMinAggregatorFactory("custom_float_min", "agg_3"),
      new DoubleMaxAggregatorFactory("custom_double_max", "agg_4")
  };
  final List<ParallelIndexIngestionSpec> ingestionSpecs = CompactionTask.createIngestionSchema(
      toolbox,
      LockGranularity.TIME_CHUNK,
      new SegmentProvider(DATA_SOURCE, new CompactionIntervalSpec(COMPACTION_INTERVAL, null)),
      new PartitionConfigurationManager(TUNING_CONFIG),
      null,
      null,
      customMetricsSpec,
      null,
      COORDINATOR_CLIENT,
      segmentCacheManagerFactory,
      RETRY_POLICY_FACTORY,
      IOConfig.DEFAULT_DROP_EXISTING
  );
  final List<DimensionsSpec> expectedDimensionsSpec = getExpectedDimensionsSpecForAutoGeneration();
  ingestionSpecs.sort(
      (s1, s2) -> Comparators.intervalsByStartThenEnd().compare(
          s1.getDataSchema().getGranularitySpec().inputIntervals().get(0),
          s2.getDataSchema().getGranularitySpec().inputIntervals().get(0)
      )
  );
  Assert.assertEquals(6, ingestionSpecs.size());
  assertIngestionSchema(
      ingestionSpecs,
      expectedDimensionsSpec,
      Arrays.asList(customMetricsSpec),
      SEGMENT_INTERVALS,
      Granularities.MONTH,
      Granularities.NONE,
      IOConfig.DEFAULT_DROP_EXISTING
  );
}
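The customMetricsSpec passed here overrides the metrics that CompactionTask would otherwise derive from the existing segments' metadata, and assertIngestionSchema verifies that each generated spec carries exactly those metrics. A hedged sketch of the core of that check, unrolled (DataSchema.getAggregators() is assumed to be the accessor in this Druid version; this illustrates what the helper verifies, not its actual body):

for (ParallelIndexIngestionSpec spec : ingestionSpecs) {
  // Every month-granularity ingestion spec should use the custom metrics,
  // in the order they were declared.
  Assert.assertArrayEquals(customMetricsSpec, spec.getDataSchema().getAggregators());
}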
Use of org.apache.druid.query.aggregation.FloatMinAggregatorFactory in project druid by druid-io.
The class CalciteJoinQueryTest, method testJoinOnGroupByInsteadOfTimeseriesWithFloorOnTime.
@Test
public void testJoinOnGroupByInsteadOfTimeseriesWithFloorOnTime() throws Exception
{
  // Cannot vectorize JOIN operator.
  cannotVectorize();
  // The "+ 0" on the floored timestamp keeps the subquery from being planned
  // as a timeseries; it becomes a groupBy over an expression virtual column.
  testQuery(
      "SELECT CAST(__time AS BIGINT), m1, ANY_VALUE(dim3, 100) FROM foo WHERE (CAST(TIME_FLOOR(__time, 'PT1H') AS BIGINT), m1) IN\n"
      + " (\n"
      + " SELECT CAST(TIME_FLOOR(__time, 'PT1H') AS BIGINT) + 0 AS t1, MIN(m1) AS t2 FROM foo WHERE dim3 = 'b'\n"
      + " AND __time BETWEEN '1994-04-29 00:00:00' AND '2020-01-11 00:00:00' GROUP BY 1\n"
      + " )\n"
      + "GROUP BY 1, 2\n",
      ImmutableList.of(
          GroupByQuery.builder()
                      .setDataSource(
                          join(
                              new TableDataSource(CalciteTests.DATASOURCE1),
                              new QueryDataSource(
                                  GroupByQuery.builder()
                                              .setDataSource(CalciteTests.DATASOURCE1)
                                              .setInterval(querySegmentSpec(Intervals.of("1994-04-29/2020-01-11T00:00:00.001Z")))
                                              .setVirtualColumns(expressionVirtualColumn("v0", "(timestamp_floor(\"__time\",'PT1H',null,'UTC') + 0)", ColumnType.LONG))
                                              .setDimFilter(selector("dim3", "b", null))
                                              .setGranularity(Granularities.ALL)
                                              .setDimensions(dimensions(new DefaultDimensionSpec("v0", "d0", ColumnType.LONG)))
                                              .setAggregatorSpecs(aggregators(new FloatMinAggregatorFactory("a0", "m1")))
                                              .setContext(QUERY_CONTEXT_DEFAULT)
                                              .build()
                              ),
                              "j0.",
                              "((timestamp_floor(\"__time\",'PT1H',null,'UTC') == \"j0.d0\") && (\"m1\" == \"j0.a0\"))",
                              JoinType.INNER
                          )
                      )
                      .setInterval(querySegmentSpec(Filtration.eternity()))
                      .setDimensions(
                          new DefaultDimensionSpec("__time", "d0", ColumnType.LONG),
                          new DefaultDimensionSpec("m1", "d1", ColumnType.FLOAT)
                      )
                      .setGranularity(Granularities.ALL)
                      .setAggregatorSpecs(aggregators(new StringAnyAggregatorFactory("a0", "dim3", 100)))
                      .setContext(QUERY_CONTEXT_DEFAULT)
                      .build()
      ),
      ImmutableList.of(
          new Object[]{946684800000L, 1.0f, "[a, b]"},
          new Object[]{946771200000L, 2.0f, "[b, c]"}
      )
  );
}
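Compared with testJoinOnTimeseriesWithFloorOnTime above, the only SQL change is the "+ 0" on the floored timestamp, yet the expected results are identical: the FloatMinAggregatorFactory is constructed the same way whether the subquery plans as a timeseries or a groupBy. For completeness, the factory also has a JSON-creator overload that aggregates over an expression instead of a direct column; a hedged sketch, assuming the four-argument constructor (name, fieldName, expression, macroTable) is available in this Druid version (class name and expression are illustrative):

import org.apache.druid.math.expr.ExprMacroTable;
import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.FloatMinAggregatorFactory;

public class FloatMinExpressionExample
{
  public static void main(String[] args)
  {
    // Assumption: exactly one of fieldName and expression may be non-null;
    // here fieldName is null and the min is taken over an expression.
    AggregatorFactory minOfExpr = new FloatMinAggregatorFactory(
        "a0",
        null,
        "\"m1\" * 2",
        ExprMacroTable.nil()
    );
    System.out.println(minOfExpr.getName()); // prints "a0"
  }
}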