Search in sources:

Example 1 with FloatMinAggregatorFactory

use of org.apache.druid.query.aggregation.FloatMinAggregatorFactory in project druid by druid-io.

the class CalciteJoinQueryTest method testJoinOnTimeseriesWithFloorOnTime.

@Test
public void testJoinOnTimeseriesWithFloorOnTime() throws Exception {
    // Cannot vectorize JOIN operator.
    cannotVectorize();
    testQuery("SELECT CAST(__time AS BIGINT), m1, ANY_VALUE(dim3, 100) FROM foo WHERE (TIME_FLOOR(__time, 'PT1H'), m1) IN\n" + "   (\n" + "     SELECT TIME_FLOOR(__time, 'PT1H') AS t1, MIN(m1) AS t2 FROM foo WHERE dim3 = 'b'\n" + "         AND __time BETWEEN '1994-04-29 00:00:00' AND '2020-01-11 00:00:00' GROUP BY 1\n" + "    )\n" + "GROUP BY 1, 2\n", ImmutableList.of(GroupByQuery.builder().setDataSource(join(new TableDataSource(CalciteTests.DATASOURCE1), new QueryDataSource(Druids.newTimeseriesQueryBuilder().dataSource(CalciteTests.DATASOURCE1).intervals(querySegmentSpec(Intervals.of("1994-04-29/2020-01-11T00:00:00.001Z"))).filters(selector("dim3", "b", null)).granularity(new PeriodGranularity(Period.hours(1), null, DateTimeZone.UTC)).aggregators(aggregators(new FloatMinAggregatorFactory("a0", "m1"))).context(getTimeseriesContextWithFloorTime(TIMESERIES_CONTEXT_BY_GRAN, "d0")).build()), "j0.", "((timestamp_floor(\"__time\",'PT1H',null,'UTC') == \"j0.d0\") && (\"m1\" == \"j0.a0\"))", JoinType.INNER)).setInterval(querySegmentSpec(Filtration.eternity())).setDimensions(new DefaultDimensionSpec("__time", "d0", ColumnType.LONG), new DefaultDimensionSpec("m1", "d1", ColumnType.FLOAT)).setGranularity(Granularities.ALL).setAggregatorSpecs(aggregators(new StringAnyAggregatorFactory("a0", "dim3", 100))).setContext(QUERY_CONTEXT_DEFAULT).build()), ImmutableList.of(new Object[] { 946684800000L, 1.0f, "[a, b]" }, new Object[] { 946771200000L, 2.0f, "[b, c]" }));
}
Also used : GlobalTableDataSource(org.apache.druid.query.GlobalTableDataSource) TableDataSource(org.apache.druid.query.TableDataSource) QueryDataSource(org.apache.druid.query.QueryDataSource) StringAnyAggregatorFactory(org.apache.druid.query.aggregation.any.StringAnyAggregatorFactory) PeriodGranularity(org.apache.druid.java.util.common.granularity.PeriodGranularity) FloatMinAggregatorFactory(org.apache.druid.query.aggregation.FloatMinAggregatorFactory) DefaultDimensionSpec(org.apache.druid.query.dimension.DefaultDimensionSpec) Test(org.junit.Test)
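
For orientation, here is a minimal standalone sketch of the inner MIN aggregator used in the subquery above. It is a hypothetical snippet, not taken from the test, and the printed values are assumptions about the AggregatorFactory contract:

// Same constructor arguments as the aggregator passed to aggregators(...) above.
FloatMinAggregatorFactory minM1 = new FloatMinAggregatorFactory("a0", "m1");
System.out.println(minM1.getName());        // "a0", the output column joined on as "j0.a0"
System.out.println(minM1.requiredFields()); // expected: [m1], the column being minimized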

Example 2 with FloatMinAggregatorFactory

use of org.apache.druid.query.aggregation.FloatMinAggregatorFactory in project druid by druid-io.

the class TopNQueryRunnerTest method testFullOnTopN.

@Test
public void testFullOnTopN() {
    TopNQuery query = new TopNQueryBuilder()
        .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
        .granularity(QueryRunnerTestHelper.ALL_GRAN)
        .dimension(QueryRunnerTestHelper.MARKET_DIMENSION)
        .metric(QueryRunnerTestHelper.INDEX_METRIC)
        .threshold(4)
        .intervals(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
        .aggregators(Lists.newArrayList(Iterables.concat(
            commonAggregators,
            Lists.newArrayList(
                new DoubleMaxAggregatorFactory("maxIndex", "index"),
                new DoubleMinAggregatorFactory("minIndex", "index")))))
        .postAggregators(QueryRunnerTestHelper.ADD_ROWS_INDEX_CONSTANT)
        .build();
    List<Result<TopNResultValue>> expectedResults = Collections.singletonList(
        new Result<>(
            DateTimes.of("2011-01-12T00:00:00.000Z"),
            new TopNResultValue(Arrays.<Map<String, Object>>asList(
                ImmutableMap.<String, Object>builder()
                    .put(QueryRunnerTestHelper.MARKET_DIMENSION, "total_market")
                    .put("rows", 186L)
                    .put("index", 215679.82879638672D)
                    .put("addRowsIndexConstant", 215866.82879638672D)
                    .put("uniques", QueryRunnerTestHelper.UNIQUES_2)
                    .put("maxIndex", 1743.92175D)
                    .put("minIndex", 792.3260498046875D)
                    .build(),
                ImmutableMap.<String, Object>builder()
                    .put(QueryRunnerTestHelper.MARKET_DIMENSION, "upfront")
                    .put("rows", 186L)
                    .put("index", 192046.1060180664D)
                    .put("addRowsIndexConstant", 192233.1060180664D)
                    .put("uniques", QueryRunnerTestHelper.UNIQUES_2)
                    .put("maxIndex", 1870.061029D)
                    .put("minIndex", 545.9906005859375D)
                    .build(),
                ImmutableMap.<String, Object>builder()
                    .put(QueryRunnerTestHelper.MARKET_DIMENSION, "spot")
                    .put("rows", 837L)
                    .put("index", 95606.57232284546D)
                    .put("addRowsIndexConstant", 96444.57232284546D)
                    .put("uniques", QueryRunnerTestHelper.UNIQUES_9)
                    .put("maxIndex", 277.273533D)
                    .put("minIndex", 59.02102279663086D)
                    .build()))));
    assertExpectedResults(expectedResults, query);
    assertExpectedResults(
        expectedResults,
        query.withAggregatorSpecs(Lists.newArrayList(Iterables.concat(
            QueryRunnerTestHelper.COMMON_FLOAT_AGGREGATORS,
            Lists.newArrayList(
                new FloatMaxAggregatorFactory("maxIndex", "indexFloat"),
                new FloatMinAggregatorFactory("minIndex", "indexFloat"))))));
}
Also used : DoubleMaxAggregatorFactory(org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory) FloatMaxAggregatorFactory(org.apache.druid.query.aggregation.FloatMaxAggregatorFactory) DoubleMinAggregatorFactory(org.apache.druid.query.aggregation.DoubleMinAggregatorFactory) FloatMinAggregatorFactory(org.apache.druid.query.aggregation.FloatMinAggregatorFactory) Result(org.apache.druid.query.Result) InitializedNullHandlingTest(org.apache.druid.testing.InitializedNullHandlingTest) Test(org.junit.Test)
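
As a rough illustration of why the float-typed swap in the second assertion reproduces the same minIndex, here is a hypothetical sketch of partial-result merging; the assumption is that combine() on a min factory returns the smaller of its two arguments:

FloatMinAggregatorFactory minIndex = new FloatMinAggregatorFactory("minIndex", "indexFloat");
// Merge two per-segment partial minimums, as happens when results are combined at query time.
Object merged = minIndex.combine(277.273533f, 59.02102279663086f);
System.out.println(merged); // expected: 59.021023, the smaller partial, as a float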

Example 3 with FloatMinAggregatorFactory

use of org.apache.druid.query.aggregation.FloatMinAggregatorFactory in project druid by druid-io.

the class CompactionTaskTest method testCreateIngestionSchemaWithCustomMetricsSpec.

@Test
public void testCreateIngestionSchemaWithCustomMetricsSpec() throws IOException, SegmentLoadingException {
    final AggregatorFactory[] customMetricsSpec = new AggregatorFactory[] {
        new CountAggregatorFactory("custom_count"),
        new LongSumAggregatorFactory("custom_long_sum", "agg_1"),
        new FloatMinAggregatorFactory("custom_float_min", "agg_3"),
        new DoubleMaxAggregatorFactory("custom_double_max", "agg_4")
    };
    final List<ParallelIndexIngestionSpec> ingestionSpecs = CompactionTask.createIngestionSchema(
        toolbox,
        LockGranularity.TIME_CHUNK,
        new SegmentProvider(DATA_SOURCE, new CompactionIntervalSpec(COMPACTION_INTERVAL, null)),
        new PartitionConfigurationManager(TUNING_CONFIG),
        null,
        null,
        customMetricsSpec,
        null,
        COORDINATOR_CLIENT,
        segmentCacheManagerFactory,
        RETRY_POLICY_FACTORY,
        IOConfig.DEFAULT_DROP_EXISTING);
    final List<DimensionsSpec> expectedDimensionsSpec = getExpectedDimensionsSpecForAutoGeneration();
    ingestionSpecs.sort(
        (s1, s2) -> Comparators.intervalsByStartThenEnd().compare(
            s1.getDataSchema().getGranularitySpec().inputIntervals().get(0),
            s2.getDataSchema().getGranularitySpec().inputIntervals().get(0)));
    Assert.assertEquals(6, ingestionSpecs.size());
    assertIngestionSchema(
        ingestionSpecs,
        expectedDimensionsSpec,
        Arrays.asList(customMetricsSpec),
        SEGMENT_INTERVALS,
        Granularities.MONTH,
        Granularities.NONE,
        IOConfig.DEFAULT_DROP_EXISTING);
}
Also used : DoubleMaxAggregatorFactory(org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory) LongSumAggregatorFactory(org.apache.druid.query.aggregation.LongSumAggregatorFactory) ParallelIndexIngestionSpec(org.apache.druid.indexing.common.task.batch.parallel.ParallelIndexIngestionSpec) DoubleLastAggregatorFactory(org.apache.druid.query.aggregation.last.DoubleLastAggregatorFactory) FloatMinAggregatorFactory(org.apache.druid.query.aggregation.FloatMinAggregatorFactory) FloatFirstAggregatorFactory(org.apache.druid.query.aggregation.first.FloatFirstAggregatorFactory) AggregatorFactory(org.apache.druid.query.aggregation.AggregatorFactory) CountAggregatorFactory(org.apache.druid.query.aggregation.CountAggregatorFactory) LongMaxAggregatorFactory(org.apache.druid.query.aggregation.LongMaxAggregatorFactory) SegmentProvider(org.apache.druid.indexing.common.task.CompactionTask.SegmentProvider) PartitionConfigurationManager(org.apache.druid.indexing.common.task.CompactionTask.PartitionConfigurationManager) DimensionsSpec(org.apache.druid.data.input.impl.DimensionsSpec) Test(org.junit.Test)
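
A hypothetical sketch of the re-aggregation step behind this test; getCombiningFactory() is not called explicitly in the test, and its behavior as described here is an assumption:

// During compaction, "custom_float_min" is computed from the already-ingested column "agg_3".
FloatMinAggregatorFactory customFloatMin = new FloatMinAggregatorFactory("custom_float_min", "agg_3");
// When rows are merged again, the combining form reads the aggregator's own output column instead.
AggregatorFactory combining = customFloatMin.getCombiningFactory();
System.out.println(combining.requiredFields()); // expected: [custom_float_min]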

Example 4 with FloatMinAggregatorFactory

use of org.apache.druid.query.aggregation.FloatMinAggregatorFactory in project druid by druid-io.

the class CalciteJoinQueryTest method testJoinOnGroupByInsteadOfTimeseriesWithFloorOnTime.

@Test
public void testJoinOnGroupByInsteadOfTimeseriesWithFloorOnTime() throws Exception {
    // Cannot vectorize JOIN operator.
    cannotVectorize();
    testQuery("SELECT CAST(__time AS BIGINT), m1, ANY_VALUE(dim3, 100) FROM foo WHERE (CAST(TIME_FLOOR(__time, 'PT1H') AS BIGINT), m1) IN\n" + "   (\n" + "     SELECT CAST(TIME_FLOOR(__time, 'PT1H') AS BIGINT) + 0 AS t1, MIN(m1) AS t2 FROM foo WHERE dim3 = 'b'\n" + "         AND __time BETWEEN '1994-04-29 00:00:00' AND '2020-01-11 00:00:00' GROUP BY 1\n" + "    )\n" + "GROUP BY 1, 2\n", ImmutableList.of(GroupByQuery.builder().setDataSource(join(new TableDataSource(CalciteTests.DATASOURCE1), new QueryDataSource(GroupByQuery.builder().setDataSource(CalciteTests.DATASOURCE1).setInterval(querySegmentSpec(Intervals.of("1994-04-29/2020-01-11T00:00:00.001Z"))).setVirtualColumns(expressionVirtualColumn("v0", "(timestamp_floor(\"__time\",'PT1H',null,'UTC') + 0)", ColumnType.LONG)).setDimFilter(selector("dim3", "b", null)).setGranularity(Granularities.ALL).setDimensions(dimensions(new DefaultDimensionSpec("v0", "d0", ColumnType.LONG))).setAggregatorSpecs(aggregators(new FloatMinAggregatorFactory("a0", "m1"))).setContext(QUERY_CONTEXT_DEFAULT).build()), "j0.", "((timestamp_floor(\"__time\",'PT1H',null,'UTC') == \"j0.d0\") && (\"m1\" == \"j0.a0\"))", JoinType.INNER)).setInterval(querySegmentSpec(Filtration.eternity())).setDimensions(new DefaultDimensionSpec("__time", "d0", ColumnType.LONG), new DefaultDimensionSpec("m1", "d1", ColumnType.FLOAT)).setGranularity(Granularities.ALL).setAggregatorSpecs(aggregators(new StringAnyAggregatorFactory("a0", "dim3", 100))).setContext(QUERY_CONTEXT_DEFAULT).build()), ImmutableList.of(new Object[] { 946684800000L, 1.0f, "[a, b]" }, new Object[] { 946771200000L, 2.0f, "[b, c]" }));
}
Also used : GlobalTableDataSource(org.apache.druid.query.GlobalTableDataSource) TableDataSource(org.apache.druid.query.TableDataSource) QueryDataSource(org.apache.druid.query.QueryDataSource) StringAnyAggregatorFactory(org.apache.druid.query.aggregation.any.StringAnyAggregatorFactory) DefaultDimensionSpec(org.apache.druid.query.dimension.DefaultDimensionSpec) FloatMinAggregatorFactory(org.apache.druid.query.aggregation.FloatMinAggregatorFactory) Test(org.junit.Test)
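
A hypothetical sketch of why the join condition above can compare "m1" with "j0.a0" directly; the method name is an assumption for the Druid version used here (older releases expose the type through getType() instead):

FloatMinAggregatorFactory a0 = new FloatMinAggregatorFactory("a0", "m1");
// The MIN of the FLOAT column "m1" is itself FLOAT, so "m1" == "j0.a0" needs no cast.
System.out.println(a0.getIntermediateType()); // expected: FLOAT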

Aggregations

FloatMinAggregatorFactory (org.apache.druid.query.aggregation.FloatMinAggregatorFactory): 4 uses
Test (org.junit.Test): 4 uses
GlobalTableDataSource (org.apache.druid.query.GlobalTableDataSource): 2 uses
QueryDataSource (org.apache.druid.query.QueryDataSource): 2 uses
TableDataSource (org.apache.druid.query.TableDataSource): 2 uses
DoubleMaxAggregatorFactory (org.apache.druid.query.aggregation.DoubleMaxAggregatorFactory): 2 uses
StringAnyAggregatorFactory (org.apache.druid.query.aggregation.any.StringAnyAggregatorFactory): 2 uses
DefaultDimensionSpec (org.apache.druid.query.dimension.DefaultDimensionSpec): 2 uses
DimensionsSpec (org.apache.druid.data.input.impl.DimensionsSpec): 1 use
PartitionConfigurationManager (org.apache.druid.indexing.common.task.CompactionTask.PartitionConfigurationManager): 1 use
SegmentProvider (org.apache.druid.indexing.common.task.CompactionTask.SegmentProvider): 1 use
ParallelIndexIngestionSpec (org.apache.druid.indexing.common.task.batch.parallel.ParallelIndexIngestionSpec): 1 use
PeriodGranularity (org.apache.druid.java.util.common.granularity.PeriodGranularity): 1 use
Result (org.apache.druid.query.Result): 1 use
AggregatorFactory (org.apache.druid.query.aggregation.AggregatorFactory): 1 use
CountAggregatorFactory (org.apache.druid.query.aggregation.CountAggregatorFactory): 1 use
DoubleMinAggregatorFactory (org.apache.druid.query.aggregation.DoubleMinAggregatorFactory): 1 use
FloatMaxAggregatorFactory (org.apache.druid.query.aggregation.FloatMaxAggregatorFactory): 1 use
LongMaxAggregatorFactory (org.apache.druid.query.aggregation.LongMaxAggregatorFactory): 1 use
LongSumAggregatorFactory (org.apache.druid.query.aggregation.LongSumAggregatorFactory): 1 use