Use of org.apache.druid.query.aggregation.LongMaxAggregatorFactory in project druid by druid-io.
The class GroupByQueryRunnerTest, method testGroupByNestedWithInnerQueryNumericsWithLongTime.
@Test
public void testGroupByNestedWithInnerQueryNumericsWithLongTime() {
  if (config.getDefaultStrategy().equals(GroupByStrategySelector.STRATEGY_V1)) {
    expectedException.expect(UnsupportedOperationException.class);
    expectedException.expectMessage("GroupBy v1 only supports dimensions with an outputType of STRING.");
  }
  GroupByQuery subQuery = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(
          new DefaultDimensionSpec("market", "alias"),
          new DefaultDimensionSpec("__time", "time_alias", ColumnType.LONG),
          new DefaultDimensionSpec("index", "index_alias", ColumnType.FLOAT)
      )
      .setAggregatorSpecs(QueryRunnerTestHelper.ROWS_COUNT)
      .setGranularity(QueryRunnerTestHelper.ALL_GRAN)
      .build();
  GroupByQuery outerQuery = makeQueryBuilder()
      .setDataSource(subQuery)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(
          new DefaultDimensionSpec("alias", "market"),
          new DefaultDimensionSpec("time_alias", "time_alias2", ColumnType.LONG)
      )
      .setAggregatorSpecs(
          new LongMaxAggregatorFactory("time_alias_max", "time_alias"),
          new DoubleMaxAggregatorFactory("index_alias_max", "index_alias")
      )
      .setGranularity(QueryRunnerTestHelper.ALL_GRAN)
      .build();
  List<ResultRow> expectedResults = Arrays.asList(
      makeRow(outerQuery, "2011-04-01", "market", "spot", "time_alias2", 1301616000000L, "time_alias_max", 1301616000000L, "index_alias_max", 158.74722290039062),
      makeRow(outerQuery, "2011-04-01", "market", "spot", "time_alias2", 1301702400000L, "time_alias_max", 1301702400000L, "index_alias_max", 166.01605224609375),
      makeRow(outerQuery, "2011-04-01", "market", "total_market", "time_alias2", 1301616000000L, "time_alias_max", 1301616000000L, "index_alias_max", 1522.043701171875),
      makeRow(outerQuery, "2011-04-01", "market", "total_market", "time_alias2", 1301702400000L, "time_alias_max", 1301702400000L, "index_alias_max", 1321.375),
      makeRow(outerQuery, "2011-04-01", "market", "upfront", "time_alias2", 1301616000000L, "time_alias_max", 1301616000000L, "index_alias_max", 1447.3411865234375),
      makeRow(outerQuery, "2011-04-01", "market", "upfront", "time_alias2", 1301702400000L, "time_alias_max", 1301702400000L, "index_alias_max", 1144.3424072265625)
  );
  Iterable<ResultRow> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, outerQuery);
  TestHelper.assertExpectedObjects(expectedResults, results, "numerics");
}
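The core pattern here is an outer GroupByQuery aggregating a numeric dimension produced by an inner query. A minimal sketch of just that wiring, with names mirroring the test above (both constructor arguments are plain strings: output column name first, input field name second):

import org.apache.druid.query.aggregation.AggregatorFactory;
import org.apache.druid.query.aggregation.LongMaxAggregatorFactory;
import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.segment.column.ColumnType;

// The inner query re-exposes __time as a LONG-typed dimension "time_alias" ...
DefaultDimensionSpec timeDim = new DefaultDimensionSpec("__time", "time_alias", ColumnType.LONG);
// ... and the outer query takes the per-group maximum of that long column.
AggregatorFactory timeMax = new LongMaxAggregatorFactory("time_alias_max", "time_alias");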
Use of org.apache.druid.query.aggregation.LongMaxAggregatorFactory in project druid by druid-io.
The class CalciteJoinQueryTest, method testNotInAggregationSubquery.
@Test
@Parameters(source = QueryContextForJoinProvider.class)
public void testNotInAggregationSubquery(Map<String, Object> queryContext) throws Exception {
  // Cannot vectorize JOIN operator.
  cannotVectorize();
  testQuery(
      "SELECT DISTINCT __time FROM druid.foo WHERE __time NOT IN (SELECT MAX(__time) FROM druid.foo)",
      queryContext,
      ImmutableList.of(
          GroupByQuery.builder()
              .setDataSource(
                  join(
                      join(
                          new TableDataSource(CalciteTests.DATASOURCE1),
                          new QueryDataSource(
                              GroupByQuery.builder()
                                  .setDataSource(
                                      Druids.newTimeseriesQueryBuilder()
                                          .dataSource(CalciteTests.DATASOURCE1)
                                          .intervals(querySegmentSpec(Filtration.eternity()))
                                          .granularity(Granularities.ALL)
                                          .aggregators(new LongMaxAggregatorFactory("a0", "__time"))
                                          .context(QUERY_CONTEXT_DEFAULT)
                                          .build()
                                  )
                                  .setInterval(querySegmentSpec(Filtration.eternity()))
                                  .setGranularity(Granularities.ALL)
                                  .setAggregatorSpecs(
                                      new CountAggregatorFactory("_a0"),
                                      NullHandling.sqlCompatible()
                                          ? new FilteredAggregatorFactory(new CountAggregatorFactory("_a1"), not(selector("a0", null, null)))
                                          : new CountAggregatorFactory("_a1")
                                  )
                                  .setContext(queryContext)
                                  .build()
                          ),
                          "j0.",
                          "1",
                          JoinType.INNER
                      ),
                      new QueryDataSource(
                          Druids.newTimeseriesQueryBuilder()
                              .dataSource(CalciteTests.DATASOURCE1)
                              .intervals(querySegmentSpec(Filtration.eternity()))
                              .granularity(Granularities.ALL)
                              .aggregators(new LongMaxAggregatorFactory("a0", "__time"))
                              .postAggregators(expressionPostAgg("p0", "1"))
                              .context(QUERY_CONTEXT_DEFAULT)
                              .build()
                      ),
                      "_j0.",
                      "(\"__time\" == \"_j0.a0\")",
                      JoinType.LEFT
                  )
              )
              .setInterval(querySegmentSpec(Filtration.eternity()))
              .setGranularity(Granularities.ALL)
              .setDimFilter(
                  or(
                      selector("j0._a0", "0", null),
                      and(selector("_j0.p0", null, null), expressionFilter("(\"j0._a1\" >= \"j0._a0\")"))
                  )
              )
              .setDimensions(dimensions(new DefaultDimensionSpec("__time", "d0", ColumnType.LONG)))
              .setContext(queryContext)
              .build()
      ),
      ImmutableList.of(
          new Object[] { timestamp("2000-01-01") },
          new Object[] { timestamp("2000-01-02") },
          new Object[] { timestamp("2000-01-03") },
          new Object[] { timestamp("2001-01-01") },
          new Object[] { timestamp("2001-01-02") }
      )
  );
}
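Stripped of the join scaffolding, the SQL aggregate MAX(__time) plans to a timeseries query whose only aggregator is a LongMaxAggregatorFactory over __time. A sketch of that inner query alone, lifted from the expected plan above (querySegmentSpec() and Filtration.eternity() are the test's own helpers):

TimeseriesQuery maxTime = Druids.newTimeseriesQueryBuilder()
    .dataSource(CalciteTests.DATASOURCE1)
    .intervals(querySegmentSpec(Filtration.eternity()))
    .granularity(Granularities.ALL)
    .aggregators(new LongMaxAggregatorFactory("a0", "__time")) // MAX(__time)
    .context(QUERY_CONTEXT_DEFAULT)
    .build();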
Use of org.apache.druid.query.aggregation.LongMaxAggregatorFactory in project druid by druid-io.
The class CalciteCorrelatedQueryTest, method testCorrelatedSubqueryWithCorrelatedQueryFilter.
@Test
@Parameters(source = QueryContextForJoinProvider.class)
public void testCorrelatedSubqueryWithCorrelatedQueryFilter(Map<String, Object> queryContext) throws Exception {
  cannotVectorize();
  queryContext = withLeftDirectAccessEnabled(queryContext);
  testQuery(
      "select country, ANY_VALUE(\n"
      + " select max(\"users\") from (\n"
      + " select floor(__time to day), count(user) \"users\" from visits f where f.country = visits.country and f.city = 'A' group by 1\n"
      + " )\n"
      + " ) as \"dailyVisits\"\n"
      + "from visits \n"
      + " where city = 'B'"
      + " group by 1",
      queryContext,
      ImmutableList.of(
          GroupByQuery.builder()
              .setDataSource(
                  join(
                      new TableDataSource(CalciteTests.USERVISITDATASOURCE),
                      new QueryDataSource(
                          GroupByQuery.builder()
                              .setDataSource(
                                  GroupByQuery.builder()
                                      .setDataSource(CalciteTests.USERVISITDATASOURCE)
                                      .setQuerySegmentSpec(querySegmentSpec(Intervals.ETERNITY))
                                      .setVirtualColumns(new ExpressionVirtualColumn("v0", "timestamp_floor(\"__time\",'P1D',null,'UTC')", ColumnType.LONG, TestExprMacroTable.INSTANCE))
                                      .setDimensions(
                                          new DefaultDimensionSpec("v0", "d0", ColumnType.LONG),
                                          new DefaultDimensionSpec("country", "d1")
                                      )
                                      .setAggregatorSpecs(new FilteredAggregatorFactory(new CountAggregatorFactory("a0"), not(selector("user", null, null))))
                                      .setDimFilter(and(selector("city", "A", null), not(selector("country", null, null))))
                                      .setContext(withTimestampResultContext(queryContext, "d0", Granularities.DAY))
                                      .setGranularity(new AllGranularity())
                                      .build()
                              )
                              .setQuerySegmentSpec(querySegmentSpec(Intervals.ETERNITY))
                              .setDimensions(new DefaultDimensionSpec("d1", "_d0"))
                              .setAggregatorSpecs(new LongMaxAggregatorFactory("_a0", "a0"))
                              .setGranularity(new AllGranularity())
                              .setContext(queryContext)
                              .build()
                      ),
                      "j0.",
                      equalsCondition(makeColumnExpression("country"), makeColumnExpression("j0._d0")),
                      JoinType.LEFT,
                      selector("city", "B", null)
                  )
              )
              .setQuerySegmentSpec(querySegmentSpec(Intervals.ETERNITY))
              .setDimensions(new DefaultDimensionSpec("country", "d0"))
              .setAggregatorSpecs(new LongAnyAggregatorFactory("a0", "j0._a0"))
              .setGranularity(new AllGranularity())
              .setContext(queryContext)
              .build()
      ),
      ImmutableList.of(new Object[] { "canada", 2L })
  );
}
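Note how the plan stacks LongMax on top of a count: the innermost query emits a filtered per-(day, country) count named "a0", and the middle query reduces it to a per-country maximum named "_a0". The two aggregators in isolation, with names mirroring the expected plan above (not() and selector() are the test's own filter helpers):

// count(user) per day and country, with null users excluded
AggregatorFactory dailyUsers = new FilteredAggregatorFactory(new CountAggregatorFactory("a0"), not(selector("user", null, null)));
// max("users") per country, reading the inner query's output column "a0"
AggregatorFactory maxDailyUsers = new LongMaxAggregatorFactory("_a0", "a0");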
Use of org.apache.druid.query.aggregation.LongMaxAggregatorFactory in project druid by druid-io.
The class NestedQueryPushDownTest, method testNestedQueryWithRenamedDimensions.
@Test
public void testNestedQueryWithRenamedDimensions() {
  QuerySegmentSpec intervalSpec = new MultipleIntervalSegmentSpec(
      Collections.singletonList(Intervals.utc(1500000000000L, 1600000000000L))
  );
  GroupByQuery query = GroupByQuery.builder()
      .setDataSource("blah")
      .setQuerySegmentSpec(intervalSpec)
      .setDimensions(
          new DefaultDimensionSpec("dimA", "dimA"),
          new DefaultDimensionSpec("dimB", "newDimB")
      )
      .setAggregatorSpecs(
          new LongSumAggregatorFactory("metASum", "metA"),
          new LongSumAggregatorFactory("metBSum", "metB")
      )
      .setGranularity(Granularities.ALL)
      .build();
  GroupByQuery nestedQuery = GroupByQuery.builder()
      .setDataSource(query)
      .setQuerySegmentSpec(intervalSpec)
      .setDimensions(new DefaultDimensionSpec("newDimB", "renamedDimB"))
      .setAggregatorSpecs(new LongMaxAggregatorFactory("maxBSum", "metBSum"))
      .setContext(ImmutableMap.of(GroupByQueryConfig.CTX_KEY_FORCE_PUSH_DOWN_NESTED_QUERY, true))
      .setGranularity(Granularities.ALL)
      .build();
  Sequence<ResultRow> queryResult = runNestedQueryWithForcePushDown(nestedQuery);
  List<ResultRow> results = queryResult.toList();
  ResultRow expectedRow0 = GroupByQueryRunnerTestHelper.createExpectedRow(nestedQuery, "2017-07-14T02:40:00.000Z", "renamedDimB", "sour", "maxBSum", 20L);
  ResultRow expectedRow1 = GroupByQueryRunnerTestHelper.createExpectedRow(nestedQuery, "2017-07-14T02:40:00.000Z", "renamedDimB", "sweet", "maxBSum", 60L);
  Assert.assertEquals(2, results.size());
  Assert.assertEquals(expectedRow0, results.get(0));
  Assert.assertEquals(expectedRow1, results.get(1));
}
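The aggregator chain being pushed down is simply max-of-sum: the inner query sums metB into metBSum, and the outer query takes the per-group maximum of that sum under the renamed dimension. A sketch of just that chain, with names as in the test:

// inner query: sum the raw column "metB" into the output column "metBSum"
AggregatorFactory sumB = new LongSumAggregatorFactory("metBSum", "metB");
// outer query: take the maximum over the inner query's "metBSum" output
AggregatorFactory maxOfSumB = new LongMaxAggregatorFactory("maxBSum", "metBSum");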
Use of org.apache.druid.query.aggregation.LongMaxAggregatorFactory in project druid by druid-io.
The class MetadataTest, method testMerge.
@Test
public void testMerge() {
  Assert.assertNull(Metadata.merge(null, null));
  Assert.assertNull(Metadata.merge(ImmutableList.of(), null));
  List<Metadata> metadataToBeMerged = new ArrayList<>();
  metadataToBeMerged.add(null);
  Assert.assertNull(Metadata.merge(metadataToBeMerged, null));
  // sanity merge check
  AggregatorFactory[] aggs = new AggregatorFactory[] { new LongMaxAggregatorFactory("n", "f") };
  final Metadata m1 = new Metadata(Collections.singletonMap("k", "v"), aggs, new TimestampSpec("ds", "auto", null), Granularities.ALL, Boolean.FALSE);
  final Metadata m2 = new Metadata(Collections.singletonMap("k", "v"), aggs, new TimestampSpec("ds", "auto", null), Granularities.ALL, Boolean.FALSE);
  final Metadata m3 = new Metadata(Collections.singletonMap("k", "v"), aggs, new TimestampSpec("ds", "auto", null), Granularities.ALL, Boolean.TRUE);
  final Metadata merged = new Metadata(Collections.singletonMap("k", "v"), new AggregatorFactory[] { new LongMaxAggregatorFactory("n", "n") }, new TimestampSpec("ds", "auto", null), Granularities.ALL, Boolean.FALSE);
  Assert.assertEquals(merged, Metadata.merge(ImmutableList.of(m1, m2), null));
  // merge check with one metadata being null
  metadataToBeMerged.clear();
  metadataToBeMerged.add(m1);
  metadataToBeMerged.add(m2);
  metadataToBeMerged.add(null);
  final Metadata merged2 = new Metadata(Collections.singletonMap("k", "v"), null, null, null, null);
  Assert.assertEquals(merged2, Metadata.merge(metadataToBeMerged, null));
  // merge check with client explicitly providing merged aggregators
  AggregatorFactory[] explicitAggs = new AggregatorFactory[] { new DoubleMaxAggregatorFactory("x", "y") };
  final Metadata merged3 = new Metadata(Collections.singletonMap("k", "v"), explicitAggs, null, null, null);
  Assert.assertEquals(merged3, Metadata.merge(metadataToBeMerged, explicitAggs));
  final Metadata merged4 = new Metadata(Collections.singletonMap("k", "v"), explicitAggs, new TimestampSpec("ds", "auto", null), Granularities.ALL, null);
  Assert.assertEquals(merged4, Metadata.merge(ImmutableList.of(m3, m2), explicitAggs));
}
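The renamed input field in the expected merged aggregator (LongMaxAggregatorFactory("n", "n") rather than ("n", "f")) reflects combining semantics: when already-aggregated segments are merged, the aggregator must read its own output column rather than the raw input. A minimal sketch under that assumption, using AggregatorFactory's public getCombiningFactory(); the equality in the last comment is an inference drawn from the expected value in the test above, not an assertion from the source:

AggregatorFactory original = new LongMaxAggregatorFactory("n", "f");
// The combining form reads its own output column "n" instead of the raw
// input "f", matching the merged metadata asserted above.
AggregatorFactory combining = original.getCombiningFactory();
// expected: combining.equals(new LongMaxAggregatorFactory("n", "n"))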