Use of org.apache.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
The class GroupByQueryRunnerTest, method testSubqueryWithOuterCountAggregator.
@Test
public void testSubqueryWithOuterCountAggregator() {
  final GroupByQuery subquery = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .setDimensions(new DefaultDimensionSpec("quality", "alias"))
      .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
      .setLimitSpec(new DefaultLimitSpec(ImmutableList.of(new OrderByColumnSpec("alias", OrderByColumnSpec.Direction.ASCENDING)), null))
      .build();
  final GroupByQuery query = makeQueryBuilder()
      .setDataSource(subquery)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(new ArrayList<>())
      .setAggregatorSpecs(new CountAggregatorFactory("count"))
      .setGranularity(QueryRunnerTestHelper.ALL_GRAN)
      .build();
  if (config.getDefaultStrategy().equals(GroupByStrategySelector.STRATEGY_V1)) {
    expectedException.expect(ISE.class);
    expectedException.expectMessage("Unknown column in order clause");
    GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  } else {
    List<ResultRow> expectedResults = Collections.singletonList(makeRow(query, "2011-04-01", "count", 18L));
    Iterable<ResultRow> results = GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
    TestHelper.assertExpectedObjects(expectedResults, results, "subquery-count-agg");
  }
}
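
The essential shape here is a count over a nested group-by: the inner query groups by "quality" per day, and the outer query uses that inner query as its data source and counts its rows. A minimal sketch of the outer part, assuming the same test helpers (makeQueryBuilder, QueryRunnerTestHelper) shown above; innerGroupByQuery is a hypothetical name standing in for the subquery built above.

// Distilled from the test above: count the rows produced by an inner group-by.
GroupByQuery outer = makeQueryBuilder()
    .setDataSource(innerGroupByQuery)                         // a GroupByQuery can itself be the data source
    .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
    .setAggregatorSpecs(new CountAggregatorFactory("count"))  // single output column named "count"
    .setGranularity(QueryRunnerTestHelper.ALL_GRAN)
    .build();
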
Use of org.apache.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
The class GroupByQueryRunnerTest, method testSubqueryWithOuterMaxOnDiskStorageContextOverride.
@Test
public void testSubqueryWithOuterMaxOnDiskStorageContextOverride() {
  final GroupByQuery subquery = makeQueryBuilder()
      .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FULL_ON_INTERVAL_SPEC)
      .setDimensions(new DefaultDimensionSpec("quality", "alias"))
      .setGranularity(QueryRunnerTestHelper.DAY_GRAN)
      .setLimitSpec(new DefaultLimitSpec(ImmutableList.of(new OrderByColumnSpec("alias", OrderByColumnSpec.Direction.ASCENDING)), null))
      .overrideContext(ImmutableMap.of("maxOnDiskStorage", Integer.MAX_VALUE, "bufferGrouperMaxSize", Integer.MAX_VALUE))
      .build();
  final GroupByQuery query = makeQueryBuilder()
      .setDataSource(subquery)
      .setQuerySegmentSpec(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(new ArrayList<>())
      .setAggregatorSpecs(new CountAggregatorFactory("count"))
      .setGranularity(QueryRunnerTestHelper.ALL_GRAN)
      .overrideContext(ImmutableMap.of("maxOnDiskStorage", 0, "bufferGrouperMaxSize", 0))
      .build();
  if (config.getDefaultStrategy().equals(GroupByStrategySelector.STRATEGY_V1)) {
    expectedException.expect(ISE.class);
    expectedException.expectMessage("Unknown column in order clause");
    GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  } else {
    expectedException.expect(ResourceLimitExceededException.class);
    expectedException.expectMessage("Not enough aggregation buffer space to execute this query");
    GroupByQueryRunnerTestHelper.runQuery(factory, runner, query);
  }
}
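
The interesting part of this test is the per-query context override: the inner query is given effectively unlimited grouper and spill capacity, while the outer query is given none, so under the v2 strategy the outer count fails with ResourceLimitExceededException. A sketch of just that override, using the same builder helpers and subquery as above:

// Context override distilled from the test: the outer query gets no grouper headroom and no
// on-disk spill, which forces the "Not enough aggregation buffer space" failure.
GroupByQuery outer = makeQueryBuilder()
    .setDataSource(subquery)
    .setAggregatorSpecs(new CountAggregatorFactory("count"))
    .setGranularity(QueryRunnerTestHelper.ALL_GRAN)
    .overrideContext(ImmutableMap.of("maxOnDiskStorage", 0, "bufferGrouperMaxSize", 0))
    .build();
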
Use of org.apache.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
The class GroupByQueryRunnerFactoryTest, method testMergeRunnersEnsureGroupMerging.
@Test
public void testMergeRunnersEnsureGroupMerging() {
  GroupByQuery query = GroupByQuery.builder()
      .setDataSource("xx")
      .setQuerySegmentSpec(new LegacySegmentSpec("1970/3000"))
      .setGranularity(Granularities.ALL)
      .setDimensions(new DefaultDimensionSpec("tags", "tags"))
      .setAggregatorSpecs(new CountAggregatorFactory("count"))
      .build();
  QueryRunner mergedRunner = factory.getToolchest().mergeResults(new QueryRunner() {
    @Override
    public Sequence run(QueryPlus queryPlus, ResponseContext responseContext) {
      return factory.getToolchest().mergeResults(new QueryRunner() {
        @Override
        public Sequence run(QueryPlus queryPlus, ResponseContext responseContext) {
          final Query query = queryPlus.getQuery();
          try {
            return new MergeSequence(
                query.getResultOrdering(),
                Sequences.simple(Arrays.asList(
                    factory.createRunner(createSegment()).run(queryPlus, responseContext),
                    factory.createRunner(createSegment()).run(queryPlus, responseContext))));
          } catch (Exception e) {
            throw new RuntimeException(e);
          }
        }
      }).run(queryPlus, responseContext);
    }
  });
  Sequence<ResultRow> result = mergedRunner.run(QueryPlus.wrap(query), ResponseContext.createEmpty());
  List<ResultRow> expectedResults = Arrays.asList(
      GroupByQueryRunnerTestHelper.createExpectedRow(query, "1970-01-01T00:00:00.000Z", "tags", "t1", "count", 2L),
      GroupByQueryRunnerTestHelper.createExpectedRow(query, "1970-01-01T00:00:00.000Z", "tags", "t2", "count", 4L));
  TestHelper.assertExpectedObjects(expectedResults, result.toList(), "");
}
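
The merging structure is the point of this test: two runners over the same segment are combined in a MergeSequence, and the result is passed through the toolchest's mergeResults twice; the assertion then checks that identical ("tags", value) groups were actually combined rather than emitted once per segment. The core of that nesting, taken from the test above (factory, createSegment(), queryPlus, and responseContext as defined there):

// The two per-segment sequences must be merged group-wise; without group merging, each
// expected row's "count" would be split across duplicate result rows instead of summed.
Sequence merged = new MergeSequence(
    queryPlus.getQuery().getResultOrdering(),
    Sequences.simple(Arrays.asList(
        factory.createRunner(createSegment()).run(queryPlus, responseContext),
        factory.createRunner(createSegment()).run(queryPlus, responseContext))));
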
Use of org.apache.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
The class CalciteParameterQueryTest, method testNullParameter.
@Test
public void testNullParameter() throws Exception {
  cannotVectorize();
  // Contrived example of using null as a SQL parameter, mainly to exercise the code path: lots of
  // things don't actually work with null, and constructs like 'IS NULL' fail to parse in Calcite
  // when expressed as 'IS ?'.
  // This will optimize out the 3rd argument, because the 2nd argument is constant and not null.
  testQuery(
      "SELECT COALESCE(dim2, ?, ?), COUNT(*) FROM druid.foo GROUP BY 1\n",
      ImmutableList.of(
          GroupByQuery.builder()
              .setDataSource(CalciteTests.DATASOURCE1)
              .setInterval(querySegmentSpec(Filtration.eternity()))
              .setGranularity(Granularities.ALL)
              .setVirtualColumns(expressionVirtualColumn("v0", "case_searched(notnull(\"dim2\"),\"dim2\",'parameter')", ColumnType.STRING))
              .setDimensions(dimensions(new DefaultDimensionSpec("v0", "d0", ColumnType.STRING)))
              .setAggregatorSpecs(aggregators(new CountAggregatorFactory("a0")))
              .setContext(QUERY_CONTEXT_DEFAULT)
              .build()),
      NullHandling.replaceWithDefault()
          ? ImmutableList.of(new Object[] { "a", 2L }, new Object[] { "abc", 1L }, new Object[] { "parameter", 3L })
          : ImmutableList.of(new Object[] { "", 1L }, new Object[] { "a", 2L }, new Object[] { "abc", 1L }, new Object[] { "parameter", 2L }),
      ImmutableList.of(new SqlParameter(SqlType.VARCHAR, "parameter"), new SqlParameter(SqlType.VARCHAR, null)));
  // When converting to a rel expression, this will optimize out the 2nd argument to COALESCE, which is null.
  testQuery(
      "SELECT COALESCE(dim2, ?, ?), COUNT(*) FROM druid.foo GROUP BY 1\n",
      ImmutableList.of(
          GroupByQuery.builder()
              .setDataSource(CalciteTests.DATASOURCE1)
              .setInterval(querySegmentSpec(Filtration.eternity()))
              .setGranularity(Granularities.ALL)
              .setVirtualColumns(expressionVirtualColumn("v0", "case_searched(notnull(\"dim2\"),\"dim2\",'parameter')", ColumnType.STRING))
              .setDimensions(dimensions(new DefaultDimensionSpec("v0", "d0", ColumnType.STRING)))
              .setAggregatorSpecs(aggregators(new CountAggregatorFactory("a0")))
              .setContext(QUERY_CONTEXT_DEFAULT)
              .build()),
      NullHandling.replaceWithDefault()
          ? ImmutableList.of(new Object[] { "a", 2L }, new Object[] { "abc", 1L }, new Object[] { "parameter", 3L })
          : ImmutableList.of(new Object[] { "", 1L }, new Object[] { "a", 2L }, new Object[] { "abc", 1L }, new Object[] { "parameter", 2L }),
      ImmutableList.of(new SqlParameter(SqlType.VARCHAR, null), new SqlParameter(SqlType.VARCHAR, "parameter")));
}
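
Both calls bind the two '?' placeholders with SqlParameter objects, and in both orderings the plan is the same: the null parameter is optimized out of the COALESCE, and the non-null one becomes the string constant in the virtual column. The relevant fragments, copied from the expected query above:

// Parameter bindings for the second call: the null VARCHAR comes first but is still optimized away.
ImmutableList.of(new SqlParameter(SqlType.VARCHAR, null), new SqlParameter(SqlType.VARCHAR, "parameter"));
// Virtual column in both plans: COALESCE collapses to a two-branch case expression over dim2.
expressionVirtualColumn("v0", "case_searched(notnull(\"dim2\"),\"dim2\",'parameter')", ColumnType.STRING);
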
Use of org.apache.druid.query.aggregation.CountAggregatorFactory in project druid by druid-io.
The class CalciteJoinQueryTest, method testUnionAllTwoQueriesBothQueriesAreJoin.
@Test
public void testUnionAllTwoQueriesBothQueriesAreJoin() throws Exception {
  cannotVectorize();
  testQuery(
      "("
          + "SELECT COUNT(*) FROM foo LEFT JOIN lookup.lookyloo ON foo.dim1 = lookyloo.k "
          + " UNION ALL "
          + "SELECT COUNT(*) FROM foo INNER JOIN lookup.lookyloo ON foo.dim1 = lookyloo.k"
          + ") ",
      ImmutableList.of(
          Druids.newTimeseriesQueryBuilder()
              .dataSource(join(
                  new TableDataSource(CalciteTests.DATASOURCE1),
                  new LookupDataSource("lookyloo"),
                  "j0.",
                  equalsCondition(makeColumnExpression("dim1"), makeColumnExpression("j0.k")),
                  JoinType.LEFT))
              .intervals(querySegmentSpec(Filtration.eternity()))
              .granularity(Granularities.ALL)
              .aggregators(aggregators(new CountAggregatorFactory("a0")))
              .context(QUERY_CONTEXT_DEFAULT)
              .build(),
          Druids.newTimeseriesQueryBuilder()
              .dataSource(join(
                  new TableDataSource(CalciteTests.DATASOURCE1),
                  new LookupDataSource("lookyloo"),
                  "j0.",
                  equalsCondition(makeColumnExpression("dim1"), makeColumnExpression("j0.k")),
                  JoinType.INNER))
              .intervals(querySegmentSpec(Filtration.eternity()))
              .granularity(Granularities.ALL)
              .aggregators(aggregators(new CountAggregatorFactory("a0")))
              .context(QUERY_CONTEXT_DEFAULT)
              .build()),
      ImmutableList.of(new Object[] { 6L }, new Object[] { 1L }));
}
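
Each UNION ALL branch plans to its own timeseries query with a single CountAggregatorFactory("a0"); the only difference between the two expected queries is the join type. The shared join data source, taken from the expected queries above (declaring the result as a DataSource is an assumption about the test helper's return type):

// Join of the foo table to the lookyloo lookup on dim1 = k; the two UNION ALL branches
// differ only in passing JoinType.LEFT versus JoinType.INNER here.
final DataSource joined = join(
    new TableDataSource(CalciteTests.DATASOURCE1),
    new LookupDataSource("lookyloo"),
    "j0.",
    equalsCondition(makeColumnExpression("dim1"), makeColumnExpression("j0.k")),
    JoinType.LEFT);
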