Use of org.apache.druid.query.QueryDataSource in project druid by druid-io.
From class GroupByQueryRunnerFailureTest, method testResourceLimitExceededOnBroker.
@Test(timeout = 60_000L)
public void testResourceLimitExceededOnBroker() {
  // Two levels of nesting mean the broker must merge two groupBy layers;
  // the merge buffer pool cannot cover that, so the query is rejected.
  expectedException.expect(ResourceLimitExceededException.class);
  final GroupByQuery query = GroupByQuery.builder()
      .setDataSource(
          new QueryDataSource(
              GroupByQuery.builder()
                  .setDataSource(
                      GroupByQuery.builder()
                          .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
                          .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
                          .setGranularity(Granularities.ALL)
                          .setDimensions(
                              new DefaultDimensionSpec("quality", "alias"),
                              new DefaultDimensionSpec("market", null)
                          )
                          .setAggregatorSpecs(Collections.singletonList(QueryRunnerTestHelper.ROWS_COUNT))
                          .build()
                  )
                  .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
                  .setGranularity(Granularities.ALL)
                  .setDimensions(new DefaultDimensionSpec("quality", "alias"))
                  .setAggregatorSpecs(Collections.singletonList(QueryRunnerTestHelper.ROWS_COUNT))
                  .build()
          )
      )
      .setGranularity(Granularities.ALL)
      .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setAggregatorSpecs(new LongSumAggregatorFactory("rows", "rows"))
      .setContext(ImmutableMap.of(QueryContexts.TIMEOUT_KEY, 500))
      .build();
  GroupByQueryRunnerTestHelper.runQuery(FACTORY, runner, query);
}
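For orientation, every example on this page builds the same shape: an outer GroupByQuery whose datasource is another GroupByQuery wrapped in QueryDataSource. A minimal sketch of that shape, where the datasource name, interval, and column names are placeholders rather than values from the tests:

import org.apache.druid.java.util.common.granularity.Granularities;
import org.apache.druid.query.QueryDataSource;
import org.apache.druid.query.aggregation.CountAggregatorFactory;
import org.apache.druid.query.aggregation.LongSumAggregatorFactory;
import org.apache.druid.query.dimension.DefaultDimensionSpec;
import org.apache.druid.query.groupby.GroupByQuery;

// Inner query: groups the raw table by one dimension and counts rows.
final GroupByQuery inner = GroupByQuery.builder()
    .setDataSource("someDatasource")       // placeholder table name
    .setInterval("2020-01-01/2020-01-03")  // placeholder interval
    .setGranularity(Granularities.ALL)
    .setDimensions(new DefaultDimensionSpec("dim", "alias"))
    .setAggregatorSpecs(new CountAggregatorFactory("rows"))
    .build();

// Outer query: reads the inner query's results through QueryDataSource
// and sums the per-group row counts.
final GroupByQuery outer = GroupByQuery.builder()
    .setDataSource(new QueryDataSource(inner))
    .setInterval("2020-01-01/2020-01-03")
    .setGranularity(Granularities.ALL)
    .setAggregatorSpecs(new LongSumAggregatorFactory("rows", "rows"))
    .build();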
Use of org.apache.druid.query.QueryDataSource in project druid by druid-io.
From class GroupByQueryRunnerFailureTest, method testInsufficientResourcesOnBroker.
@Test(timeout = 60_000L)
public void testInsufficientResourcesOnBroker() {
  // Single level of nesting; the broker needs one merge buffer to process it.
  final GroupByQuery query = GroupByQuery.builder()
      .setDataSource(
          new QueryDataSource(
              GroupByQuery.builder()
                  .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
                  .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
                  .setGranularity(Granularities.ALL)
                  .setDimensions(new DefaultDimensionSpec("quality", "alias"))
                  .setAggregatorSpecs(Collections.singletonList(QueryRunnerTestHelper.ROWS_COUNT))
                  .build()
          )
      )
      .setGranularity(Granularities.ALL)
      .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setAggregatorSpecs(new LongSumAggregatorFactory("rows", "rows"))
      .setContext(ImmutableMap.of(QueryContexts.TIMEOUT_KEY, 500))
      .build();
  List<ReferenceCountingResourceHolder<ByteBuffer>> holder = null;
  try {
    // Hold a merge buffer out of the pool so the query's request cannot be satisfied.
    holder = MERGE_BUFFER_POOL.takeBatch(1, 10);
    expectedException.expect(QueryCapacityExceededException.class);
    expectedException.expectMessage("Cannot acquire 1 merge buffers. Try again after current running queries are finished.");
    GroupByQueryRunnerTestHelper.runQuery(FACTORY, runner, query);
  } finally {
    if (holder != null) {
      holder.forEach(ReferenceCountingResourceHolder::close);
    }
  }
}
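The acquire-and-release pattern in that test, condensed; MERGE_BUFFER_POOL, takeBatch(1, 10), and the holder type are taken from the test itself:

// Take one buffer out of the pool (waiting at most 10 ms); while the holder
// is open, that buffer is unavailable to any query that needs it.
List<ReferenceCountingResourceHolder<ByteBuffer>> holders = MERGE_BUFFER_POOL.takeBatch(1, 10);
try {
  // ... run whatever should be starved of merge buffers ...
} finally {
  // Closing every holder returns the buffers to the pool.
  holders.forEach(ReferenceCountingResourceHolder::close);
}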
Use of org.apache.druid.query.QueryDataSource in project druid by druid-io.
From class GroupByQueryRunnerFailureTest, method testNotEnoughMergeBuffersOnQueryable.
@Test(timeout = 60_000L)
public void testNotEnoughMergeBuffersOnQueryable() {
  // Unlike the previous test, no buffers are held up front; the queryable node
  // itself cannot acquire enough merge buffers within the 500 ms query timeout.
  expectedException.expect(QueryTimeoutException.class);
  expectedException.expectMessage("Cannot acquire enough merge buffers");
  final GroupByQuery query = GroupByQuery.builder()
      .setDataSource(
          new QueryDataSource(
              GroupByQuery.builder()
                  .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
                  .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
                  .setGranularity(Granularities.ALL)
                  .setDimensions(new DefaultDimensionSpec("quality", "alias"))
                  .setAggregatorSpecs(Collections.singletonList(QueryRunnerTestHelper.ROWS_COUNT))
                  .build()
          )
      )
      .setGranularity(Granularities.ALL)
      .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setAggregatorSpecs(new LongSumAggregatorFactory("rows", "rows"))
      .setContext(ImmutableMap.of(QueryContexts.TIMEOUT_KEY, 500))
      .build();
  GroupByQueryRunnerTestHelper.runQuery(FACTORY, runner, query);
}
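All three failure tests above route their assertions through JUnit 4's ExpectedException rule; the test class declares it once, along the lines of:

import org.junit.Rule;
import org.junit.rules.ExpectedException;

// Declared as a field of the test class; each test then registers the
// exception type and message it expects before running the query.
@Rule
public ExpectedException expectedException = ExpectedException.none();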
Use of org.apache.druid.query.QueryDataSource in project druid by druid-io.
From class GroupByQueryMergeBufferTest, method testNestedGroupByWithNestedSubtotals.
@Test
public void testNestedGroupByWithNestedSubtotals() {
  final GroupByQuery query = GroupByQuery.builder()
      .setDataSource(
          new QueryDataSource(
              GroupByQuery.builder()
                  .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
                  .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
                  .setGranularity(Granularities.ALL)
                  .setDimensions(Arrays.asList(
                      DefaultDimensionSpec.of("quality"),
                      DefaultDimensionSpec.of("market"),
                      DefaultDimensionSpec.of("placement")
                  ))
                  .setSubtotalsSpec(Arrays.asList(
                      Collections.singletonList("quality"),
                      Collections.singletonList("market")
                  ))
                  .setAggregatorSpecs(Collections.singletonList(QueryRunnerTestHelper.ROWS_COUNT))
                  .build()
          )
      )
      .setGranularity(Granularities.ALL)
      .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setDimensions(Arrays.asList(
          DefaultDimensionSpec.of("quality"),
          DefaultDimensionSpec.of("market")
      ))
      .setSubtotalsSpec(Arrays.asList(
          Collections.singletonList("quality"),
          Collections.singletonList("market")
      ))
      .setAggregatorSpecs(new LongSumAggregatorFactory("rows", "rows"))
      .setContext(ImmutableMap.of(QueryContexts.TIMEOUT_KEY, TIMEOUT))
      .build();
  Assert.assertEquals(3, GroupByStrategyV2.countRequiredMergeBufferNum(query));
  GroupByQueryRunnerTestHelper.runQuery(FACTORY, runner, query);
  // 2 for subtotal, 1 for nested group by and 1 for GroupByQueryRunnerFactory#mergeRunners
  Assert.assertEquals(0, MERGE_BUFFER_POOL.getMinRemainBufferNum());
  Assert.assertEquals(4, MERGE_BUFFER_POOL.getPoolSize());
}
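The buffer accounting in that comment, written out with the numbers the assertions check:

int subtotalBuffers = 2;    // one merge buffer per subtotals spec, held on the broker
int nestedBuffers = 1;      // merging the inner groupBy, also on the broker
int mergeRunnersBuffer = 1; // GroupByQueryRunnerFactory#mergeRunners on the queryable node
// countRequiredMergeBufferNum reports the broker share: 2 + 1 = 3.
// Peak usage is 3 + 1 = 4, exactly the pool size, so the pool's minimum
// remaining count observed during the query is 4 - 4 = 0.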
Use of org.apache.druid.query.QueryDataSource in project druid by druid-io.
From class GroupByQueryMergeBufferTest, method testTripleNestedGroupBy.
@Test
public void testTripleNestedGroupBy() {
  final GroupByQuery query = GroupByQuery.builder()
      .setDataSource(
          new QueryDataSource(
              GroupByQuery.builder()
                  .setDataSource(
                      GroupByQuery.builder()
                          .setDataSource(
                              GroupByQuery.builder()
                                  .setDataSource(QueryRunnerTestHelper.DATA_SOURCE)
                                  .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
                                  .setGranularity(Granularities.ALL)
                                  .setDimensions(Lists.newArrayList(
                                      new DefaultDimensionSpec("quality", "alias"),
                                      new DefaultDimensionSpec("market", null),
                                      new DefaultDimensionSpec("placement", null)
                                  ))
                                  .setAggregatorSpecs(Collections.singletonList(QueryRunnerTestHelper.ROWS_COUNT))
                                  .build()
                          )
                          .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
                          .setGranularity(Granularities.ALL)
                          .setDimensions(
                              new DefaultDimensionSpec("quality", "alias"),
                              new DefaultDimensionSpec("market", null)
                          )
                          .setAggregatorSpecs(Collections.singletonList(QueryRunnerTestHelper.ROWS_COUNT))
                          .build()
                  )
                  .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
                  .setGranularity(Granularities.ALL)
                  .setDimensions(new DefaultDimensionSpec("quality", "alias"))
                  .setAggregatorSpecs(Collections.singletonList(QueryRunnerTestHelper.ROWS_COUNT))
                  .build()
          )
      )
      .setGranularity(Granularities.ALL)
      .setInterval(QueryRunnerTestHelper.FIRST_TO_THIRD)
      .setAggregatorSpecs(new LongSumAggregatorFactory("rows", "rows"))
      .setContext(ImmutableMap.of(QueryContexts.TIMEOUT_KEY, TIMEOUT))
      .build();
  Assert.assertEquals(2, GroupByStrategyV2.countRequiredMergeBufferNum(query));
  GroupByQueryRunnerTestHelper.runQuery(FACTORY, runner, query);
  // This should be 1 because the broker needs 2 buffers and the queryable node needs one.
  Assert.assertEquals(1, MERGE_BUFFER_POOL.getMinRemainBufferNum());
  Assert.assertEquals(4, MERGE_BUFFER_POOL.getPoolSize());
}
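Contrasting this with the subtotals test: without subtotals, the required broker buffer count depends only on nesting depth and, per the assertion above, tops out at 2 even for triple nesting, since the broker only ever holds one layer plus its direct input at a time. A rough sketch of that counting rule, consistent with these assertions but not the actual GroupByStrategyV2 implementation:

import org.apache.druid.query.DataSource;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryDataSource;

// Count groupBy layers stacked above the innermost (table) query, capped at 2:
// single nesting -> 1 merge buffer on the broker, double or deeper -> 2.
static int requiredMergeBuffers(Query<?> query) {
  int layers = 0;
  DataSource ds = query.getDataSource();
  while (ds instanceof QueryDataSource && layers < 2) {
    layers++;
    ds = ((QueryDataSource) ds).getQuery().getDataSource();
  }
  return layers;
}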