Use of org.apache.druid.query.spec.MultipleIntervalSegmentSpec in project druid by druid-io.
The class ClientQuerySegmentWalkerTest, method testGroupByOnScanMultiValue.
@Test
public void testGroupByOnScanMultiValue()
{
  ScanQuery subquery = new Druids.ScanQueryBuilder()
      .dataSource(MULTI)
      .columns("s", "n")
      .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.ETERNITY)))
      .legacy(false)
      .resultFormat(ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST)
      .build();

  final GroupByQuery query =
      (GroupByQuery) GroupByQuery.builder()
                                 .setDataSource(new QueryDataSource(subquery))
                                 .setGranularity(Granularities.ALL)
                                 .setInterval(Intervals.ONLY_ETERNITY)
                                 .setDimensions(DefaultDimensionSpec.of("s"))
                                 .setAggregatorSpecs(new LongSumAggregatorFactory("sum_n", "n"))
                                 .build()
                                 .withId(DUMMY_QUERY_ID);

  testQuery(
      query,
      // GroupBy handles its own subqueries; only the inner one will go to the cluster.
      ImmutableList.of(
          ExpectedQuery.cluster(subquery.withId(DUMMY_QUERY_ID).withSubQueryId("1.1")),
          ExpectedQuery.local(
              query.withDataSource(
                  InlineDataSource.fromIterable(
                      ImmutableList.of(
                          new Object[]{ImmutableList.of("a", "b"), 1},
                          new Object[]{ImmutableList.of("a", "c"), 2},
                          new Object[]{ImmutableList.of("b"), 3},
                          new Object[]{ImmutableList.of("c"), 4}
                      ),
                      RowSignature.builder().add("s", null).add("n", null).build()
                  )
              )
          )
      ),
      ImmutableList.of(
          new Object[]{"a", 3L},
          new Object[]{"b", 4L},
          new Object[]{"c", 6L}
      )
  );

  Assert.assertEquals(2, scheduler.getTotalRun().get());
  Assert.assertEquals(1, scheduler.getTotalPrioritizedAndLaned().get());
  Assert.assertEquals(2, scheduler.getTotalAcquired().get());
  Assert.assertEquals(2, scheduler.getTotalReleased().get());
}
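The test above constructs the interval spec by wrapping a list of Joda intervals; the same pattern recurs throughout this section. A minimal standalone sketch of just that pattern (the class name and the "example" data source are placeholders; the Druid calls are the same ones used in the test):

import com.google.common.collect.ImmutableList;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.query.Druids;
import org.apache.druid.query.scan.ScanQuery;
import org.apache.druid.query.spec.MultipleIntervalSegmentSpec;

public class MultipleIntervalSpecExample
{
  public static ScanQuery eternityScan()
  {
    // Intervals.ETERNITY spans all of time, so this spec places no interval
    // restriction on the scan.
    MultipleIntervalSegmentSpec spec =
        new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.ETERNITY));

    return new Druids.ScanQueryBuilder()
        .dataSource("example")   // placeholder data source name
        .columns("s", "n")
        .intervals(spec)
        .build();
  }
}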
Use of org.apache.druid.query.spec.MultipleIntervalSegmentSpec in project druid by druid-io.
The class ClientQuerySegmentWalkerTest, method testTopNScanMultiValue.
@Test
public void testTopNScanMultiValue()
{
  ScanQuery subquery = new Druids.ScanQueryBuilder()
      .dataSource(MULTI)
      .columns("s", "n")
      .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.ETERNITY)))
      .legacy(false)
      .resultFormat(ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST)
      .build();

  final TopNQuery query =
      (TopNQuery) new TopNQueryBuilder()
          .dataSource(new QueryDataSource(subquery))
          .granularity(Granularities.ALL)
          .intervals(Intervals.ONLY_ETERNITY)
          .dimension(DefaultDimensionSpec.of("s"))
          .metric("sum_n")
          .threshold(100)
          .aggregators(new LongSumAggregatorFactory("sum_n", "n"))
          .build()
          .withId(DUMMY_QUERY_ID);

  testQuery(
      query,
      // TopN handles its own subqueries; only the inner one will go to the cluster.
      ImmutableList.of(
          ExpectedQuery.cluster(subquery.withId(DUMMY_QUERY_ID).withSubQueryId("1.1")),
          ExpectedQuery.local(
              query.withDataSource(
                  InlineDataSource.fromIterable(
                      ImmutableList.of(
                          new Object[]{ImmutableList.of("a", "b"), 1},
                          new Object[]{ImmutableList.of("a", "c"), 2},
                          new Object[]{ImmutableList.of("b"), 3},
                          new Object[]{ImmutableList.of("c"), 4}
                      ),
                      RowSignature.builder().add("s", null).add("n", null).build()
                  )
              )
          )
      ),
      ImmutableList.of(
          new Object[]{Intervals.ETERNITY.getStartMillis(), "c", 6L},
          new Object[]{Intervals.ETERNITY.getStartMillis(), "b", 4L},
          new Object[]{Intervals.ETERNITY.getStartMillis(), "a", 3L}
      )
  );

  Assert.assertEquals(2, scheduler.getTotalRun().get());
  Assert.assertEquals(1, scheduler.getTotalPrioritizedAndLaned().get());
  Assert.assertEquals(2, scheduler.getTotalAcquired().get());
  Assert.assertEquals(2, scheduler.getTotalReleased().get());
}
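Both subquery tests above assert the same decomposition: the inner scan is sent to the cluster, relabeled with the parent query ID and the hierarchical subquery ID "1.1", its rows are materialized into an InlineDataSource, and the outer query then runs locally against that inline source. A sketch of just the materialization step, using only the calls that already appear in the expected-query lists above:

// Inline the subquery results: each Object[] is one row, and the RowSignature
// names the columns ("s" is multi-valued, "n" is numeric; passing null leaves
// the column type unspecified, as in the tests above).
InlineDataSource inline = InlineDataSource.fromIterable(
    ImmutableList.of(
        new Object[]{ImmutableList.of("a", "b"), 1},
        new Object[]{ImmutableList.of("b"), 3}
    ),
    RowSignature.builder().add("s", null).add("n", null).build()
);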
Use of org.apache.druid.query.spec.MultipleIntervalSegmentSpec in project druid by druid-io.
The class UnifiedIndexerAppenderatorsManagerTest, method test_getBundle_knownDataSource.
@Test
public void test_getBundle_knownDataSource()
{
  final UnifiedIndexerAppenderatorsManager.DatasourceBundle bundle = manager.getBundle(
      Druids.newScanQueryBuilder()
            .dataSource(appenderator.getDataSource())
            .intervals(new MultipleIntervalSegmentSpec(Intervals.ONLY_ETERNITY))
            .build()
  );

  Assert.assertEquals("myDataSource", bundle.getWalker().getDataSource());
}
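Note the two equivalent spellings of the interval list in this section: Intervals.ONLY_ETERNITY is already an immutable single-element list containing Intervals.ETERNITY, so it can be handed to MultipleIntervalSegmentSpec directly, while the other tests build the list explicitly:

// Both specs describe the same interval set: all of time.
MultipleIntervalSegmentSpec fromConstant =
    new MultipleIntervalSegmentSpec(Intervals.ONLY_ETERNITY);
MultipleIntervalSegmentSpec fromList =
    new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.ETERNITY));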
Use of org.apache.druid.query.spec.MultipleIntervalSegmentSpec in project druid by druid-io.
The class SetAndVerifyContextQueryRunnerTest, method testTimeoutDefaultTooBigAndOverflows.
@Test
public void testTimeoutDefaultTooBigAndOverflows()
{
  Query<ScanResultValue> query = new Druids.ScanQueryBuilder()
      .dataSource("foo")
      .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.ETERNITY)))
      .build();

  ServerConfig defaultConfig = new ServerConfig()
  {
    @Override
    public long getDefaultQueryTimeout()
    {
      return Long.MAX_VALUE;
    }
  };

  QueryRunner<ScanResultValue> mockRunner = EasyMock.createMock(QueryRunner.class);
  SetAndVerifyContextQueryRunner<ScanResultValue> queryRunner =
      new SetAndVerifyContextQueryRunner<>(defaultConfig, mockRunner);

  Query<ScanResultValue> transformed = queryRunner.withTimeoutAndMaxScatterGatherBytes(query, defaultConfig);

  // No timeout is set on the query and the default timeout is Long.MAX_VALUE; the computed
  // fail time must saturate at Long.MAX_VALUE instead of overflowing into the past.
  Assert.assertEquals((Long) Long.MAX_VALUE, transformed.getContextValue(DirectDruidClient.QUERY_FAIL_TIME));
}
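The behavior under test is saturating deadline arithmetic: the fail time is the current wall-clock time plus the effective timeout, clamped to Long.MAX_VALUE so it never wraps negative. A minimal sketch of that check (illustrative of the expected semantics, not a copy of Druid's implementation; assumes a non-negative timeout):

static long computeFailTime(long nowMillis, long timeoutMillis)
{
  // A plain addition of two large positive longs can overflow to a negative
  // value, which would look like a deadline in the distant past. Clamp instead.
  long failTime = nowMillis + timeoutMillis;
  return failTime < nowMillis ? Long.MAX_VALUE : failTime;
}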
Use of org.apache.druid.query.spec.MultipleIntervalSegmentSpec in project druid by druid-io.
The class SetAndVerifyContextQueryRunnerTest, method testTimeoutZeroIsNotImmediateTimeoutDefaultServersideMax.
@Test
public void testTimeoutZeroIsNotImmediateTimeoutDefaultServersideMax()
{
  Query<ScanResultValue> query = new Druids.ScanQueryBuilder()
      .dataSource("foo")
      .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.ETERNITY)))
      .context(ImmutableMap.of(QueryContexts.TIMEOUT_KEY, 0))
      .build();

  ServerConfig defaultConfig = new ServerConfig();
  QueryRunner<ScanResultValue> mockRunner = EasyMock.createMock(QueryRunner.class);
  SetAndVerifyContextQueryRunner<ScanResultValue> queryRunner =
      new SetAndVerifyContextQueryRunner<>(defaultConfig, mockRunner);

  Query<ScanResultValue> transformed = queryRunner.withTimeoutAndMaxScatterGatherBytes(query, defaultConfig);

  // The timeout is set to 0, so withTimeoutAndMaxScatterGatherBytes sets QUERY_FAIL_TIME to the
  // current time plus the server-side maximum timeout. Since that maximum defaults to
  // Long.MAX_VALUE, the addition saturates and the expected fail time is Long.MAX_VALUE.
  Assert.assertEquals((Long) Long.MAX_VALUE, transformed.getContextValue(DirectDruidClient.QUERY_FAIL_TIME));
}
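The point of this last test is that a client-supplied timeout of 0 means "no timeout" rather than "fail immediately": the runner falls back to the server-side maximum before computing the fail time, and the addition then saturates as in the previous test. A hedged sketch of the substitution (the helper name is hypothetical, and getMaxQueryTimeout is assumed here to be the ServerConfig accessor for the server-side cap):

// A requested timeout of 0 is treated as "unlimited", so substitute the
// server-side cap before computing the fail time (hypothetical helper).
static long effectiveTimeout(long requestedMillis, ServerConfig config)
{
  return requestedMillis == 0 ? config.getMaxQueryTimeout() : requestedMillis;
}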