Use of io.druid.query.QueryRunner in project druid by druid-io.
From the class CachingClusteredClientTest, method testSingleDimensionPruning:
@Test
public void testSingleDimensionPruning() throws Exception {
DimFilter filter = Druids.newAndDimFilterBuilder()
    .fields(Arrays.asList(
        Druids.newOrDimFilterBuilder()
            .fields(Arrays.asList(
                new SelectorDimFilter("dim1", "a", null),
                new BoundDimFilter("dim1", "from", "to", false, false, false, null, StringComparators.LEXICOGRAPHIC)
            ))
            .build(),
        Druids.newAndDimFilterBuilder()
            .fields(Arrays.asList(
                new InDimFilter("dim2", Arrays.asList("a", "c", "e", "g"), null),
                new BoundDimFilter("dim2", "aaa", "hi", false, false, false, null, StringComparators.LEXICOGRAPHIC),
                new BoundDimFilter("dim2", "e", "zzz", true, true, false, null, StringComparators.LEXICOGRAPHIC)
            ))
            .build()
    ))
    .build();
final Druids.TimeseriesQueryBuilder builder = Druids.newTimeseriesQueryBuilder()
    .dataSource(DATA_SOURCE)
    .filters(filter)
    .granularity(GRANULARITY)
    .intervals(SEG_SPEC)
    .context(CONTEXT)
    .intervals("2011-01-05/2011-01-10")
    .aggregators(RENAMED_AGGS)
    .postAggregators(RENAMED_POST_AGGS);
TimeseriesQuery query = builder.build();
Map<String, List> context = new HashMap<>();
final Interval interval1 = new Interval("2011-01-06/2011-01-07");
final Interval interval2 = new Interval("2011-01-07/2011-01-08");
final Interval interval3 = new Interval("2011-01-08/2011-01-09");
QueryRunner runner = new FinalizeResultsQueryRunner(
    client,
    new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator())
);
final DruidServer lastServer = servers[random.nextInt(servers.length)];
ServerSelector selector1 = makeMockSingleDimensionSelector(lastServer, "dim1", null, "b", 1);
ServerSelector selector2 = makeMockSingleDimensionSelector(lastServer, "dim1", "e", "f", 2);
ServerSelector selector3 = makeMockSingleDimensionSelector(lastServer, "dim1", "hi", "zzz", 3);
ServerSelector selector4 = makeMockSingleDimensionSelector(lastServer, "dim2", "a", "e", 4);
ServerSelector selector5 = makeMockSingleDimensionSelector(lastServer, "dim2", null, null, 5);
ServerSelector selector6 = makeMockSingleDimensionSelector(lastServer, "other", "b", null, 6);
timeline.add(interval1, "v", new StringPartitionChunk<>(null, "a", 1, selector1));
timeline.add(interval1, "v", new StringPartitionChunk<>("a", "b", 2, selector2));
timeline.add(interval1, "v", new StringPartitionChunk<>("b", null, 3, selector3));
timeline.add(interval2, "v", new StringPartitionChunk<>(null, "d", 4, selector4));
timeline.add(interval2, "v", new StringPartitionChunk<>("d", null, 5, selector5));
timeline.add(interval3, "v", new StringPartitionChunk<>(null, null, 6, selector6));
final Capture<TimeseriesQuery> capture = Capture.newInstance();
final Capture<Map<String, List>> contextCap = Capture.newInstance();
QueryRunner mockRunner = EasyMock.createNiceMock(QueryRunner.class);
EasyMock.expect(mockRunner.run(EasyMock.capture(capture), EasyMock.capture(contextCap))).andReturn(Sequences.empty()).anyTimes();
EasyMock.expect(serverView.getQueryRunner(lastServer)).andReturn(mockRunner).anyTimes();
EasyMock.replay(serverView);
EasyMock.replay(mockRunner);
List<SegmentDescriptor> descriptors = new ArrayList<>();
descriptors.add(new SegmentDescriptor(interval1, "v", 1));
descriptors.add(new SegmentDescriptor(interval1, "v", 3));
descriptors.add(new SegmentDescriptor(interval2, "v", 5));
descriptors.add(new SegmentDescriptor(interval3, "v", 6));
MultipleSpecificSegmentSpec expected = new MultipleSpecificSegmentSpec(descriptors);
Sequences.toList(runner.run(query, context), Lists.newArrayList());
Assert.assertEquals(expected, capture.getValue().getQuerySegmentSpec());
}
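The makeMockSingleDimensionSelector helper is referenced above but not shown in this listing. Below is a minimal sketch of what it plausibly does, assuming EasyMock wiring and the selector strategy classes from io.druid.client.selector; the exact body is an assumption, not the project's verbatim code:
private ServerSelector makeMockSingleDimensionSelector(DruidServer server, String dimension, String start, String end, int partitionNum) {
    // Hedged sketch: mock a DataSegment whose SingleDimensionShardSpec covers the
    // [start, end) value range on the given dimension, so CachingClusteredClient
    // can prune it against the query filter.
    DataSegment segment = EasyMock.createNiceMock(DataSegment.class);
    EasyMock.expect(segment.getIdentifier()).andReturn(DATA_SOURCE).anyTimes();
    EasyMock.expect(segment.getShardSpec())
            .andReturn(new SingleDimensionShardSpec(dimension, start, end, partitionNum))
            .anyTimes();
    EasyMock.replay(segment);
    ServerSelector selector = new ServerSelector(
        segment,
        new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy())
    );
    selector.addServerAndUpdateSegment(new QueryableDruidServer(server, null), segment);
    return selector;
}
A null start or end presumably marks an unbounded edge of the range, which would explain why selector5 (null, null) and selector6 (a dimension the filter never references) cannot be pruned and therefore appear among the expected segment descriptors.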
Use of io.druid.query.QueryRunner in project druid by druid-io.
From the class CachingClusteredClientTest, method testOutOfOrderBackgroundCachePopulation:
@Test
public void testOutOfOrderBackgroundCachePopulation() throws Exception {
// Marker task type: submitting a DrainTask drains the queued tasks, triggering
// the actual execution once we are ready to shuffle the order.
abstract class DrainTask implements Runnable {
}
final ForwardingListeningExecutorService randomizingExecutorService = new ForwardingListeningExecutorService() {
final ConcurrentLinkedDeque<Pair<SettableFuture, Object>> taskQueue = new ConcurrentLinkedDeque<>();
// Same-thread delegate, so that drained tasks are complete before moving on to the next query run.
final ListeningExecutorService delegate = MoreExecutors.listeningDecorator(MoreExecutors.sameThreadExecutor());
@Override
protected ListeningExecutorService delegate() {
return delegate;
}
private <T> ListenableFuture<T> maybeSubmitTask(Object task, boolean wait) {
if (wait) {
SettableFuture<T> future = SettableFuture.create();
taskQueue.addFirst(Pair.<SettableFuture, Object>of(future, task));
return future;
} else {
List<Pair<SettableFuture, Object>> tasks = Lists.newArrayList(taskQueue.iterator());
Collections.shuffle(tasks, new Random(0));
for (final Pair<SettableFuture, Object> pair : tasks) {
ListenableFuture future = pair.rhs instanceof Callable ? delegate.submit((Callable) pair.rhs) : delegate.submit((Runnable) pair.rhs);
Futures.addCallback(future, new FutureCallback() {
@Override
public void onSuccess(@Nullable Object result) {
pair.lhs.set(result);
}
@Override
public void onFailure(Throwable t) {
pair.lhs.setException(t);
}
});
}
}
return task instanceof Callable ? delegate.submit((Callable) task) : (ListenableFuture<T>) delegate.submit((Runnable) task);
}
@Override
public <T> ListenableFuture<T> submit(Callable<T> task) {
return maybeSubmitTask(task, true);
}
@Override
public ListenableFuture<?> submit(Runnable task) {
if (task instanceof DrainTask) {
return maybeSubmitTask(task, false);
} else {
return maybeSubmitTask(task, true);
}
}
};
client = makeClient(randomizingExecutorService);
// callback to be run every time a query run is complete, to ensure all background
// caching tasks are executed, and cache is populated before we move onto the next query
queryCompletedCallback = new Runnable() {
@Override
public void run() {
try {
randomizingExecutorService.submit(new DrainTask() {
@Override
public void run() {
// no-op
}
}).get();
} catch (Exception e) {
throw Throwables.propagate(e);
}
}
};
final Druids.TimeseriesQueryBuilder builder = Druids.newTimeseriesQueryBuilder()
    .dataSource(DATA_SOURCE)
    .intervals(SEG_SPEC)
    .filters(DIM_FILTER)
    .granularity(GRANULARITY)
    .aggregators(AGGS)
    .postAggregators(POST_AGGS)
    .context(CONTEXT);
QueryRunner runner = new FinalizeResultsQueryRunner(
    client,
    new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator())
);
testQueryCaching(
    runner,
    builder.build(),
    new Interval("2011-01-05/2011-01-10"),
    makeTimeResults(
        new DateTime("2011-01-05"), 85, 102,
        new DateTime("2011-01-06"), 412, 521,
        new DateTime("2011-01-07"), 122, 21894,
        new DateTime("2011-01-08"), 5, 20,
        new DateTime("2011-01-09"), 18, 521
    ),
    new Interval("2011-01-10/2011-01-13"),
    makeTimeResults(
        new DateTime("2011-01-10"), 85, 102,
        new DateTime("2011-01-11"), 412, 521,
        new DateTime("2011-01-12"), 122, 21894
    )
);
}
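makeTimeResults is another fixture helper that these listings reference but do not show. A plausible sketch follows, assuming each (timestamp, rows, imps) triple becomes one timeseries result row and that the metric keys mirror the test's aggregator names; both are assumptions:
private Iterable<Result<TimeseriesResultValue>> makeTimeResults(Object... objects) {
    if (objects.length % 3 != 0) {
        throw new IllegalArgumentException("makeTimeResults expects arguments in groups of 3");
    }
    // Hedged sketch: each (DateTime, rows, imps) triple becomes one Result.
    List<Result<TimeseriesResultValue>> retVal = Lists.newArrayListWithCapacity(objects.length / 3);
    for (int i = 0; i < objects.length; i += 3) {
        // The metric keys "rows" and "imps" are assumed to match the AGGS used above.
        retVal.add(new Result<>(
            (DateTime) objects[i],
            new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", objects[i + 1], "imps", objects[i + 2]))
        ));
    }
    return retVal;
}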
Use of io.druid.query.QueryRunner in project druid by druid-io.
From the class CachingClusteredClientTest, method testTimeseriesMergingOutOfOrderPartitions:
@Test
public void testTimeseriesMergingOutOfOrderPartitions() throws Exception {
final Druids.TimeseriesQueryBuilder builder = Druids.newTimeseriesQueryBuilder()
    .dataSource(DATA_SOURCE)
    .intervals(SEG_SPEC)
    .filters(DIM_FILTER)
    .granularity(GRANULARITY)
    .aggregators(AGGS)
    .postAggregators(POST_AGGS)
    .context(CONTEXT);
QueryRunner runner = new FinalizeResultsQueryRunner(
    client,
    new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator())
);
testQueryCaching(
    runner,
    builder.build(),
    new Interval("2011-01-05/2011-01-10"),
    makeTimeResults(
        new DateTime("2011-01-05T02"), 80, 100,
        new DateTime("2011-01-06T02"), 420, 520,
        new DateTime("2011-01-07T02"), 12, 2194,
        new DateTime("2011-01-08T02"), 59, 201,
        new DateTime("2011-01-09T02"), 181, 52
    ),
    new Interval("2011-01-05/2011-01-10"),
    makeTimeResults(
        new DateTime("2011-01-05T00"), 85, 102,
        new DateTime("2011-01-06T00"), 412, 521,
        new DateTime("2011-01-07T00"), 122, 21894,
        new DateTime("2011-01-08T00"), 5, 20,
        new DateTime("2011-01-09T00"), 18, 521
    )
);
TestHelper.assertExpectedResults(
    makeRenamedTimeResults(
        new DateTime("2011-01-05T00"), 85, 102,
        new DateTime("2011-01-05T02"), 80, 100,
        new DateTime("2011-01-06T00"), 412, 521,
        new DateTime("2011-01-06T02"), 420, 520,
        new DateTime("2011-01-07T00"), 122, 21894,
        new DateTime("2011-01-07T02"), 12, 2194,
        new DateTime("2011-01-08T00"), 5, 20,
        new DateTime("2011-01-08T02"), 59, 201,
        new DateTime("2011-01-09T00"), 18, 521,
        new DateTime("2011-01-09T02"), 181, 52
    ),
    runner.run(
        builder.intervals("2011-01-05/2011-01-10")
            .aggregators(RENAMED_AGGS)
            .postAggregators(RENAMED_POST_AGGS)
            .build(),
        Maps.newHashMap()
    )
);
}
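makeRenamedTimeResults is also unshown; since the assertion runs the query with RENAMED_AGGS and RENAMED_POST_AGGS, it presumably mirrors makeTimeResults but keys each value by the renamed aggregator names. A hedged sketch under that assumption; the key names here are guesses, not verified against the project:
private Iterable<Result<TimeseriesResultValue>> makeRenamedTimeResults(Object... objects) {
    List<Result<TimeseriesResultValue>> retVal = Lists.newArrayListWithCapacity(objects.length / 3);
    for (int i = 0; i < objects.length; i += 3) {
        // "rows" and "imps2" are assumed renamed counterparts of the original metric keys.
        retVal.add(new Result<>(
            (DateTime) objects[i],
            new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", objects[i + 1], "imps2", objects[i + 2]))
        ));
    }
    return retVal;
}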
Use of io.druid.query.QueryRunner in project druid by druid-io.
From the class CachingClusteredClientTest, method testTimeseriesCaching:
@Test
@SuppressWarnings("unchecked")
public void testTimeseriesCaching() throws Exception {
final Druids.TimeseriesQueryBuilder builder = Druids.newTimeseriesQueryBuilder()
    .dataSource(DATA_SOURCE)
    .intervals(SEG_SPEC)
    .filters(DIM_FILTER)
    .granularity(GRANULARITY)
    .aggregators(AGGS)
    .postAggregators(POST_AGGS)
    .context(CONTEXT);
QueryRunner runner = new FinalizeResultsQueryRunner(
    client,
    new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator())
);
testQueryCaching(
    runner,
    builder.build(),
    new Interval("2011-01-01/2011-01-02"), makeTimeResults(new DateTime("2011-01-01"), 50, 5000),
    new Interval("2011-01-02/2011-01-03"), makeTimeResults(new DateTime("2011-01-02"), 30, 6000),
    new Interval("2011-01-04/2011-01-05"), makeTimeResults(new DateTime("2011-01-04"), 23, 85312),
    new Interval("2011-01-05/2011-01-10"),
    makeTimeResults(
        new DateTime("2011-01-05"), 85, 102,
        new DateTime("2011-01-06"), 412, 521,
        new DateTime("2011-01-07"), 122, 21894,
        new DateTime("2011-01-08"), 5, 20,
        new DateTime("2011-01-09"), 18, 521
    ),
    new Interval("2011-01-05/2011-01-10"),
    makeTimeResults(
        new DateTime("2011-01-05T01"), 80, 100,
        new DateTime("2011-01-06T01"), 420, 520,
        new DateTime("2011-01-07T01"), 12, 2194,
        new DateTime("2011-01-08T01"), 59, 201,
        new DateTime("2011-01-09T01"), 181, 52
    )
);
HashMap<String, List> context = new HashMap<String, List>();
TestHelper.assertExpectedResults(
    makeRenamedTimeResults(
        new DateTime("2011-01-01"), 50, 5000,
        new DateTime("2011-01-02"), 30, 6000,
        new DateTime("2011-01-04"), 23, 85312,
        new DateTime("2011-01-05"), 85, 102,
        new DateTime("2011-01-05T01"), 80, 100,
        new DateTime("2011-01-06"), 412, 521,
        new DateTime("2011-01-06T01"), 420, 520,
        new DateTime("2011-01-07"), 122, 21894,
        new DateTime("2011-01-07T01"), 12, 2194,
        new DateTime("2011-01-08"), 5, 20,
        new DateTime("2011-01-08T01"), 59, 201,
        new DateTime("2011-01-09"), 18, 521,
        new DateTime("2011-01-09T01"), 181, 52
    ),
    runner.run(
        builder.intervals("2011-01-01/2011-01-10")
            .aggregators(RENAMED_AGGS)
            .postAggregators(RENAMED_POST_AGGS)
            .build(),
        context
    )
);
}
Use of io.druid.query.QueryRunner in project druid by druid-io.
From the class CachingClusteredClientTest, method testSearchCachingRenamedOutput:
@Test
public void testSearchCachingRenamedOutput() throws Exception {
final Druids.SearchQueryBuilder builder = Druids.newSearchQueryBuilder()
    .dataSource(DATA_SOURCE)
    .filters(DIM_FILTER)
    .granularity(GRANULARITY)
    .limit(1000)
    .intervals(SEG_SPEC)
    .dimensions(Arrays.asList(TOP_DIM))
    .query("how")
    .context(CONTEXT);
testQueryCaching(
    client,
    builder.build(),
    new Interval("2011-01-01/2011-01-02"),
    makeSearchResults(TOP_DIM, new DateTime("2011-01-01"), "how", 1, "howdy", 2, "howwwwww", 3, "howwy", 4),
    new Interval("2011-01-02/2011-01-03"),
    makeSearchResults(TOP_DIM, new DateTime("2011-01-02"), "how1", 1, "howdy1", 2, "howwwwww1", 3, "howwy1", 4),
    new Interval("2011-01-05/2011-01-10"),
    makeSearchResults(
        TOP_DIM,
        new DateTime("2011-01-05"), "how2", 1, "howdy2", 2, "howwwwww2", 3, "howww2", 4,
        new DateTime("2011-01-06"), "how3", 1, "howdy3", 2, "howwwwww3", 3, "howww3", 4,
        new DateTime("2011-01-07"), "how4", 1, "howdy4", 2, "howwwwww4", 3, "howww4", 4,
        new DateTime("2011-01-08"), "how5", 1, "howdy5", 2, "howwwwww5", 3, "howww5", 4,
        new DateTime("2011-01-09"), "how6", 1, "howdy6", 2, "howwwwww6", 3, "howww6", 4
    ),
    new Interval("2011-01-05/2011-01-10"),
    makeSearchResults(
        TOP_DIM,
        new DateTime("2011-01-05T01"), "how2", 1, "howdy2", 2, "howwwwww2", 3, "howww2", 4,
        new DateTime("2011-01-06T01"), "how3", 1, "howdy3", 2, "howwwwww3", 3, "howww3", 4,
        new DateTime("2011-01-07T01"), "how4", 1, "howdy4", 2, "howwwwww4", 3, "howww4", 4,
        new DateTime("2011-01-08T01"), "how5", 1, "howdy5", 2, "howwwwww5", 3, "howww5", 4,
        new DateTime("2011-01-09T01"), "how6", 1, "howdy6", 2, "howwwwww6", 3, "howww6", 4
    )
);
QueryRunner runner = new FinalizeResultsQueryRunner(
    client,
    new SearchQueryQueryToolChest(new SearchQueryConfig(), QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator())
);
HashMap<String, Object> context = new HashMap<String, Object>();
TestHelper.assertExpectedResults(
    makeSearchResults(
        TOP_DIM,
        new DateTime("2011-01-01"), "how", 1, "howdy", 2, "howwwwww", 3, "howwy", 4,
        new DateTime("2011-01-02"), "how1", 1, "howdy1", 2, "howwwwww1", 3, "howwy1", 4,
        new DateTime("2011-01-05"), "how2", 1, "howdy2", 2, "howwwwww2", 3, "howww2", 4,
        new DateTime("2011-01-05T01"), "how2", 1, "howdy2", 2, "howwwwww2", 3, "howww2", 4,
        new DateTime("2011-01-06"), "how3", 1, "howdy3", 2, "howwwwww3", 3, "howww3", 4,
        new DateTime("2011-01-06T01"), "how3", 1, "howdy3", 2, "howwwwww3", 3, "howww3", 4,
        new DateTime("2011-01-07"), "how4", 1, "howdy4", 2, "howwwwww4", 3, "howww4", 4,
        new DateTime("2011-01-07T01"), "how4", 1, "howdy4", 2, "howwwwww4", 3, "howww4", 4,
        new DateTime("2011-01-08"), "how5", 1, "howdy5", 2, "howwwwww5", 3, "howww5", 4,
        new DateTime("2011-01-08T01"), "how5", 1, "howdy5", 2, "howwwwww5", 3, "howww5", 4,
        new DateTime("2011-01-09"), "how6", 1, "howdy6", 2, "howwwwww6", 3, "howww6", 4,
        new DateTime("2011-01-09T01"), "how6", 1, "howdy6", 2, "howwwwww6", 3, "howww6", 4
    ),
    runner.run(builder.intervals("2011-01-01/2011-01-10").build(), context)
);
TestHelper.assertExpectedResults(
    makeSearchResults(
        "new_dim",
        new DateTime("2011-01-01"), "how", 1, "howdy", 2, "howwwwww", 3, "howwy", 4,
        new DateTime("2011-01-02"), "how1", 1, "howdy1", 2, "howwwwww1", 3, "howwy1", 4,
        new DateTime("2011-01-05"), "how2", 1, "howdy2", 2, "howwwwww2", 3, "howww2", 4,
        new DateTime("2011-01-05T01"), "how2", 1, "howdy2", 2, "howwwwww2", 3, "howww2", 4,
        new DateTime("2011-01-06"), "how3", 1, "howdy3", 2, "howwwwww3", 3, "howww3", 4,
        new DateTime("2011-01-06T01"), "how3", 1, "howdy3", 2, "howwwwww3", 3, "howww3", 4,
        new DateTime("2011-01-07"), "how4", 1, "howdy4", 2, "howwwwww4", 3, "howww4", 4,
        new DateTime("2011-01-07T01"), "how4", 1, "howdy4", 2, "howwwwww4", 3, "howww4", 4,
        new DateTime("2011-01-08"), "how5", 1, "howdy5", 2, "howwwwww5", 3, "howww5", 4,
        new DateTime("2011-01-08T01"), "how5", 1, "howdy5", 2, "howwwwww5", 3, "howww5", 4,
        new DateTime("2011-01-09"), "how6", 1, "howdy6", 2, "howwwwww6", 3, "howww6", 4,
        new DateTime("2011-01-09T01"), "how6", 1, "howdy6", 2, "howwwwww6", 3, "howww6", 4
    ),
    runner.run(
        builder.intervals("2011-01-01/2011-01-10")
            .dimensions(new DefaultDimensionSpec(TOP_DIM, "new_dim"))
            .build(),
        context
    )
);
}
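makeSearchResults, also unshown, plausibly pairs each timestamp with the (value, count) hits that follow it until the next timestamp, emitting SearchHit entries for the given dimension. A hedged sketch; the parsing loop is an assumption about the fixture, not verbatim project code:
private Iterable<Result<SearchResultValue>> makeSearchResults(String dim, Object... objects) {
    List<Result<SearchResultValue>> retVal = Lists.newArrayList();
    int index = 0;
    while (index < objects.length) {
        DateTime timestamp = (DateTime) objects[index++];
        // Collect (value, count) pairs until the next DateTime starts a new result row.
        List<SearchHit> values = Lists.newArrayList();
        while (index < objects.length && !(objects[index] instanceof DateTime)) {
            // SearchHit(dimension, value, count); the count argument is assumed
            // to map onto the hit's count field.
            values.add(new SearchHit(dim, objects[index++].toString(), (Integer) objects[index++]));
        }
        retVal.add(new Result<>(timestamp, new SearchResultValue(values)));
    }
    return retVal;
}
Passing "new_dim" as dim, as the second assertion does, would then produce the same hits keyed by the renamed output dimension, matching the DefaultDimensionSpec(TOP_DIM, "new_dim") used in the query.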