Use of org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest in project druid by druid-io.
From the class CachingQueryRunnerTest, method testTimeseries:
@Test
public void testTimeseries() throws Exception
{
  for (boolean descending : new boolean[]{false, true}) {
    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
                                  .dataSource(QueryRunnerTestHelper.DATA_SOURCE)
                                  .granularity(QueryRunnerTestHelper.DAY_GRAN)
                                  .intervals(QueryRunnerTestHelper.FIRST_TO_THIRD)
                                  .aggregators(
                                      Arrays.asList(
                                          QueryRunnerTestHelper.ROWS_COUNT,
                                          new LongSumAggregatorFactory("idx", "index"),
                                          QueryRunnerTestHelper.QUALITY_UNIQUES
                                      )
                                  )
                                  .descending(descending)
                                  .build();

    Result row1 = new Result<>(
        DateTimes.of("2011-04-01"),
        new TimeseriesResultValue(
            ImmutableMap.of("rows", 13L, "idx", 6619L, "uniques", QueryRunnerTestHelper.UNIQUES_9)
        )
    );
    Result row2 = new Result<>(
        DateTimes.of("2011-04-02"),
        new TimeseriesResultValue(
            ImmutableMap.of("rows", 13L, "idx", 5827L, "uniques", QueryRunnerTestHelper.UNIQUES_9)
        )
    );

    List<Result> expectedResults;
    if (descending) {
      expectedResults = Lists.newArrayList(row2, row1);
    } else {
      expectedResults = Lists.newArrayList(row1, row2);
    }

    QueryToolChest toolChest = new TimeseriesQueryQueryToolChest();
    testCloseAndPopulate(expectedResults, expectedResults, query, toolChest);
    testUseCache(expectedResults, query, toolChest);
  }
}
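For reference, the caching round-trip that testCloseAndPopulate and testUseCache exercise runs through the toolchest's CacheStrategy. Below is a minimal sketch of that round-trip, reusing query and row1 from the test above; the boolean argument to prepareForCache/pullFromCache selects result-level caching in recent Druid versions, so treat the exact signatures as an assumption for your version:

TimeseriesQueryQueryToolChest toolChest = new TimeseriesQueryQueryToolChest();
CacheStrategy<Result<TimeseriesResultValue>, Object, TimeseriesQuery> strategy =
    toolChest.getCacheStrategy(query);

byte[] cacheKey = strategy.computeCacheKey(query);              // identifies this query in the segment cache
Object condensed = strategy.prepareForCache(false).apply(row1); // condense a result row for storage
Result<TimeseriesResultValue> restored =
    strategy.pullFromCache(false).apply(condensed);             // rebuild the row on a cache hit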
Use of org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest in project druid by druid-io.
From the class CachingClusteredClientTestUtils, method createWarehouse:
/**
* Returns a new {@link QueryToolChestWarehouse} for unit tests and a resourceCloser which should be closed at the end
* of the test.
*/
public static Pair<QueryToolChestWarehouse, Closer> createWarehouse(ObjectMapper objectMapper)
{
  final Pair<GroupByQueryRunnerFactory, Closer> factoryCloserPair =
      GroupByQueryRunnerTest.makeQueryRunnerFactory(new GroupByQueryConfig());
  final GroupByQueryRunnerFactory factory = factoryCloserPair.lhs;
  final Closer resourceCloser = factoryCloserPair.rhs;
  return Pair.of(
      new MapQueryToolChestWarehouse(
          ImmutableMap.<Class<? extends Query>, QueryToolChest>builder()
              .put(TimeseriesQuery.class, new TimeseriesQueryQueryToolChest())
              .put(TopNQuery.class, new TopNQueryQueryToolChest(new TopNQueryConfig()))
              .put(SearchQuery.class, new SearchQueryQueryToolChest(new SearchQueryConfig()))
              .put(GroupByQuery.class, factory.getToolchest())
              .put(TimeBoundaryQuery.class, new TimeBoundaryQueryQueryToolChest())
              .build()
      ),
      resourceCloser
  );
}
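A hypothetical caller of createWarehouse, closing the returned Closer as the Javadoc requires (objectMapper and someTimeseriesQuery are illustrative names, not part of the original):

final Pair<QueryToolChestWarehouse, Closer> pair =
    CachingClusteredClientTestUtils.createWarehouse(objectMapper);
try {
  // Resolves to the TimeseriesQueryQueryToolChest registered above.
  QueryToolChest toolChest = pair.lhs.getToolChest(someTimeseriesQuery);
  // ... exercise the toolchest in the test ...
} finally {
  pair.rhs.close(); // throws IOException; releases resources (e.g. group-by merge buffers)
}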
Use of org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest in project druid by druid-io.
From the class CachingClusteredClientTest, method testOutOfOrderBackgroundCachePopulation:
@Test
public void testOutOfOrderBackgroundCachePopulation()
{
  // Tasks submitted to the executor below are deferred until a special DrainTask arrives
  // to trigger the actual execution, once we are ready to shuffle the order.
  abstract class DrainTask implements Runnable
  {
  }

  final ForwardingListeningExecutorService randomizingExecutorService = new ForwardingListeningExecutorService()
  {
    final ConcurrentLinkedDeque<Pair<SettableFuture, Object>> taskQueue = new ConcurrentLinkedDeque<>();

    // A direct executor, so that all callbacks on the futures submitted during one query run
    // are complete before moving on to the next query run.
    final ListeningExecutorService delegate = MoreExecutors.listeningDecorator(Execs.directExecutor());

    @Override
    protected ListeningExecutorService delegate()
    {
      return delegate;
    }

    private <T> ListenableFuture<T> maybeSubmitTask(Object task, boolean wait)
    {
      if (wait) {
        // Park the task; its future is resolved later, when the queue is drained.
        SettableFuture<T> future = SettableFuture.create();
        taskQueue.addFirst(Pair.of(future, task));
        return future;
      } else {
        // Drain: run every parked task in a deterministic shuffled order.
        List<Pair<SettableFuture, Object>> tasks = Lists.newArrayList(taskQueue.iterator());
        Collections.shuffle(tasks, new Random(0));
        for (final Pair<SettableFuture, Object> pair : tasks) {
          ListenableFuture future = pair.rhs instanceof Callable
                                    ? delegate.submit((Callable) pair.rhs)
                                    : delegate.submit((Runnable) pair.rhs);
          Futures.addCallback(
              future,
              new FutureCallback()
              {
                @Override
                public void onSuccess(@Nullable Object result)
                {
                  pair.lhs.set(result);
                }

                @Override
                public void onFailure(Throwable t)
                {
                  pair.lhs.setException(t);
                }
              }
          );
        }
      }
      return task instanceof Callable
             ? delegate.submit((Callable) task)
             : (ListenableFuture<T>) delegate.submit((Runnable) task);
    }

    @SuppressWarnings("ParameterPackage")
    @Override
    public <T> ListenableFuture<T> submit(Callable<T> task)
    {
      return maybeSubmitTask(task, true);
    }

    @Override
    public ListenableFuture<?> submit(Runnable task)
    {
      if (task instanceof DrainTask) {
        return maybeSubmitTask(task, false);
      } else {
        return maybeSubmitTask(task, true);
      }
    }
  };

  client = makeClient(new BackgroundCachePopulator(randomizingExecutorService, JSON_MAPPER, new CachePopulatorStats(), -1));

  // Callback to be run every time a query run is complete, to ensure all background
  // caching tasks are executed and the cache is populated before we move on to the next query.
  queryCompletedCallback = new Runnable()
  {
    @Override
    public void run()
    {
      try {
        randomizingExecutorService.submit(
            new DrainTask()
            {
              @Override
              public void run()
              {
                // no-op
              }
            }
        ).get();
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
  };

  final Druids.TimeseriesQueryBuilder builder = Druids.newTimeseriesQueryBuilder()
                                                      .dataSource(DATA_SOURCE)
                                                      .intervals(SEG_SPEC)
                                                      .filters(DIM_FILTER)
                                                      .granularity(GRANULARITY)
                                                      .aggregators(AGGS)
                                                      .postAggregators(POST_AGGS)
                                                      .context(CONTEXT)
                                                      .randomQueryId();

  QueryRunner runner = new FinalizeResultsQueryRunner(getDefaultQueryRunner(), new TimeseriesQueryQueryToolChest());

  testQueryCaching(
      runner,
      builder.build(),
      Intervals.of("2011-01-05/2011-01-10"),
      makeTimeResults(
          DateTimes.of("2011-01-05"), 85, 102,
          DateTimes.of("2011-01-06"), 412, 521,
          DateTimes.of("2011-01-07"), 122, 21894,
          DateTimes.of("2011-01-08"), 5, 20,
          DateTimes.of("2011-01-09"), 18, 521
      ),
      Intervals.of("2011-01-10/2011-01-13"),
      makeTimeResults(
          DateTimes.of("2011-01-10"), 85, 102,
          DateTimes.of("2011-01-11"), 412, 521,
          DateTimes.of("2011-01-12"), 122, 21894
      )
  );
}
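Stripped of the Druid specifics, the executor above implements a defer-and-drain pattern: ordinary tasks are parked in a deque, and a marker task (DrainTask) flushes them in shuffled order, so blocking on the marker's future guarantees every deferred cache write has finished. A standalone sketch of the same idea, with illustrative names:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.concurrent.ConcurrentLinkedDeque;

// Illustrative sketch of the defer-and-drain pattern, not Druid API.
class DeferringExecutor {
  private final ConcurrentLinkedDeque<Runnable> deferred = new ConcurrentLinkedDeque<>();

  // Ordinary tasks are parked rather than run.
  void submit(Runnable task) {
    deferred.addFirst(task);
  }

  // The "drain" runs everything in a deterministic shuffled order and
  // returns only once all parked tasks have completed.
  void drainShuffled(long seed) {
    List<Runnable> tasks = new ArrayList<>(deferred);
    deferred.clear();
    Collections.shuffle(tasks, new Random(seed));
    tasks.forEach(Runnable::run);
  }
}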
Use of org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest in project druid by druid-io.
From the class CachingClusteredClientTest, method testNoSegmentPruningForHashPartitionedSegments:
private void testNoSegmentPruningForHashPartitionedSegments(
    boolean enableSegmentPruning,
    @Nullable HashPartitionFunction partitionFunction,
    boolean useEmptyPartitionDimensions
)
{
  DimFilter filter = new AndDimFilter(
      new SelectorDimFilter("dim1", "a", null),
      new BoundDimFilter("dim2", "e", "zzz", true, true, false, null, StringComparators.LEXICOGRAPHIC),
      // Equivalent filter of dim3 below is InDimFilter("dim3", Arrays.asList("c"), null)
      new AndDimFilter(
          new InDimFilter("dim3", Arrays.asList("a", "c", "e", "g"), null),
          new BoundDimFilter("dim3", "aaa", "ddd", false, false, false, null, StringComparators.LEXICOGRAPHIC)
      )
  );

  final Map<String, Object> context = new HashMap<>(CONTEXT);
  context.put(QueryContexts.SECONDARY_PARTITION_PRUNING_KEY, enableSegmentPruning);

  final Druids.TimeseriesQueryBuilder builder = Druids.newTimeseriesQueryBuilder()
                                                      .dataSource(DATA_SOURCE)
                                                      .filters(filter)
                                                      .granularity(GRANULARITY)
                                                      .intervals(SEG_SPEC)
                                                      .intervals("2011-01-05/2011-01-10")
                                                      .aggregators(RENAMED_AGGS)
                                                      .postAggregators(RENAMED_POST_AGGS)
                                                      .context(context)
                                                      .randomQueryId();
  TimeseriesQuery query = builder.build();
  QueryRunner runner = new FinalizeResultsQueryRunner(getDefaultQueryRunner(), new TimeseriesQueryQueryToolChest());

  final Interval interval1 = Intervals.of("2011-01-06/2011-01-07");
  final Interval interval2 = Intervals.of("2011-01-07/2011-01-08");
  final Interval interval3 = Intervals.of("2011-01-08/2011-01-09");
  final DruidServer lastServer = servers[random.nextInt(servers.length)];

  List<String> partitionDimensions = useEmptyPartitionDimensions ? ImmutableList.of() : ImmutableList.of("dim1");
  final int numPartitions1 = 6;
  for (int i = 0; i < numPartitions1; i++) {
    ServerSelector selector = makeMockHashBasedSelector(lastServer, partitionDimensions, partitionFunction, i, numPartitions1);
    timeline.add(interval1, "v", new NumberedPartitionChunk<>(i, numPartitions1, selector));
  }

  partitionDimensions = useEmptyPartitionDimensions ? ImmutableList.of() : ImmutableList.of("dim2");
  final int numPartitions2 = 3;
  for (int i = 0; i < numPartitions2; i++) {
    ServerSelector selector = makeMockHashBasedSelector(lastServer, partitionDimensions, partitionFunction, i, numPartitions2);
    timeline.add(interval2, "v", new NumberedPartitionChunk<>(i, numPartitions2, selector));
  }

  partitionDimensions = useEmptyPartitionDimensions ? ImmutableList.of() : ImmutableList.of("dim1", "dim3");
  final int numPartitions3 = 4;
  for (int i = 0; i < numPartitions3; i++) {
    ServerSelector selector = makeMockHashBasedSelector(lastServer, partitionDimensions, partitionFunction, i, numPartitions3);
    timeline.add(interval3, "v", new NumberedPartitionChunk<>(i, numPartitions3, selector));
  }

  final Capture<QueryPlus> capture = Capture.newInstance();
  final Capture<ResponseContext> contextCap = Capture.newInstance();
  QueryRunner mockRunner = EasyMock.createNiceMock(QueryRunner.class);
  EasyMock.expect(mockRunner.run(EasyMock.capture(capture), EasyMock.capture(contextCap)))
          .andReturn(Sequences.empty())
          .anyTimes();
  EasyMock.expect(serverView.getQueryRunner(lastServer)).andReturn(mockRunner).anyTimes();
  EasyMock.replay(serverView);
  EasyMock.replay(mockRunner);

  // Expected to read all segments
  Set<SegmentDescriptor> expectedDescriptors = new HashSet<>();
  IntStream.range(0, numPartitions1).forEach(i -> expectedDescriptors.add(new SegmentDescriptor(interval1, "v", i)));
  IntStream.range(0, numPartitions2).forEach(i -> expectedDescriptors.add(new SegmentDescriptor(interval2, "v", i)));
  IntStream.range(0, numPartitions3).forEach(i -> expectedDescriptors.add(new SegmentDescriptor(interval3, "v", i)));

  runner.run(QueryPlus.wrap(query)).toList();

  QuerySegmentSpec querySegmentSpec = ((TimeseriesQuery) capture.getValue().getQuery()).getQuerySegmentSpec();
  Assert.assertSame(MultipleSpecificSegmentSpec.class, querySegmentSpec.getClass());
  final Set<SegmentDescriptor> actualDescriptors = new HashSet<>(((MultipleSpecificSegmentSpec) querySegmentSpec).getDescriptors());
  Assert.assertEquals(expectedDescriptors, actualDescriptors);
}
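The pruning behavior under test is controlled by the query context flag secondaryPartitionPruning (QueryContexts.SECONDARY_PARTITION_PRUNING_KEY, default true), which lets the broker skip hash-partitioned segments that cannot match the filter. A minimal sketch of toggling it on a standalone query; the datasource, interval, and aggregator here are illustrative:

// Setting the flag to false forces the broker to fan out to every partition,
// which is exactly what this test asserts via expectedDescriptors.
TimeseriesQuery noPruning = Druids.newTimeseriesQueryBuilder()
                                  .dataSource("dummy")
                                  .intervals("2011-01-05/2011-01-10")
                                  .granularity(Granularities.ALL)
                                  .aggregators(new CountAggregatorFactory("rows"))
                                  .context(ImmutableMap.of(QueryContexts.SECONDARY_PARTITION_PRUNING_KEY, false))
                                  .build();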
Use of org.apache.druid.query.timeseries.TimeseriesQueryQueryToolChest in project druid by druid-io.
From the class AggregatorFactoryTest, method testResultArraySignature:
@Test
public void testResultArraySignature()
{
  final TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
      .dataSource("dummy")
      .intervals("2000/3000")
      .granularity(Granularities.HOUR)
      .aggregators(
          new CountAggregatorFactory("count"),
          new JavaScriptAggregatorFactory(
              "js",
              ImmutableList.of("col"),
              "function(a,b) { return a + b; }",
              "function() { return 0; }",
              "function(a,b) { return a + b }",
              new JavaScriptConfig(true)
          ),
          // long aggs
          new LongSumAggregatorFactory("longSum", "long-col"),
          new LongMinAggregatorFactory("longMin", "long-col"),
          new LongMaxAggregatorFactory("longMax", "long-col"),
          new LongFirstAggregatorFactory("longFirst", "long-col", null),
          new LongLastAggregatorFactory("longLast", "long-col", null),
          new LongAnyAggregatorFactory("longAny", "long-col"),
          // double aggs
          new DoubleSumAggregatorFactory("doubleSum", "double-col"),
          new DoubleMinAggregatorFactory("doubleMin", "double-col"),
          new DoubleMaxAggregatorFactory("doubleMax", "double-col"),
          new DoubleFirstAggregatorFactory("doubleFirst", "double-col", null),
          new DoubleLastAggregatorFactory("doubleLast", "double-col", null),
          new DoubleAnyAggregatorFactory("doubleAny", "double-col"),
          new DoubleMeanAggregatorFactory("doubleMean", "double-col"),
          // float aggs
          new FloatSumAggregatorFactory("floatSum", "float-col"),
          new FloatMinAggregatorFactory("floatMin", "float-col"),
          new FloatMaxAggregatorFactory("floatMax", "float-col"),
          new FloatFirstAggregatorFactory("floatFirst", "float-col", null),
          new FloatLastAggregatorFactory("floatLast", "float-col", null),
          new FloatAnyAggregatorFactory("floatAny", "float-col"),
          // string aggregators
          new StringFirstAggregatorFactory("stringFirst", "col", null, 1024),
          new StringLastAggregatorFactory("stringLast", "col", null, 1024),
          new StringAnyAggregatorFactory("stringAny", "col", 1024),
          // sketch aggs
          new CardinalityAggregatorFactory("cardinality", ImmutableList.of(DefaultDimensionSpec.of("some-col")), false),
          new HyperUniquesAggregatorFactory("hyperUnique", "hyperunique"),
          new HistogramAggregatorFactory("histogram", "histogram", ImmutableList.of(0.25f, 0.5f, 0.75f)),
          // delegate aggs
          new FilteredAggregatorFactory(
              new HyperUniquesAggregatorFactory("filtered", "hyperunique"),
              new SelectorDimFilter("col", "hello", null)
          ),
          new SuppressedAggregatorFactory(new HyperUniquesAggregatorFactory("suppressed", "hyperunique"))
      )
      .postAggregators(
          new FinalizingFieldAccessPostAggregator("count-finalize", "count"),
          new FinalizingFieldAccessPostAggregator("js-finalize", "js"),
          // long aggs
          new FinalizingFieldAccessPostAggregator("longSum-finalize", "longSum"),
          new FinalizingFieldAccessPostAggregator("longMin-finalize", "longMin"),
          new FinalizingFieldAccessPostAggregator("longMax-finalize", "longMax"),
          new FinalizingFieldAccessPostAggregator("longFirst-finalize", "longFirst"),
          new FinalizingFieldAccessPostAggregator("longLast-finalize", "longLast"),
          new FinalizingFieldAccessPostAggregator("longAny-finalize", "longAny"),
          // double aggs
          new FinalizingFieldAccessPostAggregator("doubleSum-finalize", "doubleSum"),
          new FinalizingFieldAccessPostAggregator("doubleMin-finalize", "doubleMin"),
          new FinalizingFieldAccessPostAggregator("doubleMax-finalize", "doubleMax"),
          new FinalizingFieldAccessPostAggregator("doubleFirst-finalize", "doubleFirst"),
          new FinalizingFieldAccessPostAggregator("doubleLast-finalize", "doubleLast"),
          new FinalizingFieldAccessPostAggregator("doubleAny-finalize", "doubleAny"),
          new FinalizingFieldAccessPostAggregator("doubleMean-finalize", "doubleMean"),
          // finalized floats
          new FinalizingFieldAccessPostAggregator("floatSum-finalize", "floatSum"),
          new FinalizingFieldAccessPostAggregator("floatMin-finalize", "floatMin"),
          new FinalizingFieldAccessPostAggregator("floatMax-finalize", "floatMax"),
          new FinalizingFieldAccessPostAggregator("floatFirst-finalize", "floatFirst"),
          new FinalizingFieldAccessPostAggregator("floatLast-finalize", "floatLast"),
          new FinalizingFieldAccessPostAggregator("floatAny-finalize", "floatAny"),
          // finalized strings
          new FinalizingFieldAccessPostAggregator("stringFirst-finalize", "stringFirst"),
          new FinalizingFieldAccessPostAggregator("stringLast-finalize", "stringLast"),
          new FinalizingFieldAccessPostAggregator("stringAny-finalize", "stringAny"),
          // finalized sketch
          new FinalizingFieldAccessPostAggregator("cardinality-finalize", "cardinality"),
          new FinalizingFieldAccessPostAggregator("hyperUnique-finalize", "hyperUnique"),
          new FinalizingFieldAccessPostAggregator("histogram-finalize", "histogram"),
          // finalized delegate
          new FinalizingFieldAccessPostAggregator("filtered-finalize", "filtered"),
          new FinalizingFieldAccessPostAggregator("suppressed-finalize", "suppressed")
      )
      .build();

  Assert.assertEquals(
      RowSignature.builder()
                  .addTimeColumn()
                  .add("count", ColumnType.LONG)
                  .add("js", ColumnType.FLOAT)
                  .add("longSum", ColumnType.LONG)
                  .add("longMin", ColumnType.LONG)
                  .add("longMax", ColumnType.LONG)
                  .add("longFirst", ColumnType.LONG)
                  .add("longLast", ColumnType.LONG)
                  .add("longAny", ColumnType.LONG)
                  .add("doubleSum", ColumnType.DOUBLE)
                  .add("doubleMin", ColumnType.DOUBLE)
                  .add("doubleMax", ColumnType.DOUBLE)
                  .add("doubleFirst", ColumnType.DOUBLE)
                  .add("doubleLast", ColumnType.DOUBLE)
                  .add("doubleAny", ColumnType.DOUBLE)
                  .add("doubleMean", null)
                  .add("floatSum", ColumnType.FLOAT)
                  .add("floatMin", ColumnType.FLOAT)
                  .add("floatMax", ColumnType.FLOAT)
                  .add("floatFirst", ColumnType.FLOAT)
                  .add("floatLast", ColumnType.FLOAT)
                  .add("floatAny", ColumnType.FLOAT)
                  .add("stringFirst", null)
                  .add("stringLast", null)
                  .add("stringAny", ColumnType.STRING)
                  .add("cardinality", null)
                  .add("hyperUnique", null)
                  .add("histogram", null)
                  .add("filtered", null)
                  .add("suppressed", null)
                  .add("count-finalize", ColumnType.LONG)
                  .add("js-finalize", ColumnType.FLOAT)
                  .add("longSum-finalize", ColumnType.LONG)
                  .add("longMin-finalize", ColumnType.LONG)
                  .add("longMax-finalize", ColumnType.LONG)
                  .add("longFirst-finalize", ColumnType.LONG)
                  .add("longLast-finalize", ColumnType.LONG)
                  .add("longAny-finalize", ColumnType.LONG)
                  .add("doubleSum-finalize", ColumnType.DOUBLE)
                  .add("doubleMin-finalize", ColumnType.DOUBLE)
                  .add("doubleMax-finalize", ColumnType.DOUBLE)
                  .add("doubleFirst-finalize", ColumnType.DOUBLE)
                  .add("doubleLast-finalize", ColumnType.DOUBLE)
                  .add("doubleAny-finalize", ColumnType.DOUBLE)
                  .add("doubleMean-finalize", ColumnType.DOUBLE)
                  .add("floatSum-finalize", ColumnType.FLOAT)
                  .add("floatMin-finalize", ColumnType.FLOAT)
                  .add("floatMax-finalize", ColumnType.FLOAT)
                  .add("floatFirst-finalize", ColumnType.FLOAT)
                  .add("floatLast-finalize", ColumnType.FLOAT)
                  .add("floatAny-finalize", ColumnType.FLOAT)
                  .add("stringFirst-finalize", ColumnType.STRING)
                  .add("stringLast-finalize", ColumnType.STRING)
                  .add("stringAny-finalize", ColumnType.STRING)
                  .add("cardinality-finalize", ColumnType.DOUBLE)
                  .add("hyperUnique-finalize", ColumnType.DOUBLE)
                  .add("histogram-finalize", HistogramAggregatorFactory.TYPE_VISUAL)
                  .add("filtered-finalize", ColumnType.DOUBLE)
                  .add("suppressed-finalize", ColumnType.DOUBLE)
                  .build(),
      new TimeseriesQueryQueryToolChest().resultArraySignature(query)
  );
}
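The signature asserted above also drives array-based result conversion: resultsAsArrays flattens each Result<TimeseriesResultValue> into an Object[] with one slot per signature column, __time first. A brief sketch reusing query from the test; resultsAsArrays exists on QueryToolChest in recent Druid versions, so treat the exact call as an assumption, and resultSequence is an illustrative name:

TimeseriesQueryQueryToolChest toolChest = new TimeseriesQueryQueryToolChest();
RowSignature signature = toolChest.resultArraySignature(query);
// Each Object[] has one slot per signature column: __time, then every
// aggregator and post-aggregator in declaration order.
Sequence<Object[]> arrays = toolChest.resultsAsArrays(query, resultSequence);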