Use of io.druid.query.TableDataSource in project druid by druid-io.
From the class TopNQueryQueryToolChestTest, method testCacheStrategy.
@Test
public void testCacheStrategy() throws Exception {
CacheStrategy<Result<TopNResultValue>, Object, TopNQuery> strategy = new TopNQueryQueryToolChest(null, null).getCacheStrategy(
    new TopNQuery(
        new TableDataSource("dummy"),
        VirtualColumns.EMPTY,
        new DefaultDimensionSpec("test", "test"),
        new NumericTopNMetricSpec("metric1"),
        3,
        new MultipleIntervalSegmentSpec(ImmutableList.of(new Interval("2015-01-01/2015-01-02"))),
        null,
        Granularities.ALL,
        ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("metric1")),
        ImmutableList.<PostAggregator>of(new ConstantPostAggregator("post", 10)),
        null
    )
);
// test timestamps that result in integer size millis
final Result<TopNResultValue> result = new Result<>(
    new DateTime(123L),
    new TopNResultValue(Arrays.asList(ImmutableMap.<String, Object>of("test", "val1", "metric1", 2)))
);
Object preparedValue = strategy.prepareForCache().apply(result);
ObjectMapper objectMapper = new DefaultObjectMapper();
Object fromCacheValue = objectMapper.readValue(objectMapper.writeValueAsBytes(preparedValue), strategy.getCacheObjectClazz());
Result<TopNResultValue> fromCacheResult = strategy.pullFromCache().apply(fromCacheValue);
Assert.assertEquals(result, fromCacheResult);
}
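The prepare, serialize, deserialize, pull sequence above is the same for every query type, so it can be factored into a small generic helper. A minimal sketch, not part of the Druid test suite, assuming only the CacheStrategy and DefaultObjectMapper calls already used in the test:
// Hypothetical helper: round-trips one result through a CacheStrategy,
// simulating a cache write followed by a cache read.
private static <T, CacheType, QueryType extends Query<T>> T roundTripThroughCache(
    CacheStrategy<T, CacheType, QueryType> strategy,
    T result
) throws IOException {
  ObjectMapper objectMapper = new DefaultObjectMapper();
  // Convert the result into its cacheable representation.
  CacheType prepared = strategy.prepareForCache().apply(result);
  // Serialize to bytes and back, as a cache store and fetch would.
  CacheType fromCache = objectMapper.readValue(objectMapper.writeValueAsBytes(prepared), strategy.getCacheObjectClazz());
  // Convert the cached form back into a query result.
  return strategy.pullFromCache().apply(fromCache);
}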
Use of io.druid.query.TableDataSource in project druid by druid-io.
From the class TimeSeriesUnionQueryRunnerTest, method testUnionResultMerging.
@Test
public void testUnionResultMerging() {
TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
    .dataSource(new UnionDataSource(Lists.newArrayList(new TableDataSource("ds1"), new TableDataSource("ds2"))))
    .granularity(QueryRunnerTestHelper.dayGran)
    .intervals(QueryRunnerTestHelper.firstToThird)
    .aggregators(Arrays.<AggregatorFactory>asList(QueryRunnerTestHelper.rowsCount, new LongSumAggregatorFactory("idx", "index")))
    .descending(descending)
    .build();
QueryToolChest toolChest = new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator());
final List<Result<TimeseriesResultValue>> ds1 = Lists.newArrayList(
    new Result<>(new DateTime("2011-04-02"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 1L, "idx", 2L))),
    new Result<>(new DateTime("2011-04-03"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 3L, "idx", 4L)))
);
final List<Result<TimeseriesResultValue>> ds2 = Lists.newArrayList(
    new Result<>(new DateTime("2011-04-01"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 5L, "idx", 6L))),
    new Result<>(new DateTime("2011-04-02"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 7L, "idx", 8L))),
    new Result<>(new DateTime("2011-04-04"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 9L, "idx", 10L)))
);
QueryRunner mergingrunner = toolChest.mergeResults(new UnionQueryRunner<>(new QueryRunner<Result<TimeseriesResultValue>>() {
@Override
public Sequence<Result<TimeseriesResultValue>> run(Query<Result<TimeseriesResultValue>> query, Map<String, Object> responseContext) {
if (query.getDataSource().equals(new TableDataSource("ds1"))) {
return Sequences.simple(descending ? Lists.reverse(ds1) : ds1);
} else {
return Sequences.simple(descending ? Lists.reverse(ds2) : ds2);
}
}
}));
// 2011-04-01, 2011-04-03, and 2011-04-04 each come from only one datasource; 2011-04-02 appears
// in both, so the merge sums its values (rows 1 + 7 = 8, idx 2 + 8 = 10).
List<Result<TimeseriesResultValue>> expectedResults = Arrays.asList(
    new Result<>(new DateTime("2011-04-01"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 5L, "idx", 6L))),
    new Result<>(new DateTime("2011-04-02"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 8L, "idx", 10L))),
    new Result<>(new DateTime("2011-04-03"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 3L, "idx", 4L))),
    new Result<>(new DateTime("2011-04-04"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 9L, "idx", 10L)))
);
Iterable<Result<TimeseriesResultValue>> results = Sequences.toList(mergingrunner.run(query, Maps.<String, Object>newHashMap()), Lists.<Result<TimeseriesResultValue>>newArrayList());
assertExpectedResults(expectedResults, results);
}
Use of io.druid.query.TableDataSource in project druid by druid-io.
From the class TimeseriesQueryQueryToolChestTest, method testCacheStrategy.
@Test
public void testCacheStrategy() throws Exception {
CacheStrategy<Result<TimeseriesResultValue>, Object, TimeseriesQuery> strategy = new TimeseriesQueryQueryToolChest(null).getCacheStrategy(
    new TimeseriesQuery(
        new TableDataSource("dummy"),
        new MultipleIntervalSegmentSpec(ImmutableList.of(new Interval("2015-01-01/2015-01-02"))),
        descending,
        VirtualColumns.EMPTY,
        null,
        Granularities.ALL,
        ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("metric1")),
        null,
        null
    )
);
// test timestamps that result in integer size millis
final Result<TimeseriesResultValue> result = new Result<>(
    new DateTime(123L),
    new TimeseriesResultValue(ImmutableMap.<String, Object>of("metric1", 2))
);
Object preparedValue = strategy.prepareForCache().apply(result);
ObjectMapper objectMapper = new DefaultObjectMapper();
Object fromCacheValue = objectMapper.readValue(objectMapper.writeValueAsBytes(preparedValue), strategy.getCacheObjectClazz());
Result<TimeseriesResultValue> fromCacheResult = strategy.pullFromCache().apply(fromCacheValue);
Assert.assertEquals(result, fromCacheResult);
}
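With the hypothetical roundTripThroughCache helper sketched after the TopN example, the body of both cache-strategy tests reduces to a single call, for example:
// Assumes the roundTripThroughCache sketch above; not part of the Druid test suite.
Result<TimeseriesResultValue> fromCache = roundTripThroughCache(strategy, result);
Assert.assertEquals(result, fromCache);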
Use of io.druid.query.TableDataSource in project druid by druid-io.
From the class SelectQuerySpecTest, method testPagingSpecFromNext.
@Test
public void testPagingSpecFromNext() throws Exception {
String baseQueryJson = "{\"queryType\":\"select\",\"dataSource\":{\"type\":\"table\",\"name\":\"testing\"},"
    + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2011-01-12T00:00:00.000Z/2011-01-14T00:00:00.000Z\"]},"
    + "\"descending\":true,"
    + "\"filter\":null,"
    + "\"granularity\":{\"type\":\"all\"},"
    + "\"dimensions\":"
    + "[{\"type\":\"default\",\"dimension\":\"market\",\"outputName\":\"market\",\"outputType\":\"STRING\"},"
    + "{\"type\":\"default\",\"dimension\":\"quality\",\"outputName\":\"quality\",\"outputType\":\"STRING\"}],"
    + "\"metrics\":[\"index\"],"
    + "\"virtualColumns\":[],";
String withNull = baseQueryJson + "\"pagingSpec\":{\"pagingIdentifiers\":{},\"threshold\":3,\"fromNext\":null}," + "\"context\":null}";
String withFalse = baseQueryJson + "\"pagingSpec\":{\"pagingIdentifiers\":{},\"threshold\":3,\"fromNext\":false}," + "\"context\":null}";
String withTrue = baseQueryJson + "\"pagingSpec\":{\"pagingIdentifiers\":{},\"threshold\":3,\"fromNext\":true}," + "\"context\":null}";
SelectQuery queryWithNull = new SelectQuery(
    new TableDataSource(QueryRunnerTestHelper.dataSource),
    new LegacySegmentSpec(new Interval("2011-01-12/2011-01-14")),
    true,
    null,
    QueryRunnerTestHelper.allGran,
    DefaultDimensionSpec.toSpec(Arrays.<String>asList("market", "quality")),
    Arrays.<String>asList("index"),
    null,
    new PagingSpec(null, 3, null),
    null
);
SelectQuery queryWithFalse = queryWithNull.withPagingSpec(new PagingSpec(null, 3, false));
SelectQuery queryWithTrue = queryWithNull.withPagingSpec(new PagingSpec(null, 3, true));
String actualWithNull = objectMapper.writeValueAsString(queryWithNull);
Assert.assertEquals(withTrue, actualWithNull);
String actualWithFalse = objectMapper.writeValueAsString(queryWithFalse);
Assert.assertEquals(withFalse, actualWithFalse);
String actualWithTrue = objectMapper.writeValueAsString(queryWithTrue);
Assert.assertEquals(withTrue, actualWithTrue);
Assert.assertEquals(queryWithNull, objectMapper.readValue(actualWithNull, SelectQuery.class));
Assert.assertEquals(queryWithFalse, objectMapper.readValue(actualWithFalse, SelectQuery.class));
Assert.assertEquals(queryWithTrue, objectMapper.readValue(actualWithTrue, SelectQuery.class));
}
Use of io.druid.query.TableDataSource in project druid by druid-io.
From the class SinkQuerySegmentWalker, method getQueryRunnerForSegments.
@Override
public <T> QueryRunner<T> getQueryRunnerForSegments(final Query<T> query, final Iterable<SegmentDescriptor> specs) {
// We only handle one particular dataSource. Make sure that's what we have, then ignore from here on out.
if (!(query.getDataSource() instanceof TableDataSource) || !dataSource.equals(((TableDataSource) query.getDataSource()).getName())) {
log.makeAlert("Received query for unknown dataSource").addData("dataSource", query.getDataSource()).emit();
return new NoopQueryRunner<>();
}
final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
if (factory == null) {
throw new ISE("Unknown query type[%s].", query.getClass());
}
final QueryToolChest<T, Query<T>> toolChest = factory.getToolchest();
final Function<Query<T>, ServiceMetricEvent.Builder> builderFn = new Function<Query<T>, ServiceMetricEvent.Builder>() {
@Override
public ServiceMetricEvent.Builder apply(@Nullable Query<T> input) {
return toolChest.makeMetricBuilder(query);
}
};
final boolean skipIncrementalSegment = query.getContextValue(CONTEXT_SKIP_INCREMENTAL_SEGMENT, false);
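// Collects CPU time spent inside the per-sink runners; the CPUTimeMetricQueryRunner wrapper
// built below reports the accumulated total as a query-level metric.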
final AtomicLong cpuTimeAccumulator = new AtomicLong(0L);
return CPUTimeMetricQueryRunner.safeBuild(
    toolChest.mergeResults(
        factory.mergeRunners(
            queryExecutorService,
            FunctionalIterable.create(specs).transform(new Function<SegmentDescriptor, QueryRunner<T>>() {
@Override
public QueryRunner<T> apply(final SegmentDescriptor descriptor) {
final PartitionHolder<Sink> holder = sinkTimeline.findEntry(descriptor.getInterval(), descriptor.getVersion());
if (holder == null) {
return new ReportTimelineMissingSegmentQueryRunner<>(descriptor);
}
final PartitionChunk<Sink> chunk = holder.getChunk(descriptor.getPartitionNumber());
if (chunk == null) {
return new ReportTimelineMissingSegmentQueryRunner<>(descriptor);
}
final Sink theSink = chunk.getObject();
final String sinkSegmentIdentifier = theSink.getSegment().getIdentifier();
return new SpecificSegmentQueryRunner<>(
    withPerSinkMetrics(
        new BySegmentQueryRunner<>(
            sinkSegmentIdentifier,
            descriptor.getInterval().getStart(),
            factory.mergeRunners(
                MoreExecutors.sameThreadExecutor(),
                Iterables.transform(theSink, new Function<FireHydrant, QueryRunner<T>>() {
@Override
public QueryRunner<T> apply(final FireHydrant hydrant) {
// Hydrant might swap at any point, but if it's swapped at the start
// then we know it's *definitely* swapped.
final boolean hydrantDefinitelySwapped = hydrant.hasSwapped();
if (skipIncrementalSegment && !hydrantDefinitelySwapped) {
return new NoopQueryRunner<>();
}
// Prevent the underlying segment from swapping when its being iterated
final Pair<Segment, Closeable> segment = hydrant.getAndIncrementSegment();
try {
QueryRunner<T> baseRunner = QueryRunnerHelper.makeClosingQueryRunner(factory.createRunner(segment.lhs), segment.rhs);
// 1) Only use caching if data is immutable
// 2) Hydrants are not the same between replicas, make sure cache is local
if (hydrantDefinitelySwapped && cache.isLocal()) {
return new CachingQueryRunner<>(
    makeHydrantCacheIdentifier(hydrant),
    descriptor,
    objectMapper,
    cache,
    toolChest,
    baseRunner,
    MoreExecutors.sameThreadExecutor(),
    cacheConfig
);
} else {
return baseRunner;
}
} catch (RuntimeException e) {
CloseQuietly.close(segment.rhs);
throw e;
}
}
}))), builderFn, sinkSegmentIdentifier, cpuTimeAccumulator), new SpecificSegmentSpec(descriptor));
}
}))), builderFn, emitter, cpuTimeAccumulator, true);
}
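For context, a caller hands this method the query together with the SegmentDescriptors it wants scanned and then runs the returned runner. A minimal, hypothetical usage sketch; the walker variable, the interval, the version "v1", and partition number 0 are illustrative and not taken from the code above:
// Hypothetical caller-side sketch; "walker" is a SinkQuerySegmentWalker whose dataSource
// matches the query, and "query" is a timeseries query as in the earlier examples.
List<SegmentDescriptor> specs = ImmutableList.of(
    new SegmentDescriptor(new Interval("2015-01-01/2015-01-02"), "v1", 0)
);
QueryRunner<Result<TimeseriesResultValue>> runner = walker.getQueryRunnerForSegments(query, specs);
// The runner is lazy; per-segment work happens as the sequence is consumed.
Sequence<Result<TimeseriesResultValue>> results = runner.run(query, Maps.<String, Object>newHashMap());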