
Example 21 with TableDataSource

Use of io.druid.query.TableDataSource in project druid by druid-io.

From the class TopNQueryQueryToolChestTest, method testCacheStrategy:

@Test
public void testCacheStrategy() throws Exception {
    CacheStrategy<Result<TopNResultValue>, Object, TopNQuery> strategy =
        new TopNQueryQueryToolChest(null, null).getCacheStrategy(
            new TopNQuery(
                new TableDataSource("dummy"), VirtualColumns.EMPTY,
                new DefaultDimensionSpec("test", "test"), new NumericTopNMetricSpec("metric1"), 3,
                new MultipleIntervalSegmentSpec(ImmutableList.of(new Interval("2015-01-01/2015-01-02"))),
                null, Granularities.ALL,
                ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("metric1")),
                ImmutableList.<PostAggregator>of(new ConstantPostAggregator("post", 10)), null));
    // test timestamps that result in integer size millis
    final Result<TopNResultValue> result = new Result<>(
        new DateTime(123L),
        new TopNResultValue(Arrays.asList(ImmutableMap.<String, Object>of("test", "val1", "metric1", 2))));
    Object preparedValue = strategy.prepareForCache().apply(result);
    ObjectMapper objectMapper = new DefaultObjectMapper();
    Object fromCacheValue = objectMapper.readValue(objectMapper.writeValueAsBytes(preparedValue), strategy.getCacheObjectClazz());
    Result<TopNResultValue> fromCacheResult = strategy.pullFromCache().apply(fromCacheValue);
    Assert.assertEquals(result, fromCacheResult);
}
Also used: PostAggregator (io.druid.query.aggregation.PostAggregator), FieldAccessPostAggregator (io.druid.query.aggregation.post.FieldAccessPostAggregator), ArithmeticPostAggregator (io.druid.query.aggregation.post.ArithmeticPostAggregator), ConstantPostAggregator (io.druid.query.aggregation.post.ConstantPostAggregator), MultipleIntervalSegmentSpec (io.druid.query.spec.MultipleIntervalSegmentSpec), AggregatorFactory (io.druid.query.aggregation.AggregatorFactory), CountAggregatorFactory (io.druid.query.aggregation.CountAggregatorFactory), DefaultDimensionSpec (io.druid.query.dimension.DefaultDimensionSpec), DateTime (org.joda.time.DateTime), Result (io.druid.query.Result), TableDataSource (io.druid.query.TableDataSource), DefaultObjectMapper (io.druid.jackson.DefaultObjectMapper), ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper), Interval (org.joda.time.Interval), Test (org.junit.Test)
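
These cache-strategy tests all follow the same round trip: prepare the result for the cache, push it through Jackson bytes, and pull it back out. A minimal sketch of that contract, factored into a hypothetical helper (roundTripThroughCache is not Druid API; it only assumes the prepareForCache/pullFromCache/getCacheObjectClazz methods used above):

// Hypothetical helper: simulates a cache hit in another process by serializing
// the prepared value to bytes and reading it back before restoring the result.
static <T, CacheType, QueryType extends Query<T>> T roundTripThroughCache(
        CacheStrategy<T, CacheType, QueryType> strategy, T result) throws java.io.IOException {
    ObjectMapper objectMapper = new DefaultObjectMapper();
    CacheType prepared = strategy.prepareForCache().apply(result);
    CacheType restored = objectMapper.readValue(
            objectMapper.writeValueAsBytes(prepared), strategy.getCacheObjectClazz());
    return strategy.pullFromCache().apply(restored);
}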

Example 22 with TableDataSource

Use of io.druid.query.TableDataSource in project druid by druid-io.

From the class TimeSeriesUnionQueryRunnerTest, method testUnionResultMerging:

@Test
public void testUnionResultMerging() {
    TimeseriesQuery query = Druids.newTimeseriesQueryBuilder()
        .dataSource(new UnionDataSource(Lists.newArrayList(new TableDataSource("ds1"), new TableDataSource("ds2"))))
        .granularity(QueryRunnerTestHelper.dayGran)
        .intervals(QueryRunnerTestHelper.firstToThird)
        .aggregators(Arrays.<AggregatorFactory>asList(QueryRunnerTestHelper.rowsCount, new LongSumAggregatorFactory("idx", "index")))
        .descending(descending)
        .build();
    QueryToolChest toolChest = new TimeseriesQueryQueryToolChest(QueryRunnerTestHelper.NoopIntervalChunkingQueryRunnerDecorator());
    final List<Result<TimeseriesResultValue>> ds1 = Lists.newArrayList(
        new Result<>(new DateTime("2011-04-02"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 1L, "idx", 2L))),
        new Result<>(new DateTime("2011-04-03"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 3L, "idx", 4L))));
    final List<Result<TimeseriesResultValue>> ds2 = Lists.newArrayList(
        new Result<>(new DateTime("2011-04-01"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 5L, "idx", 6L))),
        new Result<>(new DateTime("2011-04-02"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 7L, "idx", 8L))),
        new Result<>(new DateTime("2011-04-04"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 9L, "idx", 10L))));
    QueryRunner mergingRunner = toolChest.mergeResults(
        new UnionQueryRunner<>(new QueryRunner<Result<TimeseriesResultValue>>() {

        @Override
        public Sequence<Result<TimeseriesResultValue>> run(Query<Result<TimeseriesResultValue>> query, Map<String, Object> responseContext) {
            if (query.getDataSource().equals(new TableDataSource("ds1"))) {
                return Sequences.simple(descending ? Lists.reverse(ds1) : ds1);
            } else {
                return Sequences.simple(descending ? Lists.reverse(ds2) : ds2);
            }
        }
    }));
    List<Result<TimeseriesResultValue>> expectedResults = Arrays.asList(
        new Result<>(new DateTime("2011-04-01"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 5L, "idx", 6L))),
        new Result<>(new DateTime("2011-04-02"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 8L, "idx", 10L))),
        new Result<>(new DateTime("2011-04-03"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 3L, "idx", 4L))),
        new Result<>(new DateTime("2011-04-04"), new TimeseriesResultValue(ImmutableMap.<String, Object>of("rows", 9L, "idx", 10L))));
    Iterable<Result<TimeseriesResultValue>> results = Sequences.toList(
        mergingRunner.run(query, Maps.<String, Object>newHashMap()),
        Lists.<Result<TimeseriesResultValue>>newArrayList());
    assertExpectedResults(expectedResults, results);
}
Also used: Query (io.druid.query.Query), LongSumAggregatorFactory (io.druid.query.aggregation.LongSumAggregatorFactory), QueryToolChest (io.druid.query.QueryToolChest), UnionDataSource (io.druid.query.UnionDataSource), DateTime (org.joda.time.DateTime), QueryRunner (io.druid.query.QueryRunner), UnionQueryRunner (io.druid.query.UnionQueryRunner), Result (io.druid.query.Result), TableDataSource (io.druid.query.TableDataSource), HashMap (java.util.HashMap), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), Test (org.junit.Test)
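
The merge on 2011-04-02 is the point of the test: that day appears in both ds1 and ds2, so mergeResults() combines the two rows aggregator by aggregator (rows: 1 + 7 = 8, idx: 2 + 8 = 10), while the other days pass through unchanged. The union data source driving the fan-out is just an ordered list of tables, as built in the query above:

// The UnionQueryRunner issues one sub-query per table in the union; the toolchest
// then merges result rows that share a timestamp.
UnionDataSource union = new UnionDataSource(Lists.newArrayList(
    new TableDataSource("ds1"),
    new TableDataSource("ds2")));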

Example 23 with TableDataSource

Use of io.druid.query.TableDataSource in project druid by druid-io.

From the class TimeseriesQueryQueryToolChestTest, method testCacheStrategy:

@Test
public void testCacheStrategy() throws Exception {
    CacheStrategy<Result<TimeseriesResultValue>, Object, TimeseriesQuery> strategy =
        new TimeseriesQueryQueryToolChest(null).getCacheStrategy(
            new TimeseriesQuery(
                new TableDataSource("dummy"),
                new MultipleIntervalSegmentSpec(ImmutableList.of(new Interval("2015-01-01/2015-01-02"))),
                descending, VirtualColumns.EMPTY, null, Granularities.ALL,
                ImmutableList.<AggregatorFactory>of(new CountAggregatorFactory("metric1")), null, null));
    // test timestamps that result in integer size millis
    final Result<TimeseriesResultValue> result = new Result<>(
        new DateTime(123L),
        new TimeseriesResultValue(ImmutableMap.<String, Object>of("metric1", 2)));
    Object preparedValue = strategy.prepareForCache().apply(result);
    ObjectMapper objectMapper = new DefaultObjectMapper();
    Object fromCacheValue = objectMapper.readValue(objectMapper.writeValueAsBytes(preparedValue), strategy.getCacheObjectClazz());
    Result<TimeseriesResultValue> fromCacheResult = strategy.pullFromCache().apply(fromCacheValue);
    Assert.assertEquals(result, fromCacheResult);
}
Also used: MultipleIntervalSegmentSpec (io.druid.query.spec.MultipleIntervalSegmentSpec), AggregatorFactory (io.druid.query.aggregation.AggregatorFactory), CountAggregatorFactory (io.druid.query.aggregation.CountAggregatorFactory), DateTime (org.joda.time.DateTime), Result (io.druid.query.Result), TableDataSource (io.druid.query.TableDataSource), DefaultObjectMapper (io.druid.jackson.DefaultObjectMapper), ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper), Interval (org.joda.time.Interval), Test (org.junit.Test)
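
Under the assumptions of the sketch after Example 21, this test body reduces to a single round trip and an equality check:

// Using the hypothetical roundTripThroughCache helper sketched earlier.
Result<TimeseriesResultValue> fromCache = roundTripThroughCache(strategy, result);
Assert.assertEquals(result, fromCache);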

Example 24 with TableDataSource

Use of io.druid.query.TableDataSource in project druid by druid-io.

From the class SelectQuerySpecTest, method testPagingSpecFromNext:

@Test
public void testPagingSpecFromNext() throws Exception {
    String baseQueryJson =
        "{\"queryType\":\"select\",\"dataSource\":{\"type\":\"table\",\"name\":\"testing\"},"
        + "\"intervals\":{\"type\":\"LegacySegmentSpec\",\"intervals\":[\"2011-01-12T00:00:00.000Z/2011-01-14T00:00:00.000Z\"]},"
        + "\"descending\":true,"
        + "\"filter\":null,"
        + "\"granularity\":{\"type\":\"all\"},"
        + "\"dimensions\":"
        + "[{\"type\":\"default\",\"dimension\":\"market\",\"outputName\":\"market\",\"outputType\":\"STRING\"},"
        + "{\"type\":\"default\",\"dimension\":\"quality\",\"outputName\":\"quality\",\"outputType\":\"STRING\"}],"
        + "\"metrics\":[\"index\"],"
        + "\"virtualColumns\":[],";
    String withNull = baseQueryJson + "\"pagingSpec\":{\"pagingIdentifiers\":{},\"threshold\":3,\"fromNext\":null}," + "\"context\":null}";
    String withFalse = baseQueryJson + "\"pagingSpec\":{\"pagingIdentifiers\":{},\"threshold\":3,\"fromNext\":false}," + "\"context\":null}";
    String withTrue = baseQueryJson + "\"pagingSpec\":{\"pagingIdentifiers\":{},\"threshold\":3,\"fromNext\":true}," + "\"context\":null}";
    SelectQuery queryWithNull = new SelectQuery(
        new TableDataSource(QueryRunnerTestHelper.dataSource),
        new LegacySegmentSpec(new Interval("2011-01-12/2011-01-14")),
        true, null, QueryRunnerTestHelper.allGran,
        DefaultDimensionSpec.toSpec(Arrays.<String>asList("market", "quality")),
        Arrays.<String>asList("index"), null,
        new PagingSpec(null, 3, null), null);
    SelectQuery queryWithFalse = queryWithNull.withPagingSpec(new PagingSpec(null, 3, false));
    SelectQuery queryWithTrue = queryWithNull.withPagingSpec(new PagingSpec(null, 3, true));
    String actualWithNull = objectMapper.writeValueAsString(queryWithNull);
    Assert.assertEquals(withTrue, actualWithNull);
    String actualWithFalse = objectMapper.writeValueAsString(queryWithFalse);
    Assert.assertEquals(withFalse, actualWithFalse);
    String actualWithTrue = objectMapper.writeValueAsString(queryWithTrue);
    Assert.assertEquals(withTrue, actualWithTrue);
    Assert.assertEquals(queryWithNull, objectMapper.readValue(actualWithNull, SelectQuery.class));
    Assert.assertEquals(queryWithFalse, objectMapper.readValue(actualWithFalse, SelectQuery.class));
    Assert.assertEquals(queryWithTrue, objectMapper.readValue(actualWithTrue, SelectQuery.class));
}
Also used: TableDataSource (io.druid.query.TableDataSource), LegacySegmentSpec (io.druid.query.spec.LegacySegmentSpec), Interval (org.joda.time.Interval), Test (org.junit.Test)
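
The first assertion is the telling one: serializing queryWithNull is expected to produce the withTrue JSON, i.e. a null fromNext is replaced by the default true when the PagingSpec is written out, while an explicit false survives the round trip. A minimal sketch of that default (behavior inferred from the assertions above, not from the PagingSpec source):

// Inferred from the test: fromNext == null serializes as the default, true.
PagingSpec defaulted = new PagingSpec(null, 3, null);
String json = objectMapper.writeValueAsString(defaulted);
// json is expected to contain "fromNext":true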

Example 25 with TableDataSource

Use of io.druid.query.TableDataSource in project druid by druid-io.

From the class SinkQuerySegmentWalker, method getQueryRunnerForSegments:

@Override
public <T> QueryRunner<T> getQueryRunnerForSegments(final Query<T> query, final Iterable<SegmentDescriptor> specs) {
    // We only handle one particular dataSource. Make sure that's what we have, then ignore from here on out.
    if (!(query.getDataSource() instanceof TableDataSource)
            || !dataSource.equals(((TableDataSource) query.getDataSource()).getName())) {
        log.makeAlert("Received query for unknown dataSource").addData("dataSource", query.getDataSource()).emit();
        return new NoopQueryRunner<>();
    }
    final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
    if (factory == null) {
        throw new ISE("Unknown query type[%s].", query.getClass());
    }
    final QueryToolChest<T, Query<T>> toolChest = factory.getToolchest();
    final Function<Query<T>, ServiceMetricEvent.Builder> builderFn =
        new Function<Query<T>, ServiceMetricEvent.Builder>() {

        @Override
        public ServiceMetricEvent.Builder apply(@Nullable Query<T> input) {
            return toolChest.makeMetricBuilder(query);
        }
    };
    final boolean skipIncrementalSegment = query.getContextValue(CONTEXT_SKIP_INCREMENTAL_SEGMENT, false);
    final AtomicLong cpuTimeAccumulator = new AtomicLong(0L);
    return CPUTimeMetricQueryRunner.safeBuild(
        toolChest.mergeResults(
            factory.mergeRunners(
                queryExecutorService,
                FunctionalIterable.create(specs).transform(new Function<SegmentDescriptor, QueryRunner<T>>() {

        @Override
        public QueryRunner<T> apply(final SegmentDescriptor descriptor) {
            final PartitionHolder<Sink> holder = sinkTimeline.findEntry(descriptor.getInterval(), descriptor.getVersion());
            if (holder == null) {
                return new ReportTimelineMissingSegmentQueryRunner<>(descriptor);
            }
            final PartitionChunk<Sink> chunk = holder.getChunk(descriptor.getPartitionNumber());
            if (chunk == null) {
                return new ReportTimelineMissingSegmentQueryRunner<>(descriptor);
            }
            final Sink theSink = chunk.getObject();
            final String sinkSegmentIdentifier = theSink.getSegment().getIdentifier();
            return new SpecificSegmentQueryRunner<>(
                withPerSinkMetrics(
                    new BySegmentQueryRunner<>(
                        sinkSegmentIdentifier,
                        descriptor.getInterval().getStart(),
                        factory.mergeRunners(
                            MoreExecutors.sameThreadExecutor(),
                            Iterables.transform(theSink, new Function<FireHydrant, QueryRunner<T>>() {

                @Override
                public QueryRunner<T> apply(final FireHydrant hydrant) {
                    // Hydrant might swap at any point, but if it's swapped at the start
                    // then we know it's *definitely* swapped.
                    final boolean hydrantDefinitelySwapped = hydrant.hasSwapped();
                    if (skipIncrementalSegment && !hydrantDefinitelySwapped) {
                        return new NoopQueryRunner<>();
                    }
                    // Prevent the underlying segment from swapping while it's being iterated
                    final Pair<Segment, Closeable> segment = hydrant.getAndIncrementSegment();
                    try {
                        QueryRunner<T> baseRunner = QueryRunnerHelper.makeClosingQueryRunner(
                            factory.createRunner(segment.lhs), segment.rhs);
                        // 1) Only use caching if data is immutable
                        // 2) Hydrants are not the same between replicas, make sure cache is local
                        if (hydrantDefinitelySwapped && cache.isLocal()) {
                            return new CachingQueryRunner<>(
                                makeHydrantCacheIdentifier(hydrant), descriptor, objectMapper, cache,
                                toolChest, baseRunner, MoreExecutors.sameThreadExecutor(), cacheConfig);
                        } else {
                            return baseRunner;
                        }
                    } catch (RuntimeException e) {
                        CloseQuietly.close(segment.rhs);
                        throw e;
                    }
                }
            }))), builderFn, sinkSegmentIdentifier, cpuTimeAccumulator), new SpecificSegmentSpec(descriptor));
        }
    }))), builderFn, emitter, cpuTimeAccumulator, true);
}
Also used: Query (io.druid.query.Query), Function (com.google.common.base.Function), ReportTimelineMissingSegmentQueryRunner (io.druid.query.ReportTimelineMissingSegmentQueryRunner), Sink (io.druid.segment.realtime.plumber.Sink), SpecificSegmentQueryRunner (io.druid.query.spec.SpecificSegmentQueryRunner), NoopQueryRunner (io.druid.query.NoopQueryRunner), SegmentDescriptor (io.druid.query.SegmentDescriptor), ISE (io.druid.java.util.common.ISE), FireHydrant (io.druid.segment.realtime.FireHydrant), Pair (io.druid.java.util.common.Pair), BySegmentQueryRunner (io.druid.query.BySegmentQueryRunner), MetricsEmittingQueryRunner (io.druid.query.MetricsEmittingQueryRunner), CachingQueryRunner (io.druid.client.CachingQueryRunner), QueryRunner (io.druid.query.QueryRunner), CPUTimeMetricQueryRunner (io.druid.query.CPUTimeMetricQueryRunner), AtomicLong (java.util.concurrent.atomic.AtomicLong), TableDataSource (io.druid.query.TableDataSource), SpecificSegmentSpec (io.druid.query.spec.SpecificSegmentSpec), ServiceMetricEvent (com.metamx.emitter.service.ServiceMetricEvent), Nullable (javax.annotation.Nullable)
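
The TableDataSource usage here is the ownership check at the top of the method: a realtime walker serves exactly one table, so any other data source gets a no-op runner. Extracted as a minimal sketch (a hypothetical helper, not Druid API):

// Hypothetical helper mirroring the guard in getQueryRunnerForSegments: the query
// must target a table, and that table must be this walker's dataSource.
static boolean ownsDataSource(Query<?> query, String dataSource) {
    return query.getDataSource() instanceof TableDataSource
        && dataSource.equals(((TableDataSource) query.getDataSource()).getName());
}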

Aggregations

TableDataSource (io.druid.query.TableDataSource): 25 usages
Interval (org.joda.time.Interval): 18
Test (org.junit.Test): 17
Result (io.druid.query.Result): 8
MultipleIntervalSegmentSpec (io.druid.query.spec.MultipleIntervalSegmentSpec): 7
TimelineObjectHolder (io.druid.timeline.TimelineObjectHolder): 7
DateTime (org.joda.time.DateTime): 7
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 6
DefaultObjectMapper (io.druid.jackson.DefaultObjectMapper): 5
DataSegment (io.druid.timeline.DataSegment): 5
Pair (io.druid.java.util.common.Pair): 4
CountAggregatorFactory (io.druid.query.aggregation.CountAggregatorFactory): 4
TimelineLookup (io.druid.timeline.TimelineLookup): 4
CountDownLatch (java.util.concurrent.CountDownLatch): 4
Function (com.google.common.base.Function): 3
ImmutableMap (com.google.common.collect.ImmutableMap): 3
Query (io.druid.query.Query): 3
QueryRunner (io.druid.query.QueryRunner): 3
SegmentDescriptor (io.druid.query.SegmentDescriptor): 3
AggregatorFactory (io.druid.query.aggregation.AggregatorFactory): 3