Search in sources :

Example 1 with DataSource

use of io.druid.query.DataSource in project druid by druid-io.

In the class ServerManager, the method getQueryRunnerForIntervals:

// Builds a QueryRunner serving the given intervals for the query's table datasource:
// resolves the factory/toolchest for the query type, looks up the datasource's segment
// timeline, creates one runner per partition chunk of every matching timeline entry,
// merges them, and wraps the result with finalization and CPU-time metric accounting.
@Override
public <T> QueryRunner<T> getQueryRunnerForIntervals(Query<T> query, Iterable<Interval> intervals) {
    final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
    if (factory == null) {
        // No registered factory means this process cannot execute this query type at all.
        throw new ISE("Unknown query type[%s].", query.getClass());
    }
    final QueryToolChest<T, Query<T>> toolChest = factory.getToolchest();
    final Function<Query<T>, ServiceMetricEvent.Builder> builderFn = getBuilderFn(toolChest);
    // Accumulates CPU nanoseconds across all per-segment runners; reported once at the end.
    final AtomicLong cpuTimeAccumulator = new AtomicLong(0L);
    DataSource dataSource = query.getDataSource();
    if (!(dataSource instanceof TableDataSource)) {
        // This code path only serves concrete table datasources (no query/union datasources here).
        throw new UnsupportedOperationException("data source type '" + dataSource.getClass().getName() + "' unsupported");
    }
    String dataSourceName = getDataSourceName(dataSource);
    final VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline = dataSources.get(dataSourceName);
    if (timeline == null) {
        // This server holds no segments for the datasource; return an empty-result runner
        // rather than failing, since other servers may hold the data.
        return new NoopQueryRunner<T>();
    }
    // For each requested interval, look up the overlapping timeline entries, then for each
    // entry fan out to one decorated QueryRunner per partition chunk of that segment version.
    FunctionalIterable<QueryRunner<T>> queryRunners = FunctionalIterable.create(intervals).transformCat(new Function<Interval, Iterable<TimelineObjectHolder<String, ReferenceCountingSegment>>>() {

        @Override
        public Iterable<TimelineObjectHolder<String, ReferenceCountingSegment>> apply(Interval input) {
            return timeline.lookup(input);
        }
    }).transformCat(new Function<TimelineObjectHolder<String, ReferenceCountingSegment>, Iterable<QueryRunner<T>>>() {

        @Override
        public Iterable<QueryRunner<T>> apply(@Nullable final TimelineObjectHolder<String, ReferenceCountingSegment> holder) {
            if (holder == null) {
                return null;
            }
            return FunctionalIterable.create(holder.getObject()).transform(new Function<PartitionChunk<ReferenceCountingSegment>, QueryRunner<T>>() {

                @Override
                public QueryRunner<T> apply(PartitionChunk<ReferenceCountingSegment> input) {
                    // SegmentDescriptor pins the exact (interval, version, partition) served,
                    // so retries can report precisely which segment was missing.
                    return buildAndDecorateQueryRunner(factory, toolChest, input.getObject(), new SegmentDescriptor(holder.getInterval(), holder.getVersion(), input.getChunkNumber()), builderFn, cpuTimeAccumulator);
                }
            });
        }
    });
    // Merge all per-segment runners on the processing executor, finalize aggregator values,
    // and emit the accumulated CPU time as a metric when the returned sequence is consumed.
    return CPUTimeMetricQueryRunner.safeBuild(new FinalizeResultsQueryRunner<T>(toolChest.mergeResults(factory.mergeRunners(exec, queryRunners)), toolChest), builderFn, emitter, cpuTimeAccumulator, true);
}
Also used : ReferenceCountingSegment(io.druid.segment.ReferenceCountingSegment) Query(io.druid.query.Query) FunctionalIterable(io.druid.java.util.common.guava.FunctionalIterable) Function(com.google.common.base.Function) NoopQueryRunner(io.druid.query.NoopQueryRunner) SegmentDescriptor(io.druid.query.SegmentDescriptor) ISE(io.druid.java.util.common.ISE) PartitionChunk(io.druid.timeline.partition.PartitionChunk) MetricsEmittingQueryRunner(io.druid.query.MetricsEmittingQueryRunner) ReportTimelineMissingSegmentQueryRunner(io.druid.query.ReportTimelineMissingSegmentQueryRunner) BySegmentQueryRunner(io.druid.query.BySegmentQueryRunner) SpecificSegmentQueryRunner(io.druid.query.spec.SpecificSegmentQueryRunner) ReferenceCountingSegmentQueryRunner(io.druid.query.ReferenceCountingSegmentQueryRunner) FinalizeResultsQueryRunner(io.druid.query.FinalizeResultsQueryRunner) CPUTimeMetricQueryRunner(io.druid.query.CPUTimeMetricQueryRunner) NoopQueryRunner(io.druid.query.NoopQueryRunner) CachingQueryRunner(io.druid.client.CachingQueryRunner) QueryRunner(io.druid.query.QueryRunner) TableDataSource(io.druid.query.TableDataSource) DataSource(io.druid.query.DataSource) AtomicLong(java.util.concurrent.atomic.AtomicLong) TimelineObjectHolder(io.druid.timeline.TimelineObjectHolder) TableDataSource(io.druid.query.TableDataSource) Interval(org.joda.time.Interval)

Example 2 with DataSource

use of io.druid.query.DataSource in project druid by druid-io.

In the class GroupByQueryQueryToolChest, the method mergeGroupByResults:

/**
 * Merges group-by results for the given query. When the query's datasource is itself a
 * (group-by) subquery, the subquery's results are merged first and then fed through the
 * strategy's subquery processing; otherwise merging is delegated directly to the strategy.
 */
private Sequence<Row> mergeGroupByResults(GroupByStrategy groupByStrategy, final GroupByQuery query, GroupByQueryResource resource, QueryRunner<Row> runner, Map<String, Object> context) {
    final DataSource dataSource = query.getDataSource();
    if (!(dataSource instanceof QueryDataSource)) {
        // No subquery involved: the strategy merges the runner's results directly.
        return groupByStrategy.mergeResults(runner, query, context);
    }
    final QueryDataSource queryDataSource = (QueryDataSource) dataSource;
    final GroupByQuery subquery;
    try {
        // Inject outer query context keys into the subquery context if they don't already
        // exist there. Unlike withOverriddenContext's normal behavior, keys already present
        // in the subquery must win, so the outer context is inserted first and then
        // overwritten by the subquery's own entries.
        final Map<String, Object> mergedContext = Maps.newTreeMap();
        final Map<String, Object> outerContext = query.getContext();
        if (outerContext != null) {
            for (Map.Entry<String, Object> entry : outerContext.entrySet()) {
                if (entry.getValue() != null) {
                    mergedContext.put(entry.getKey(), entry.getValue());
                }
            }
        }
        if (queryDataSource.getQuery().getContext() != null) {
            mergedContext.putAll(queryDataSource.getQuery().getContext());
        }
        mergedContext.put(GroupByQuery.CTX_KEY_SORT_BY_DIMS_FIRST, false);
        // Throws ClassCastException if the inner query is not a group-by.
        subquery = (GroupByQuery) queryDataSource.getQuery().withOverriddenContext(mergedContext);
    } catch (ClassCastException e) {
        throw new UnsupportedOperationException("Subqueries must be of type 'group by'");
    }
    // Recursively merge the subquery's own results first. Sorting is disabled here to avoid
    // unnecessary work while merging; results only need sorting when finally returned to the
    // user (note: this flag is only respected by groupBy v1).
    final Sequence<Row> subqueryResult = mergeGroupByResults(
        groupByStrategy,
        subquery.withOverriddenContext(ImmutableMap.<String, Object>of(GroupByQueryHelper.CTX_KEY_SORT_RESULTS, false)),
        resource,
        runner,
        context
    );
    // Finalize the inner metrics before handing them to the outer query, if requested.
    final Sequence<Row> finalizedSubqueryResult = GroupByQuery.getContextFinalize(subquery, false)
        ? new MappedSequence<>(subqueryResult, makePreComputeManipulatorFn(subquery, MetricManipulatorFns.finalizing()))
        : subqueryResult;
    return groupByStrategy.processSubqueryResult(subquery, query, resource, finalizedSubqueryResult);
}
Also used : DataSource(io.druid.query.DataSource) QueryDataSource(io.druid.query.QueryDataSource) QueryDataSource(io.druid.query.QueryDataSource) Row(io.druid.data.input.Row) MapBasedRow(io.druid.data.input.MapBasedRow) HashMap(java.util.HashMap) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap)

Aggregations

DataSource (io.druid.query.DataSource)2 Function (com.google.common.base.Function)1 ImmutableMap (com.google.common.collect.ImmutableMap)1 CachingQueryRunner (io.druid.client.CachingQueryRunner)1 MapBasedRow (io.druid.data.input.MapBasedRow)1 Row (io.druid.data.input.Row)1 ISE (io.druid.java.util.common.ISE)1 FunctionalIterable (io.druid.java.util.common.guava.FunctionalIterable)1 BySegmentQueryRunner (io.druid.query.BySegmentQueryRunner)1 CPUTimeMetricQueryRunner (io.druid.query.CPUTimeMetricQueryRunner)1 FinalizeResultsQueryRunner (io.druid.query.FinalizeResultsQueryRunner)1 MetricsEmittingQueryRunner (io.druid.query.MetricsEmittingQueryRunner)1 NoopQueryRunner (io.druid.query.NoopQueryRunner)1 Query (io.druid.query.Query)1 QueryDataSource (io.druid.query.QueryDataSource)1 QueryRunner (io.druid.query.QueryRunner)1 ReferenceCountingSegmentQueryRunner (io.druid.query.ReferenceCountingSegmentQueryRunner)1 ReportTimelineMissingSegmentQueryRunner (io.druid.query.ReportTimelineMissingSegmentQueryRunner)1 SegmentDescriptor (io.druid.query.SegmentDescriptor)1 TableDataSource (io.druid.query.TableDataSource)1