Use of org.apache.druid.query.planning.DataSourceAnalysis in project druid by druid-io.
The class ClientQuerySegmentWalker, method canRunQueryUsingClusterWalker.
/**
* Checks if a query can be handled wholly by {@link #clusterClient}. Assumes that it is a
* {@link CachingClusteredClient} or something that behaves similarly.
*/
private <T> boolean canRunQueryUsingClusterWalker(Query<T> query) {
  final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(query.getDataSource());
  final QueryToolChest<T, Query<T>> toolChest = warehouse.getToolChest(query);
  // 1) The query must be based on a concrete table, and
  // 2) if there is an outer query, the toolchest must be able to handle it (the local walker handles
  //    subqueries on its own).
  return analysis.isConcreteTableBased()
         && (!analysis.isQuery()
             || toolChest.canPerformSubquery(((QueryDataSource) analysis.getDataSource()).getQuery()));
}
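For orientation, here is a minimal sketch of the two signals this check reads off DataSourceAnalysis. The datasource name and the scan query are illustrative, not taken from the walker above.

import com.google.common.collect.ImmutableList;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.query.Druids;
import org.apache.druid.query.Query;
import org.apache.druid.query.QueryDataSource;
import org.apache.druid.query.TableDataSource;
import org.apache.druid.query.planning.DataSourceAnalysis;
import org.apache.druid.query.spec.MultipleIntervalSegmentSpec;

// A plain table is concrete and has no outer query, so the first condition alone decides.
final DataSourceAnalysis table = DataSourceAnalysis.forDataSource(new TableDataSource("wikipedia"));
// table.isConcreteTableBased() -> true; table.isQuery() -> false

// Wrapping a query in a QueryDataSource keeps the inner table as the base,
// but isQuery() becomes true, so canPerformSubquery() is consulted as well.
final Query<?> inner = Druids.newScanQueryBuilder()
    .dataSource("wikipedia")
    .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.of("2016/2017"))))
    .build();
final DataSourceAnalysis subquery = DataSourceAnalysis.forDataSource(new QueryDataSource(inner));
// subquery.isQuery() -> true; the base datasource is still the "wikipedia" table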
Use of org.apache.druid.query.planning.DataSourceAnalysis in project druid by druid-io.
The class ServerManager, method getQueryRunnerForSegments.
@Override
public <T> QueryRunner<T> getQueryRunnerForSegments(Query<T> query, Iterable<SegmentDescriptor> specs) {
  final QueryRunnerFactory<T, Query<T>> factory = conglomerate.findFactory(query);
  if (factory == null) {
    final QueryUnsupportedException e = new QueryUnsupportedException(
        StringUtils.format("Unknown query type, [%s]", query.getClass())
    );
    log.makeAlert(e, "Error while executing a query[%s]", query.getId())
       .addData("dataSource", query.getDataSource())
       .emit();
    throw e;
  }
  final QueryToolChest<T, Query<T>> toolChest = factory.getToolchest();
  final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(query.getDataSource());
  final AtomicLong cpuTimeAccumulator = new AtomicLong(0L);
  final VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline;
  final Optional<VersionedIntervalTimeline<String, ReferenceCountingSegment>> maybeTimeline =
      segmentManager.getTimeline(analysis);
  // Make sure this query type can handle the subquery, if present.
  if (analysis.isQuery()
      && !toolChest.canPerformSubquery(((QueryDataSource) analysis.getDataSource()).getQuery())) {
    throw new ISE("Cannot handle subquery: %s", analysis.getDataSource());
  }
  if (maybeTimeline.isPresent()) {
    timeline = maybeTimeline.get();
  } else {
    return new ReportTimelineMissingSegmentQueryRunner<>(Lists.newArrayList(specs));
  }
  // segmentMapFn maps each base Segment into a joined Segment, if necessary.
  final Function<SegmentReference, SegmentReference> segmentMapFn = joinableFactoryWrapper.createSegmentMapFn(
      analysis.getJoinBaseTableFilter().map(Filters::toFilter).orElse(null),
      analysis.getPreJoinableClauses(),
      cpuTimeAccumulator,
      analysis.getBaseQuery().orElse(query)
  );
  // Compute the join cache key once here, so it doesn't need to be re-computed for every segment.
  final Optional<byte[]> cacheKeyPrefix = analysis.isJoin()
      ? joinableFactoryWrapper.computeJoinDataSourceCacheKey(analysis)
      : Optional.of(StringUtils.EMPTY_BYTES);
  final FunctionalIterable<QueryRunner<T>> queryRunners = FunctionalIterable.create(specs).transformCat(
      descriptor -> Collections.singletonList(buildQueryRunnerForSegment(
          query, descriptor, factory, toolChest, timeline, segmentMapFn, cpuTimeAccumulator, cacheKeyPrefix
      ))
  );
  return CPUTimeMetricQueryRunner.safeBuild(
      new FinalizeResultsQueryRunner<>(
          toolChest.mergeResults(factory.mergeRunners(queryProcessingPool, queryRunners)),
          toolChest
      ),
      toolChest, emitter, cpuTimeAccumulator, true
  );
}
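A hypothetical invocation, assuming a ServerManager instance and a matching Query<T> in scope inside a generic method; the interval, version, and partition numbers are made up for illustration.

final Iterable<SegmentDescriptor> specs = ImmutableList.of(
    new SegmentDescriptor(Intervals.of("2016-06-27/2016-06-28"), "v1", 0),
    new SegmentDescriptor(Intervals.of("2016-06-27/2016-06-28"), "v1", 1)
);
// Descriptors naming segments that are not served locally surface through
// ReportTimelineMissingSegmentQueryRunner rather than being silently dropped.
final QueryRunner<T> runner = serverManager.getQueryRunnerForSegments(query, specs);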
Use of org.apache.druid.query.planning.DataSourceAnalysis in project druid by druid-io.
The class ServerManager, method getQueryRunnerForIntervals.
@Override
public <T> QueryRunner<T> getQueryRunnerForIntervals(Query<T> query, Iterable<Interval> intervals) {
  final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(query.getDataSource());
  final VersionedIntervalTimeline<String, ReferenceCountingSegment> timeline;
  final Optional<VersionedIntervalTimeline<String, ReferenceCountingSegment>> maybeTimeline =
      segmentManager.getTimeline(analysis);
  if (maybeTimeline.isPresent()) {
    timeline = maybeTimeline.get();
  } else {
    // Even though we didn't find a timeline for the datasource, return a NoopQueryRunner rather than
    // reporting missing intervals: the query intervals are a filter rather than something
    // we must find.
    return new NoopQueryRunner<>();
  }
  FunctionalIterable<SegmentDescriptor> segmentDescriptors = FunctionalIterable
      .create(intervals)
      .transformCat(timeline::lookup)
      .transformCat(holder -> {
        if (holder == null) {
          return null;
        }
        return FunctionalIterable
            .create(holder.getObject())
            .transform(partitionChunk -> new SegmentDescriptor(
                holder.getInterval(),
                holder.getVersion(),
                partitionChunk.getChunkNumber()
            ));
      });
  return getQueryRunnerForSegments(query, segmentDescriptors);
}
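By contrast, the interval-based entry point can be sketched like this (serverManager and query are again assumed to be in scope):

final QueryRunner<T> runner = serverManager.getQueryRunnerForIntervals(
    query,
    ImmutableList.of(Intervals.of("2016-06-27/2016-06-28"))
);
// Intervals with no served segments simply contribute no descriptors: as the
// comment above notes, the intervals are a filter, not something that must be found.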
Use of org.apache.druid.query.planning.DataSourceAnalysis in project druid by druid-io.
The class SingleTaskBackgroundRunner, method getQueryRunnerImpl.
private <T> QueryRunner<T> getQueryRunnerImpl(Query<T> query) {
  QueryRunner<T> queryRunner = null;
  if (runningItem != null) {
    final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(query.getDataSource());
    final Task task = runningItem.getTask();
    if (analysis.getBaseTableDataSource().isPresent()
        && task.getDataSource().equals(analysis.getBaseTableDataSource().get().getName())) {
      final QueryRunner<T> taskQueryRunner = task.getQueryRunner(query);
      if (taskQueryRunner != null) {
        queryRunner = taskQueryRunner;
      }
    }
  }
  return new SetAndVerifyContextQueryRunner<>(
      serverConfig,
      queryRunner == null ? new NoopQueryRunner<>() : queryRunner
  );
}
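The routing condition in the middle reduces to a base-table name match. A minimal standalone sketch of that check, with an illustrative datasource name in place of task.getDataSource():

final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(new TableDataSource("wikipedia"));
final boolean routable = analysis.getBaseTableDataSource()
    .map(t -> "wikipedia".equals(t.getName())) // the real check compares against task.getDataSource()
    .orElse(false); // non-table bases (e.g. inline datasources) never route to the task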
Use of org.apache.druid.query.planning.DataSourceAnalysis in project druid by druid-io.
The class Queries, method withBaseDataSource.
/**
* Rewrite "query" to refer to some specific base datasource, instead of the one it currently refers to.
*
* Unlike the seemingly-similar {@link Query#withDataSource}, this will walk down the datasource tree and replace
* only the base datasource (in the sense defined in {@link DataSourceAnalysis}).
*/
public static <T> Query<T> withBaseDataSource(final Query<T> query, final DataSource newBaseDataSource) {
  final Query<T> retVal;
  if (query.getDataSource() instanceof QueryDataSource) {
    final Query<?> subQuery = ((QueryDataSource) query.getDataSource()).getQuery();
    retVal = query.withDataSource(new QueryDataSource(withBaseDataSource(subQuery, newBaseDataSource)));
  } else {
    final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(query.getDataSource());
    DataSource current = newBaseDataSource;
    DimFilter joinBaseFilter = analysis.getJoinBaseTableFilter().orElse(null);
    for (final PreJoinableClause clause : analysis.getPreJoinableClauses()) {
      current = JoinDataSource.create(
          current,
          clause.getDataSource(),
          clause.getPrefix(),
          clause.getCondition(),
          clause.getJoinType(),
          joinBaseFilter
      );
      joinBaseFilter = null;
    }
    retVal = query.withDataSource(current);
  }
  // Verify postconditions, just in case.
  final DataSourceAnalysis analysis = DataSourceAnalysis.forDataSource(retVal.getDataSource());
  if (!newBaseDataSource.equals(analysis.getBaseDataSource())) {
    throw new ISE("Unable to replace base dataSource");
  }
  return retVal;
}
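A usage sketch under the assumption of a simple scan query; both table names are illustrative.

final Query<?> original = Druids.newScanQueryBuilder()
    .dataSource("wikipedia")
    .intervals(new MultipleIntervalSegmentSpec(ImmutableList.of(Intervals.of("2016/2017"))))
    .build();
// Only the base table is replaced; any join clauses or outer QueryDataSource
// wrappers encountered on the way down are rebuilt around the new base.
final Query<?> rewritten = Queries.withBaseDataSource(original, new TableDataSource("wikipedia_copy"));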