Use of org.apache.druid.query.DataSource in project druid by druid-io.
The class MovingAverageQueryRunner, method run.
@Override
public Sequence<Row> run(QueryPlus<Row> query, ResponseContext responseContext)
{
  MovingAverageQuery maq = (MovingAverageQuery) query.getQuery();
  List<Interval> intervals;
  final Period period;

  // Get the largest bucket from the list of averagers
  Optional<Integer> opt =
      maq.getAveragerSpecs().stream().map(AveragerFactory::getNumBuckets).max(Integer::compare);
  int buckets = opt.orElse(0);

  // Extend the interval start backwards by (buckets - 1) periods
  if (maq.getGranularity() instanceof PeriodGranularity) {
    period = ((PeriodGranularity) maq.getGranularity()).getPeriod();
    int offset = buckets <= 0 ? 0 : (1 - buckets);
    intervals = maq.getIntervals()
                   .stream()
                   .map(i -> new Interval(i.getStart().withPeriodAdded(period, offset), i.getEnd()))
                   .collect(Collectors.toList());
  } else {
throw new ISE("Only PeriodGranulaity is supported for movingAverage queries");
  }

  Sequence<Row> resultsSeq;
  DataSource dataSource = maq.getDataSource();
  if (maq.getDimensions() != null
      && !maq.getDimensions().isEmpty()
      && (dataSource instanceof TableDataSource
          || dataSource instanceof UnionDataSource
          || dataSource instanceof QueryDataSource)) {
    // build groupBy query from movingAverage query
    GroupByQuery.Builder builder = GroupByQuery.builder()
                                               .setDataSource(dataSource)
                                               .setInterval(intervals)
                                               .setDimFilter(maq.getFilter())
                                               .setGranularity(maq.getGranularity())
                                               .setDimensions(maq.getDimensions())
                                               .setAggregatorSpecs(maq.getAggregatorSpecs())
                                               .setPostAggregatorSpecs(maq.getPostAggregatorSpecs())
                                               .setContext(maq.getContext());
    GroupByQuery gbq = builder.build();

    ResponseContext gbqResponseContext = ResponseContext.createEmpty();
    gbqResponseContext.merge(responseContext);
    gbqResponseContext.putQueryFailDeadlineMs(System.currentTimeMillis() + QueryContexts.getTimeout(gbq));

    Sequence<ResultRow> results = gbq.getRunner(walker).run(QueryPlus.wrap(gbq), gbqResponseContext);
    try {
      // use localhost for remote address
      requestLogger.logNativeQuery(RequestLogLine.forNative(
          gbq,
          DateTimes.nowUtc(),
          "127.0.0.1",
          new QueryStats(ImmutableMap.of("query/time", 0, "query/bytes", 0, "success", true))
      ));
    } catch (Exception e) {
      throw Throwables.propagate(e);
    }
    resultsSeq = results.map(row -> row.toMapBasedRow(gbq));
  } else {
    // no dimensions, so optimize this as a TimeSeries
    TimeseriesQuery tsq = new TimeseriesQuery(
        dataSource,
        new MultipleIntervalSegmentSpec(intervals),
        false,
        null,
        maq.getFilter(),
        maq.getGranularity(),
        maq.getAggregatorSpecs(),
        maq.getPostAggregatorSpecs(),
        0,
        maq.getContext()
    );
    ResponseContext tsqResponseContext = ResponseContext.createEmpty();
    tsqResponseContext.merge(responseContext);
    tsqResponseContext.putQueryFailDeadlineMs(System.currentTimeMillis() + QueryContexts.getTimeout(tsq));

    Sequence<Result<TimeseriesResultValue>> results = tsq.getRunner(walker).run(QueryPlus.wrap(tsq), tsqResponseContext);
    try {
      // use localhost for remote address
      requestLogger.logNativeQuery(RequestLogLine.forNative(
          tsq,
          DateTimes.nowUtc(),
          "127.0.0.1",
          new QueryStats(ImmutableMap.of("query/time", 0, "query/bytes", 0, "success", true))
      ));
    } catch (Exception e) {
      throw Throwables.propagate(e);
    }
    resultsSeq = Sequences.map(results, new TimeseriesResultToRow());
  }

  // Process into period buckets
  Sequence<RowBucket> bucketedMovingAvgResults =
      Sequences.simple(new RowBucketIterable(resultsSeq, intervals, period));

  // Apply the windowed analysis functions
  Sequence<Row> movingAvgResults = Sequences.simple(new MovingAverageIterable(
      bucketedMovingAvgResults,
      maq.getDimensions(),
      maq.getAveragerSpecs(),
      maq.getPostAggregatorSpecs(),
      maq.getAggregatorSpecs()
  ));

  // Apply any postAveragers
  Sequence<Row> movingAvgResultsWithPostAveragers =
      Sequences.map(movingAvgResults, new PostAveragerAggregatorCalculator(maq));

  // Remove rows outside the reporting window
  List<Interval> reportingIntervals = maq.getIntervals();
  movingAvgResults = Sequences.filter(
      movingAvgResultsWithPostAveragers,
      row -> reportingIntervals.stream().anyMatch(i -> i.contains(row.getTimestamp()))
  );

  // Apply any having, sorting, and limits
  movingAvgResults = maq.applyLimit(movingAvgResults);
  return movingAvgResults;
}
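The interval arithmetic above is the heart of the averager setup: the start of each reporting interval is moved back by (buckets - 1) granularity periods so the first output bucket has a full window of history behind it. A minimal, self-contained sketch of that transformation, assuming Joda-Time on the classpath and a hypothetical 7-bucket daily averager:

import org.joda.time.Interval;
import org.joda.time.Period;

public class IntervalExtensionSketch
{
  public static void main(String[] args)
  {
    // Hypothetical values: a movingAverage query reporting one week of daily
    // buckets, with a 7-day averager (buckets = 7).
    Period period = Period.days(1);
    int buckets = 7;
    int offset = buckets <= 0 ? 0 : (1 - buckets); // -6

    Interval reporting = Interval.parse("2024-01-08/2024-01-15");
    // Same transformation as in run(): move the start back by (buckets - 1) periods.
    Interval extended = new Interval(
        reporting.getStart().withPeriodAdded(period, offset),
        reporting.getEnd()
    );

    // Start moves back six days: 2024-01-02/2024-01-15 (in the JVM default
    // time zone), so six extra days of history feed the first 7-day average.
    System.out.println(extended);
  }
}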
Use of org.apache.druid.query.DataSource in project druid by druid-io.
The class GroupByStrategyV2, method numMergeBuffersNeededForSubtotalsSpec.
private static int numMergeBuffersNeededForSubtotalsSpec(GroupByQuery query)
{
  List<List<String>> subtotalSpecs = query.getSubtotalsSpec();
  final DataSource dataSource = query.getDataSource();

  int numMergeBuffersNeededForSubQuerySubtotal = 0;
  if (dataSource instanceof QueryDataSource) {
    Query<?> subQuery = ((QueryDataSource) dataSource).getQuery();
    if (subQuery instanceof GroupByQuery) {
      numMergeBuffersNeededForSubQuerySubtotal = numMergeBuffersNeededForSubtotalsSpec((GroupByQuery) subQuery);
    }
  }

  if (subtotalSpecs == null || subtotalSpecs.size() == 0) {
    return numMergeBuffersNeededForSubQuerySubtotal;
  }

  List<String> queryDimOutputNames =
      query.getDimensions().stream().map(DimensionSpec::getOutputName).collect(Collectors.toList());
  for (List<String> subtotalSpec : subtotalSpecs) {
    if (!Utils.isPrefix(subtotalSpec, queryDimOutputNames)) {
      return 2;
    }
  }

  return Math.max(1, numMergeBuffersNeededForSubQuerySubtotal);
}
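The early return of 2 encodes the key rule: a subtotal spec that is a prefix of the query's dimension output names can reuse the already-sorted grouping and needs only one merge buffer, while any non-prefix spec forces a re-sort and a second buffer. A minimal sketch of that prefix rule (the isPrefix helper below is a self-contained stand-in for Druid's Utils.isPrefix, written here so the example runs on its own):

import java.util.List;

public class SubtotalsPrefixSketch
{
  // Stand-in for Druid's Utils.isPrefix: true if candidate is a leading
  // sublist of full, in order.
  static boolean isPrefix(List<String> candidate, List<String> full)
  {
    if (candidate.size() > full.size()) {
      return false;
    }
    return candidate.equals(full.subList(0, candidate.size()));
  }

  public static void main(String[] args)
  {
    List<String> queryDims = List.of("country", "city", "device");

    // A prefix subtotal reuses the already-sorted grouping: one merge buffer.
    System.out.println(isPrefix(List.of("country", "city"), queryDims)); // true

    // A non-prefix subtotal needs re-sorting, hence two merge buffers.
    System.out.println(isPrefix(List.of("city"), queryDims)); // false
  }
}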
Use of org.apache.druid.query.DataSource in project druid by druid-io.
The class DataSourceAnalysis, method flattenJoin.
/**
* Flatten a datasource into two parts: the left-hand side datasource (the 'base' datasource), and a list of join
* clauses, if any.
*
* @throws IllegalArgumentException if dataSource cannot be fully flattened.
*/
private static Triple<DataSource, DimFilter, List<PreJoinableClause>> flattenJoin(final JoinDataSource dataSource)
{
  DataSource current = dataSource;
  DimFilter currentDimFilter = null;
  final List<PreJoinableClause> preJoinableClauses = new ArrayList<>();

  while (current instanceof JoinDataSource) {
    final JoinDataSource joinDataSource = (JoinDataSource) current;
    current = joinDataSource.getLeft();
    if (currentDimFilter != null) {
      throw new IAE("Left filters are only allowed when left child is direct table access");
    }
    currentDimFilter = joinDataSource.getLeftFilter();
    preJoinableClauses.add(new PreJoinableClause(
        joinDataSource.getRightPrefix(),
        joinDataSource.getRight(),
        joinDataSource.getJoinType(),
        joinDataSource.getConditionAnalysis()
    ));
  }

  // Join clauses were added in the order we saw them while traversing down, but we need to apply them in the
  // going-up order. So reverse them.
  Collections.reverse(preJoinableClauses);
  return Triple.of(current, currentDimFilter, preJoinableClauses);
}
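A toy illustration of why the reversal is needed, using simplified stand-in types rather than Druid's JoinDataSource: walking a left-deep join tree visits the outermost join first, but the clauses must be applied innermost-first when rebuilding the join on the way back up.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class FlattenJoinSketch
{
  // Toy stand-ins for DataSource / JoinDataSource.
  interface Node {}
  record Table(String name) implements Node {}
  record Join(Node left, String rightTable) implements Node {}

  public static void main(String[] args)
  {
    // ((t JOIN a) JOIN b): 'b' is the outermost clause, 'a' the innermost.
    Node root = new Join(new Join(new Table("t"), "a"), "b");

    Node current = root;
    List<String> clauses = new ArrayList<>();
    while (current instanceof Join join) {
      current = join.left();
      clauses.add(join.rightTable()); // collected top-down: [b, a]
    }
    Collections.reverse(clauses);     // applied bottom-up: [a, b]

    System.out.println(((Table) current).name() + " " + clauses); // t [a, b]
  }
}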
Use of org.apache.druid.query.DataSource in project druid by druid-io.
The class JoinableFactoryWrapperTest, method test_computeJoinDataSourceCacheKey_noClauses.
@Test
public void test_computeJoinDataSourceCacheKey_noClauses()
{
  DataSourceAnalysis analysis = EasyMock.mock(DataSourceAnalysis.class);
  DataSource dataSource = new NoopDataSource();
  EasyMock.expect(analysis.getPreJoinableClauses()).andReturn(Collections.emptyList());
  EasyMock.expect(analysis.getJoinBaseTableFilter()).andReturn(Optional.empty());
  EasyMock.expect(analysis.getDataSource()).andReturn(dataSource);
  EasyMock.replay(analysis);

  JoinableFactoryWrapper joinableFactoryWrapper = new JoinableFactoryWrapper(new JoinableFactoryWithCacheKey());

  expectedException.expect(IAE.class);
  expectedException.expectMessage(
      StringUtils.format("No join clauses to build the cache key for data source [%s]", dataSource)
  );
  joinableFactoryWrapper.computeJoinDataSourceCacheKey(analysis);
}
Use of org.apache.druid.query.DataSource in project druid by druid-io.
The class QueryStackTests, method makeJoinableFactoryFromDefault.
public static JoinableFactory makeJoinableFactoryFromDefault(
    @Nullable LookupExtractorFactoryContainerProvider lookupProvider,
    @Nullable Set<JoinableFactory> customFactories,
    @Nullable Map<Class<? extends JoinableFactory>, Class<? extends DataSource>> customMappings
)
{
  ImmutableSet.Builder<JoinableFactory> setBuilder = ImmutableSet.builder();
  ImmutableMap.Builder<Class<? extends JoinableFactory>, Class<? extends DataSource>> mapBuilder =
      ImmutableMap.builder();

  setBuilder.add(new InlineJoinableFactory());
  mapBuilder.put(InlineJoinableFactory.class, InlineDataSource.class);

  if (lookupProvider != null) {
    setBuilder.add(new LookupJoinableFactory(lookupProvider));
    mapBuilder.put(LookupJoinableFactory.class, LookupDataSource.class);
  }
  if (customFactories != null) {
    setBuilder.addAll(customFactories);
  }
  if (customMappings != null) {
    mapBuilder.putAll(customMappings);
  }

  return new MapJoinableFactory(setBuilder.build(), mapBuilder.build());
}
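Since all three parameters are @Nullable, the minimal call wires up only the inline joinable factory; a usage sketch (lookupProvider in the second call is an assumed, pre-built LookupExtractorFactoryContainerProvider):

// Default wiring: only InlineJoinableFactory (mapped to InlineDataSource) is registered.
JoinableFactory defaultFactory = QueryStackTests.makeJoinableFactoryFromDefault(null, null, null);

// With a lookup provider, LookupJoinableFactory is registered as well.
JoinableFactory withLookups = QueryStackTests.makeJoinableFactoryFromDefault(lookupProvider, null, null);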