Use of org.apache.druid.query.filter.DimFilter in project druid by druid-io.
The class GroupByRules, method translateAggregateCall:
/**
* Translate an AggregateCall to Druid equivalents.
*
* @return translated aggregation, or null if translation failed.
*/
public static Aggregation translateAggregateCall(
    final PlannerContext plannerContext,
    final RowSignature rowSignature,
    final VirtualColumnRegistry virtualColumnRegistry,
    final RexBuilder rexBuilder,
    final Project project,
    final List<Aggregation> existingAggregations,
    final String name,
    final AggregateCall call,
    final boolean finalizeAggregations
)
{
  final DimFilter filter;

  if (call.filterArg >= 0) {
    // AGG(xxx) FILTER(WHERE yyy)
    if (project == null) {
      // We need some kind of projection to support filtered aggregations.
      return null;
    }

    final RexNode expression = project.getChildExps().get(call.filterArg);
    final DimFilter nonOptimizedFilter =
        Expressions.toFilter(plannerContext, rowSignature, virtualColumnRegistry, expression);
    if (nonOptimizedFilter == null) {
      return null;
    } else {
      filter = Filtration.create(nonOptimizedFilter)
                         .optimizeFilterOnly(virtualColumnRegistry.getFullRowSignature())
                         .getDimFilter();
    }
  } else {
    filter = null;
  }

  final SqlAggregator sqlAggregator = plannerContext.getOperatorTable().lookupAggregator(call.getAggregation());
  if (sqlAggregator == null) {
    return null;
  }

  // Compute existingAggregations for SqlAggregator impls that want it.
  final List<Aggregation> existingAggregationsWithSameFilter = new ArrayList<>();
  for (Aggregation existingAggregation : existingAggregations) {
    if (filter == null) {
      final boolean doesMatch = existingAggregation.getAggregatorFactories()
          .stream()
          .noneMatch(factory -> factory instanceof FilteredAggregatorFactory);
      if (doesMatch) {
        existingAggregationsWithSameFilter.add(existingAggregation);
      }
    } else {
      final boolean doesMatch = existingAggregation.getAggregatorFactories()
          .stream()
          .allMatch(factory -> factory instanceof FilteredAggregatorFactory
                               && ((FilteredAggregatorFactory) factory).getFilter().equals(filter));
      if (doesMatch) {
        existingAggregationsWithSameFilter.add(
            Aggregation.create(
                existingAggregation.getAggregatorFactories()
                    .stream()
                    .map(factory -> ((FilteredAggregatorFactory) factory).getAggregator())
                    .collect(Collectors.toList()),
                existingAggregation.getPostAggregator()
            )
        );
      }
    }
  }

  final Aggregation retVal = sqlAggregator.toDruidAggregation(
      plannerContext,
      rowSignature,
      virtualColumnRegistry,
      rexBuilder,
      name,
      call,
      project,
      existingAggregationsWithSameFilter,
      finalizeAggregations
  );

  if (retVal == null) {
    return null;
  } else {
    // Check if this refers to the existingAggregationsWithSameFilter. If so, no need to apply the filter.
    if (isUsingExistingAggregation(retVal, existingAggregationsWithSameFilter)) {
      return retVal;
    } else {
      return retVal.filter(rowSignature, virtualColumnRegistry, filter);
    }
  }
}
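For context, the final retVal.filter(...) call is what attaches the translated DimFilter: each aggregator factory ends up wrapped in a FilteredAggregatorFactory, which is exactly the shape the matching loop above looks for. Below is a minimal hand-written sketch of what that produces for COUNT(*) FILTER (WHERE dim = 'value'); the dimension, value, and output name "cnt" are invented for illustration and are not taken from the code above.

import org.apache.druid.query.aggregation.CountAggregatorFactory;
import org.apache.druid.query.aggregation.FilteredAggregatorFactory;
import org.apache.druid.query.filter.DimFilter;
import org.apache.druid.query.filter.SelectorDimFilter;

// COUNT(*) FILTER (WHERE dim = 'value'): the unfiltered count aggregator is wrapped
// in a FilteredAggregatorFactory that carries the DimFilter built from the FILTER clause.
// Names ("dim", "value", "cnt") are illustrative placeholders.
DimFilter filter = new SelectorDimFilter("dim", "value", null);
FilteredAggregatorFactory filteredCount =
    new FilteredAggregatorFactory(new CountAggregatorFactory("cnt"), filter);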
Use of org.apache.druid.query.filter.DimFilter in project hive by apache.
The class DruidStorageHandlerUtils, method toDruidFilter:
@Nullable
private static DimFilter toDruidFilter(ExprNodeDesc filterExpr,
                                        Configuration configuration,
                                        List<VirtualColumn> virtualColumns,
                                        boolean resolveDynamicValues) {
  if (filterExpr == null) {
    return null;
  }
  Class<? extends GenericUDF> genericUDFClass = getGenericUDFClassFromExprDesc(filterExpr);

  if (FunctionRegistry.isOpAnd(filterExpr)) {
    // AND: translate each child recursively and combine whatever maps to a Druid filter.
    Iterator<ExprNodeDesc> iterator = filterExpr.getChildren().iterator();
    List<DimFilter> delegates = Lists.newArrayList();
    while (iterator.hasNext()) {
      DimFilter filter = toDruidFilter(iterator.next(), configuration, virtualColumns, resolveDynamicValues);
      if (filter != null) {
        delegates.add(filter);
      }
    }
    if (!delegates.isEmpty()) {
      return new AndDimFilter(delegates);
    }
  }
  if (FunctionRegistry.isOpOr(filterExpr)) {
    // OR: same recursion, combined into an OrDimFilter.
    Iterator<ExprNodeDesc> iterator = filterExpr.getChildren().iterator();
    List<DimFilter> delegates = Lists.newArrayList();
    while (iterator.hasNext()) {
      DimFilter filter = toDruidFilter(iterator.next(), configuration, virtualColumns, resolveDynamicValues);
      if (filter != null) {
        delegates.add(filter);
      }
    }
    if (!delegates.isEmpty()) {
      return new OrDimFilter(delegates);
    }
  } else if (GenericUDFBetween.class == genericUDFClass) {
    // BETWEEN: child 1 is the column, children 2 and 3 are the lower and upper bounds.
    List<ExprNodeDesc> child = filterExpr.getChildren();
    String col = extractColName(child.get(1), virtualColumns);
    if (col != null) {
      try {
        StringComparator comparator = stringTypeInfos.contains(child.get(1).getTypeInfo())
            ? StringComparators.LEXICOGRAPHIC
            : StringComparators.NUMERIC;
        String lower = evaluate(child.get(2), configuration, resolveDynamicValues);
        String upper = evaluate(child.get(3), configuration, resolveDynamicValues);
        return new BoundDimFilter(col, lower, upper, false, false, null, null, comparator);
      } catch (HiveException e) {
        throw new RuntimeException(e);
      }
    }
  } else if (GenericUDFInBloomFilter.class == genericUDFClass) {
    // IN_BLOOM_FILTER: child 0 is the column, child 1 evaluates to the bloom filter.
    List<ExprNodeDesc> child = filterExpr.getChildren();
    String col = extractColName(child.get(0), virtualColumns);
    if (col != null) {
      try {
        BloomKFilter bloomFilter = evaluateBloomFilter(child.get(1), configuration, resolveDynamicValues);
        return new BloomDimFilter(col, BloomKFilterHolder.fromBloomKFilter(bloomFilter), null);
      } catch (HiveException | IOException e) {
        throw new RuntimeException(e);
      }
    }
  }
  // Anything else is not translated and therefore not pushed down to Druid.
  return null;
}
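As a concrete illustration of what the branches above produce, a predicate such as (col BETWEEN 10 AND 20) OR (col BETWEEN 100 AND 200) becomes an OrDimFilter over two BoundDimFilters. A minimal hand-written sketch using Druid's filter classes; the column name and bounds are invented for illustration:

import com.google.common.collect.ImmutableList;
import org.apache.druid.query.filter.BoundDimFilter;
import org.apache.druid.query.filter.DimFilter;
import org.apache.druid.query.filter.OrDimFilter;
import org.apache.druid.query.ordering.StringComparators;

// Each BETWEEN maps to an inclusive BoundDimFilter, as in the GenericUDFBetween branch.
DimFilter first = new BoundDimFilter("col", "10", "20", false, false, null, null, StringComparators.NUMERIC);
DimFilter second = new BoundDimFilter("col", "100", "200", false, false, null, null, StringComparators.NUMERIC);
// The isOpOr branch then combines the translated children into a single OrDimFilter.
DimFilter pushedDown = new OrDimFilter(ImmutableList.of(first, second));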