Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Filter in the Apache Calcite project:
the DruidQuery class, signature() method.
/**
 * Returns a string describing the operations inside this query.
 *
 * <p>For example, "sfpahol" means {@link TableScan} (s)
 * followed by {@link Filter} (f)
 * followed by {@link Project} (p)
 * followed by {@link Aggregate} (a)
 * followed by {@link Filter} (h)
 * followed by {@link Project} (o)
 * followed by {@link Sort} (l).
 *
 * @see #isValidSignature(String)
 */
String signature() {
  final StringBuilder b = new StringBuilder();
  boolean seenAggregate = false;
  for (RelNode rel : rels) {
    b.append(signatureChar(rel, seenAggregate));
    // Filters/Projects that occur after the Aggregate get different codes
    // (h = having, o = post-project), so remember once we have seen one.
    seenAggregate = seenAggregate || rel instanceof Aggregate;
  }
  return b.toString();
}

/**
 * Returns the one-character signature code for a relational operator.
 *
 * <p>The same operator class maps to a different code depending on whether
 * it occurs before or after the {@link Aggregate}: a {@link Filter} is
 * 'f' before and 'h' (having) after; a {@link Project} is 'p' before and
 * 'o' (post-project) after.
 *
 * @param rel            relational operator to classify
 * @param afterAggregate whether an {@link Aggregate} precedes {@code rel}
 * @return signature character, or '!' if the operator is not recognized
 */
private static char signatureChar(RelNode rel, boolean afterAggregate) {
  if (rel instanceof TableScan) {
    return 's';
  }
  if (rel instanceof Aggregate) {
    return 'a';
  }
  if (rel instanceof Project) {
    return afterAggregate ? 'o' : 'p';
  }
  if (rel instanceof Filter) {
    return afterAggregate ? 'h' : 'f';
  }
  if (rel instanceof Sort) {
    return 'l';
  }
  return '!';
}
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Filter in the Apache Calcite project:
the DruidQuery class, explainTerms() method.
/**
 * Writes one explain item per relational operator in {@code rels}:
 * table/intervals for scans, condition for filters, projections (keyed
 * "post_projects" when above an Aggregate), group set and aggregate calls
 * for aggregates, and sort keys/directions/fetch for sorts.
 */
@Override
public RelWriter explainTerms(RelWriter pw) {
  for (RelNode node : rels) {
    if (node instanceof TableScan) {
      final TableScan scan = (TableScan) node;
      pw.item("table", scan.getTable().getQualifiedName());
      pw.item("intervals", intervals);
    } else if (node instanceof Filter) {
      final Filter filter = (Filter) node;
      pw.item("filter", filter.getCondition());
    } else if (node instanceof Project) {
      final Project project = (Project) node;
      // A Project sitting on top of an Aggregate is a post-aggregation
      // projection and is labeled differently.
      final String key =
          project.getInput() instanceof Aggregate ? "post_projects" : "projects";
      pw.item(key, project.getProjects());
    } else if (node instanceof Aggregate) {
      final Aggregate aggregate = (Aggregate) node;
      pw.item("groups", aggregate.getGroupSet());
      pw.item("aggs", aggregate.getAggCallList());
    } else if (node instanceof Sort) {
      final Sort sort = (Sort) node;
      // Emit all sort keys first, then all directions, so that the
      // numeric suffixes ("sort0"/"dir0", ...) line up by index.
      for (Ord<RelFieldCollation> ord : Ord.zip(sort.collation.getFieldCollations())) {
        pw.item("sort" + ord.i, ord.e.getFieldIndex());
      }
      for (Ord<RelFieldCollation> ord : Ord.zip(sort.collation.getFieldCollations())) {
        pw.item("dir" + ord.i, ord.e.shortString());
      }
      pw.itemIf("fetch", sort.fetch, sort.fetch != null);
    } else {
      throw new AssertionError("rel type not supported in Druid query " + node);
    }
  }
  return pw;
}
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Filter in the Apache Druid project:
the DruidQuery class, toScanQuery() method.
/**
 * Return this query as a Scan query, or null if this query is not compatible with Scan.
 *
 * <p>Scan is incompatible when the query has a GROUP BY, a LIMIT of zero,
 * or (when the engine lacks {@code SCAN_CAN_ORDER_BY_NON_TIME}) an ORDER BY
 * on anything other than the time column.
 *
 * @param queryFeatureInspector reports which features the target engine supports
 * @return query or null
 */
@Nullable
private ScanQuery toScanQuery(final QueryFeatureInspector queryFeatureInspector) {
if (grouping != null) {
// Scan cannot GROUP BY.
return null;
}
if (outputRowSignature.size() == 0) {
// Should never do a scan query without any columns that we're interested in. This is probably a planner bug.
throw new ISE("Cannot convert to Scan query without any columns.");
}
// Split the WHERE clause into the data source (e.g. join pushdown) and a Filtration
// carrying the remaining filter plus the time intervals it implies.
final Pair<DataSource, Filtration> dataSourceFiltrationPair = getFiltration(dataSource, filter, virtualColumnRegistry);
final DataSource newDataSource = dataSourceFiltrationPair.lhs;
final Filtration filtration = dataSourceFiltrationPair.rhs;
final List<ScanQuery.OrderBy> orderByColumns;
long scanOffset = 0L;
long scanLimit = 0L;
if (sorting != null) {
scanOffset = sorting.getOffsetLimit().getOffset();
if (sorting.getOffsetLimit().hasLimit()) {
final long limit = sorting.getOffsetLimit().getLimit();
if (limit == 0) {
// Can't handle zero limit (the Scan query engine would treat it as unlimited).
return null;
}
scanLimit = limit;
}
// Translate planner sort specs into Scan's OrderBy representation.
orderByColumns = sorting.getOrderBys().stream().map(orderBy -> new ScanQuery.OrderBy(orderBy.getDimension(), orderBy.getDirection() == OrderByColumnSpec.Direction.DESCENDING ? ScanQuery.Order.DESCENDING : ScanQuery.Order.ASCENDING)).collect(Collectors.toList());
} else {
orderByColumns = Collections.emptyList();
}
// Without SCAN_CAN_ORDER_BY_NON_TIME, only a single ordering on the time column is allowed.
if (!queryFeatureInspector.feature(QueryFeature.SCAN_CAN_ORDER_BY_NON_TIME) && (orderByColumns.size() > 1 || orderByColumns.stream().anyMatch(orderBy -> !orderBy.getColumnName().equals(ColumnHolder.TIME_COLUMN_NAME)))) {
// Cannot handle this ordering.
// Scan cannot ORDER BY non-time columns.
plannerContext.setPlanningError("SQL query requires order by non-time column %s that is not supported.", orderByColumns);
return null;
}
// Compute the list of columns to select, sorted and deduped.
final SortedSet<String> scanColumns = new TreeSet<>(outputRowSignature.getColumnNames());
orderByColumns.forEach(column -> scanColumns.add(column.getColumnName()));
// NOTE(review): the 0 below appears to be a batch-size/part argument and the two nulls
// disable segment granularity/legacy options — confirm against the ScanQuery constructor,
// since the arguments are positional.
return new ScanQuery(newDataSource, filtration.getQuerySegmentSpec(), getVirtualColumns(true), ScanQuery.ResultFormat.RESULT_FORMAT_COMPACTED_LIST, 0, scanOffset, scanLimit, null, orderByColumns, filtration.getDimFilter(), ImmutableList.copyOf(scanColumns), false, ImmutableSortedMap.copyOf(plannerContext.getQueryContext()));
}
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Filter in the Apache Druid project:
the FilterJoinExcludePushToChildRule class, removeRedundantIsNotNullFilters() method.
/**
 * Removes 'IS NOT NULL' filters from an inner join's filter list when the
 * checked column also participates in an equi-condition between the two
 * tables. Such filters are no-ops: the equality can never be true for a
 * null input, so the null check is implied.
 *
 * <p>Mutates {@code joinFilters} in place. Only applies to inner joins in
 * SQL-compatible null-handling mode; otherwise the list is left untouched.
 *
 * @param joinFilters     join filter conjuncts, modified in place
 * @param joinType        type of the join
 * @param isSqlCompatible whether SQL-compatible null handling is enabled
 */
static void removeRedundantIsNotNullFilters(List<RexNode> joinFilters, JoinRelType joinType, boolean isSqlCompatible) {
  if (joinType != JoinRelType.INNER || !isSqlCompatible) {
    return;
  }

  // Single pass: collect the IS NOT NULL checks and the operand pairs of
  // binary equality conditions.
  final ImmutableList.Builder<RexNode> notNullChecks = ImmutableList.builder();
  final ImmutableList.Builder<Pair<RexNode, RexNode>> equiPairs = ImmutableList.builder();
  for (RexNode joinFilter : joinFilters) {
    if (!(joinFilter instanceof RexCall)) {
      continue;
    }
    if (joinFilter.isA(SqlKind.IS_NOT_NULL)) {
      notNullChecks.add(joinFilter);
    } else if (joinFilter.isA(SqlKind.EQUALS)) {
      final List<RexNode> operands = ((RexCall) joinFilter).getOperands();
      if (operands.size() == 2 && operands.get(0) != null && operands.get(1) != null) {
        equiPairs.add(new Pair<>(operands.get(0), operands.get(1)));
      }
    }
  }

  // An IS NOT NULL check is droppable when its operand appears on either
  // side of some equality condition.
  final List<Pair<RexNode, RexNode>> equalityFilters = equiPairs.build();
  final ImmutableList.Builder<RexNode> droppable = ImmutableList.builder();
  for (RexNode notNullCheck : notNullChecks.build()) {
    final RexNode checkedOperand = ((RexCall) notNullCheck).getOperands().get(0);
    final boolean implied = equalityFilters.stream().anyMatch(pair ->
        (pair.lhs != null && pair.lhs.equals(checkedOperand))
            || (pair.rhs != null && pair.rhs.equals(checkedOperand)));
    if (implied) {
      droppable.add(notNullCheck);
    }
  }
  joinFilters.removeAll(droppable.build());
}
Use of org.apache.beam.vendor.calcite.v1_28_0.org.apache.calcite.rel.core.Filter in the Apache Flink project:
the RelDecorrelator class, decorrelate() method.
/**
 * Decorrelates a relational expression: first normalizes the tree with a
 * pre-pass (count() adjustment, filter push-down/transpose rules), then
 * runs the decorrelation rewrite, and finally cleans up with filter/join
 * push rules. Returns the pre-pass result unchanged if no correlated
 * expression was found.
 */
protected RelNode decorrelate(RelNode root) {
  final RelBuilderFactory f = relBuilderFactory();
  // Pre-pass: adjust count() expressions if any, and push filters around
  // so that correlated conditions end up where decorrelation can see them.
  final HepProgram preProgram = HepProgram.builder()
      .addRuleInstance(AdjustProjectForCountAggregateRule.config(false, this, f).toRule())
      .addRuleInstance(AdjustProjectForCountAggregateRule.config(true, this, f).toRule())
      .addRuleInstance(
          FilterJoinRule.FilterIntoJoinRule.Config.DEFAULT
              .withRelBuilderFactory(f)
              .withOperandSupplier(b0 ->
                  b0.operand(Filter.class).oneInput(b1 ->
                      b1.operand(Join.class).anyInputs()))
              .withDescription("FilterJoinRule:filter")
              .as(FilterJoinRule.FilterIntoJoinRule.Config.class)
              .withSmart(true)
              .withPredicate((join, joinType, exp) -> true)
              .as(FilterJoinRule.FilterIntoJoinRule.Config.class)
              .toRule())
      .addRuleInstance(
          CoreRules.FILTER_PROJECT_TRANSPOSE.config
              .withRelBuilderFactory(f)
              .as(FilterProjectTransposeRule.Config.class)
              // Do not transpose filters that contain correlation variables.
              .withOperandFor(Filter.class,
                  filter -> !RexUtil.containsCorrelation(filter.getCondition()),
                  Project.class, project -> true)
              .withCopyFilter(true)
              .withCopyProject(true)
              .toRule())
      .addRuleInstance(FilterCorrelateRule.Config.DEFAULT.withRelBuilderFactory(f).toRule())
      .build();
  HepPlanner planner = createPlanner(preProgram);
  planner.setRoot(root);
  root = planner.findBestExp();

  // Perform decorrelation.
  map.clear();
  final Frame frame = getInvoke(root, null);
  if (frame == null) {
    // Nothing was rewritten; return the pre-pass result as-is.
    return root;
  }

  // The tree has been rewritten; apply clean-up rules post-decorrelation.
  final HepProgram postProgram = HepProgram.builder()
      .addRuleInstance(CoreRules.FILTER_INTO_JOIN.config.withRelBuilderFactory(f).toRule())
      .addRuleInstance(CoreRules.JOIN_CONDITION_PUSH.config.withRelBuilderFactory(f).toRule())
      .build();
  final HepPlanner postPlanner = createPlanner(postProgram);
  postPlanner.setRoot(frame.r);
  return postPlanner.findBestExp();
}
Aggregations