Use of io.trino.sql.planner.plan.FilterNode in project trino by trinodb.
Class ImplementExceptDistinctAsUnion, method apply.
@Override
public Result apply(ExceptNode node, Captures captures, Context context) {
SetOperationNodeTranslator translator = new SetOperationNodeTranslator(context.getSession(), metadata, context.getSymbolAllocator(), context.getIdAllocator());
SetOperationNodeTranslator.TranslationResult result = translator.makeSetContainmentPlanForDistinct(node);
// except predicate: the row must be present in the first source and absent in all the other sources
ImmutableList.Builder<Expression> predicatesBuilder = ImmutableList.builder();
predicatesBuilder.add(new ComparisonExpression(GREATER_THAN_OR_EQUAL, result.getCountSymbols().get(0).toSymbolReference(), new GenericLiteral("BIGINT", "1")));
for (int i = 1; i < node.getSources().size(); i++) {
predicatesBuilder.add(new ComparisonExpression(EQUAL, result.getCountSymbols().get(i).toSymbolReference(), new GenericLiteral("BIGINT", "0")));
}
return Result.ofPlanNode(new ProjectNode(
        context.getIdAllocator().getNextId(),
        new FilterNode(context.getIdAllocator().getNextId(), result.getPlanNode(), and(predicatesBuilder.build())),
        Assignments.identity(node.getOutputSymbols())));
}
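The predicate built above keeps a row of the count-aggregated union exactly when it appears at least once in the first source and in none of the others. The following minimal sketch mirrors that rule over an array of per-source counts; it is standalone plain Java with hypothetical names, not Trino planner code.

// Hypothetical illustration of the EXCEPT DISTINCT predicate:
// keep a row when count(first source) >= 1 and every other count == 0.
static boolean keepForExceptDistinct(long[] countsPerSource)
{
    if (countsPerSource[0] < 1) {
        return false;
    }
    for (int i = 1; i < countsPerSource.length; i++) {
        if (countsPerSource[i] != 0) {
            return false;
        }
    }
    return true;
}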
Use of io.trino.sql.planner.plan.FilterNode in project trino by trinodb.
Class EliminateCrossJoins, method buildJoinTree.
public static PlanNode buildJoinTree(List<Symbol> expectedOutputSymbols, JoinGraph graph, List<Integer> joinOrder, PlanNodeIdAllocator idAllocator) {
requireNonNull(expectedOutputSymbols, "expectedOutputSymbols is null");
requireNonNull(idAllocator, "idAllocator is null");
requireNonNull(graph, "graph is null");
joinOrder = ImmutableList.copyOf(requireNonNull(joinOrder, "joinOrder is null"));
checkArgument(joinOrder.size() >= 2);
PlanNode result = graph.getNode(joinOrder.get(0));
Set<PlanNodeId> alreadyJoinedNodes = new HashSet<>();
alreadyJoinedNodes.add(result.getId());
for (int i = 1; i < joinOrder.size(); i++) {
PlanNode rightNode = graph.getNode(joinOrder.get(i));
alreadyJoinedNodes.add(rightNode.getId());
ImmutableList.Builder<JoinNode.EquiJoinClause> criteria = ImmutableList.builder();
for (JoinGraph.Edge edge : graph.getEdges(rightNode)) {
PlanNode targetNode = edge.getTargetNode();
if (alreadyJoinedNodes.contains(targetNode.getId())) {
criteria.add(new JoinNode.EquiJoinClause(edge.getTargetSymbol(), edge.getSourceSymbol()));
}
}
result = new JoinNode(
        idAllocator.getNextId(),
        JoinNode.Type.INNER,
        result,
        rightNode,
        criteria.build(),
        result.getOutputSymbols(),
        rightNode.getOutputSymbols(),
        false,
        Optional.empty(),
        Optional.empty(),
        Optional.empty(),
        Optional.empty(),
        Optional.empty(),
        ImmutableMap.of(),
        Optional.empty());
}
List<Expression> filters = graph.getFilters();
for (Expression filter : filters) {
result = new FilterNode(idAllocator.getNextId(), result, filter);
}
// Some nodes are sensitive to what's produced (e.g., DistinctLimit node)
return restrictOutputs(idAllocator, result, ImmutableSet.copyOf(expectedOutputSymbols)).orElse(result);
}
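An edge of the join graph only becomes an equi-join clause once the node on its other end has already been joined, which is what the alreadyJoinedNodes set tracks. The sketch below isolates that bookkeeping with hypothetical record and variable names (plain Java, java.util imports assumed), independent of Trino's JoinGraph types.

// Hypothetical sketch: turn edges into join clauses only when the target node is already joined.
record Edge(String targetNodeId, String sourceSymbol, String targetSymbol) {}

static List<String> clausesForNextNode(Set<String> alreadyJoined, List<Edge> edgesOfNextNode)
{
    List<String> clauses = new ArrayList<>();
    for (Edge edge : edgesOfNextNode) {
        if (alreadyJoined.contains(edge.targetNodeId())) {
            // same orientation as above: target symbol on the left, source symbol on the right
            clauses.add(edge.targetSymbol() + " = " + edge.sourceSymbol());
        }
    }
    return clauses;
}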
Use of io.trino.sql.planner.plan.FilterNode in project trino by trinodb.
Class ApplyTableScanRedirection, method apply.
@Override
public Result apply(TableScanNode scanNode, Captures captures, Context context) {
Optional<TableScanRedirectApplicationResult> tableScanRedirectApplicationResult = plannerContext.getMetadata().applyTableScanRedirect(context.getSession(), scanNode.getTable());
if (tableScanRedirectApplicationResult.isEmpty()) {
return Result.empty();
}
CatalogSchemaTableName destinationTable = tableScanRedirectApplicationResult.get().getDestinationTable();
QualifiedObjectName destinationObjectName = convertFromSchemaTableName(destinationTable.getCatalogName()).apply(destinationTable.getSchemaTableName());
Optional<QualifiedObjectName> redirectedObjectName = plannerContext.getMetadata().getRedirectionAwareTableHandle(context.getSession(), destinationObjectName).getRedirectedTableName();
redirectedObjectName.ifPresent(name -> {
throw new TrinoException(NOT_SUPPORTED, format("Further redirection of destination table '%s' to '%s' is not supported", destinationObjectName, name));
});
TableMetadata tableMetadata = plannerContext.getMetadata().getTableMetadata(context.getSession(), scanNode.getTable());
CatalogSchemaTableName sourceTable = new CatalogSchemaTableName(tableMetadata.getCatalogName().getCatalogName(), tableMetadata.getTable());
if (destinationTable.equals(sourceTable)) {
return Result.empty();
}
Optional<TableHandle> destinationTableHandle = plannerContext.getMetadata().getTableHandle(context.getSession(), convertFromSchemaTableName(destinationTable.getCatalogName()).apply(destinationTable.getSchemaTableName()));
if (destinationTableHandle.isEmpty()) {
throw new TrinoException(TABLE_NOT_FOUND, format("Destination table %s from table scan redirection not found", destinationTable));
}
Map<ColumnHandle, String> columnMapping = tableScanRedirectApplicationResult.get().getDestinationColumns();
Map<String, ColumnHandle> destinationColumnHandles = plannerContext.getMetadata().getColumnHandles(context.getSession(), destinationTableHandle.get());
ImmutableMap.Builder<Symbol, Cast> casts = ImmutableMap.builder();
ImmutableMap.Builder<Symbol, ColumnHandle> newAssignmentsBuilder = ImmutableMap.builder();
for (Map.Entry<Symbol, ColumnHandle> assignment : scanNode.getAssignments().entrySet()) {
String destinationColumn = columnMapping.get(assignment.getValue());
if (destinationColumn == null) {
throw new TrinoException(COLUMN_NOT_FOUND, format("Did not find mapping for source column %s in table scan redirection", assignment.getValue()));
}
ColumnHandle destinationColumnHandle = destinationColumnHandles.get(destinationColumn);
if (destinationColumnHandle == null) {
throw new TrinoException(COLUMN_NOT_FOUND, format("Did not find handle for column %s in destination table %s", destinationColumn, destinationTable));
}
// insert casts if redirected types don't match source types
Type sourceType = context.getSymbolAllocator().getTypes().get(assignment.getKey());
Type redirectedType = plannerContext.getMetadata().getColumnMetadata(context.getSession(), destinationTableHandle.get(), destinationColumnHandle).getType();
if (!sourceType.equals(redirectedType)) {
Symbol redirectedSymbol = context.getSymbolAllocator().newSymbol(destinationColumn, redirectedType);
Cast cast = getCast(context.getSession(), destinationTable, destinationColumn, redirectedType, redirectedSymbol, sourceTable, assignment.getValue(), sourceType);
casts.put(assignment.getKey(), cast);
newAssignmentsBuilder.put(redirectedSymbol, destinationColumnHandle);
} else {
newAssignmentsBuilder.put(assignment.getKey(), destinationColumnHandle);
}
}
TupleDomain<String> requiredFilter = tableScanRedirectApplicationResult.get().getFilter();
if (requiredFilter.isAll()) {
ImmutableMap<Symbol, ColumnHandle> newAssignments = newAssignmentsBuilder.buildOrThrow();
return Result.ofPlanNode(applyProjection(
        context.getIdAllocator(),
        ImmutableSet.copyOf(scanNode.getOutputSymbols()),
        casts.buildOrThrow(),
        new TableScanNode(
                scanNode.getId(),
                destinationTableHandle.get(),
                ImmutableList.copyOf(newAssignments.keySet()),
                newAssignments,
                TupleDomain.all(),
                // Use table statistics from destination table
                Optional.empty(),
                scanNode.isUpdateTarget(),
                Optional.empty())));
}
Map<ColumnHandle, Symbol> inverseAssignments = ImmutableBiMap.copyOf(scanNode.getAssignments()).inverse();
Map<String, ColumnHandle> inverseColumnsMapping = ImmutableBiMap.copyOf(columnMapping).inverse();
TupleDomain<Symbol> transformedConstraint = requiredFilter.transformKeys(destinationColumn -> {
ColumnHandle sourceColumnHandle = inverseColumnsMapping.get(destinationColumn);
if (sourceColumnHandle == null) {
throw new TrinoException(COLUMN_NOT_FOUND, format("Did not find mapping for destination column %s in table scan redirection", destinationColumn));
}
Symbol symbol = inverseAssignments.get(sourceColumnHandle);
if (symbol != null) {
// domain symbol should already be mapped in redirected table scan
return symbol;
}
// Column pruning after predicate is pushed into table scan can remove assignments for filter columns from the scan node
Type domainType = requiredFilter.getDomains().get().get(destinationColumn).getType();
symbol = context.getSymbolAllocator().newSymbol(destinationColumn, domainType);
ColumnHandle destinationColumnHandle = destinationColumnHandles.get(destinationColumn);
if (destinationColumnHandle == null) {
throw new TrinoException(COLUMN_NOT_FOUND, format("Did not find handle for column %s in destination table %s", destinationColumn, destinationTable));
}
// insert casts if redirected types don't match domain types
Type redirectedType = plannerContext.getMetadata().getColumnMetadata(context.getSession(), destinationTableHandle.get(), destinationColumnHandle).getType();
if (!domainType.equals(redirectedType)) {
Symbol redirectedSymbol = context.getSymbolAllocator().newSymbol(destinationColumn, redirectedType);
Cast cast = getCast(context.getSession(), destinationTable, destinationColumn, redirectedType, redirectedSymbol, sourceTable, sourceColumnHandle, domainType);
casts.put(symbol, cast);
newAssignmentsBuilder.put(redirectedSymbol, destinationColumnHandle);
} else {
newAssignmentsBuilder.put(symbol, destinationColumnHandle);
}
return symbol;
});
Map<Symbol, ColumnHandle> newAssignments = newAssignmentsBuilder.buildOrThrow();
TableScanNode newScanNode = new TableScanNode(
        scanNode.getId(),
        destinationTableHandle.get(),
        ImmutableList.copyOf(newAssignments.keySet()),
        newAssignments,
        TupleDomain.all(),
        // Use table statistics from destination table
        Optional.empty(),
        scanNode.isUpdateTarget(),
        Optional.empty());
DomainTranslator domainTranslator = new DomainTranslator(plannerContext);
FilterNode filterNode = new FilterNode(
        context.getIdAllocator().getNextId(),
        applyProjection(context.getIdAllocator(), newAssignments.keySet(), casts.buildOrThrow(), newScanNode),
        domainTranslator.toPredicate(context.getSession(), transformedConstraint));
return Result.ofPlanNode(applyProjection(context.getIdAllocator(), ImmutableSet.copyOf(scanNode.getOutputSymbols()), ImmutableMap.of(), filterNode));
}
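Both the assignment loop and the filter-key transformation apply the same rule: reuse the existing symbol when the destination column has the matching type, otherwise allocate a new symbol of the destination type and record a cast back to the expected type. A minimal plain-Java sketch of that decision, with hypothetical names and string-typed stand-ins for Trino's Type and Symbol, is shown below.

// Hypothetical sketch of the cast-insertion rule used during table scan redirection.
record Mapping(String scanSymbol, String scanType, boolean needsCast) {}

static Mapping mapColumn(String sourceSymbol, String sourceType, String destinationColumn, String destinationType)
{
    if (sourceType.equals(destinationType)) {
        // Types match: read the destination column directly into the existing symbol.
        return new Mapping(sourceSymbol, sourceType, false);
    }
    // Types differ: scan the destination column into a new symbol of the destination type,
    // then project CAST(redirectedSymbol AS sourceType) back into the original symbol.
    String redirectedSymbol = destinationColumn + "_redirected";
    return new Mapping(redirectedSymbol, destinationType, true);
}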
Use of io.trino.sql.planner.plan.FilterNode in project trino by trinodb.
Class ImplementIntersectAll, method apply.
@Override
public Result apply(IntersectNode node, Captures captures, Context context) {
SetOperationNodeTranslator translator = new SetOperationNodeTranslator(context.getSession(), metadata, context.getSymbolAllocator(), context.getIdAllocator());
SetOperationNodeTranslator.TranslationResult result = translator.makeSetContainmentPlanForAll(node);
// compute expected multiplicity for every row
checkState(!result.getCountSymbols().isEmpty(), "IntersectNode translation result has no count symbols");
ResolvedFunction least = metadata.resolveFunction(context.getSession(), QualifiedName.of("least"), fromTypes(BIGINT, BIGINT));
Expression minCount = result.getCountSymbols().get(0).toSymbolReference();
for (int i = 1; i < result.getCountSymbols().size(); i++) {
minCount = new FunctionCall(least.toQualifiedName(), ImmutableList.of(minCount, result.getCountSymbols().get(i).toSymbolReference()));
}
// filter rows so that expected number of rows remains
Expression removeExtraRows = new ComparisonExpression(LESS_THAN_OR_EQUAL, result.getRowNumberSymbol().toSymbolReference(), minCount);
FilterNode filter = new FilterNode(context.getIdAllocator().getNextId(), result.getPlanNode(), removeExtraRows);
// prune helper symbols
ProjectNode project = new ProjectNode(context.getIdAllocator().getNextId(), filter, Assignments.identity(node.getOutputSymbols()));
return Result.ofPlanNode(project);
}
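INTERSECT ALL keeps each distinct row as many times as its smallest per-source count, which the rule expresses as row_number <= least(count_1, ..., count_n). The sketch below restates that filter in standalone plain Java with hypothetical names.

// Hypothetical illustration of the INTERSECT ALL filter: a row copy survives when its
// per-source row number does not exceed the smallest of the per-source counts.
static boolean keepForIntersectAll(long rowNumber, long[] countsPerSource)
{
    long minCount = countsPerSource[0];
    for (int i = 1; i < countsPerSource.length; i++) {
        minCount = Math.min(minCount, countsPerSource[i]); // mirrors the chained least() calls
    }
    return rowNumber <= minCount;
}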
Use of io.trino.sql.planner.plan.FilterNode in project trino by trinodb.
Class ImplementLimitWithTies, method rewriteLimitWithTiesWithPartitioning.
/**
* Rewrite LimitNode with ties to WindowNode and FilterNode, with partitioning defined by partitionBy.
* <p>
* This method does not prune outputs of the rewritten plan. After the rewrite, the output consists of
* source's output symbols and the newly created rankSymbol.
* Passing all input symbols is intentional, because this method is used for de-correlation in the scenario
* where the original LimitNode is in the correlated subquery, and the rewrite result is placed on top of
* de-correlated join.
* It is the responsibility of the caller to prune redundant outputs.
*/
public static PlanNode rewriteLimitWithTiesWithPartitioning(LimitNode limitNode, PlanNode source, Session session, Metadata metadata, PlanNodeIdAllocator idAllocator, SymbolAllocator symbolAllocator, List<Symbol> partitionBy) {
checkArgument(limitNode.isWithTies(), "Expected LimitNode with ties");
Symbol rankSymbol = symbolAllocator.newSymbol("rank_num", BIGINT);
WindowNode.Function rankFunction = new WindowNode.Function(
        metadata.resolveFunction(session, QualifiedName.of("rank"), ImmutableList.of()),
        ImmutableList.of(),
        DEFAULT_FRAME,
        false);
WindowNode windowNode = new WindowNode(
        idAllocator.getNextId(),
        source,
        new WindowNode.Specification(partitionBy, limitNode.getTiesResolvingScheme()),
        ImmutableMap.of(rankSymbol, rankFunction),
        Optional.empty(),
        ImmutableSet.of(),
        0);
return new FilterNode(
        idAllocator.getNextId(),
        windowNode,
        new ComparisonExpression(
                ComparisonExpression.Operator.LESS_THAN_OR_EQUAL,
                rankSymbol.toSymbolReference(),
                new GenericLiteral("BIGINT", Long.toString(limitNode.getCount()))));
}
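The rewrite expresses LIMIT n WITH TIES as rank() over the ties-resolving ordering followed by a filter rank <= n, so rows that tie with the n-th row are kept. The following standalone sketch (hypothetical names, java.util imports assumed, input already sorted by the ties-resolving order) shows the same semantics without window functions.

// Hypothetical sketch of LIMIT n WITH TIES: keep rows whose rank() does not exceed the limit,
// so ties at the boundary are retained. Input must already be sorted by tiesResolvingOrder.
static <T> List<T> limitWithTies(List<T> sortedRows, Comparator<T> tiesResolvingOrder, long limit)
{
    List<T> result = new ArrayList<>();
    long rank = 0;
    long position = 0;
    T previous = null;
    for (T row : sortedRows) {
        position++;
        if (previous == null || tiesResolvingOrder.compare(previous, row) != 0) {
            rank = position; // rank() semantics: equal rows share the rank of the first row in the group
        }
        if (rank > limit) {
            break;
        }
        result.add(row);
        previous = row;
    }
    return result;
}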