Example use of io.trino.sql.planner.plan.FilterNode in the Trino project (trinodb):
the LogicalPlanner method createTableExecutePlan.
/**
 * Plans an {@code ALTER TABLE ... EXECUTE} statement.
 *
 * <p>The resulting plan scans the target table, applies the optional WHERE
 * predicate on top of the scan, feeds the rows into a {@link TableExecuteNode}
 * for the connector-provided procedure, and finishes with a
 * {@link TableFinishNode} that commits the operation.
 *
 * @param analysis the completed semantic analysis for the statement
 * @param statement the TABLE EXECUTE statement being planned
 * @return a relation plan rooted at the commit (table-finish) node
 */
private RelationPlan createTableExecutePlan(Analysis analysis, TableExecute statement)
{
    Table targetTable = statement.getTable();
    TableHandle targetHandle = analysis.getTableHandle(targetTable);
    QualifiedObjectName qualifiedTableName = createQualifiedObjectName(session, statement, targetTable.getName());
    // Analysis is guaranteed to have resolved the execute handle for this statement
    TableExecuteHandle executeHandle = analysis.getTableExecuteHandle().orElseThrow();

    RelationPlan scanPlan = createRelationPlan(analysis, targetTable);
    PlanBuilder builder = newPlanBuilder(scanPlan, analysis, ImmutableMap.of(), ImmutableMap.of());

    // If a WHERE clause is present, plan any subqueries it contains and then
    // place a FilterNode with the rewritten predicate above the scan.
    if (statement.getWhere().isPresent()) {
        Expression predicate = statement.getWhere().get();
        SubqueryPlanner subqueryPlanner = new SubqueryPlanner(
                analysis,
                symbolAllocator,
                idAllocator,
                buildLambdaDeclarationToSymbolMap(analysis, symbolAllocator),
                plannerContext,
                typeCoercion,
                Optional.empty(),
                session,
                ImmutableMap.of());
        builder = subqueryPlanner.handleSubqueries(builder, predicate, analysis.getSubqueries(statement));
        builder = builder.withNewRoot(new FilterNode(
                idAllocator.getNextId(),
                builder.getRoot(),
                builder.rewrite(predicate)));
    }
    PlanNode source = builder.getRoot();

    TableMetadata tableMetadata = metadata.getTableMetadata(session, targetHandle);
    List<String> columnNames = tableMetadata.getColumns().stream()
            .filter(column -> !column.isHidden()) // todo this filter is redundant
            .map(ColumnMetadata::getName)
            .collect(toImmutableList());

    TableWriterNode.TableExecuteTarget tableExecuteTarget = new TableWriterNode.TableExecuteTarget(
            executeHandle,
            Optional.empty(),
            qualifiedTableName.asSchemaTableName());

    Optional<TableLayout> layout = metadata.getLayoutForTableExecute(session, executeHandle);
    List<Symbol> symbols = visibleFields(scanPlan);

    // todo extract common method to be used here and in createTableWriterPlan()
    Optional<PartitioningScheme> partitioningScheme = Optional.empty();
    Optional<PartitioningScheme> preferredPartitioningScheme = Optional.empty();
    if (layout.isPresent()) {
        // Map each partition column name to its corresponding output symbol,
        // preserving the connector-declared partition-column order.
        List<Symbol> partitionFunctionArguments = new ArrayList<>();
        for (Object partitionColumn : layout.get().getPartitionColumns()) {
            partitionFunctionArguments.add(symbols.get(columnNames.indexOf(partitionColumn)));
        }
        List<Symbol> outputLayout = new ArrayList<>(symbols);

        Optional<PartitioningHandle> partitioningHandle = layout.get().getPartitioning();
        if (partitioningHandle.isPresent()) {
            partitioningScheme = Optional.of(new PartitioningScheme(
                    Partitioning.create(partitioningHandle.get(), partitionFunctionArguments),
                    outputLayout));
        }
        else {
            // empty connector partitioning handle means evenly partitioning on partitioning columns
            preferredPartitioningScheme = Optional.of(new PartitioningScheme(
                    Partitioning.create(FIXED_HASH_DISTRIBUTION, partitionFunctionArguments),
                    outputLayout));
        }
    }

    verify(columnNames.size() == symbols.size(), "columnNames.size() != symbols.size(): %s and %s", columnNames, symbols);

    // Wrap the source in the execute node (which reports partial row counts and
    // fragments) and top it with the finish node that performs the commit.
    TableFinishNode commitNode = new TableFinishNode(
            idAllocator.getNextId(),
            new TableExecuteNode(
                    idAllocator.getNextId(),
                    source,
                    tableExecuteTarget,
                    symbolAllocator.newSymbol("partialrows", BIGINT),
                    symbolAllocator.newSymbol("fragment", VARBINARY),
                    symbols,
                    columnNames,
                    partitioningScheme,
                    preferredPartitioningScheme),
            tableExecuteTarget,
            symbolAllocator.newSymbol("rows", BIGINT),
            Optional.empty(),
            Optional.empty());
    return new RelationPlan(commitNode, analysis.getRootScope(), commitNode.getOutputSymbols(), Optional.empty());
}
Aggregations