Use of io.trino.sql.planner.plan.TableWriterNode.WriterTarget in project trino by trinodb.
From class LocalExecutionPlanner, method createTableFinisher:
private static TableFinisher createTableFinisher(Session session, TableFinishNode node, Metadata metadata)
{
    WriterTarget target = node.getTarget();
    return (fragments, statistics, tableExecuteContext) -> {
        if (target instanceof CreateTarget) {
            return metadata.finishCreateTable(session, ((CreateTarget) target).getHandle(), fragments, statistics);
        }
        else if (target instanceof InsertTarget) {
            return metadata.finishInsert(session, ((InsertTarget) target).getHandle(), fragments, statistics);
        }
        else if (target instanceof TableWriterNode.RefreshMaterializedViewTarget) {
            TableWriterNode.RefreshMaterializedViewTarget refreshTarget = (TableWriterNode.RefreshMaterializedViewTarget) target;
            return metadata.finishRefreshMaterializedView(
                    session,
                    refreshTarget.getTableHandle(),
                    refreshTarget.getInsertHandle(),
                    fragments,
                    statistics,
                    refreshTarget.getSourceTableHandles());
        }
        else if (target instanceof DeleteTarget) {
            // delete, update and table execute finishers return no output metadata
            metadata.finishDelete(session, ((DeleteTarget) target).getHandleOrElseThrow(), fragments);
            return Optional.empty();
        }
        else if (target instanceof UpdateTarget) {
            metadata.finishUpdate(session, ((UpdateTarget) target).getHandleOrElseThrow(), fragments);
            return Optional.empty();
        }
        else if (target instanceof TableExecuteTarget) {
            TableExecuteHandle tableExecuteHandle = ((TableExecuteTarget) target).getExecuteHandle();
            metadata.finishTableExecute(session, tableExecuteHandle, fragments, tableExecuteContext.getSplitsInfo());
            return Optional.empty();
        }
        else {
            throw new AssertionError("Unhandled target type: " + target.getClass().getName());
        }
    };
}
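The finisher above dispatches on the concrete WriterTarget subtype through a chain of instanceof checks and explicit casts. On Java 16+ the same shape can be written with pattern matching for instanceof, which binds the narrowed value directly. Below is a minimal, self-contained sketch of that dispatch pattern; the Target hierarchy, handles, and return values are hypothetical stand-ins for illustration, not Trino's API.

import java.util.List;
import java.util.Optional;

// Hypothetical stand-ins for a WriterTarget-style hierarchy (illustration only)
interface Target {}
record CreateTarget(String handle) implements Target {}
record InsertTarget(String handle) implements Target {}
record DeleteTarget(Optional<String> handle) implements Target {}

class FinisherSketch
{
    // Pattern matching for instanceof binds the cast result to a variable,
    // avoiding the explicit ((CreateTarget) target) casts in the original chain
    static Optional<String> finish(Target target, List<byte[]> fragments)
    {
        if (target instanceof CreateTarget create) {
            return Optional.of("finished create: " + create.handle());
        }
        if (target instanceof InsertTarget insert) {
            return Optional.of("finished insert: " + insert.handle());
        }
        if (target instanceof DeleteTarget delete) {
            // like getHandleOrElseThrow(): fail fast if the handle was never set
            delete.handle().orElseThrow(() -> new IllegalStateException("delete handle not set"));
            return Optional.empty(); // delete reports no output metadata
        }
        throw new AssertionError("Unhandled target type: " + target.getClass().getName());
    }

    public static void main(String[] args)
    {
        System.out.println(finish(new CreateTarget("t1"), List.of())); // Optional[finished create: t1]
        System.out.println(finish(new DeleteTarget(Optional.of("t2")), List.of())); // Optional.empty
    }
}

The final throw mirrors the original's AssertionError: an unhandled subtype is a programming error in the planner, not a user-facing condition.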
Use of io.trino.sql.planner.plan.TableWriterNode.WriterTarget in project trino by trinodb.
From class LogicalPlanner, method getInsertPlan:
private RelationPlan getInsertPlan(
        Analysis analysis, Table table, Query query, TableHandle tableHandle,
        List<ColumnHandle> insertColumns, Optional<TableLayout> newTableLayout,
        Optional<WriterTarget> materializedViewRefreshWriterTarget)
{
    TableMetadata tableMetadata = metadata.getTableMetadata(session, tableHandle);

    Map<NodeRef<LambdaArgumentDeclaration>, Symbol> lambdaDeclarationToSymbolMap = buildLambdaDeclarationToSymbolMap(analysis, symbolAllocator);
    RelationPlanner planner = new RelationPlanner(analysis, symbolAllocator, idAllocator, lambdaDeclarationToSymbolMap, plannerContext, Optional.empty(), session, ImmutableMap.of());
    RelationPlan plan = planner.process(query, null);

    // collect the symbols for the visible (non-hidden) fields of the source query
    ImmutableList.Builder<Symbol> builder = ImmutableList.builder();
    for (int i = 0; i < plan.getFieldMappings().size(); i++) {
        if (!plan.getDescriptor().getFieldByIndex(i).isHidden()) {
            builder.add(plan.getFieldMappings().get(i));
        }
    }
    List<Symbol> visibleFieldMappings = builder.build();

    Map<String, ColumnHandle> columns = metadata.getColumnHandles(session, tableHandle);
    Assignments.Builder assignments = Assignments.builder();
    boolean supportsMissingColumnsOnInsert = metadata.supportsMissingColumnsOnInsert(session, tableHandle);
    ImmutableList.Builder<ColumnMetadata> insertedColumnsBuilder = ImmutableList.builder();

    for (ColumnMetadata column : tableMetadata.getColumns()) {
        if (column.isHidden()) {
            continue;
        }
        Symbol output = symbolAllocator.newSymbol(column.getName(), column.getType());
        int index = insertColumns.indexOf(columns.get(column.getName()));
        if (index < 0) {
            // the column is not named in the INSERT column list
            if (supportsMissingColumnsOnInsert) {
                // the connector fills in missing columns itself
                continue;
            }
            Expression cast = new Cast(new NullLiteral(), toSqlType(column.getType()));
            assignments.put(output, cast);
            insertedColumnsBuilder.add(column);
        }
        else {
            Symbol input = visibleFieldMappings.get(index);
            Type tableType = column.getType();
            Type queryType = symbolAllocator.getTypes().get(input);
            if (queryType.equals(tableType) || typeCoercion.isTypeOnlyCoercion(queryType, tableType)) {
                assignments.put(output, input.toSymbolReference());
            }
            else {
                Expression cast = noTruncationCast(input.toSymbolReference(), queryType, tableType);
                assignments.put(output, cast);
            }
            insertedColumnsBuilder.add(column);
        }
    }
    ProjectNode projectNode = new ProjectNode(idAllocator.getNextId(), plan.getRoot(), assignments.build());

    List<ColumnMetadata> insertedColumns = insertedColumnsBuilder.build();
    List<Field> fields = insertedColumns.stream()
            .map(column -> Field.newUnqualified(column.getName(), column.getType()))
            .collect(toImmutableList());
    Scope scope = Scope.builder().withRelationType(RelationId.anonymous(), new RelationType(fields)).build();
    plan = new RelationPlan(projectNode, scope, projectNode.getOutputSymbols(), Optional.empty());

    plan = planner.addRowFilters(
            table,
            plan,
            failIfPredicateIsNotMeet(metadata, session, PERMISSION_DENIED, AccessDeniedException.PREFIX + "Cannot insert row that does not match to a row filter"),
            node -> {
                Scope accessControlScope = analysis.getAccessControlScope(table);
                // hidden fields are not accessible in insert
                return Scope.builder()
                        .like(accessControlScope)
                        .withRelationType(accessControlScope.getRelationId(), accessControlScope.getRelationType().withOnlyVisibleFields())
                        .build();
            });

    List<String> insertedTableColumnNames = insertedColumns.stream()
            .map(ColumnMetadata::getName)
            .collect(toImmutableList());

    String catalogName = tableHandle.getCatalogName().getCatalogName();
    TableStatisticsMetadata statisticsMetadata = metadata.getStatisticsCollectionMetadataForWrite(session, catalogName, tableMetadata.getMetadata());

    if (materializedViewRefreshWriterTarget.isPresent()) {
        // REFRESH MATERIALIZED VIEW reuses the insert plan with its own writer target
        return createTableWriterPlan(analysis, plan.getRoot(), plan.getFieldMappings(), materializedViewRefreshWriterTarget.get(), insertedTableColumnNames, insertedColumns, newTableLayout, statisticsMetadata);
    }

    InsertReference insertTarget = new InsertReference(
            tableHandle,
            insertedTableColumnNames.stream().map(columns::get).collect(toImmutableList()));
    return createTableWriterPlan(analysis, plan.getRoot(), plan.getFieldMappings(), insertTarget, insertedTableColumnNames, insertedColumns, newTableLayout, statisticsMetadata);
}
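The core of getInsertPlan is the column-alignment loop: for every visible table column it either forwards the matching query symbol, wraps it in a cast when the types differ, or fills the column with a typed NULL when the column is missing from the INSERT list and the connector cannot supply it. The following is a stripped-down sketch of that three-way decision over plain strings; the class and parameter names are made up for illustration.

import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

class InsertAlignmentSketch
{
    // Hypothetical miniature of the alignment loop: tableColumns maps name -> type,
    // insertColumns lists the columns named in the INSERT, queryOutputs the source expressions
    static Map<String, String> align(
            Map<String, String> tableColumns,
            List<String> insertColumns,
            List<String> queryOutputs,
            boolean supportsMissingColumnsOnInsert)
    {
        Map<String, String> assignments = new LinkedHashMap<>();
        for (Map.Entry<String, String> column : tableColumns.entrySet()) {
            int index = insertColumns.indexOf(column.getKey());
            if (index < 0) {
                if (supportsMissingColumnsOnInsert) {
                    continue; // the connector fills in the missing column itself
                }
                // column not mentioned in the INSERT: fill with NULL cast to the column type
                assignments.put(column.getKey(), "CAST(NULL AS " + column.getValue() + ")");
            }
            else {
                // forward the query output; the real planner additionally inserts a
                // coercing cast when the query type differs from the table type
                assignments.put(column.getKey(), queryOutputs.get(index));
            }
        }
        return assignments;
    }

    public static void main(String[] args)
    {
        Map<String, String> table = new LinkedHashMap<>();
        table.put("id", "bigint");
        table.put("name", "varchar");
        System.out.println(align(table, List.of("id"), List.of("x"), false));
        // {id=x, name=CAST(NULL AS varchar)}
    }
}

The real method also distinguishes type-only coercions, which need no runtime conversion, from genuine conversions, which go through noTruncationCast so that silently truncating casts are rejected.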
Use of io.trino.sql.planner.plan.TableWriterNode.WriterTarget in project trino by trinodb.
From class LogicalPlanner, method createTableWriterPlan:
private RelationPlan createTableWriterPlan(
        Analysis analysis, PlanNode source, List<Symbol> symbols, WriterTarget target,
        List<String> columnNames, List<ColumnMetadata> columnMetadataList,
        Optional<TableLayout> writeTableLayout, TableStatisticsMetadata statisticsMetadata)
{
    Optional<PartitioningScheme> partitioningScheme = Optional.empty();
    Optional<PartitioningScheme> preferredPartitioningScheme = Optional.empty();
    if (writeTableLayout.isPresent()) {
        List<Symbol> partitionFunctionArguments = new ArrayList<>();
        writeTableLayout.get().getPartitionColumns().stream()
                .mapToInt(columnNames::indexOf)
                .mapToObj(symbols::get)
                .forEach(partitionFunctionArguments::add);
        List<Symbol> outputLayout = new ArrayList<>(symbols);
        Optional<PartitioningHandle> partitioningHandle = writeTableLayout.get().getPartitioning();
        if (partitioningHandle.isPresent()) {
            partitioningScheme = Optional.of(new PartitioningScheme(Partitioning.create(partitioningHandle.get(), partitionFunctionArguments), outputLayout));
        }
        else {
            // empty connector partitioning handle means evenly partitioning on partitioning columns
            preferredPartitioningScheme = Optional.of(new PartitioningScheme(Partitioning.create(FIXED_HASH_DISTRIBUTION, partitionFunctionArguments), outputLayout));
        }
    }

    verify(columnNames.size() == symbols.size(), "columnNames.size() != symbols.size(): %s and %s", columnNames, symbols);
    Map<String, Symbol> columnToSymbolMap = zip(columnNames.stream(), symbols.stream(), SimpleImmutableEntry::new)
            .collect(toImmutableMap(Entry::getKey, Entry::getValue));
    Set<Symbol> notNullColumnSymbols = columnMetadataList.stream()
            .filter(column -> !column.isNullable())
            .map(ColumnMetadata::getName)
            .map(columnToSymbolMap::get)
            .collect(toImmutableSet());

    if (!statisticsMetadata.isEmpty()) {
        TableStatisticAggregation result = statisticsAggregationPlanner.createStatisticsAggregation(statisticsMetadata, columnToSymbolMap);
        StatisticAggregations.Parts aggregations = result.getAggregations().createPartialAggregations(symbolAllocator, plannerContext);

        // partial aggregation is run within the TableWriterOperator to calculate the statistics for
        // the data consumed by the TableWriterOperator
        // final aggregation is run within the TableFinishOperator to summarize collected statistics
        // by the partial aggregation from all of the writer nodes
        StatisticAggregations partialAggregation = aggregations.getPartialAggregation();

        TableFinishNode commitNode = new TableFinishNode(
                idAllocator.getNextId(),
                new TableWriterNode(idAllocator.getNextId(), source, target, symbolAllocator.newSymbol("partialrows", BIGINT), symbolAllocator.newSymbol("fragment", VARBINARY), symbols, columnNames, notNullColumnSymbols, partitioningScheme, preferredPartitioningScheme, Optional.of(partialAggregation), Optional.of(result.getDescriptor().map(aggregations.getMappings()::get))),
                target,
                symbolAllocator.newSymbol("rows", BIGINT),
                Optional.of(aggregations.getFinalAggregation()),
                Optional.of(result.getDescriptor()));
        return new RelationPlan(commitNode, analysis.getRootScope(), commitNode.getOutputSymbols(), Optional.empty());
    }

    TableFinishNode commitNode = new TableFinishNode(
            idAllocator.getNextId(),
            new TableWriterNode(idAllocator.getNextId(), source, target, symbolAllocator.newSymbol("partialrows", BIGINT), symbolAllocator.newSymbol("fragment", VARBINARY), symbols, columnNames, notNullColumnSymbols, partitioningScheme, preferredPartitioningScheme, Optional.empty(), Optional.empty()),
            target,
            symbolAllocator.newSymbol("rows", BIGINT),
            Optional.empty(),
            Optional.empty());
    return new RelationPlan(commitNode, analysis.getRootScope(), commitNode.getOutputSymbols(), Optional.empty());
}
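createTableWriterPlan pairs columnNames with symbols positionally via Guava's Streams.zip and collects the pairs into an ImmutableMap. That idiom is easy to lift out; here is a small standalone sketch, assuming Guava on the classpath (the column and symbol values are made up).

import static com.google.common.collect.ImmutableMap.toImmutableMap;
import static com.google.common.collect.Streams.zip;

import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

class ZipToMapSketch
{
    public static void main(String[] args)
    {
        List<String> columnNames = List.of("orderkey", "status"); // made-up columns
        List<String> symbols = List.of("orderkey_0", "status_1"); // made-up planner symbols

        // zip the two lists positionally, then collect the pairs into an immutable map;
        // toImmutableMap throws on duplicate keys, so repeated column names fail fast
        Map<String, String> columnToSymbol = zip(columnNames.stream(), symbols.stream(), SimpleImmutableEntry::new)
                .collect(toImmutableMap(Entry::getKey, Entry::getValue));

        System.out.println(columnToSymbol); // {orderkey=orderkey_0, status=status_1}
    }
}

Note that Streams.zip silently truncates to the shorter of the two streams, which is why the original guards with verify(columnNames.size() == symbols.size(), ...) before zipping.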