Use of io.trino.sql.PlannerContext in project trino by trinodb.
Class LogicalPlanner, method createTableWriterPlan:
private RelationPlan createTableWriterPlan(
        Analysis analysis,
        PlanNode source,
        List<Symbol> symbols,
        WriterTarget target,
        List<String> columnNames,
        List<ColumnMetadata> columnMetadataList,
        Optional<TableLayout> writeTableLayout,
        TableStatisticsMetadata statisticsMetadata)
{
    Optional<PartitioningScheme> partitioningScheme = Optional.empty();
    Optional<PartitioningScheme> preferredPartitioningScheme = Optional.empty();
    if (writeTableLayout.isPresent()) {
        List<Symbol> partitionFunctionArguments = new ArrayList<>();
        writeTableLayout.get().getPartitionColumns().stream()
                .mapToInt(columnNames::indexOf)
                .mapToObj(symbols::get)
                .forEach(partitionFunctionArguments::add);
        List<Symbol> outputLayout = new ArrayList<>(symbols);
        Optional<PartitioningHandle> partitioningHandle = writeTableLayout.get().getPartitioning();
        if (partitioningHandle.isPresent()) {
            partitioningScheme = Optional.of(new PartitioningScheme(
                    Partitioning.create(partitioningHandle.get(), partitionFunctionArguments),
                    outputLayout));
        }
        else {
            // empty connector partitioning handle means evenly partitioning on partitioning columns
            preferredPartitioningScheme = Optional.of(new PartitioningScheme(
                    Partitioning.create(FIXED_HASH_DISTRIBUTION, partitionFunctionArguments),
                    outputLayout));
        }
    }
    verify(columnNames.size() == symbols.size(), "columnNames.size() != symbols.size(): %s and %s", columnNames, symbols);
    Map<String, Symbol> columnToSymbolMap = zip(columnNames.stream(), symbols.stream(), SimpleImmutableEntry::new)
            .collect(toImmutableMap(Entry::getKey, Entry::getValue));
    Set<Symbol> notNullColumnSymbols = columnMetadataList.stream()
            .filter(column -> !column.isNullable())
            .map(ColumnMetadata::getName)
            .map(columnToSymbolMap::get)
            .collect(toImmutableSet());
    if (!statisticsMetadata.isEmpty()) {
        TableStatisticAggregation result = statisticsAggregationPlanner.createStatisticsAggregation(statisticsMetadata, columnToSymbolMap);
        StatisticAggregations.Parts aggregations = result.getAggregations().createPartialAggregations(symbolAllocator, plannerContext);
        // partial aggregation is run within the TableWriteOperator to calculate the statistics for
        // the data consumed by the TableWriteOperator
        // final aggregation is run within the TableFinishOperator to summarize collected statistics
        // by the partial aggregation from all of the writer nodes
        StatisticAggregations partialAggregation = aggregations.getPartialAggregation();
        TableFinishNode commitNode = new TableFinishNode(
                idAllocator.getNextId(),
                new TableWriterNode(
                        idAllocator.getNextId(),
                        source,
                        target,
                        symbolAllocator.newSymbol("partialrows", BIGINT),
                        symbolAllocator.newSymbol("fragment", VARBINARY),
                        symbols,
                        columnNames,
                        notNullColumnSymbols,
                        partitioningScheme,
                        preferredPartitioningScheme,
                        Optional.of(partialAggregation),
                        Optional.of(result.getDescriptor().map(aggregations.getMappings()::get))),
                target,
                symbolAllocator.newSymbol("rows", BIGINT),
                Optional.of(aggregations.getFinalAggregation()),
                Optional.of(result.getDescriptor()));
        return new RelationPlan(commitNode, analysis.getRootScope(), commitNode.getOutputSymbols(), Optional.empty());
    }
    TableFinishNode commitNode = new TableFinishNode(
            idAllocator.getNextId(),
            new TableWriterNode(
                    idAllocator.getNextId(),
                    source,
                    target,
                    symbolAllocator.newSymbol("partialrows", BIGINT),
                    symbolAllocator.newSymbol("fragment", VARBINARY),
                    symbols,
                    columnNames,
                    notNullColumnSymbols,
                    partitioningScheme,
                    preferredPartitioningScheme,
                    Optional.empty(),
                    Optional.empty()),
            target,
            symbolAllocator.newSymbol("rows", BIGINT),
            Optional.empty(),
            Optional.empty());
    return new RelationPlan(commitNode, analysis.getRootScope(), commitNode.getOutputSymbols(), Optional.empty());
}
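One detail worth pulling out of the method above is how it builds the column-name-to-symbol map: it zips the column names with the writer symbols positionally and collects the pairs into an immutable map. The standalone sketch below shows the same pattern with Guava's Streams.zip; the Symbol record and the sample values are placeholders invented for illustration, not Trino types.

import static com.google.common.collect.ImmutableMap.toImmutableMap;

import com.google.common.collect.Streams;
import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;

public class ColumnToSymbolMapSketch
{
    // Placeholder for Trino's Symbol class, only to keep this sketch self-contained.
    record Symbol(String name) {}

    public static void main(String[] args)
    {
        List<String> columnNames = List.of("orderkey", "custkey", "totalprice");
        List<Symbol> symbols = List.of(new Symbol("orderkey_0"), new Symbol("custkey_1"), new Symbol("totalprice_2"));

        // Pair each column name with the symbol at the same position, then collect
        // the pairs into an immutable map, as createTableWriterPlan does with
        // zip(...).collect(toImmutableMap(...)).
        Map<String, Symbol> columnToSymbolMap = Streams.zip(columnNames.stream(), symbols.stream(), SimpleImmutableEntry::new)
                .collect(toImmutableMap(Entry::getKey, Entry::getValue));

        System.out.println(columnToSymbolMap);
    }
}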
Use of io.trino.sql.PlannerContext in project trino by trinodb.
Class QueryPlanner, method plan:
public UpdateNode plan(Update node)
{
    Table table = node.getTable();
    TableHandle handle = analysis.getTableHandle(table);
    TableSchema tableSchema = plannerContext.getMetadata().getTableSchema(session, handle);
    Map<String, ColumnHandle> columnMap = plannerContext.getMetadata().getColumnHandles(session, handle);
    List<ColumnSchema> columnsSchemas = tableSchema.getColumns();
    List<String> targetColumnNames = node.getAssignments().stream()
            .map(assignment -> assignment.getName().getValue())
            .collect(toImmutableList());
    // Create lists of column names and SET expressions, in table column order
    ImmutableList.Builder<String> updatedColumnNamesBuilder = ImmutableList.builder();
    ImmutableList.Builder<ColumnHandle> updatedColumnHandlesBuilder = ImmutableList.builder();
    ImmutableList.Builder<Expression> orderedColumnValuesBuilder = ImmutableList.builder();
    for (ColumnSchema columnSchema : columnsSchemas) {
        String name = columnSchema.getName();
        int index = targetColumnNames.indexOf(name);
        if (index >= 0) {
            updatedColumnNamesBuilder.add(name);
            updatedColumnHandlesBuilder.add(requireNonNull(columnMap.get(name), "columnMap didn't contain name"));
            orderedColumnValuesBuilder.add(node.getAssignments().get(index).getValue());
        }
    }
    List<String> updatedColumnNames = updatedColumnNamesBuilder.build();
    List<ColumnHandle> updatedColumnHandles = updatedColumnHandlesBuilder.build();
    List<Expression> orderedColumnValues = orderedColumnValuesBuilder.build();
    // create table scan
    RelationPlan relationPlan = new RelationPlanner(analysis, symbolAllocator, idAllocator, lambdaDeclarationToSymbolMap, plannerContext, outerContext, session, recursiveSubqueries)
            .process(table, null);
    PlanBuilder builder = newPlanBuilder(relationPlan, analysis, lambdaDeclarationToSymbolMap);
    if (node.getWhere().isPresent()) {
        builder = filter(builder, node.getWhere().get(), node);
    }
    builder = subqueryPlanner.handleSubqueries(builder, orderedColumnValues, analysis.getSubqueries(node));
    builder = builder.appendProjections(orderedColumnValues, symbolAllocator, idAllocator);
    PlanAndMappings planAndMappings = coerce(builder, orderedColumnValues, analysis, idAllocator, symbolAllocator, typeCoercion);
    builder = planAndMappings.getSubPlan();
    ImmutableList.Builder<Symbol> updatedColumnValuesBuilder = ImmutableList.builder();
    orderedColumnValues.forEach(columnValue -> updatedColumnValuesBuilder.add(planAndMappings.get(columnValue)));
    Symbol rowId = builder.translate(analysis.getRowIdField(table));
    updatedColumnValuesBuilder.add(rowId);
    List<Symbol> outputs = ImmutableList.of(
            symbolAllocator.newSymbol("partialrows", BIGINT),
            symbolAllocator.newSymbol("fragment", VARBINARY));
    Optional<PlanNodeId> tableScanId = getIdForLeftTableScan(relationPlan.getRoot());
    checkArgument(tableScanId.isPresent(), "tableScanId not present");
    // create update node
    return new UpdateNode(
            idAllocator.getNextId(),
            builder.getRoot(),
            new UpdateTarget(
                    Optional.empty(),
                    plannerContext.getMetadata().getTableMetadata(session, handle).getTable(),
                    updatedColumnNames,
                    updatedColumnHandles),
            rowId,
            updatedColumnValuesBuilder.build(),
            outputs);
}
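The loop in the middle of plan(Update) is what keeps the SET assignments in table column order rather than the order they were written in the statement: it walks the table's column schemas and, for each column that appears among the assignment targets, records the name, handle, and value expression. Below is a minimal standalone sketch of that reordering; the table columns and assignment strings are made-up sample data, and plain strings stand in for Trino's ColumnHandle and Expression types.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class AssignmentOrderingSketch
{
    public static void main(String[] args)
    {
        // Columns in table order, and SET assignments as written in the UPDATE statement.
        List<String> tableColumns = List.of("orderkey", "custkey", "comment", "totalprice");
        Map<String, String> assignments = Map.of(
                "totalprice", "totalprice * 1.1",
                "comment", "'updated'");

        // Walk the table schema and pick up matching assignments in table column order,
        // mirroring the loop over columnsSchemas in QueryPlanner.plan(Update).
        List<String> updatedColumnNames = new ArrayList<>();
        List<String> orderedColumnValues = new ArrayList<>();
        for (String column : tableColumns) {
            String value = assignments.get(column);
            if (value != null) {
                updatedColumnNames.add(column);
                orderedColumnValues.add(value);
            }
        }

        System.out.println(updatedColumnNames);   // [comment, totalprice]
        System.out.println(orderedColumnValues);  // ['updated', totalprice * 1.1]
    }
}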
Use of io.trino.sql.PlannerContext in project trino by trinodb.
Class TestCallTask, method executeCallTask:
private void executeCallTask(MethodHandle methodHandle, Function<TransactionManager, AccessControl> accessControlProvider)
{
    TransactionManager transactionManager = queryRunner.getTransactionManager();
    ProcedureRegistry procedureRegistry = createProcedureRegistry(new Procedure("test", "testing_procedure", ImmutableList.of(), methodHandle));
    AccessControl accessControl = accessControlProvider.apply(transactionManager);
    PlannerContext plannerContext = plannerContextBuilder()
            .withTransactionManager(transactionManager)
            .build();
    new CallTask(transactionManager, plannerContext, accessControl, procedureRegistry)
            .execute(
                    new Call(QualifiedName.of("testing_procedure"), ImmutableList.of()),
                    stateMachine(transactionManager, plannerContext.getMetadata(), accessControl),
                    ImmutableList.of(),
                    WarningCollector.NOOP);
}
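The MethodHandle passed into executeCallTask is what the registered Procedure ultimately dispatches to when the CALL statement runs. The sketch below only illustrates how such a handle can be obtained for a static no-argument method with the standard java.lang.invoke API; the class and method names are invented for the example and this is not the test's actual wiring.

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

public class ProcedureHandleSketch
{
    // A trivial procedure body: a public static method with no arguments.
    public static void testingProcedure()
    {
        System.out.println("procedure invoked");
    }

    public static void main(String[] args) throws Throwable
    {
        // Resolve a handle to the static method above; a handle like this is what gets
        // wrapped in new Procedure("test", "testing_procedure", ImmutableList.of(), methodHandle).
        MethodHandle handle = MethodHandles.lookup().findStatic(
                ProcedureHandleSketch.class,
                "testingProcedure",
                MethodType.methodType(void.class));

        // Invoking the handle runs the procedure body, roughly what CallTask does
        // after resolving the procedure and binding its arguments.
        handle.invoke();
    }
}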
Use of io.trino.sql.PlannerContext in project trino by trinodb.
Class TestCreateTableTask, method testCreateWithNotNullColumns:
@Test
public void testCreateWithNotNullColumns()
{
    metadata.setConnectorCapabilities(NOT_NULL_COLUMN_CONSTRAINT);
    List<TableElement> inputColumns = ImmutableList.of(
            new ColumnDefinition(identifier("a"), toSqlType(DATE), true, emptyList(), Optional.empty()),
            new ColumnDefinition(identifier("b"), toSqlType(VARCHAR), false, emptyList(), Optional.empty()),
            new ColumnDefinition(identifier("c"), toSqlType(VARBINARY), false, emptyList(), Optional.empty()));
    CreateTable statement = new CreateTable(QualifiedName.of("test_table"), inputColumns, true, ImmutableList.of(), Optional.empty());
    CreateTableTask createTableTask = new CreateTableTask(plannerContext, new AllowAllAccessControl(), columnPropertyManager, tablePropertyManager);
    getFutureValue(createTableTask.internalExecute(statement, testSession, emptyList(), output -> {}));
    assertEquals(metadata.getCreateTableCallCount(), 1);
    List<ColumnMetadata> columns = metadata.getReceivedTableMetadata().get(0).getColumns();
    assertEquals(columns.size(), 3);

    assertEquals(columns.get(0).getName(), "a");
    assertEquals(columns.get(0).getType().getDisplayName().toUpperCase(ENGLISH), "DATE");
    assertTrue(columns.get(0).isNullable());

    assertEquals(columns.get(1).getName(), "b");
    assertEquals(columns.get(1).getType().getDisplayName().toUpperCase(ENGLISH), "VARCHAR");
    assertFalse(columns.get(1).isNullable());

    assertEquals(columns.get(2).getName(), "c");
    assertEquals(columns.get(2).getType().getDisplayName().toUpperCase(ENGLISH), "VARBINARY");
    assertFalse(columns.get(2).isNullable());
}
Use of io.trino.sql.PlannerContext in project trino by trinodb.
Class TestCreateTableTask, method testCreateLike:
@Test
public void testCreateLike()
{
    CreateTable statement = getCreatleLikeStatement(false);
    CreateTableTask createTableTask = new CreateTableTask(plannerContext, new AllowAllAccessControl(), columnPropertyManager, tablePropertyManager);
    getFutureValue(createTableTask.internalExecute(statement, testSession, List.of(), output -> {}));
    assertEquals(metadata.getCreateTableCallCount(), 1);
    assertThat(metadata.getReceivedTableMetadata().get(0).getColumns()).isEqualTo(PARENT_TABLE.getColumns());
    assertThat(metadata.getReceivedTableMetadata().get(0).getProperties()).isEmpty();
}