Use of io.trino.sql.analyzer.Analysis in project trino by trinodb.
The class SqlQueryExecution, method analyze:
private static Analysis analyze(
        PreparedQuery preparedQuery,
        QueryStateMachine stateMachine,
        WarningCollector warningCollector,
        AnalyzerFactory analyzerFactory)
{
    stateMachine.beginAnalysis();

    requireNonNull(preparedQuery, "preparedQuery is null");
    Analyzer analyzer = analyzerFactory.createAnalyzer(
            stateMachine.getSession(),
            preparedQuery.getParameters(),
            parameterExtractor(preparedQuery.getStatement(), preparedQuery.getParameters()),
            warningCollector);
    Analysis analysis;
    try {
        analysis = analyzer.analyze(preparedQuery.getStatement());
    }
    catch (StackOverflowError e) {
        throw new TrinoException(STACK_OVERFLOW, "statement is too large (stack overflow during analysis)", e);
    }
    stateMachine.setUpdateType(analysis.getUpdateType());
    stateMachine.setReferencedTables(analysis.getReferencedTables());
    stateMachine.setRoutines(analysis.getRoutines());
    stateMachine.endAnalysis();
    return analysis;
}
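The detail worth noting here is the catch of StackOverflowError: analysis recurses over the statement's AST, so a pathologically deep query would otherwise kill the thread with an Error. Catching it and rethrowing as a TrinoException with the STACK_OVERFLOW error code turns the crash into an ordinary query failure. A minimal standalone sketch of the same guard pattern, with a hypothetical Node type standing in for the Trino AST:

public final class StackGuardSketch
{
    // Hypothetical deeply nested structure, standing in for a parsed SQL statement
    record Node(Node child) {}

    // Recursive walk, analogous to the analyzer's AST visitor
    static int depth(Node node)
    {
        return node == null ? 0 : 1 + depth(node.child());
    }

    public static void main(String[] args)
    {
        // Build an extremely deep chain to simulate a huge statement
        Node root = null;
        for (int i = 0; i < 1_000_000; i++) {
            root = new Node(root);
        }
        try {
            System.out.println(depth(root));
        }
        catch (StackOverflowError e) {
            // same conversion analyze() performs with TrinoException(STACK_OVERFLOW, ...)
            throw new IllegalArgumentException("statement is too large (stack overflow during analysis)", e);
        }
    }
}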
Use of io.trino.sql.analyzer.Analysis in project trino by trinodb.
The class LogicalPlanner, method getInsertPlan:
private RelationPlan getInsertPlan(
        Analysis analysis,
        Table table,
        Query query,
        TableHandle tableHandle,
        List<ColumnHandle> insertColumns,
        Optional<TableLayout> newTableLayout,
        Optional<WriterTarget> materializedViewRefreshWriterTarget)
{
    TableMetadata tableMetadata = metadata.getTableMetadata(session, tableHandle);

    Map<NodeRef<LambdaArgumentDeclaration>, Symbol> lambdaDeclarationToSymbolMap = buildLambdaDeclarationToSymbolMap(analysis, symbolAllocator);
    RelationPlanner planner = new RelationPlanner(analysis, symbolAllocator, idAllocator, lambdaDeclarationToSymbolMap, plannerContext, Optional.empty(), session, ImmutableMap.of());
    RelationPlan plan = planner.process(query, null);

    ImmutableList.Builder<Symbol> builder = ImmutableList.builder();
    for (int i = 0; i < plan.getFieldMappings().size(); i++) {
        if (!plan.getDescriptor().getFieldByIndex(i).isHidden()) {
            builder.add(plan.getFieldMappings().get(i));
        }
    }
    List<Symbol> visibleFieldMappings = builder.build();

    Map<String, ColumnHandle> columns = metadata.getColumnHandles(session, tableHandle);
    Assignments.Builder assignments = Assignments.builder();
    boolean supportsMissingColumnsOnInsert = metadata.supportsMissingColumnsOnInsert(session, tableHandle);
    ImmutableList.Builder<ColumnMetadata> insertedColumnsBuilder = ImmutableList.builder();

    for (ColumnMetadata column : tableMetadata.getColumns()) {
        if (column.isHidden()) {
            continue;
        }
        Symbol output = symbolAllocator.newSymbol(column.getName(), column.getType());
        int index = insertColumns.indexOf(columns.get(column.getName()));
        if (index < 0) {
            if (supportsMissingColumnsOnInsert) {
                continue;
            }
            Expression cast = new Cast(new NullLiteral(), toSqlType(column.getType()));
            assignments.put(output, cast);
            insertedColumnsBuilder.add(column);
        }
        else {
            Symbol input = visibleFieldMappings.get(index);
            Type tableType = column.getType();
            Type queryType = symbolAllocator.getTypes().get(input);
            if (queryType.equals(tableType) || typeCoercion.isTypeOnlyCoercion(queryType, tableType)) {
                assignments.put(output, input.toSymbolReference());
            }
            else {
                Expression cast = noTruncationCast(input.toSymbolReference(), queryType, tableType);
                assignments.put(output, cast);
            }
            insertedColumnsBuilder.add(column);
        }
    }
    ProjectNode projectNode = new ProjectNode(idAllocator.getNextId(), plan.getRoot(), assignments.build());

    List<ColumnMetadata> insertedColumns = insertedColumnsBuilder.build();
    List<Field> fields = insertedColumns.stream()
            .map(column -> Field.newUnqualified(column.getName(), column.getType()))
            .collect(toImmutableList());
    Scope scope = Scope.builder().withRelationType(RelationId.anonymous(), new RelationType(fields)).build();

    plan = new RelationPlan(projectNode, scope, projectNode.getOutputSymbols(), Optional.empty());

    plan = planner.addRowFilters(
            table,
            plan,
            failIfPredicateIsNotMeet(metadata, session, PERMISSION_DENIED, AccessDeniedException.PREFIX + "Cannot insert row that does not match to a row filter"),
            node -> {
                Scope accessControlScope = analysis.getAccessControlScope(table);
                // hidden fields are not accessible in insert
                return Scope.builder()
                        .like(accessControlScope)
                        .withRelationType(accessControlScope.getRelationId(), accessControlScope.getRelationType().withOnlyVisibleFields())
                        .build();
            });

    List<String> insertedTableColumnNames = insertedColumns.stream()
            .map(ColumnMetadata::getName)
            .collect(toImmutableList());

    String catalogName = tableHandle.getCatalogName().getCatalogName();
    TableStatisticsMetadata statisticsMetadata = metadata.getStatisticsCollectionMetadataForWrite(session, catalogName, tableMetadata.getMetadata());

    if (materializedViewRefreshWriterTarget.isPresent()) {
        return createTableWriterPlan(
                analysis,
                plan.getRoot(),
                plan.getFieldMappings(),
                materializedViewRefreshWriterTarget.get(),
                insertedTableColumnNames,
                insertedColumns,
                newTableLayout,
                statisticsMetadata);
    }

    InsertReference insertTarget = new InsertReference(
            tableHandle,
            insertedTableColumnNames.stream()
                    .map(columns::get)
                    .collect(toImmutableList()));
    return createTableWriterPlan(
            analysis,
            plan.getRoot(),
            plan.getFieldMappings(),
            insertTarget,
            insertedTableColumnNames,
            insertedColumns,
            newTableLayout,
            statisticsMetadata);
}
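The heart of getInsertPlan is aligning the table's visible columns with the columns named in the INSERT: a table column absent from the insert list is skipped when the connector can fill in missing columns itself, and otherwise assigned NULL cast to the column's type; a matching column is passed through directly when its type already fits, or wrapped in a coercing cast. A stripped-down sketch of that alignment logic, using simplified hypothetical types (TableColumn, Assignment) in place of the real planner classes:

import java.util.ArrayList;
import java.util.List;

public final class InsertAlignmentSketch
{
    // Hypothetical stand-ins for ColumnMetadata and the planner's assignments
    record TableColumn(String name, String type) {}
    record Assignment(String column, String expression) {}

    static List<Assignment> align(
            List<TableColumn> tableColumns,
            List<String> insertColumns,
            List<String> queryOutputs,
            boolean connectorFillsMissingColumns)
    {
        List<Assignment> assignments = new ArrayList<>();
        for (TableColumn column : tableColumns) {
            int index = insertColumns.indexOf(column.name());
            if (index < 0) {
                if (connectorFillsMissingColumns) {
                    continue; // connector supplies its own default for the missing column
                }
                // missing column becomes an explicit NULL cast to the declared type
                assignments.add(new Assignment(column.name(), "CAST(NULL AS " + column.type() + ")"));
            }
            else {
                // matching column passes through; the real planner adds a coercing
                // cast here when the query type does not fit the table type
                assignments.add(new Assignment(column.name(), queryOutputs.get(index)));
            }
        }
        return assignments;
    }

    public static void main(String[] args)
    {
        List<TableColumn> table = List.of(new TableColumn("id", "bigint"), new TableColumn("name", "varchar"));
        // INSERT INTO t (id) SELECT ... : "name" is missing from the insert list
        System.out.println(align(table, List.of("id"), List.of("expr$0"), false));
        // [Assignment[column=id, expression=expr$0], Assignment[column=name, expression=CAST(NULL AS varchar)]]
    }
}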
Use of io.trino.sql.analyzer.Analysis in project trino by trinodb.
The class LogicalPlanner, method createTableCreationPlan:
private RelationPlan createTableCreationPlan(Analysis analysis, Query query)
{
    Analysis.Create create = analysis.getCreate().orElseThrow();
    QualifiedObjectName destination = create.getDestination().orElseThrow();

    RelationPlan plan = createRelationPlan(analysis, query);
    if (!create.isCreateTableAsSelectWithData()) {
        PlanNode root = new LimitNode(idAllocator.getNextId(), plan.getRoot(), 0L, false);
        plan = new RelationPlan(root, plan.getScope(), plan.getFieldMappings(), Optional.empty());
    }

    ConnectorTableMetadata tableMetadata = create.getMetadata().orElseThrow();
    Optional<TableLayout> newTableLayout = create.getLayout();

    List<String> columnNames = tableMetadata.getColumns().stream()
            .filter(column -> !column.isHidden()) // todo this filter is redundant
            .map(ColumnMetadata::getName)
            .collect(toImmutableList());

    TableStatisticsMetadata statisticsMetadata = metadata.getStatisticsCollectionMetadataForWrite(session, destination.getCatalogName(), tableMetadata);

    return createTableWriterPlan(
            analysis,
            plan.getRoot(),
            visibleFields(plan),
            new CreateReference(destination.getCatalogName(), tableMetadata, newTableLayout),
            columnNames,
            tableMetadata.getColumns(),
            newTableLayout,
            statisticsMetadata);
}
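The LimitNode wrapping deserves a note: for CREATE TABLE ... AS ... WITH NO DATA, the planner keeps the query plan (so the new table inherits the query's column names and types) but caps it at zero rows. A plain-Java analogy of that "shape without rows" idea, using a hypothetical Row record:

public final class WithNoDataSketch
{
    // Hypothetical row type standing in for the query's output schema
    record Row(long id, String name) {}

    public static void main(String[] args)
    {
        java.util.List<Row> queryResult = java.util.List.of(new Row(1, "a"), new Row(2, "b"));

        // WITH NO DATA: keep the element type (the schema), emit zero rows,
        // which is what wrapping the plan in LimitNode(..., 0L, false) achieves
        java.util.List<Row> written = queryResult.stream().limit(0).toList();
        System.out.println(written); // []
    }
}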
Use of io.trino.sql.analyzer.Analysis in project trino by trinodb.
The class LogicalPlanner, method createTableWriterPlan:
private RelationPlan createTableWriterPlan(
        Analysis analysis,
        PlanNode source,
        List<Symbol> symbols,
        WriterTarget target,
        List<String> columnNames,
        List<ColumnMetadata> columnMetadataList,
        Optional<TableLayout> writeTableLayout,
        TableStatisticsMetadata statisticsMetadata)
{
    Optional<PartitioningScheme> partitioningScheme = Optional.empty();
    Optional<PartitioningScheme> preferredPartitioningScheme = Optional.empty();

    if (writeTableLayout.isPresent()) {
        List<Symbol> partitionFunctionArguments = new ArrayList<>();
        writeTableLayout.get().getPartitionColumns().stream()
                .mapToInt(columnNames::indexOf)
                .mapToObj(symbols::get)
                .forEach(partitionFunctionArguments::add);

        List<Symbol> outputLayout = new ArrayList<>(symbols);

        Optional<PartitioningHandle> partitioningHandle = writeTableLayout.get().getPartitioning();
        if (partitioningHandle.isPresent()) {
            partitioningScheme = Optional.of(new PartitioningScheme(
                    Partitioning.create(partitioningHandle.get(), partitionFunctionArguments),
                    outputLayout));
        }
        else {
            // empty connector partitioning handle means evenly partitioning on partitioning columns
            preferredPartitioningScheme = Optional.of(new PartitioningScheme(
                    Partitioning.create(FIXED_HASH_DISTRIBUTION, partitionFunctionArguments),
                    outputLayout));
        }
    }

    verify(columnNames.size() == symbols.size(), "columnNames.size() != symbols.size(): %s and %s", columnNames, symbols);
    Map<String, Symbol> columnToSymbolMap = zip(columnNames.stream(), symbols.stream(), SimpleImmutableEntry::new)
            .collect(toImmutableMap(Entry::getKey, Entry::getValue));

    Set<Symbol> notNullColumnSymbols = columnMetadataList.stream()
            .filter(column -> !column.isNullable())
            .map(ColumnMetadata::getName)
            .map(columnToSymbolMap::get)
            .collect(toImmutableSet());

    if (!statisticsMetadata.isEmpty()) {
        TableStatisticAggregation result = statisticsAggregationPlanner.createStatisticsAggregation(statisticsMetadata, columnToSymbolMap);

        StatisticAggregations.Parts aggregations = result.getAggregations().createPartialAggregations(symbolAllocator, plannerContext);

        // partial aggregation is run within the TableWriteOperator to calculate the statistics for
        // the data consumed by the TableWriteOperator
        // final aggregation is run within the TableFinishOperator to summarize collected statistics
        // by the partial aggregation from all of the writer nodes
        StatisticAggregations partialAggregation = aggregations.getPartialAggregation();

        TableFinishNode commitNode = new TableFinishNode(
                idAllocator.getNextId(),
                new TableWriterNode(
                        idAllocator.getNextId(),
                        source,
                        target,
                        symbolAllocator.newSymbol("partialrows", BIGINT),
                        symbolAllocator.newSymbol("fragment", VARBINARY),
                        symbols,
                        columnNames,
                        notNullColumnSymbols,
                        partitioningScheme,
                        preferredPartitioningScheme,
                        Optional.of(partialAggregation),
                        Optional.of(result.getDescriptor().map(aggregations.getMappings()::get))),
                target,
                symbolAllocator.newSymbol("rows", BIGINT),
                Optional.of(aggregations.getFinalAggregation()),
                Optional.of(result.getDescriptor()));

        return new RelationPlan(commitNode, analysis.getRootScope(), commitNode.getOutputSymbols(), Optional.empty());
    }

    TableFinishNode commitNode = new TableFinishNode(
            idAllocator.getNextId(),
            new TableWriterNode(
                    idAllocator.getNextId(),
                    source,
                    target,
                    symbolAllocator.newSymbol("partialrows", BIGINT),
                    symbolAllocator.newSymbol("fragment", VARBINARY),
                    symbols,
                    columnNames,
                    notNullColumnSymbols,
                    partitioningScheme,
                    preferredPartitioningScheme,
                    Optional.empty(),
                    Optional.empty()),
            target,
            symbolAllocator.newSymbol("rows", BIGINT),
            Optional.empty(),
            Optional.empty());
    return new RelationPlan(commitNode, analysis.getRootScope(), commitNode.getOutputSymbols(), Optional.empty());
}
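Statistics collection here is split into two phases: each writer computes partial aggregations over the rows it writes (inside TableWriterNode), and TableFinishNode merges those partials into final table statistics at commit time. A self-contained sketch of that partial/final split, with a hypothetical Stats shape (row count and max) standing in for the real statistic aggregations:

import java.util.List;

public final class TwoPhaseStatsSketch
{
    // Hypothetical statistic shape; real plans carry per-column aggregations
    record Stats(long rowCount, long maxValue) {}

    // partial phase: each writer summarizes only the rows it wrote
    static Stats partial(List<Long> rowsWrittenByOneWriter)
    {
        long max = rowsWrittenByOneWriter.stream().mapToLong(Long::longValue).max().orElse(Long.MIN_VALUE);
        return new Stats(rowsWrittenByOneWriter.size(), max);
    }

    // final phase: the finish step merges the partials from all writers at commit
    static Stats merge(List<Stats> partials)
    {
        long rows = partials.stream().mapToLong(Stats::rowCount).sum();
        long max = partials.stream().mapToLong(Stats::maxValue).max().orElse(Long.MIN_VALUE);
        return new Stats(rows, max);
    }

    public static void main(String[] args)
    {
        Stats writer1 = partial(List.of(1L, 5L));
        Stats writer2 = partial(List.of(3L, 9L, 2L));
        System.out.println(merge(List.of(writer1, writer2))); // Stats[rowCount=5, maxValue=9]
    }
}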
Use of io.trino.sql.analyzer.Analysis in project trino by trinodb.
The class QueryPlanner, method planGroupingSets:
private GroupingSetsPlan planGroupingSets(PlanBuilder subPlan, QuerySpecification node, GroupingSetAnalysis groupingSetAnalysis)
{
    Map<Symbol, Symbol> groupingSetMappings = new LinkedHashMap<>();

    // Compute a set of artificial columns that will contain the values of the original columns
    // filtered by whether the column is included in the grouping set
    // This will become the basis for the scope for any column references
    Symbol[] fields = new Symbol[subPlan.getTranslations().getFieldSymbols().size()];
    for (FieldId field : groupingSetAnalysis.getAllFields()) {
        Symbol input = subPlan.getTranslations().getFieldSymbols().get(field.getFieldIndex());
        Symbol output = symbolAllocator.newSymbol(input, "gid");
        fields[field.getFieldIndex()] = output;
        groupingSetMappings.put(output, input);
    }

    Map<ScopeAware<Expression>, Symbol> complexExpressions = new HashMap<>();
    for (Expression expression : groupingSetAnalysis.getComplexExpressions()) {
        if (!complexExpressions.containsKey(scopeAwareKey(expression, analysis, subPlan.getScope()))) {
            Symbol input = subPlan.translate(expression);
            Symbol output = symbolAllocator.newSymbol(expression, analysis.getType(expression), "gid");
            complexExpressions.put(scopeAwareKey(expression, analysis, subPlan.getScope()), output);
            groupingSetMappings.put(output, input);
        }
    }

    // For the purpose of "distinct", we need to canonicalize column references that may have varying
    // syntactic forms (e.g., "t.a" vs "a"). Thus we need to enumerate grouping sets based on the underlying
    // fieldId associated with each column reference expression.

    // The catch is that simple group-by expressions can be arbitrary expressions (this is a departure from the SQL specification).
    // But they don't affect the number of grouping sets or the behavior of "distinct". We can compute all the candidate
    // grouping sets in terms of fieldId, dedup as appropriate and then cross-join them with the complex expressions.

    // This tracks the grouping sets before complex expressions are considered.
    // It's also used to compute the descriptors needed to implement grouping()
    List<Set<FieldId>> columnOnlyGroupingSets = enumerateGroupingSets(groupingSetAnalysis);
    if (node.getGroupBy().isPresent() && node.getGroupBy().get().isDistinct()) {
        columnOnlyGroupingSets = columnOnlyGroupingSets.stream()
                .distinct()
                .collect(toImmutableList());
    }

    // translate from FieldIds to Symbols
    List<List<Symbol>> sets = columnOnlyGroupingSets.stream()
            .map(set -> set.stream()
                    .map(FieldId::getFieldIndex)
                    .map(index -> fields[index])
                    .collect(toImmutableList()))
            .collect(toImmutableList());

    // combine (cartesian product) with complex expressions
    List<List<Symbol>> groupingSets = sets.stream()
            .map(set -> ImmutableList.<Symbol>builder()
                    .addAll(set)
                    .addAll(complexExpressions.values())
                    .build())
            .collect(toImmutableList());

    // Generate GroupIdNode (multiple grouping sets) or ProjectNode (single grouping set)
    PlanNode groupId;
    Optional<Symbol> groupIdSymbol = Optional.empty();
    if (groupingSets.size() > 1) {
        groupIdSymbol = Optional.of(symbolAllocator.newSymbol("groupId", BIGINT));
        groupId = new GroupIdNode(
                idAllocator.getNextId(),
                subPlan.getRoot(),
                groupingSets,
                groupingSetMappings,
                subPlan.getRoot().getOutputSymbols(),
                groupIdSymbol.get());
    }
    else {
        Assignments.Builder assignments = Assignments.builder();
        assignments.putIdentities(subPlan.getRoot().getOutputSymbols());
        groupingSetMappings.forEach((key, value) -> assignments.put(key, value.toSymbolReference()));
        groupId = new ProjectNode(idAllocator.getNextId(), subPlan.getRoot(), assignments.build());
    }

    subPlan = new PlanBuilder(
            subPlan.getTranslations().withNewMappings(complexExpressions, Arrays.asList(fields)),
            groupId);

    return new GroupingSetsPlan(subPlan, columnOnlyGroupingSets, groupingSets, groupIdSymbol);
}
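The key move in planGroupingSets is canonicalizing column references down to FieldIds, so that syntactic variants like t.a and a land in the same grouping set; GROUP BY DISTINCT then dedups whole sets, and the complex expressions are appended to every surviving set afterwards. A small sketch of that enumerate-dedup-append pipeline over plain integers and strings (the field indexes and symbol names below are hypothetical):

import java.util.List;
import java.util.stream.Stream;
import static java.util.stream.Collectors.toList;

public final class GroupingSetsSketch
{
    public static void main(String[] args)
    {
        // Column-only grouping sets as field indexes: "t.a" and "a" both canonicalize
        // to field 0, so the first two sets are duplicates
        List<List<Integer>> columnOnly = List.of(List.of(0), List.of(0), List.of(0, 1));

        // GROUP BY DISTINCT removes duplicate grouping sets wholesale
        List<List<Integer>> deduped = columnOnly.stream().distinct().collect(toList());

        // Complex expressions (e.g. a + b) are appended to every grouping set
        List<String> complex = List.of("expr$gid");
        List<List<String>> groupingSets = deduped.stream()
                .map(set -> Stream.concat(
                        set.stream().map(index -> "field$" + index),
                        complex.stream()).collect(toList()))
                .collect(toList());

        System.out.println(groupingSets); // [[field$0, expr$gid], [field$0, field$1, expr$gid]]
    }
}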