Use of io.prestosql.sql.planner.plan.StatisticAggregations in project hetu-core by openlookeng.
The class TestExternalFunctionPushDownChecker, method testTableWriterNodeWithExternalCall. A statistics aggregation that calls the external function jdbc.v1.foo must be rejected, so the test expects IllegalExternalFunctionUsageException.
@Test(expectedExceptions = ExternalFunctionPushDownChecker.IllegalExternalFunctionUsageException.class, expectedExceptionsMessageRegExp = "The external function jdbc.v1.foo does not support to push down to data source for this query.")
public void testTableWriterNodeWithExternalCall()
{
    TableWriterNode.DeleteTarget deleteTarget = new TableWriterNode.DeleteTarget(tableHandle, new SchemaTableName("sch", "tab"));
    PlanNode node = new TableWriterNode(
            idAllocator.getNextId(),
            builder.values(),
            deleteTarget,
            columnA,
            columnB,
            ImmutableList.of(columnA, columnB),
            ImmutableList.of("a", "b"),
            Optional.empty(),
            Optional.of(new StatisticAggregations(
                    ImmutableMap.of(columnA, new AggregationNode.Aggregation(externalFooCall1, externalFooCall1.getArguments(), false, Optional.empty(), Optional.empty(), Optional.empty())),
                    ImmutableList.of(columnA))),
            Optional.of(new StatisticAggregationsDescriptor<>(ImmutableMap.of(), ImmutableMap.of(), ImmutableMap.of())));
    validatePlan(node);
}
Use of io.prestosql.sql.planner.plan.StatisticAggregations in project hetu-core by openlookeng.
The class TestExternalFunctionPushDownChecker, method testTableWriterNode. The same plan shape with a built-in sum aggregation passes validation.
@Test
public void testTableWriterNode()
{
    TableWriterNode.DeleteTarget deleteTarget = new TableWriterNode.DeleteTarget(tableHandle, new SchemaTableName("sch", "tab"));
    PlanNode node = new TableWriterNode(
            idAllocator.getNextId(),
            builder.values(),
            deleteTarget,
            columnA,
            columnB,
            ImmutableList.of(columnA, columnB),
            ImmutableList.of("a", "b"),
            Optional.empty(),
            Optional.of(new StatisticAggregations(
                    ImmutableMap.of(columnA, new AggregationNode.Aggregation(sumCall, sumCall.getArguments(), false, Optional.empty(), Optional.empty(), Optional.empty())),
                    ImmutableList.of(columnA))),
            Optional.of(new StatisticAggregationsDescriptor<>(ImmutableMap.of(), ImmutableMap.of(), ImmutableMap.of())));
    validatePlan(node);
}
Use of io.prestosql.sql.planner.plan.StatisticAggregations in project hetu-core by openlookeng.
The class TestExternalFunctionPushDownChecker, method testVacuumTableNode. Validation of a VacuumTableNode carrying a sum-based statistics aggregation.
@Test
public void testVacuumTableNode()
{
    TableWriterNode.DeleteTarget deleteTarget = new TableWriterNode.DeleteTarget(tableHandle, new SchemaTableName("sch", "tab"));
    PlanNode node = new VacuumTableNode(
            idAllocator.getNextId(),
            tableHandle,
            deleteTarget,
            columnA,
            columnB,
            "p1",
            false,
            ImmutableList.of(),
            Optional.of(new StatisticAggregations(
                    ImmutableMap.of(columnA, new AggregationNode.Aggregation(sumCall, sumCall.getArguments(), false, Optional.empty(), Optional.empty(), Optional.empty())),
                    ImmutableList.of(columnA))),
            Optional.of(new StatisticAggregationsDescriptor<>(ImmutableMap.of(), ImmutableMap.of(), ImmutableMap.of())));
    validatePlan(node);
}
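All three tests build their statistics the same way: a StatisticAggregations pairs a map from output symbol to AggregationNode.Aggregation with a list of grouping symbols. The shared construction could be factored out as below; this is a sketch rather than code from hetu-core, and the helper name singleColumnStatistics is hypothetical.

// Hypothetical helper (not in hetu-core): the one-column StatisticAggregations
// that each of the three tests above builds inline.
private static StatisticAggregations singleColumnStatistics(Symbol column, CallExpression call)
{
    AggregationNode.Aggregation aggregation = new AggregationNode.Aggregation(
            call,                // the aggregate call, e.g. sumCall or externalFooCall1
            call.getArguments(), // argument expressions of the call
            false,               // not DISTINCT
            Optional.empty(),    // no filter
            Optional.empty(),    // no ordering scheme
            Optional.empty());   // no mask
    // Key the aggregation by its output symbol and group by that same column.
    return new StatisticAggregations(ImmutableMap.of(column, aggregation), ImmutableList.of(column));
}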
Use of io.prestosql.sql.planner.plan.StatisticAggregations in project hetu-core by openlookeng.
The class LogicalPlanner, method createTableWriterPlan.
private RelationPlan createTableWriterPlan(Analysis analysis, RelationPlan plan, WriterTarget target, List<String> columnNames, Optional<NewTableLayout> writeTableLayout, TableStatisticsMetadata statisticsMetadata)
{
    PlanNode source = plan.getRoot();
    // CREATE TABLE AS ... WITH NO DATA keeps the table shape but writes no rows
    if (!analysis.isCreateTableAsSelectWithData()) {
        source = new LimitNode(idAllocator.getNextId(), source, 0L, false);
    }
    // TODO: this should be checked in analysis
    writeTableLayout.ifPresent(layout -> {
        if (!ImmutableSet.copyOf(columnNames).containsAll(layout.getPartitionColumns())) {
            throw new PrestoException(NOT_SUPPORTED, "INSERT must write all distribution columns: " + layout.getPartitionColumns());
        }
    });
    List<Symbol> symbols = plan.getFieldMappings();
    Optional<PartitioningScheme> partitioningScheme = Optional.empty();
    if (writeTableLayout.isPresent()) {
        List<Symbol> partitionFunctionArguments = new ArrayList<>();
        writeTableLayout.get().getPartitionColumns().stream()
                .mapToInt(columnNames::indexOf)
                .mapToObj(symbols::get)
                .forEach(partitionFunctionArguments::add);
        List<Symbol> outputLayout = new ArrayList<>(symbols);
        PartitioningHandle partitioningHandle = writeTableLayout.get().getPartitioning().orElse(FIXED_HASH_DISTRIBUTION);
        partitioningScheme = Optional.of(new PartitioningScheme(Partitioning.create(partitioningHandle, partitionFunctionArguments), outputLayout));
    }
    if (!statisticsMetadata.isEmpty()) {
        verify(columnNames.size() == symbols.size(), "columnNames.size() != symbols.size(): %s and %s", columnNames, symbols);
        Map<String, Symbol> columnToSymbolMap = zip(columnNames.stream(), symbols.stream(), SimpleImmutableEntry::new)
                .collect(toImmutableMap(Entry::getKey, Entry::getValue));
        TableStatisticAggregation result = statisticsAggregationPlanner.createStatisticsAggregation(statisticsMetadata, columnToSymbolMap);
        StatisticAggregations.Parts aggregations = result.getAggregations().createPartialAggregations(planSymbolAllocator, metadata);
        // The partial aggregation is run within the TableWriterOperator to calculate the statistics
        // for the data consumed by that operator; the final aggregation is run within the
        // TableFinishOperator to summarize the partial results collected from all of the writer nodes.
        StatisticAggregations partialAggregation = aggregations.getPartialAggregation();
        PlanNode writerNode = new TableWriterNode(
                idAllocator.getNextId(),
                source,
                target,
                planSymbolAllocator.newSymbol("partialrows", BIGINT),
                planSymbolAllocator.newSymbol("fragment", VARBINARY),
                symbols,
                columnNames,
                partitioningScheme,
                Optional.of(partialAggregation),
                Optional.of(result.getDescriptor().map(aggregations.getMappings()::get)));
        TableFinishNode commitNode = new TableFinishNode(
                idAllocator.getNextId(),
                writerNode,
                target,
                planSymbolAllocator.newSymbol("rows", BIGINT),
                Optional.of(aggregations.getFinalAggregation()),
                Optional.of(result.getDescriptor()));
        return new RelationPlan(commitNode, analysis.getRootScope(), commitNode.getOutputSymbols());
    }
    // No statistics requested: wire the writer directly into the finish node
    TableFinishNode commitNode = new TableFinishNode(
            idAllocator.getNextId(),
            new TableWriterNode(idAllocator.getNextId(), source, target, planSymbolAllocator.newSymbol("partialrows", BIGINT), planSymbolAllocator.newSymbol("fragment", VARBINARY), symbols, columnNames, partitioningScheme, Optional.empty(), Optional.empty()),
            target,
            planSymbolAllocator.newSymbol("rows", BIGINT),
            Optional.empty(),
            Optional.empty());
    return new RelationPlan(commitNode, analysis.getRootScope(), commitNode.getOutputSymbols());
}
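The two-stage statistics computation above can also be looked at in isolation. A minimal sketch, assuming the same planSymbolAllocator and metadata fields are in scope; the method itself (describeSplit) is illustrative and not part of LogicalPlanner.

// Illustrative only: how the Parts produced by createPartialAggregations are used.
private void describeSplit(StatisticAggregations statistics)
{
    StatisticAggregations.Parts parts = statistics.createPartialAggregations(planSymbolAllocator, metadata);
    // Attached to the TableWriterNode: runs in each writer, over the rows it consumes.
    StatisticAggregations partial = parts.getPartialAggregation();
    // Attached to the TableFinishNode: combines the partial results from all writers.
    StatisticAggregations finalAggregation = parts.getFinalAggregation();
    // getMappings() links the original output symbols to the two-stage symbols, which is
    // why the writer's descriptor is remapped via result.getDescriptor().map(parts.getMappings()::get).
}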
Use of io.prestosql.sql.planner.plan.StatisticAggregations in project hetu-core by openlookeng.
The class StatisticsAggregationPlanner, method createStatisticsAggregation.
public TableStatisticAggregation createStatisticsAggregation(TableStatisticsMetadata statisticsMetadata, Map<String, Symbol> columnToSymbolMap)
{
    StatisticAggregationsDescriptor.Builder<Symbol> descriptor = StatisticAggregationsDescriptor.builder();
    List<String> groupingColumns = statisticsMetadata.getGroupingColumns();
    List<Symbol> groupingSymbols = groupingColumns.stream()
            .map(columnToSymbolMap::get)
            .collect(toImmutableList());
    for (int i = 0; i < groupingSymbols.size(); i++) {
        descriptor.addGrouping(groupingColumns.get(i), groupingSymbols.get(i));
    }
    ImmutableMap.Builder<Symbol, AggregationNode.Aggregation> aggregations = ImmutableMap.builder();
    StandardFunctionResolution functionResolution = new FunctionResolution(metadata.getFunctionAndTypeManager());
    for (TableStatisticType type : statisticsMetadata.getTableStatistics()) {
        if (type != ROW_COUNT) {
            throw new PrestoException(NOT_SUPPORTED, "Table-wide statistic type not supported: " + type);
        }
        // ROW_COUNT maps to a count() aggregation over all rows
        AggregationNode.Aggregation aggregation = new AggregationNode.Aggregation(
                new CallExpression("count", functionResolution.countFunction(), BIGINT, ImmutableList.of(), Optional.empty()),
                ImmutableList.of(),
                false,
                Optional.empty(),
                Optional.empty(),
                Optional.empty());
        Symbol symbol = planSymbolAllocator.newSymbol("rowCount", BIGINT);
        aggregations.put(symbol, aggregation);
        descriptor.addTableStatistic(ROW_COUNT, symbol);
    }
    for (ColumnStatisticMetadata columnStatisticMetadata : statisticsMetadata.getColumnStatistics()) {
        String columnName = columnStatisticMetadata.getColumnName();
        ColumnStatisticType statisticType = columnStatisticMetadata.getStatisticType();
        Symbol inputSymbol = columnToSymbolMap.get(columnName);
        verify(inputSymbol != null, "inputSymbol is null");
        Type inputType = planSymbolAllocator.getTypes().get(inputSymbol);
        verify(inputType != null, "inputType is null for symbol: %s", inputSymbol);
        ColumnStatisticsAggregation aggregation = createColumnAggregation(statisticType, inputSymbol, inputType);
        Symbol symbol = planSymbolAllocator.newSymbol(statisticType + ":" + columnName, aggregation.getOutputType());
        aggregations.put(symbol, aggregation.getAggregation());
        descriptor.addColumnStatistic(columnStatisticMetadata, symbol);
    }
    StatisticAggregations aggregation = new StatisticAggregations(aggregations.build(), groupingSymbols);
    return new TableStatisticAggregation(aggregation, descriptor.build());
}
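createTableWriterPlan above shows the intended call pattern for this method. A condensed sketch, assuming columnNames and symbols are parallel lists (as verify-ed there) and that zip comes from Guava's Streams:

// Build the column-name -> symbol map the same way createTableWriterPlan does,
// then plan the statistics aggregation for the write.
Map<String, Symbol> columnToSymbolMap = zip(columnNames.stream(), symbols.stream(), SimpleImmutableEntry::new)
        .collect(toImmutableMap(Entry::getKey, Entry::getValue));
TableStatisticAggregation result = statisticsAggregationPlanner.createStatisticsAggregation(statisticsMetadata, columnToSymbolMap);
StatisticAggregations aggregations = result.getAggregations();                // aggregations to plan
StatisticAggregationsDescriptor<Symbol> descriptor = result.getDescriptor(); // maps statistics back to symbols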