Use of io.prestosql.sql.planner.TypeAnalyzer in project hetu-core by openlookeng.
The class SimplifyExpressions, method createRewrite.
private static ExpressionRewriter createRewrite(Metadata metadata, TypeAnalyzer typeAnalyzer)
{
    requireNonNull(metadata, "metadata is null");
    requireNonNull(typeAnalyzer, "typeAnalyzer is null");
    LiteralEncoder literalEncoder = new LiteralEncoder(metadata);
    return (expression, context) -> rewrite(expression, context.getSession(), context.getSymbolAllocator(), metadata, literalEncoder, typeAnalyzer);
}
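For context, a sketch of how this rewriter is typically wired up: in the Presto/openLooKeng codebase, SimplifyExpressions extends ExpressionRewriteRuleSet, handing the rewriter to the parent constructor so it is applied to every expression-bearing plan node during iterative optimization. The constructor shape below is assumed from that pattern, not copied from hetu-core:

public class SimplifyExpressions
        extends ExpressionRewriteRuleSet
{
    public SimplifyExpressions(Metadata metadata, TypeAnalyzer typeAnalyzer)
    {
        // The rule set applies the rewriter to projections, filters, joins, and
        // other nodes that carry expressions
        super(createRewrite(metadata, typeAnalyzer));
    }
}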
Use of io.prestosql.sql.planner.TypeAnalyzer in project hetu-core by openlookeng.
The class ExtractSpatialJoins, method tryCreateSpatialJoin.
private static Result tryCreateSpatialJoin(
        Context context,
        JoinNode joinNode,
        RowExpression filter,
        PlanNodeId nodeId,
        List<Symbol> outputSymbols,
        CallExpression spatialFunction,
        Optional<RowExpression> radius,
        Metadata metadata,
        SplitManager splitManager,
        PageSourceManager pageSourceManager,
        TypeAnalyzer typeAnalyzer)
{
    // TODO Add support for distributed left spatial joins
    Optional<String> spatialPartitioningTableName = joinNode.getType() == INNER ? getSpatialPartitioningTableName(context.getSession()) : Optional.empty();
    Optional<KdbTree> kdbTree = spatialPartitioningTableName.map(tableName -> loadKdbTree(tableName, context.getSession(), metadata, splitManager, pageSourceManager, nodeId));
    List<RowExpression> arguments = spatialFunction.getArguments();
    verify(arguments.size() == 2);
    RowExpression firstArgument = arguments.get(0);
    RowExpression secondArgument = arguments.get(1);
    Type sphericalGeographyType = metadata.getType(SPHERICAL_GEOGRAPHY_TYPE_SIGNATURE);
    if (firstArgument.getType().equals(sphericalGeographyType) || secondArgument.getType().equals(sphericalGeographyType)) {
        if (joinNode.getType() != INNER) {
            return Result.empty();
        }
    }
    Set<Symbol> firstSymbols = extractUnique(firstArgument);
    Set<Symbol> secondSymbols = extractUnique(secondArgument);
    if (firstSymbols.isEmpty() || secondSymbols.isEmpty()) {
        return Result.empty();
    }
    Optional<Symbol> newFirstSymbol = newGeometrySymbol(context, firstArgument);
    Optional<Symbol> newSecondSymbol = newGeometrySymbol(context, secondArgument);
    PlanNode leftNode = joinNode.getLeft();
    PlanNode rightNode = joinNode.getRight();
    PlanNode newLeftNode;
    PlanNode newRightNode;
    // Check if the order of arguments of the spatial function matches the order of join sides
    int alignment = checkAlignment(joinNode, firstSymbols, secondSymbols);
    if (alignment > 0) {
        newLeftNode = newFirstSymbol.map(symbol -> addProjection(context, leftNode, symbol, firstArgument)).orElse(leftNode);
        newRightNode = newSecondSymbol.map(symbol -> addProjection(context, rightNode, symbol, secondArgument)).orElse(rightNode);
    }
    else if (alignment < 0) {
        newLeftNode = newSecondSymbol.map(symbol -> addProjection(context, leftNode, symbol, secondArgument)).orElse(leftNode);
        newRightNode = newFirstSymbol.map(symbol -> addProjection(context, rightNode, symbol, firstArgument)).orElse(rightNode);
    }
    else {
        return Result.empty();
    }
    RowExpression newFirstArgument = mapToExpression(newFirstSymbol, firstArgument, context);
    RowExpression newSecondArgument = mapToExpression(newSecondSymbol, secondArgument, context);
    Optional<Symbol> leftPartitionSymbol = Optional.empty();
    Optional<Symbol> rightPartitionSymbol = Optional.empty();
    if (kdbTree.isPresent()) {
        leftPartitionSymbol = Optional.of(context.getSymbolAllocator().newSymbol("pid", INTEGER));
        rightPartitionSymbol = Optional.of(context.getSymbolAllocator().newSymbol("pid", INTEGER));
        if (alignment > 0) {
            newLeftNode = addPartitioningNodes(metadata, context, newLeftNode, leftPartitionSymbol.get(), kdbTree.get(), newFirstArgument, Optional.empty());
            newRightNode = addPartitioningNodes(metadata, context, newRightNode, rightPartitionSymbol.get(), kdbTree.get(), newSecondArgument, radius);
        }
        else {
            newLeftNode = addPartitioningNodes(metadata, context, newLeftNode, leftPartitionSymbol.get(), kdbTree.get(), newSecondArgument, Optional.empty());
            newRightNode = addPartitioningNodes(metadata, context, newRightNode, rightPartitionSymbol.get(), kdbTree.get(), newFirstArgument, radius);
        }
    }
    CallExpression newSpatialFunction = new CallExpression(spatialFunction.getDisplayName(), spatialFunction.getFunctionHandle(), spatialFunction.getType(), ImmutableList.of(newFirstArgument, newSecondArgument), Optional.empty());
    RowExpression newFilter = replaceExpression(filter, ImmutableMap.of(spatialFunction, newSpatialFunction));
    return Result.ofPlanNode(new SpatialJoinNode(
            nodeId,
            SpatialJoinNode.Type.fromJoinNodeType(joinNode.getType()),
            newLeftNode,
            newRightNode,
            outputSymbols,
            newFilter,
            leftPartitionSymbol,
            rightPartitionSymbol,
            kdbTree.map(KdbTreeUtils::toJson)));
}
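The checkAlignment helper called above is not shown on this page. A sketch consistent with how its result is consumed (positive when the spatial function's first argument draws only on the left join side, negative when the arguments are reversed, zero when a spatial join cannot be formed); the parameter names and exact body are assumptions:

private static int checkAlignment(JoinNode joinNode, Set<Symbol> maybeLeftSymbols, Set<Symbol> maybeRightSymbols)
{
    List<Symbol> leftSymbols = joinNode.getLeft().getOutputSymbols();
    List<Symbol> rightSymbols = joinNode.getRight().getOutputSymbols();
    if (leftSymbols.containsAll(maybeLeftSymbols) && rightSymbols.containsAll(maybeRightSymbols)) {
        return 1;   // arguments already match the join sides
    }
    if (leftSymbols.containsAll(maybeRightSymbols) && rightSymbols.containsAll(maybeLeftSymbols)) {
        return -1;  // arguments are swapped relative to the join sides
    }
    return 0;       // an argument references both sides; give up on the rewrite
}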
Use of io.prestosql.sql.planner.TypeAnalyzer in project hetu-core by openlookeng.
The class TestValidateStreamingAggregations, method setup.
@BeforeClass
public void setup()
{
    metadata = getQueryRunner().getMetadata();
    typeAnalyzer = new TypeAnalyzer(getQueryRunner().getSqlParser(), metadata);
    CatalogName catalogName = getCurrentConnectorId();
    nationTableHandle = new TableHandle(
            catalogName,
            new TpchTableHandle("nation", 1.0),
            TpchTransactionHandle.INSTANCE,
            Optional.empty());
}
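Once constructed, the TypeAnalyzer resolves expression types against a symbol-to-type mapping, which is what the validator under test relies on. A minimal sketch, assuming prestosql's TypeAnalyzer.getType(Session, TypeProvider, Expression) signature and a test session already in scope:

// Resolve the type of a symbol reference the way a plan validator would
TypeProvider types = TypeProvider.copyOf(ImmutableMap.of(new Symbol("nationkey"), BIGINT));
Expression expression = new SymbolReference("nationkey");
Type resolved = typeAnalyzer.getType(session, types, expression);
// resolved is BIGINT; a mismatch here would indicate an inconsistent plan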
Use of io.prestosql.sql.planner.TypeAnalyzer in project hetu-core by openlookeng.
The class TestReorderWindows, method assertUnitPlan.
private void assertUnitPlan(@Language("SQL") String sql, PlanMatchPattern pattern)
{
    List<PlanOptimizer> optimizers = ImmutableList.of(
            new UnaliasSymbolReferences(getQueryRunner().getMetadata()),
            new IterativeOptimizer(
                    new RuleStatsRecorder(),
                    getQueryRunner().getStatsCalculator(),
                    getQueryRunner().getCostCalculator(),
                    new TranslateExpressions(getMetadata(), getQueryRunner().getSqlParser()).rules(getMetadata())),
            new PredicatePushDown(
                    getQueryRunner().getMetadata(),
                    new TypeAnalyzer(getQueryRunner().getSqlParser(), getQueryRunner().getMetadata()),
                    new PlanOptimizers.CostCalculationHandle(getQueryRunner().getStatsCalculator(), getQueryRunner().getCostCalculator(), null),
                    false,
                    false,
                    false),
            new IterativeOptimizer(
                    new RuleStatsRecorder(),
                    getQueryRunner().getStatsCalculator(),
                    getQueryRunner().getEstimatedExchangesCostCalculator(),
                    ImmutableSet.of(
                            new RemoveRedundantIdentityProjections(),
                            new GatherAndMergeWindows.SwapAdjacentWindowsBySpecifications(0),
                            new GatherAndMergeWindows.SwapAdjacentWindowsBySpecifications(1),
                            new GatherAndMergeWindows.SwapAdjacentWindowsBySpecifications(2))),
            new PruneUnreferencedOutputs());
    assertPlan(sql, pattern, optimizers);
}
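A hypothetical invocation, to show how the helper is driven. The query and match pattern are chosen purely for illustration and follow Presto's PlanMatchPattern DSL (anyTree, window, specification, tableScan); the exact matcher arguments are assumptions, not a test from hetu-core:

assertUnitPlan(
        "SELECT sum(quantity) OVER (PARTITION BY suppkey) FROM lineitem",
        anyTree(
                window(windowBuilder -> windowBuilder
                                .specification(specification(ImmutableList.of("suppkey"), ImmutableList.of(), ImmutableMap.of())),
                        anyTree(tableScan("lineitem")))));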
Use of io.prestosql.sql.planner.TypeAnalyzer in project hetu-core by openlookeng.
The class CachedSqlQueryExecution, method createPlan.
@Override
protected Plan createPlan(
        Analysis analysis,
        Session session,
        List<PlanOptimizer> planOptimizers,
        PlanNodeIdAllocator idAllocator,
        Metadata metadata,
        TypeAnalyzer typeAnalyzer,
        StatsCalculator statsCalculator,
        CostCalculator costCalculator,
        WarningCollector warningCollector)
{
    Statement statement = analysis.getStatement();
    // Get relevant Session properties which may affect the resulting execution plan
    // (property name to property value mapping)
    Map<String, Object> systemSessionProperties = new HashMap<>();
    SystemSessionProperties sessionProperties = new SystemSessionProperties();
    for (PropertyMetadata<?> property : sessionProperties.getSessionProperties()) {
        systemSessionProperties.put(property.getName(), session.getSystemProperty(property.getName(), property.getJavaType()));
    }
    // If the original statement before rewriting is CreateIndex or UpdateIndex, set the session
    // to let the connector know that pageMetadata should be enabled
    if (analysis.getOriginalStatement() instanceof CreateIndex || analysis.getOriginalStatement() instanceof UpdateIndex) {
        session.setPageMetadataEnabled(true);
    }
    // Build list of fully qualified table names
    List<String> tableNames = new ArrayList<>();
    Map<String, TableStatistics> tableStatistics = new HashMap<>();
    // Map column name to column type to detect column type changes between queries more easily
    Map<String, Type> columnTypes = new HashMap<>();
    // Cacheable conditions:
    // 1. Caching must be enabled globally
    // 2. Caching must be enabled in the session
    // 3. There must not be any parameters in the query
    //    TODO: remove requirement for empty params and implement parameter rewrite
    // 4. Methods in ConnectorTableHandle and ConnectorMetadata must be
    //    overridden to allow access to fully qualified table names and column names
    // 5. Statement must be an instance of Query and not contain CurrentX functions
    boolean cacheable = this.cache.isPresent()
            && isExecutionPlanCacheEnabled(session)
            && analysis.getParameters().isEmpty()
            && validateAndExtractTableAndColumns(analysis, metadata, session, tableNames, tableStatistics, columnTypes)
            && isCacheable(statement)
            // CREATE INDEX and UPDATE INDEX statements should not be cached
            && !(analysis.getOriginalStatement() instanceof CreateIndex || analysis.getOriginalStatement() instanceof UpdateIndex);
    cacheable = cacheable && !tableNames.isEmpty();
    if (!cacheable) {
        return super.createPlan(analysis, session, planOptimizers, idAllocator, metadata, typeAnalyzer, statsCalculator, costCalculator, warningCollector);
    }
    List<String> optimizers = new ArrayList<>();
    // Build list of enabled optimizers and rules for the cache key
    for (PlanOptimizer planOptimizer : planOptimizers) {
        if (planOptimizer instanceof IterativeOptimizer) {
            IterativeOptimizer iterativeOptimizer = (IterativeOptimizer) planOptimizer;
            Set<Rule<?>> rules = iterativeOptimizer.getRules();
            for (Rule rule : rules) {
                if (OptimizerUtils.isEnabledRule(rule, session)) {
                    optimizers.add(rule.getClass().getSimpleName());
                }
            }
        }
        else {
            if (OptimizerUtils.isEnabledLegacy(planOptimizer, session)) {
                optimizers.add(planOptimizer.getClass().getSimpleName());
            }
        }
    }
    Set<String> connectors = tableNames.stream().map(table -> table.substring(0, table.indexOf("."))).collect(Collectors.toSet());
    connectors.stream().forEach(connector -> {
        for (Map.Entry<String, String> property : session.getConnectorProperties(new CatalogName(connector)).entrySet()) {
            systemSessionProperties.put(connector + "." + property.getKey(), property.getValue());
        }
    });
    Plan plan;
    // TODO: Traverse the statement to build the key, then combine tables/optimizers, etc.
    int key = SqlQueryExecutionCacheKeyGenerator.buildKey((Query) statement, tableNames, optimizers, columnTypes, session.getTimeZoneKey(), systemSessionProperties);
    CachedSqlQueryExecutionPlan cachedPlan = this.cache.get().getIfPresent(key);
    HetuLogicalPlanner logicalPlanner = new HetuLogicalPlanner(session, planOptimizers, idAllocator, metadata, typeAnalyzer, statsCalculator, costCalculator, warningCollector);
    PlanNode root;
    plan = cachedPlan != null ? cachedPlan.getPlan() : null;
    // Reuse the cached plan only if the time zone, statement, transaction, and user match,
    // since a cached plan is not valid across contexts that rely on system time
    if (plan != null && cachedPlan.getTimeZoneKey().equals(session.getTimeZoneKey()) && cachedPlan.getStatement().equals(statement) && session.getTransactionId().isPresent() && cachedPlan.getIdentity().getUser().equals(session.getIdentity().getUser())) {
        // TODO: traverse the statement and accept partial match
        root = plan.getRoot();
        boolean isValidCachePlan = tablesMatch(root, analysis.getTables());
        try {
            if (!isEqualBasicStatistics(cachedPlan.getTableStatistics(), tableStatistics, tableNames) || !isValidCachePlan) {
                for (TableHandle tableHandle : analysis.getTables()) {
                    tableStatistics.replace(tableHandle.getFullyQualifiedName(), metadata.getTableStatistics(session, tableHandle, Constraint.alwaysTrue(), true));
                }
                if (!cachedPlan.getTableStatistics().equals(tableStatistics) || !isValidCachePlan) {
                    // Tables have changed, therefore the cached plan may no longer be applicable
                    throw new NoSuchElementException();
                }
            }
            // TableScanNode may contain the old transaction id.
            // The following logic rewrites the logical plan by replacing each TableScanNode with a
            // new TableScanNode that contains the new transaction id from the session.
            root = SimplePlanRewriter.rewriteWith(new TableHandleRewriter(session, analysis, metadata), root);
        }
        catch (NoSuchElementException e) {
            // Cached plan is outdated: invalidate the cache and build a new plan
            this.cache.get().invalidateAll();
            plan = createAndCachePlan(key, logicalPlanner, statement, tableNames, tableStatistics, optimizers, analysis, columnTypes, systemSessionProperties);
            root = plan.getRoot();
        }
    }
    else {
        // Build a new plan
        for (TableHandle tableHandle : analysis.getTables()) {
            tableStatistics.replace(tableHandle.getFullyQualifiedName(), metadata.getTableStatistics(session, tableHandle, Constraint.alwaysTrue(), true));
        }
        plan = createAndCachePlan(key, logicalPlanner, statement, tableNames, tableStatistics, optimizers, analysis, columnTypes, systemSessionProperties);
        root = plan.getRoot();
    }
    // The BeginTableWrite optimizer must be run at the end as the last optimization
    // due to a hack the Hetu community added, which also serves to update
    // metadata in the nodes
    root = this.beginTableWrite.optimize(root, session, null, null, null, null);
    plan = update(plan, root);
    return plan;
}
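The cache consulted above behaves like a Guava Cache (com.google.common.cache) keyed by the composite integer key; createPlan only relies on getIfPresent and invalidateAll. A minimal sketch of that interaction, with the construction and size bound assumed (hetu-core's actual cache configuration may differ):

// Hypothetical construction of the plan cache
Cache<Integer, CachedSqlQueryExecutionPlan> planCache = CacheBuilder.newBuilder()
        .maximumSize(10_000)   // assumed bound on the number of cached plans
        .build();

CachedSqlQueryExecutionPlan hit = planCache.getIfPresent(key);
if (hit == null) {
    // miss: plan the query, then store under the same key so later
    // identical queries can reuse the plan
}
planCache.invalidateAll();     // wholesale eviction when a cached plan turns out stale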