use of io.prestosql.spi.plan.TableScanNode in project hetu-core by openlookeng.
the class TestSourcePartitionedScheduler method createPlan.
private static StageExecutionPlan createPlan(ConnectorSplitSource splitSource)
{
    Symbol symbol = new Symbol("column");

    // table scan with splitCount splits
    PlanNodeId tableScanNodeId = new PlanNodeId("plan_id");
    TableScanNode tableScan = TableScanNode.newInstance(
            tableScanNodeId,
            TEST_TABLE_HANDLE,
            ImmutableList.of(symbol),
            ImmutableMap.of(symbol, new TestingColumnHandle("column")),
            ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_DEFAULT,
            new UUID(0, 0),
            0,
            false);
    RemoteSourceNode remote = new RemoteSourceNode(
            new PlanNodeId("remote_id"),
            new PlanFragmentId("plan_fragment_id"),
            ImmutableList.of(),
            Optional.empty(),
            GATHER);
    PlanFragment testFragment = new PlanFragment(
            new PlanFragmentId("plan_id"),
            new JoinNode(
                    new PlanNodeId("join_id"),
                    INNER,
                    tableScan,
                    remote,
                    ImmutableList.of(),
                    ImmutableList.<Symbol>builder()
                            .addAll(tableScan.getOutputSymbols())
                            .addAll(remote.getOutputSymbols())
                            .build(),
                    Optional.empty(),
                    Optional.empty(),
                    Optional.empty(),
                    Optional.empty(),
                    Optional.empty(),
                    ImmutableMap.of()),
            ImmutableMap.of(symbol, VARCHAR),
            SOURCE_DISTRIBUTION,
            ImmutableList.of(tableScanNodeId),
            new PartitioningScheme(Partitioning.create(SINGLE_DISTRIBUTION, ImmutableList.of()), ImmutableList.of(symbol)),
            ungroupedExecution(),
            StatsAndCosts.empty(),
            Optional.empty(),
            Optional.empty(),
            Optional.empty());

    return new StageExecutionPlan(
            testFragment,
            ImmutableMap.of(tableScanNodeId, new ConnectorAwareSplitSource(CONNECTOR_ID, splitSource)),
            ImmutableList.of(),
            ImmutableMap.of(tableScanNodeId, new TableInfo(new QualifiedObjectName("test", "test", "test"), TupleDomain.all())));
}
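Any ConnectorSplitSource can be passed into this helper. A minimal caller sketch, assuming the SPI's FixedSplitSource; the empty split list is purely illustrative (the test itself presumably supplies a source with splitCount splits, as the comment above suggests):

// Hypothetical usage sketch, not taken from the hetu-core test:
// FixedSplitSource is the SPI's simple in-memory ConnectorSplitSource.
ConnectorSplitSource splitSource = new FixedSplitSource(ImmutableList.<ConnectorSplit>of());
StageExecutionPlan stagePlan = createPlan(splitSource);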
use of io.prestosql.spi.plan.TableScanNode in project hetu-core by openlookeng.
the class CachedSqlQueryExecution method getTableScanNodes.
private static List<PlanNode> getTableScanNodes(PlanNode planNode)
{
    // Breadth-first traversal of the plan tree, collecting every TableScanNode reached from the root.
    List<PlanNode> result = new LinkedList<>();
    Queue<PlanNode> queue = new LinkedList<>();
    queue.add(planNode);
    while (!queue.isEmpty()) {
        PlanNode node = queue.poll();
        if (node instanceof TableScanNode) {
            result.add(node);
        }
        queue.addAll(node.getSources());
    }
    return result;
}
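The same breadth-first walk can be written once and reused for any node type. A minimal sketch under that assumption; collectNodes is an illustrative helper name, not part of hetu-core:

// Hypothetical generalization of the traversal above: collect every plan node of a given type.
private static <T extends PlanNode> List<T> collectNodes(PlanNode root, Class<T> type)
{
    List<T> result = new LinkedList<>();
    Queue<PlanNode> queue = new LinkedList<>();
    queue.add(root);
    while (!queue.isEmpty()) {
        PlanNode node = queue.poll();
        if (type.isInstance(node)) {
            result.add(type.cast(node));
        }
        queue.addAll(node.getSources());
    }
    return result;
}

// e.g. collectNodes(planNode, TableScanNode.class) behaves like getTableScanNodes(planNode), with a typed result.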
use of io.prestosql.spi.plan.TableScanNode in project hetu-core by openlookeng.
the class CachedSqlQueryExecution method createPlan.
@Override
protected Plan createPlan(Analysis analysis, Session session, List<PlanOptimizer> planOptimizers,
        PlanNodeIdAllocator idAllocator, Metadata metadata, TypeAnalyzer typeAnalyzer,
        StatsCalculator statsCalculator, CostCalculator costCalculator, WarningCollector warningCollector)
{
    Statement statement = analysis.getStatement();

    // Get the Session properties which may affect the resulting execution plan
    // (property name to property value mapping)
    Map<String, Object> systemSessionProperties = new HashMap<>();
    SystemSessionProperties sessionProperties = new SystemSessionProperties();
    for (PropertyMetadata<?> property : sessionProperties.getSessionProperties()) {
        systemSessionProperties.put(property.getName(), session.getSystemProperty(property.getName(), property.getJavaType()));
    }

    // If the original statement before rewriting is CreateIndex or UpdateIndex, set the session flag
    // so the connector knows that pageMetadata should be enabled
    if (analysis.getOriginalStatement() instanceof CreateIndex || analysis.getOriginalStatement() instanceof UpdateIndex) {
        session.setPageMetadataEnabled(true);
    }

    // Build the list of fully qualified table names
    List<String> tableNames = new ArrayList<>();
    Map<String, TableStatistics> tableStatistics = new HashMap<>();
    // Map column name to column type to detect column type changes between queries more easily
    Map<String, Type> columnTypes = new HashMap<>();

    // Cacheable conditions:
    // 1. Caching must be enabled globally
    // 2. Caching must be enabled in the session
    // 3. There must not be any parameters in the query
    //    TODO: remove requirement for empty params and implement parameter rewrite
    // 4. Methods in ConnectorTableHandle and ConnectorMetadata must be
    //    overridden to allow access to fully qualified table names and column names
    // 5. Statement must be an instance of Query and must not contain CurrentX functions
    boolean cacheable = this.cache.isPresent()
            && isExecutionPlanCacheEnabled(session)
            && analysis.getParameters().isEmpty()
            && validateAndExtractTableAndColumns(analysis, metadata, session, tableNames, tableStatistics, columnTypes)
            && isCacheable(statement)
            // CREATE INDEX and UPDATE INDEX should not be cached
            && !(analysis.getOriginalStatement() instanceof CreateIndex || analysis.getOriginalStatement() instanceof UpdateIndex);
    cacheable = cacheable && !tableNames.isEmpty();
    if (!cacheable) {
        return super.createPlan(analysis, session, planOptimizers, idAllocator, metadata, typeAnalyzer, statsCalculator, costCalculator, warningCollector);
    }

    // Build the list of enabled optimizers and rules for the cache key
    List<String> optimizers = new ArrayList<>();
    for (PlanOptimizer planOptimizer : planOptimizers) {
        if (planOptimizer instanceof IterativeOptimizer) {
            IterativeOptimizer iterativeOptimizer = (IterativeOptimizer) planOptimizer;
            Set<Rule<?>> rules = iterativeOptimizer.getRules();
            for (Rule rule : rules) {
                if (OptimizerUtils.isEnabledRule(rule, session)) {
                    optimizers.add(rule.getClass().getSimpleName());
                }
            }
        }
        else {
            if (OptimizerUtils.isEnabledLegacy(planOptimizer, session)) {
                optimizers.add(planOptimizer.getClass().getSimpleName());
            }
        }
    }

    Set<String> connectors = tableNames.stream()
            .map(table -> table.substring(0, table.indexOf(".")))
            .collect(Collectors.toSet());
    connectors.stream().forEach(connector -> {
        for (Map.Entry<String, String> property : session.getConnectorProperties(new CatalogName(connector)).entrySet()) {
            systemSessionProperties.put(connector + "." + property.getKey(), property.getValue());
        }
    });

    Plan plan;
    // TODO: Traverse the statement to build the key, then combine tables/optimizers, etc.
    int key = SqlQueryExecutionCacheKeyGenerator.buildKey((Query) statement, tableNames, optimizers, columnTypes, session.getTimeZoneKey(), systemSessionProperties);
    CachedSqlQueryExecutionPlan cachedPlan = this.cache.get().getIfPresent(key);
    HetuLogicalPlanner logicalPlanner = new HetuLogicalPlanner(session, planOptimizers, idAllocator, metadata, typeAnalyzer, statsCalculator, costCalculator, warningCollector);
    PlanNode root;
    plan = cachedPlan != null ? cachedPlan.getPlan() : null;

    // Only reuse the cached plan when the statement, time zone, transaction, and identity all match
    // (the isCacheable check above already excludes statements with CurrentX functions that rely on system time)
    if (plan != null
            && cachedPlan.getTimeZoneKey().equals(session.getTimeZoneKey())
            && cachedPlan.getStatement().equals(statement)
            && session.getTransactionId().isPresent()
            && cachedPlan.getIdentity().getUser().equals(session.getIdentity().getUser())) {
        // TODO: traverse the statement and accept partial match
        root = plan.getRoot();
        boolean isValidCachePlan = tablesMatch(root, analysis.getTables());
        try {
            if (!isEqualBasicStatistics(cachedPlan.getTableStatistics(), tableStatistics, tableNames) || !isValidCachePlan) {
                for (TableHandle tableHandle : analysis.getTables()) {
                    tableStatistics.replace(tableHandle.getFullyQualifiedName(), metadata.getTableStatistics(session, tableHandle, Constraint.alwaysTrue(), true));
                }
                if (!cachedPlan.getTableStatistics().equals(tableStatistics) || !isValidCachePlan) {
                    // Tables have changed, therefore the cached plan may no longer be applicable
                    throw new NoSuchElementException();
                }
            }
            // TableScanNode may contain the old transaction id.
            // The following logic rewrites the logical plan by replacing each TableScanNode with a new
            // TableScanNode that contains the new transaction id from the session.
            root = SimplePlanRewriter.rewriteWith(new TableHandleRewriter(session, analysis, metadata), root);
        }
        catch (NoSuchElementException e) {
            // Cached plan is outdated: invalidate the cache and build a new plan
            this.cache.get().invalidateAll();
            plan = createAndCachePlan(key, logicalPlanner, statement, tableNames, tableStatistics, optimizers, analysis, columnTypes, systemSessionProperties);
            root = plan.getRoot();
        }
    }
    else {
        // Build a new plan
        for (TableHandle tableHandle : analysis.getTables()) {
            tableStatistics.replace(tableHandle.getFullyQualifiedName(), metadata.getTableStatistics(session, tableHandle, Constraint.alwaysTrue(), true));
        }
        plan = createAndCachePlan(key, logicalPlanner, statement, tableNames, tableStatistics, optimizers, analysis, columnTypes, systemSessionProperties);
        root = plan.getRoot();
    }

    // BeginTableWrite must be run at the end, as the last optimization, due to a hack
    // the Hetu community added which also serves to update metadata in the nodes
    root = this.beginTableWrite.optimize(root, session, null, null, null, null);
    plan = update(plan, root);
    return plan;
}
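The cache interaction above is a plain lookup-or-invalidate pattern. A minimal sketch of that pattern, assuming a Guava Cache keyed by the generated int; the class, field, and method names here are illustrative, and hetu-core wraps this in its own cache abstraction rather than exposing the Guava cache directly:

import com.google.common.cache.Cache;
import com.google.common.cache.CacheBuilder;

// Illustrative sketch of the lookup-or-invalidate pattern, not the actual hetu-core wiring.
class PlanCacheSketch
{
    private final Cache<Integer, CachedSqlQueryExecutionPlan> planCache = CacheBuilder.newBuilder()
            .maximumSize(1000)
            .build();

    CachedSqlQueryExecutionPlan lookup(int key, Statement statement)
    {
        CachedSqlQueryExecutionPlan cached = planCache.getIfPresent(key);
        if (cached == null) {
            return null;                  // plain miss: the caller builds and caches a fresh plan
        }
        if (cached.getStatement().equals(statement)) {
            return cached;                // hit: the caller still rewrites transaction-scoped table handles
        }
        planCache.invalidateAll();        // stale entry: drop everything, as createPlan does above
        return null;
    }
}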
use of io.prestosql.spi.plan.TableScanNode in project hetu-core by openlookeng.
the class LocalQueryRunner method createDrivers.
private List<Driver> createDrivers(Session session, Plan plan, OutputFactory outputFactory, TaskContext taskContext)
{
    if (printPlan) {
        System.out.println(PlanPrinter.textLogicalPlan(plan.getRoot(), plan.getTypes(), metadata, plan.getStatsAndCosts(), session, 0, false));
    }

    SubPlan subplan = planFragmenter.createSubPlans(session, plan, true, WarningCollector.NOOP);
    if (!subplan.getChildren().isEmpty()) {
        throw new AssertionError("Expected subplan to have no children");
    }

    NodeInfo nodeInfo = new NodeInfo("test");
    FileSystemClientManager fileSystemClientManager = new FileSystemClientManager();
    SeedStoreManager seedStoreManager = new SeedStoreManager(fileSystemClientManager);
    StateStoreProvider stateStoreProvider = new LocalStateStoreProvider(seedStoreManager);
    LocalExecutionPlanner executionPlanner = new LocalExecutionPlanner(
            metadata,
            new TypeAnalyzer(sqlParser, metadata),
            Optional.empty(),
            pageSourceManager,
            indexManager,
            nodePartitioningManager,
            pageSinkManager,
            null,
            expressionCompiler,
            pageFunctionCompiler,
            joinFilterFunctionCompiler,
            new IndexJoinLookupStats(),
            this.taskManagerConfig,
            spillerFactory,
            singleStreamSpillerFactory,
            partitioningSpillerFactory,
            new PagesIndex.TestingFactory(false),
            joinCompiler,
            new LookupJoinOperators(),
            new OrderingCompiler(),
            nodeInfo,
            stateStoreProvider,
            new StateStoreListenerManager(stateStoreProvider),
            new DynamicFilterCacheManager(),
            heuristicIndexerManager,
            cubeManager);

    // plan query
    StageExecutionDescriptor stageExecutionDescriptor = subplan.getFragment().getStageExecutionDescriptor();
    LocalExecutionPlan localExecutionPlan = executionPlanner.plan(
            taskContext,
            stageExecutionDescriptor,
            subplan.getFragment().getRoot(),
            subplan.getFragment().getPartitioningScheme().getOutputLayout(),
            plan.getTypes(),
            subplan.getFragment().getPartitionedSources(),
            null,
            outputFactory,
            Optional.empty(),
            Optional.empty(),
            null);

    // generate sources
    List<TaskSource> sources = new ArrayList<>();
    long sequenceId = 0;
    for (TableScanNode tableScan : findTableScanNodes(subplan.getFragment().getRoot())) {
        TableHandle table = tableScan.getTable();
        SplitSource splitSource = splitManager.getSplits(
                session,
                table,
                stageExecutionDescriptor.isScanGroupedExecution(tableScan.getId()) ? GROUPED_SCHEDULING : UNGROUPED_SCHEDULING,
                null,
                Optional.empty(),
                Collections.emptyMap(),
                ImmutableSet.of(),
                tableScan.getStrategy() != ReuseExchangeOperator.STRATEGY.REUSE_STRATEGY_DEFAULT,
                tableScan.getId());
        ImmutableSet.Builder<ScheduledSplit> scheduledSplits = ImmutableSet.builder();
        while (!splitSource.isFinished()) {
            for (Split split : getNextBatch(splitSource)) {
                scheduledSplits.add(new ScheduledSplit(sequenceId++, tableScan.getId(), split));
            }
        }
        sources.add(new TaskSource(tableScan.getId(), scheduledSplits.build(), true));
    }

    // create drivers
    List<Driver> drivers = new ArrayList<>();
    Map<PlanNodeId, DriverFactory> driverFactoriesBySource = new HashMap<>();
    for (DriverFactory driverFactory : localExecutionPlan.getDriverFactories()) {
        for (int i = 0; i < driverFactory.getDriverInstances().orElse(1); i++) {
            if (driverFactory.getSourceId().isPresent()) {
                checkState(driverFactoriesBySource.put(driverFactory.getSourceId().get(), driverFactory) == null);
            }
            else {
                DriverContext driverContext = taskContext.addPipelineContext(driverFactory.getPipelineId(), driverFactory.isInputDriver(), driverFactory.isOutputDriver(), false).addDriverContext();
                Driver driver = driverFactory.createDriver(driverContext);
                drivers.add(driver);
            }
        }
    }

    // add sources to the drivers
    ImmutableSet<PlanNodeId> partitionedSources = ImmutableSet.copyOf(subplan.getFragment().getPartitionedSources());
    for (TaskSource source : sources) {
        DriverFactory driverFactory = driverFactoriesBySource.get(source.getPlanNodeId());
        checkState(driverFactory != null);
        boolean partitioned = partitionedSources.contains(driverFactory.getSourceId().get());
        for (ScheduledSplit split : source.getSplits()) {
            DriverContext driverContext = taskContext.addPipelineContext(driverFactory.getPipelineId(), driverFactory.isInputDriver(), driverFactory.isOutputDriver(), partitioned).addDriverContext();
            Driver driver = driverFactory.createDriver(driverContext);
            driver.updateSource(new TaskSource(split.getPlanNodeId(), ImmutableSet.of(split), true));
            drivers.add(driver);
        }
    }

    for (DriverFactory driverFactory : localExecutionPlan.getDriverFactories()) {
        driverFactory.noMoreDrivers();
    }
    return ImmutableList.copyOf(drivers);
}
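Callers of createDrivers still have to run the drivers. A minimal sketch of one way to do that on the calling thread, assuming only Driver#process() and Driver#isFinished(); this mirrors what a local runner typically does but is not the exact LocalQueryRunner loop:

// Hypothetical caller sketch: give each unfinished driver a chance to make progress
// until every driver reports isFinished(). Driver#process() is non-blocking, so the
// round-robin keeps interdependent pipelines moving.
boolean done = false;
while (!done) {
    done = true;
    for (Driver driver : drivers) {
        if (!driver.isFinished()) {
            driver.process();
            done = false;
        }
    }
}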
use of io.prestosql.spi.plan.TableScanNode in project hetu-core by openlookeng.
the class TestEffectivePredicateExtractor method testRightJoinWithFalseInner.
@Test
public void testRightJoinWithFalseInner()
{
    List<JoinNode.EquiJoinClause> criteria = ImmutableList.of(new JoinNode.EquiJoinClause(A, D));
    Map<Symbol, ColumnHandle> leftAssignments = Maps.filterKeys(scanAssignments, Predicates.in(ImmutableList.of(A, B, C)));
    TableScanNode leftScan = tableScanNode(leftAssignments);
    Map<Symbol, ColumnHandle> rightAssignments = Maps.filterKeys(scanAssignments, Predicates.in(ImmutableList.of(D, E, F)));
    TableScanNode rightScan = tableScanNode(rightAssignments);
    FilterNode left = filter(leftScan, FALSE_LITERAL);
    FilterNode right = filter(rightScan, and(equals(DE, EE), lessThan(FE, bigintLiteral(100))));
    PlanNode node = new JoinNode(
            newId(),
            JoinNode.Type.RIGHT,
            left,
            right,
            criteria,
            ImmutableList.<Symbol>builder()
                    .addAll(left.getOutputSymbols())
                    .addAll(right.getOutputSymbols())
                    .build(),
            Optional.empty(),
            Optional.empty(),
            Optional.empty(),
            Optional.empty(),
            Optional.empty(),
            ImmutableMap.of());
    Expression effectivePredicate = effectivePredicateExtractor.extract(SESSION, node, TypeProvider.empty(), typeAnalyzer);

    // The FALSE literal on the left (inner) side should be ignored; because the left columns of a
    // RIGHT join may be null-extended, the extracted predicate includes (A = D OR A IS NULL)
    assertEquals(
            normalizeConjuncts(effectivePredicate),
            normalizeConjunctsSet(equals(DE, EE), lessThan(FE, bigintLiteral(100)), or(equals(AE, DE), isNull(AE))));
}