Use of com.facebook.presto.spi.ConnectorPlanOptimizer in project presto by prestodb.
From the class InternalIcebergConnectorFactory, method createConnector:
public static Connector createConnector(String catalogName, Map<String, String> config, ConnectorContext context, Optional<ExtendedHiveMetastore> metastore)
{
    ClassLoader classLoader = InternalIcebergConnectorFactory.class.getClassLoader();
    try (ThreadContextClassLoader ignored = new ThreadContextClassLoader(classLoader)) {
        // Assemble the Guice modules for the Iceberg connector: metastore access, S3 support,
        // authentication, caching, and the engine services supplied through ConnectorContext.
        Bootstrap app = new Bootstrap(
                new EventModule(),
                new MBeanModule(),
                new JsonModule(),
                new IcebergModule(catalogName),
                new IcebergMetastoreModule(),
                new HiveS3Module(catalogName),
                new HiveAuthenticationModule(),
                new HiveMetastoreModule(catalogName, metastore),
                new CachingModule(),
                binder -> {
                    binder.bind(NodeVersion.class).toInstance(new NodeVersion(context.getNodeManager().getCurrentNode().getVersion()));
                    binder.bind(NodeManager.class).toInstance(context.getNodeManager());
                    binder.bind(TypeManager.class).toInstance(context.getTypeManager());
                    binder.bind(PageIndexerFactory.class).toInstance(context.getPageIndexerFactory());
                    binder.bind(StandardFunctionResolution.class).toInstance(context.getStandardFunctionResolution());
                    binder.bind(RowExpressionService.class).toInstance(context.getRowExpressionService());
                });
        Injector injector = app.doNotInitializeLogging().setRequiredConfigurationProperties(config).initialize();
        LifeCycleManager lifeCycleManager = injector.getInstance(LifeCycleManager.class);
        IcebergTransactionManager transactionManager = injector.getInstance(IcebergTransactionManager.class);
        IcebergMetadataFactory metadataFactory = injector.getInstance(IcebergMetadataFactory.class);
        ConnectorSplitManager splitManager = injector.getInstance(ConnectorSplitManager.class);
        ConnectorPageSourceProvider connectorPageSource = injector.getInstance(ConnectorPageSourceProvider.class);
        ConnectorPageSinkProvider pageSinkProvider = injector.getInstance(ConnectorPageSinkProvider.class);
        ConnectorNodePartitioningProvider connectorDistributionProvider = injector.getInstance(ConnectorNodePartitioningProvider.class);
        IcebergSessionProperties icebergSessionProperties = injector.getInstance(IcebergSessionProperties.class);
        IcebergTableProperties icebergTableProperties = injector.getInstance(IcebergTableProperties.class);
        Set<Procedure> procedures = injector.getInstance((Key<Set<Procedure>>) Key.get(Types.setOf(Procedure.class)));
        // The injector-built IcebergPlanOptimizer is handed to the connector as a ConnectorPlanOptimizer,
        // letting the engine invoke connector-specific plan rewrites (see ApplyConnectorOptimization below).
        ConnectorPlanOptimizer planOptimizer = injector.getInstance(IcebergPlanOptimizer.class);
        return new IcebergConnector(
                lifeCycleManager, transactionManager, metadataFactory,
                // Classloader-safe wrappers ensure engine callbacks run with the connector's classloader.
                new ClassLoaderSafeConnectorSplitManager(splitManager, classLoader),
                new ClassLoaderSafeConnectorPageSourceProvider(connectorPageSource, classLoader),
                new ClassLoaderSafeConnectorPageSinkProvider(pageSinkProvider, classLoader),
                new ClassLoaderSafeNodePartitioningProvider(connectorDistributionProvider, classLoader),
                ImmutableSet.of(),
                icebergSessionProperties.getSessionProperties(),
                IcebergSchemaProperties.SCHEMA_PROPERTIES,
                icebergTableProperties.getTableProperties(),
                new AllowAllAccessControl(),
                procedures,
                planOptimizer);
    }
}
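For reference, the IcebergPlanOptimizer bound above is one implementation of the ConnectorPlanOptimizer SPI. The sketch below is a minimal, hypothetical implementation: the class name is illustrative, and the parameter types and import paths are assumptions inferred from the optimize call visible in ApplyConnectorOptimization below. It demonstrates only the contract, not Iceberg's actual rewrite logic.

// Import paths are assumed from the prestodb SPI layout and may vary by version.
import com.facebook.presto.spi.ConnectorPlanOptimizer;
import com.facebook.presto.spi.ConnectorSession;
import com.facebook.presto.spi.VariableAllocator;
import com.facebook.presto.spi.plan.PlanNode;
import com.facebook.presto.spi.plan.PlanNodeIdAllocator;

public class ExamplePlanOptimizer
        implements ConnectorPlanOptimizer
{
    @Override
    public PlanNode optimize(PlanNode maxSubplan, ConnectorSession session, VariableAllocator variableAllocator, PlanNodeIdAllocator idAllocator)
    {
        // A real optimizer would rewrite the connector-owned subplan here (e.g. push a
        // filter into the table scan). Returning the input unchanged signals "no rewrite":
        // ApplyConnectorOptimization detects changes by reference equality (node != newNode).
        return maxSubplan;
    }
}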
Use of com.facebook.presto.spi.ConnectorPlanOptimizer in project presto by prestodb.
From the class ApplyConnectorOptimization, method optimize:
@Override
public PlanNode optimize(PlanNode plan, Session session, TypeProvider types, PlanVariableAllocator variableAllocator, PlanNodeIdAllocator idAllocator, WarningCollector warningCollector)
{
    requireNonNull(plan, "plan is null");
    requireNonNull(session, "session is null");
    requireNonNull(types, "types is null");
    requireNonNull(variableAllocator, "variableAllocator is null");
    requireNonNull(idAllocator, "idAllocator is null");
    Map<ConnectorId, Set<ConnectorPlanOptimizer>> connectorOptimizers = connectorOptimizersSupplier.get();
    if (connectorOptimizers.isEmpty()) {
        return plan;
    }
    // retrieve all the connectors referenced by the plan
    ImmutableSet.Builder<ConnectorId> connectorIds = ImmutableSet.builder();
    getAllConnectorIds(plan, connectorIds);
    // To preserve the fixed point, a table scan that an optimizer introduces for another
    // connector is treated as part of the current connector's subplan to maintain.
    for (ConnectorId connectorId : connectorIds.build()) {
        Set<ConnectorPlanOptimizer> optimizers = connectorOptimizers.get(connectorId);
        if (optimizers == null) {
            continue;
        }
        ImmutableMap.Builder<PlanNode, ConnectorPlanNodeContext> contextMapBuilder = ImmutableMap.builder();
        buildConnectorPlanNodeContext(plan, null, contextMapBuilder);
        Map<PlanNode, ConnectorPlanNodeContext> contextMap = contextMapBuilder.build();
        // keep track of changed nodes; the keys are original nodes and the values are the new nodes
        Map<PlanNode, PlanNode> updates = new HashMap<>();
        // process connector optimizers
        for (PlanNode node : contextMap.keySet()) {
            // For a subtree with root `node` to be a max closure, the following conditions must hold:
            // * The subtree with root `node` is a closure.
            // * `node` has no parent, or the subtree rooted at `node`'s parent is not a closure.
            ConnectorPlanNodeContext context = contextMap.get(node);
            if (!context.isClosure(connectorId) || !context.getParent().isPresent() || contextMap.get(context.getParent().get()).isClosure(connectorId)) {
                continue;
            }
            PlanNode newNode = node;
            // the returned node remains a max closure, provided the optimizer did not introduce
            // a new connector (which can happen, but is ignored here)
            for (ConnectorPlanOptimizer optimizer : optimizers) {
                newNode = optimizer.optimize(newNode, session.toConnectorSession(connectorId), variableAllocator, idAllocator);
            }
            if (node != newNode) {
                // the optimizer has allocated a new PlanNode
                checkState(
                        containsAll(ImmutableSet.copyOf(newNode.getOutputVariables()), node.getOutputVariables()),
                        "the connector optimizer from %s returns a node that does not cover all output before optimization",
                        connectorId);
                updates.put(node, newNode);
            }
        }
        // up to this point we have a set of updated nodes; their parents must be updated recursively.
        // alter the plan bottom-up (strict bottom-up order is not required for correctness);
        // use the original nodes to track the plan structure and `updates` to track the new nodes
        Queue<PlanNode> originalNodes = new LinkedList<>(updates.keySet());
        while (!originalNodes.isEmpty()) {
            PlanNode originalNode = originalNodes.poll();
            if (!contextMap.get(originalNode).getParent().isPresent()) {
                // originalNode must be the root; update the plan
                plan = updates.get(originalNode);
                continue;
            }
            PlanNode originalParent = contextMap.get(originalNode).getParent().get();
            // a child has changed, so create a new parent that points to the new child;
            // if a node has been updated it appears in `updates`, otherwise reuse the original node
            ImmutableList.Builder<PlanNode> newChildren = ImmutableList.builder();
            originalParent.getSources().forEach(child -> newChildren.add(updates.getOrDefault(child, child)));
            PlanNode newParent = originalParent.replaceChildren(newChildren.build());
            // mark the new parent as updated
            updates.put(originalParent, newParent);
            // enqueue the parent so its ancestors are updated recursively
            originalNodes.add(originalParent);
        }
    }
    return plan;
}
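The connectorOptimizers map consulted above is assembled from the optimizers each connector registers. As a hedged sketch of that wiring (assuming prestodb's ConnectorPlanOptimizerProvider SPI with separate logical- and physical-phase optimizer sets; the class name ExamplePlanOptimizerProvider and the import paths are assumptions, not from the source), a connector might publish the single optimizer created in InternalIcebergConnectorFactory like this:

// Import paths are assumed from the prestodb SPI layout and may vary by version.
import static java.util.Objects.requireNonNull;

import com.facebook.presto.spi.ConnectorPlanOptimizer;
import com.facebook.presto.spi.connector.ConnectorPlanOptimizerProvider;
import com.google.common.collect.ImmutableSet;
import java.util.Set;

public class ExamplePlanOptimizerProvider
        implements ConnectorPlanOptimizerProvider
{
    private final ConnectorPlanOptimizer planOptimizer;

    public ExamplePlanOptimizerProvider(ConnectorPlanOptimizer planOptimizer)
    {
        this.planOptimizer = requireNonNull(planOptimizer, "planOptimizer is null");
    }

    @Override
    public Set<ConnectorPlanOptimizer> getLogicalPlanOptimizers()
    {
        // Applied during logical planning, where ApplyConnectorOptimization hands each
        // connector the maximal subplan ("max closure") it owns.
        return ImmutableSet.of(planOptimizer);
    }

    @Override
    public Set<ConnectorPlanOptimizer> getPhysicalPlanOptimizers()
    {
        // No physical-phase rewrites in this sketch.
        return ImmutableSet.of();
    }
}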