Use of com.facebook.presto.sql.planner.PartitioningHandle in project presto by prestodb.
From the class TestLocalExchange, the method testCreatePartitionFunction:
@Test
public void testCreatePartitionFunction() {
    int partitionCount = 10;
    PartitioningProviderManager partitioningProviderManager = new PartitioningProviderManager();
    // register a connector partitioning provider whose bucket function sends every row to the same bucket
    partitioningProviderManager.addPartitioningProvider(new ConnectorId("prism"), new ConnectorNodePartitioningProvider() {
        @Override
        public ConnectorBucketNodeMap getBucketNodeMap(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle, List<Node> sortedNodes) {
            return createBucketNodeMap(Stream.generate(() -> sortedNodes).flatMap(List::stream).limit(10).collect(toImmutableList()), SOFT_AFFINITY);
        }

        @Override
        public ToIntFunction<ConnectorSplit> getSplitBucketFunction(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle) {
            return null;
        }

        @Override
        public BucketFunction getBucketFunction(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle, List<Type> partitionChannelTypes, int bucketCount) {
            return (Page page, int position) -> partitionCount;
        }

        @Override
        public int getBucketCount(ConnectorTransactionHandle transactionHandle, ConnectorSession session, ConnectorPartitioningHandle partitioningHandle) {
            return 10;
        }
    });
    // a connector (non-system) partitioning handle, so createPartitionFunction consults the provider registered above
    PartitioningHandle partitioningHandle = new PartitioningHandle(Optional.of(new ConnectorId("prism")), Optional.of(new ConnectorTransactionHandle() {
        @Override
        public int hashCode() {
            return super.hashCode();
        }

        @Override
        public boolean equals(Object obj) {
            return super.equals(obj);
        }
    }), new ConnectorPartitioningHandle() {
        @Override
        public boolean isSingleNode() {
            return false;
        }

        @Override
        public boolean isCoordinatorOnly() {
            return false;
        }
    });
    PartitionFunction partitionFunction = createPartitionFunction(partitioningProviderManager, session, partitioningHandle, 600, ImmutableList.of(), false);
    // the resulting function reports the connector's bucket count (10), not the requested partition count (600)
    assertEquals(partitionFunction.getPartitionCount(), partitionCount);
}
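As a point of reference, here is a minimal, self-contained sketch (hypothetical code, not part of Presto) of the bucket-to-partition idea the test exercises: a connector-style bucket function assigns each row a bucket, and a partition function folds those buckets onto the exchange's partitions. All names in the sketch are illustrative.

import java.util.function.IntUnaryOperator;

public class BucketToPartitionSketch {
    public static void main(String[] args) {
        int bucketCount = 10;      // what a provider's getBucketCount would report
        int partitionCount = 4;    // how many partitions the exchange actually has

        // round-robin assignment of buckets to partitions
        int[] bucketToPartition = new int[bucketCount];
        for (int bucket = 0; bucket < bucketCount; bucket++) {
            bucketToPartition[bucket] = bucket % partitionCount;
        }

        // the resulting mapping: row -> bucket -> partition
        IntUnaryOperator partitionOf = bucket -> bucketToPartition[bucket];
        for (int bucket = 0; bucket < bucketCount; bucket++) {
            System.out.printf("bucket %d -> partition %d%n", bucket, partitionOf.applyAsInt(bucket));
        }
    }
}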
Use of com.facebook.presto.sql.planner.PartitioningHandle in project presto by prestodb.
From the class SqlQueryScheduler, the method createStages:
private List<SqlStageExecution> createStages(Optional<SqlStageExecution> parent, AtomicInteger nextStageId, LocationFactory locationFactory, StageExecutionPlan plan, NodeScheduler nodeScheduler, RemoteTaskFactory remoteTaskFactory, Session session, int splitBatchSize, Function<PartitioningHandle, NodePartitionMap> partitioningCache, ExecutorService executor, NodeTaskMap nodeTaskMap, ImmutableMap.Builder<StageId, StageScheduler> stageSchedulers, ImmutableMap.Builder<StageId, StageLinkage> stageLinkages) {
    ImmutableList.Builder<SqlStageExecution> stages = ImmutableList.builder();
    StageId stageId = new StageId(queryStateMachine.getQueryId(), nextStageId.getAndIncrement());
    SqlStageExecution stage = new SqlStageExecution(stageId, locationFactory.createStageLocation(stageId), plan.getFragment(), remoteTaskFactory, session, summarizeTaskInfo, nodeTaskMap, executor, schedulerStats);
    stages.add(stage);
    Optional<int[]> bucketToPartition;
    PartitioningHandle partitioningHandle = plan.getFragment().getPartitioning();
    if (partitioningHandle.equals(SOURCE_DISTRIBUTION)) {
        // nodes are selected dynamically based on the constraints of the splits and the system load
        Entry<PlanNodeId, SplitSource> entry = Iterables.getOnlyElement(plan.getSplitSources().entrySet());
        ConnectorId connectorId = entry.getValue().getConnectorId();
        if (isInternalSystemConnector(connectorId)) {
            connectorId = null;
        }
        NodeSelector nodeSelector = nodeScheduler.createNodeSelector(connectorId);
        SplitPlacementPolicy placementPolicy = new DynamicSplitPlacementPolicy(nodeSelector, stage::getAllTasks);
        stageSchedulers.put(stageId, new SourcePartitionedScheduler(stage, entry.getKey(), entry.getValue(), placementPolicy, splitBatchSize));
        bucketToPartition = Optional.of(new int[1]);
    } else {
        // nodes are predetermined by the nodePartitionMap
        NodePartitionMap nodePartitionMap = partitioningCache.apply(plan.getFragment().getPartitioning());
        Map<PlanNodeId, SplitSource> splitSources = plan.getSplitSources();
        if (!splitSources.isEmpty()) {
            stageSchedulers.put(stageId, new FixedSourcePartitionedScheduler(stage, splitSources, plan.getFragment().getPartitionedSources(), nodePartitionMap, splitBatchSize, nodeScheduler.createNodeSelector(null)));
            bucketToPartition = Optional.of(nodePartitionMap.getBucketToPartition());
        } else {
            Map<Integer, Node> partitionToNode = nodePartitionMap.getPartitionToNode();
            // TODO: this should asynchronously wait a standard timeout period before failing
            checkCondition(!partitionToNode.isEmpty(), NO_NODES_AVAILABLE, "No worker nodes available");
            stageSchedulers.put(stageId, new FixedCountScheduler(stage, partitionToNode));
            bucketToPartition = Optional.of(nodePartitionMap.getBucketToPartition());
        }
    }
    ImmutableSet.Builder<SqlStageExecution> childStagesBuilder = ImmutableSet.builder();
    for (StageExecutionPlan subStagePlan : plan.getSubStages()) {
        List<SqlStageExecution> subTree = createStages(Optional.of(stage), nextStageId, locationFactory, subStagePlan.withBucketToPartition(bucketToPartition), nodeScheduler, remoteTaskFactory, session, splitBatchSize, partitioningCache, executor, nodeTaskMap, stageSchedulers, stageLinkages);
        stages.addAll(subTree);
        SqlStageExecution childStage = subTree.get(0);
        childStagesBuilder.add(childStage);
    }
    Set<SqlStageExecution> childStages = childStagesBuilder.build();
    stage.addStateChangeListener(newState -> {
        if (newState.isDone()) {
            childStages.forEach(SqlStageExecution::cancel);
        }
    });
    stageLinkages.put(stageId, new StageLinkage(plan.getFragment().getId(), parent, childStages));
    return stages.build();
}
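For readers skimming the branching above, here is a hedged, self-contained sketch of the scheduling decision with the Presto types replaced by plain placeholders: a SOURCE_DISTRIBUTION fragment gets dynamic split placement, a fixed-partitioning fragment with split sources gets a fixed-source scheduler, and a fragment with no splits gets one task per precomputed partition. The names below are illustrative, not Presto APIs.

import java.util.Optional;

public class SchedulerChoiceSketch {
    enum SchedulerKind { SOURCE_PARTITIONED, FIXED_SOURCE_PARTITIONED, FIXED_COUNT }

    static SchedulerKind chooseScheduler(boolean isSourceDistribution, boolean hasSplitSources) {
        if (isSourceDistribution) {
            return SchedulerKind.SOURCE_PARTITIONED;        // splits drive node selection dynamically
        }
        if (hasSplitSources) {
            return SchedulerKind.FIXED_SOURCE_PARTITIONED;  // splits exist, but node assignments are fixed
        }
        return SchedulerKind.FIXED_COUNT;                   // no splits; one task per precomputed partition
    }

    public static void main(String[] args) {
        System.out.println(chooseScheduler(true, true));    // SOURCE_PARTITIONED
        System.out.println(chooseScheduler(false, true));   // FIXED_SOURCE_PARTITIONED
        System.out.println(chooseScheduler(false, false));  // FIXED_COUNT
    }
}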
Use of com.facebook.presto.sql.planner.PartitioningHandle in project presto by prestodb.
From the class SectionExecutionFactory, the method createSectionExecutions:
/**
 * Returns a SectionExecution whose stage executions are in a postorder representation of the tree.
 */
public SectionExecution createSectionExecutions(Session session, StreamingPlanSection section, ExchangeLocationsConsumer locationsConsumer, Optional<int[]> bucketToPartition, OutputBuffers outputBuffers, boolean summarizeTaskInfo, RemoteTaskFactory remoteTaskFactory, SplitSourceFactory splitSourceFactory, int attemptId) {
    // Only fetch a distribution once per section to ensure all stages see the same machine assignments
    Map<PartitioningHandle, NodePartitionMap> partitioningCache = new HashMap<>();
    TableWriteInfo tableWriteInfo = createTableWriteInfo(section.getPlan(), metadata, session);
    List<StageExecutionAndScheduler> sectionStages = createStreamingLinkedStageExecutions(
            session,
            locationsConsumer,
            section.getPlan().withBucketToPartition(bucketToPartition),
            partitioningHandle -> partitioningCache.computeIfAbsent(partitioningHandle, handle -> nodePartitioningManager.getNodePartitioningMap(session, handle)),
            tableWriteInfo,
            Optional.empty(),
            summarizeTaskInfo,
            remoteTaskFactory,
            splitSourceFactory,
            attemptId);
    StageExecutionAndScheduler rootStage = getLast(sectionStages);
    rootStage.getStageExecution().setOutputBuffers(outputBuffers);
    return new SectionExecution(rootStage, sectionStages);
}
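The per-section partitioningCache above relies on Map.computeIfAbsent, so every stage within a section resolves a given PartitioningHandle to the same node assignment. A minimal sketch of that memoization pattern, with Strings standing in for PartitioningHandle and NodePartitionMap (all names are illustrative):

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

public class PartitioningCacheSketch {
    public static void main(String[] args) {
        Map<String, String> partitioningCache = new HashMap<>();

        // stand-in for nodePartitioningManager.getNodePartitioningMap(session, handle)
        Function<String, String> computeNodeMap = handle -> "nodes-for-" + handle + "@" + System.nanoTime();

        // two stages resolving the same handle within one section get the identical assignment
        String first = partitioningCache.computeIfAbsent("HASH_DISTRIBUTION", computeNodeMap);
        String second = partitioningCache.computeIfAbsent("HASH_DISTRIBUTION", computeNodeMap);
        System.out.println(first.equals(second)); // true: computed once, then reused
    }
}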
Use of com.facebook.presto.sql.planner.PartitioningHandle in project presto by prestodb.
From the class SectionExecutionFactory, the method createStreamingLinkedStageExecutions:
/**
 * Returns a List of StageExecutionAndSchedulers in a postorder representation of the tree.
 */
private List<StageExecutionAndScheduler> createStreamingLinkedStageExecutions(Session session, ExchangeLocationsConsumer parent, StreamingSubPlan plan, Function<PartitioningHandle, NodePartitionMap> partitioningCache, TableWriteInfo tableWriteInfo, Optional<SqlStageExecution> parentStageExecution, boolean summarizeTaskInfo, RemoteTaskFactory remoteTaskFactory, SplitSourceFactory splitSourceFactory, int attemptId) {
    ImmutableList.Builder<StageExecutionAndScheduler> stageExecutionAndSchedulers = ImmutableList.builder();
    PlanFragmentId fragmentId = plan.getFragment().getId();
    StageId stageId = new StageId(session.getQueryId(), fragmentId.getId());
    SqlStageExecution stageExecution = createSqlStageExecution(new StageExecutionId(stageId, attemptId), plan.getFragment(), remoteTaskFactory, session, summarizeTaskInfo, nodeTaskMap, executor, failureDetector, schedulerStats, tableWriteInfo);
    PartitioningHandle partitioningHandle = plan.getFragment().getPartitioning();
    List<RemoteSourceNode> remoteSourceNodes = plan.getFragment().getRemoteSourceNodes();
    Optional<int[]> bucketToPartition = getBucketToPartition(partitioningHandle, partitioningCache, plan.getFragment().getRoot(), remoteSourceNodes);
    // create child stages
    ImmutableSet.Builder<SqlStageExecution> childStagesBuilder = ImmutableSet.builder();
    for (StreamingSubPlan stagePlan : plan.getChildren()) {
        List<StageExecutionAndScheduler> subTree = createStreamingLinkedStageExecutions(session, stageExecution::addExchangeLocations, stagePlan.withBucketToPartition(bucketToPartition), partitioningCache, tableWriteInfo, Optional.of(stageExecution), summarizeTaskInfo, remoteTaskFactory, splitSourceFactory, attemptId);
        stageExecutionAndSchedulers.addAll(subTree);
        childStagesBuilder.add(getLast(subTree).getStageExecution());
    }
    Set<SqlStageExecution> childStageExecutions = childStagesBuilder.build();
    stageExecution.addStateChangeListener(newState -> {
        if (newState.isDone()) {
            childStageExecutions.forEach(SqlStageExecution::cancel);
        }
    });
    StageLinkage stageLinkage = new StageLinkage(fragmentId, parent, childStageExecutions);
    StageScheduler stageScheduler = createStageScheduler(splitSourceFactory, session, plan, partitioningCache, parentStageExecution, stageId, stageExecution, partitioningHandle, tableWriteInfo, childStageExecutions);
    stageExecutionAndSchedulers.add(new StageExecutionAndScheduler(stageExecution, stageLinkage, stageScheduler));
    return stageExecutionAndSchedulers.build();
}
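The recursion above appends each child subtree before its parent, so the returned list is in postorder and getLast(subTree) is always that subtree's root stage. A small, hypothetical sketch of the same layout, with plan names standing in for stage executions (nothing here is a Presto API):

import java.util.ArrayList;
import java.util.List;

public class PostorderStagesSketch {
    record Plan(String name, List<Plan> children) {}

    static List<String> buildPostorder(Plan plan) {
        List<String> result = new ArrayList<>();
        for (Plan child : plan.children()) {
            result.addAll(buildPostorder(child));
        }
        result.add(plan.name()); // the parent goes last, mirroring stageExecutionAndSchedulers.add(...)
        return result;
    }

    public static void main(String[] args) {
        Plan leafA = new Plan("scan-A", List.of());
        Plan leafB = new Plan("scan-B", List.of());
        Plan root = new Plan("join", List.of(leafA, leafB));
        System.out.println(buildPostorder(root)); // [scan-A, scan-B, join]
    }
}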
Use of com.facebook.presto.sql.planner.PartitioningHandle in project presto by prestodb.
From the class MetadataManager, the method getCommonPartitioning:
@Override
public Optional<PartitioningHandle> getCommonPartitioning(Session session, PartitioningHandle left, PartitioningHandle right) {
    Optional<ConnectorId> leftConnectorId = left.getConnectorId();
    Optional<ConnectorId> rightConnectorId = right.getConnectorId();
    if (!leftConnectorId.isPresent() || !rightConnectorId.isPresent() || !leftConnectorId.equals(rightConnectorId)) {
        return Optional.empty();
    }
    if (!left.getTransactionHandle().equals(right.getTransactionHandle())) {
        return Optional.empty();
    }
    ConnectorId connectorId = leftConnectorId.get();
    CatalogMetadata catalogMetadata = getCatalogMetadata(session, connectorId);
    ConnectorMetadata metadata = catalogMetadata.getMetadataFor(connectorId);
    Optional<ConnectorPartitioningHandle> commonHandle = metadata.getCommonPartitioningHandle(session.toConnectorSession(connectorId), left.getConnectorHandle(), right.getConnectorHandle());
    return commonHandle.map(handle -> new PartitioningHandle(Optional.of(connectorId), left.getTransactionHandle(), handle));
}
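A simplified, hypothetical model of the checks above: two PartitioningHandles can only share a common partitioning when they come from the same connector and the same transaction, after which the decision is delegated to the connector (approximated below by simple handle equality). The record and its fields are stand-ins, not Presto types.

import java.util.Optional;

public class CommonPartitioningSketch {
    record Handle(Optional<String> connectorId, Optional<String> transactionHandle, String connectorHandle) {}

    static Optional<Handle> commonPartitioning(Handle left, Handle right) {
        // different (or missing) connectors can never be co-partitioned
        if (!left.connectorId().isPresent() || !right.connectorId().isPresent() || !left.connectorId().equals(right.connectorId())) {
            return Optional.empty();
        }
        // the handles must also belong to the same transaction
        if (!left.transactionHandle().equals(right.transactionHandle())) {
            return Optional.empty();
        }
        // stand-in for ConnectorMetadata.getCommonPartitioningHandle: here "common" means identical handles
        return left.connectorHandle().equals(right.connectorHandle())
                ? Optional.of(left)
                : Optional.empty();
    }

    public static void main(String[] args) {
        Handle a = new Handle(Optional.of("hive"), Optional.of("txn-1"), "buckets=32");
        Handle b = new Handle(Optional.of("hive"), Optional.of("txn-1"), "buckets=32");
        Handle c = new Handle(Optional.of("tpch"), Optional.of("txn-2"), "buckets=32");
        System.out.println(commonPartitioning(a, b).isPresent()); // true
        System.out.println(commonPartitioning(a, c).isPresent()); // false: different connector
    }
}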