Use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in project flink by apache.
Class EdgeManagerBuildUtil, method createAndRegisterConsumedPartitionGroupToEdgeManager:
private static ConsumedPartitionGroup createAndRegisterConsumedPartitionGroupToEdgeManager(
        List<IntermediateResultPartitionID> consumedPartitions,
        IntermediateResult intermediateResult) {
    ConsumedPartitionGroup consumedPartitionGroup =
            ConsumedPartitionGroup.fromMultiplePartitions(consumedPartitions);
    registerConsumedPartitionGroupToEdgeManager(consumedPartitionGroup, intermediateResult);
    return consumedPartitionGroup;
}
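The registration helper it calls is not shown in this snippet. A minimal sketch of what it plausibly does, assuming the producing vertex exposes its graph's EdgeManager via getProducer().getGraph().getEdgeManager() and that EdgeManager offers a registerConsumedPartitionGroup method (both are assumptions about this Flink version, not verified here):

    // Sketch only: the accessor chain below is an assumption, not verified
    // against this exact Flink version.
    private static void registerConsumedPartitionGroupToEdgeManager(
            ConsumedPartitionGroup consumedPartitionGroup, IntermediateResult intermediateResult) {
        // Hand the group to the EdgeManager shared by the whole execution graph,
        // so it is tracked once rather than per consumer vertex.
        intermediateResult
                .getProducer()
                .getGraph()
                .getEdgeManager()
                .registerConsumedPartitionGroup(consumedPartitionGroup);
    }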
Use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in project flink by apache.
Class EdgeManagerBuildUtil, method connectAllToAll:
private static void connectAllToAll(
        ExecutionVertex[] taskVertices, IntermediateResult intermediateResult) {
    // All consumer vertices share one ConsumedPartitionGroup covering every partition.
    List<IntermediateResultPartitionID> consumedPartitions =
            Arrays.stream(intermediateResult.getPartitions())
                    .map(IntermediateResultPartition::getPartitionId)
                    .collect(Collectors.toList());
    ConsumedPartitionGroup consumedPartitionGroup =
            createAndRegisterConsumedPartitionGroupToEdgeManager(
                    consumedPartitions, intermediateResult);
    for (ExecutionVertex ev : taskVertices) {
        ev.addConsumedPartitionGroup(consumedPartitionGroup);
    }

    // Symmetrically, all partitions share one ConsumerVertexGroup.
    List<ExecutionVertexID> consumerVertices =
            Arrays.stream(taskVertices)
                    .map(ExecutionVertex::getID)
                    .collect(Collectors.toList());
    ConsumerVertexGroup consumerVertexGroup =
            ConsumerVertexGroup.fromMultipleVertices(consumerVertices);
    for (IntermediateResultPartition partition : intermediateResult.getPartitions()) {
        partition.addConsumers(consumerVertexGroup);
    }
}
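The point of the two shared group objects: for an all-to-all pattern with N consumer vertices and M partitions, storing individual edges needs N * M entries, while the grouped form stores one ConsumedPartitionGroup plus one ConsumerVertexGroup, shared by reference. A self-contained sketch with stand-in record types (not Flink classes) illustrating that sharing:

    // Standalone illustration only; PartitionId/VertexId are stand-ins.
    import java.util.List;
    import java.util.stream.Collectors;
    import java.util.stream.IntStream;

    public class AllToAllGroupingSketch {
        record PartitionId(int value) {}
        record VertexId(int value) {}

        public static void main(String[] args) {
            List<PartitionId> partitions =
                    IntStream.range(0, 3).mapToObj(PartitionId::new).collect(Collectors.toList());
            List<VertexId> vertices =
                    IntStream.range(0, 4).mapToObj(VertexId::new).collect(Collectors.toList());

            // One group per side, mirroring connectAllToAll: every vertex would
            // record the same partition group, every partition the same vertex group.
            List<PartitionId> consumedPartitionGroup = List.copyOf(partitions);
            List<VertexId> consumerVertexGroup = List.copyOf(vertices);

            System.out.println("logical edges: "
                    + consumedPartitionGroup.size() * consumerVertexGroup.size()
                    + ", group objects stored: 2");
        }
    }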
Use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in project flink by apache.
Class DefaultExecutionGraph, method releasePartitionGroups:
private void releasePartitionGroups(final List<ConsumedPartitionGroup> releasablePartitionGroups) {
    if (releasablePartitionGroups.size() > 0) {
        // Remove the cache of ShuffleDescriptors when ConsumedPartitionGroups are released
        for (ConsumedPartitionGroup releasablePartitionGroup : releasablePartitionGroups) {
            IntermediateResult totalResult =
                    checkNotNull(
                            intermediateResults.get(
                                    releasablePartitionGroup.getIntermediateDataSetID()));
            totalResult.clearCachedInformationForPartitionGroup(releasablePartitionGroup);
        }

        final List<ResultPartitionID> releasablePartitionIds =
                releasablePartitionGroups.stream()
                        .flatMap(IterableUtils::toStream)
                        .map(this::createResultPartitionId)
                        .collect(Collectors.toList());
        partitionTracker.stopTrackingAndReleasePartitions(releasablePartitionIds);
    }
}
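The flatMap step works because each ConsumedPartitionGroup is iterated as a sequence of IntermediateResultPartitionID values, which its use with IterableUtils::toStream implies. For reference, the plain-JDK equivalent of that Flink utility:

    import java.util.stream.Stream;
    import java.util.stream.StreamSupport;

    // Plain-JDK equivalent of org.apache.flink.util.IterableUtils.toStream:
    // adapts any Iterable into a sequential Stream.
    static <T> Stream<T> toStream(Iterable<T> iterable) {
        return StreamSupport.stream(iterable.spliterator(), false);
    }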
Use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in project flink by apache.
Class DefaultExecutionGraph, method maybeReleasePartitionGroupsFor:
private void maybeReleasePartitionGroupsFor(final Execution attempt) {
    final ExecutionVertexID finishedExecutionVertex = attempt.getVertex().getID();

    if (attempt.getState() == ExecutionState.FINISHED) {
        final List<ConsumedPartitionGroup> releasablePartitionGroups =
                partitionGroupReleaseStrategy.vertexFinished(finishedExecutionVertex);
        releasePartitionGroups(releasablePartitionGroups);
    } else {
        partitionGroupReleaseStrategy.vertexUnfinished(finishedExecutionVertex);
    }
}
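The strategy consulted here decides when a group's partitions are safe to free. A hypothetical sketch of one such policy, releasing a group once every consumer vertex of that group has finished; this is illustrative only, not Flink's shipped strategy (which tracks pipelined regions), and all names besides the two Flink ID types are invented:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;

    // Hypothetical policy sketch, not the actual Flink implementation.
    class AllConsumersFinishedStrategySketch {
        // For each group, the consumer vertices that have not finished yet.
        private final Map<ConsumedPartitionGroup, Set<ExecutionVertexID>> unfinishedConsumers;

        AllConsumersFinishedStrategySketch(
                Map<ConsumedPartitionGroup, Set<ExecutionVertexID>> unfinishedConsumers) {
            this.unfinishedConsumers = unfinishedConsumers;
        }

        List<ConsumedPartitionGroup> vertexFinished(ExecutionVertexID vertex) {
            List<ConsumedPartitionGroup> releasable = new ArrayList<>();
            for (Map.Entry<ConsumedPartitionGroup, Set<ExecutionVertexID>> entry :
                    unfinishedConsumers.entrySet()) {
                if (entry.getValue().remove(vertex) && entry.getValue().isEmpty()) {
                    // All consumers of this group finished; its partitions can go.
                    releasable.add(entry.getKey());
                }
            }
            return releasable;
        }

        void vertexUnfinished(ExecutionVertexID vertex) {
            // A vertex leaving FINISHED (e.g. on restart) must be re-registered
            // as an unfinished consumer of the groups it reads; omitted here.
        }
    }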
Use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in project flink by apache.
Class TaskDeploymentDescriptorFactory, method createInputGateDeploymentDescriptors:
private List<InputGateDeploymentDescriptor> createInputGateDeploymentDescriptors()
        throws IOException {
    List<InputGateDeploymentDescriptor> inputGates =
            new ArrayList<>(consumedPartitionGroups.size());

    for (ConsumedPartitionGroup consumedPartitionGroup : consumedPartitionGroups) {
        // If the produced partition has multiple consumers registered, we
        // need to request the one matching our sub task index.
        // TODO Refactor after removing the consumers from the intermediate result partitions
        IntermediateResultPartition resultPartition =
                resultPartitionRetriever.apply(consumedPartitionGroup.getFirst());

        IntermediateResult consumedIntermediateResult = resultPartition.getIntermediateResult();
        SubpartitionIndexRange consumedSubpartitionRange =
                computeConsumedSubpartitionRange(resultPartition, subtaskIndex);

        IntermediateDataSetID resultId = consumedIntermediateResult.getId();
        ResultPartitionType partitionType = consumedIntermediateResult.getResultType();

        inputGates.add(
                new InputGateDeploymentDescriptor(
                        resultId,
                        partitionType,
                        consumedSubpartitionRange,
                        getConsumedPartitionShuffleDescriptors(
                                consumedIntermediateResult, consumedPartitionGroup)));
    }

    return inputGates;
}
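computeConsumedSubpartitionRange is not shown in this snippet. One plausible scheme for mapping a consumer subtask to a contiguous range of subpartitions is an even split by subtask index; the record and method below are illustrative stand-ins, not the actual Flink implementation:

    // Illustrative stand-ins; the real computeConsumedSubpartitionRange may differ.
    record SubpartitionRangeSketch(int startInclusive, int endInclusive) {}

    static SubpartitionRangeSketch evenRangeSketch(
            int subtaskIndex, int numConsumers, int numSubpartitions) {
        // Even division: subtask i consumes [i*P/C, (i+1)*P/C - 1].
        int start = subtaskIndex * numSubpartitions / numConsumers;
        int end = (subtaskIndex + 1) * numSubpartitions / numConsumers - 1;
        return new SubpartitionRangeSketch(start, end);
    }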