use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in project flink by apache.
the class DefaultExecutionTopologyTest method assertGraphEquals.
private static void assertGraphEquals(
        ExecutionGraph originalGraph, DefaultExecutionTopology adaptedTopology) {

    Iterator<ExecutionVertex> originalVertices =
            originalGraph.getAllExecutionVertices().iterator();
    Iterator<DefaultExecutionVertex> adaptedVertices = adaptedTopology.getVertices().iterator();

    while (originalVertices.hasNext()) {
        ExecutionVertex originalVertex = originalVertices.next();
        DefaultExecutionVertex adaptedVertex = adaptedVertices.next();

        assertEquals(originalVertex.getID(), adaptedVertex.getId());

        List<IntermediateResultPartition> originalConsumedPartitions = new ArrayList<>();
        for (ConsumedPartitionGroup consumedPartitionGroup :
                originalVertex.getAllConsumedPartitionGroups()) {
            for (IntermediateResultPartitionID partitionId : consumedPartitionGroup) {
                IntermediateResultPartition partition =
                        originalVertex
                                .getExecutionGraphAccessor()
                                .getResultPartitionOrThrow(partitionId);
                originalConsumedPartitions.add(partition);
            }
        }
        Iterable<DefaultResultPartition> adaptedConsumedPartitions =
                adaptedVertex.getConsumedResults();
        assertPartitionsEquals(originalConsumedPartitions, adaptedConsumedPartitions);

        Collection<IntermediateResultPartition> originalProducedPartitions =
                originalVertex.getProducedPartitions().values();
        Iterable<DefaultResultPartition> adaptedProducedPartitions =
                adaptedVertex.getProducedResults();
        assertPartitionsEquals(originalProducedPartitions, adaptedProducedPartitions);
    }

    assertFalse(
            "Number of adapted vertices exceeds number of original vertices.",
            adaptedVertices.hasNext());
}
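The helper assertPartitionsEquals is referenced above but is not part of this listing. A minimal sketch of what such a helper might check, assuming both sides expose matching partition IDs; the exact assertions are illustrative, not the actual Flink test code:

// Illustrative sketch only -- not the actual Flink test helper.
private static void assertPartitionsEquals(
        Iterable<IntermediateResultPartition> originalPartitions,
        Iterable<DefaultResultPartition> adaptedPartitions) {
    Iterator<IntermediateResultPartition> original = originalPartitions.iterator();
    Iterator<DefaultResultPartition> adapted = adaptedPartitions.iterator();
    while (original.hasNext()) {
        assertTrue("Adapted topology is missing partitions.", adapted.hasNext());
        // Compare partition IDs; the real helper presumably also compares
        // result types and producer/consumer relationships.
        assertEquals(original.next().getPartitionId(), adapted.next().getId());
    }
    assertFalse("Adapted topology has extra partitions.", adapted.hasNext());
}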
use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in project flink by apache.
the class SchedulingPipelinedRegionComputeUtil method getNonReconnectableConsumedResults.
private static Iterable<SchedulingResultPartition> getNonReconnectableConsumedResults(
        SchedulingExecutionVertex vertex,
        Function<IntermediateResultPartitionID, ? extends SchedulingResultPartition>
                resultPartitionRetriever) {

    List<SchedulingResultPartition> nonReconnectableConsumedResults = new ArrayList<>();
    for (ConsumedPartitionGroup consumedPartitionGroup : vertex.getConsumedPartitionGroups()) {
        for (IntermediateResultPartitionID partitionId : consumedPartitionGroup) {
            SchedulingResultPartition consumedResult =
                    resultPartitionRetriever.apply(partitionId);
            if (consumedResult.getResultType().isReconnectable()) {
                // The result types of all partitions in one ConsumedPartitionGroup are the
                // same, so once one reconnectable partition is seen the rest of the group
                // can be skipped.
                break;
            }
            nonReconnectableConsumedResults.add(consumedResult);
        }
    }
    return nonReconnectableConsumedResults;
}
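Only the non-reconnectable (pipelined) consumed results tie a consumer into the same pipelined region as its producers. A minimal sketch of how a caller could use this method when building regions; mergeRegions is a hypothetical helper, not part of the actual Flink utility:

// Hypothetical driver: pull each vertex into the region of the producers of its
// non-reconnectable consumed results. `mergeRegions` is illustrative only.
for (SchedulingResultPartition consumedResult :
        getNonReconnectableConsumedResults(vertex, resultPartitionRetriever)) {
    mergeRegions(vertex, consumedResult.getProducer());
}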
use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in project flink by apache.
the class SsgNetworkMemoryCalculationUtils method getMaxInputChannelNumsForDynamicGraph.
@VisibleForTesting
static Map<IntermediateDataSetID, Integer> getMaxInputChannelNumsForDynamicGraph(
        ExecutionJobVertex ejv) {

    Map<IntermediateDataSetID, Integer> ret = new HashMap<>();

    for (ExecutionVertex vertex : ejv.getTaskVertices()) {
        for (ConsumedPartitionGroup partitionGroup : vertex.getAllConsumedPartitionGroups()) {
            IntermediateResultPartition resultPartition =
                    ejv.getGraph().getResultPartitionOrThrow(partitionGroup.getFirst());
            SubpartitionIndexRange subpartitionIndexRange =
                    TaskDeploymentDescriptorFactory.computeConsumedSubpartitionRange(
                            resultPartition, vertex.getParallelSubtaskIndex());

            // One channel per consumed subpartition of each partition in the group;
            // keep the maximum over all subtasks of this job vertex.
            ret.merge(
                    partitionGroup.getIntermediateDataSetID(),
                    subpartitionIndexRange.size() * partitionGroup.size(),
                    Integer::max);
        }
    }

    return ret;
}
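Since the method is annotated @VisibleForTesting, it can be exercised directly from a test. A sketch of such a call, assuming an ExecutionJobVertex named ejv provided by the surrounding test fixture:

// Hypothetical test-style usage; `ejv` comes from the surrounding test fixture.
Map<IntermediateDataSetID, Integer> maxChannels =
        SsgNetworkMemoryCalculationUtils.getMaxInputChannelNumsForDynamicGraph(ejv);
maxChannels.forEach(
        (dataSetId, channelNum) ->
                System.out.printf("dataset %s: at most %d input channels%n", dataSetId, channelNum));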
use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in project flink by apache.
the class EdgeManagerBuildUtil method connectPointwise.
private static void connectPointwise(
        ExecutionVertex[] taskVertices, IntermediateResult intermediateResult) {

    final int sourceCount = intermediateResult.getPartitions().length;
    final int targetCount = taskVertices.length;

    if (sourceCount == targetCount) {
        // One-to-one: each consumer reads exactly one partition.
        for (int i = 0; i < sourceCount; i++) {
            ExecutionVertex executionVertex = taskVertices[i];
            IntermediateResultPartition partition = intermediateResult.getPartitions()[i];

            ConsumerVertexGroup consumerVertexGroup =
                    ConsumerVertexGroup.fromSingleVertex(executionVertex.getID());
            partition.addConsumers(consumerVertexGroup);

            ConsumedPartitionGroup consumedPartitionGroup =
                    createAndRegisterConsumedPartitionGroupToEdgeManager(
                            partition.getPartitionId(), intermediateResult);
            executionVertex.addConsumedPartitionGroup(consumedPartitionGroup);
        }
    } else if (sourceCount > targetCount) {
        // More producers than consumers: each consumer reads a contiguous range of partitions.
        for (int index = 0; index < targetCount; index++) {
            ExecutionVertex executionVertex = taskVertices[index];
            ConsumerVertexGroup consumerVertexGroup =
                    ConsumerVertexGroup.fromSingleVertex(executionVertex.getID());

            int start = index * sourceCount / targetCount;
            int end = (index + 1) * sourceCount / targetCount;

            List<IntermediateResultPartitionID> consumedPartitions =
                    new ArrayList<>(end - start);
            for (int i = start; i < end; i++) {
                IntermediateResultPartition partition = intermediateResult.getPartitions()[i];
                partition.addConsumers(consumerVertexGroup);
                consumedPartitions.add(partition.getPartitionId());
            }

            ConsumedPartitionGroup consumedPartitionGroup =
                    createAndRegisterConsumedPartitionGroupToEdgeManager(
                            consumedPartitions, intermediateResult);
            executionVertex.addConsumedPartitionGroup(consumedPartitionGroup);
        }
    } else {
        // Fewer producers than consumers: each partition is read by a contiguous
        // range of consumers.
        for (int partitionNum = 0; partitionNum < sourceCount; partitionNum++) {
            IntermediateResultPartition partition =
                    intermediateResult.getPartitions()[partitionNum];
            ConsumedPartitionGroup consumedPartitionGroup =
                    createAndRegisterConsumedPartitionGroupToEdgeManager(
                            partition.getPartitionId(), intermediateResult);

            int start = (partitionNum * targetCount + sourceCount - 1) / sourceCount;
            int end = ((partitionNum + 1) * targetCount + sourceCount - 1) / sourceCount;

            List<ExecutionVertexID> consumers = new ArrayList<>(end - start);
            for (int i = start; i < end; i++) {
                ExecutionVertex executionVertex = taskVertices[i];
                executionVertex.addConsumedPartitionGroup(consumedPartitionGroup);
                consumers.add(executionVertex.getID());
            }

            ConsumerVertexGroup consumerVertexGroup =
                    ConsumerVertexGroup.fromMultipleVertices(consumers);
            partition.addConsumers(consumerVertexGroup);
        }
    }
}
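The integer index arithmetic spreads the ranges as evenly as possible. For example, with sourceCount = 5 and targetCount = 2, consumer 0 gets start = 0 * 5 / 2 = 0 and end = 1 * 5 / 2 = 2 (partitions 0 and 1), while consumer 1 gets start = 2 and end = 5 (partitions 2, 3 and 4). Conversely, with sourceCount = 2 and targetCount = 5, partition 0 gets start = (0 * 5 + 1) / 2 = 0 and end = (1 * 5 + 1) / 2 = 3 (consumers 0, 1 and 2), and partition 1 gets start = 3 and end = 5 (consumers 3 and 4).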
use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in project flink by apache.
the class EdgeManagerBuildUtil method createAndRegisterConsumedPartitionGroupToEdgeManager.
private static ConsumedPartitionGroup createAndRegisterConsumedPartitionGroupToEdgeManager(
        IntermediateResultPartitionID consumedPartitionId,
        IntermediateResult intermediateResult) {
    ConsumedPartitionGroup consumedPartitionGroup =
            ConsumedPartitionGroup.fromSinglePartition(consumedPartitionId);
    registerConsumedPartitionGroupToEdgeManager(consumedPartitionGroup, intermediateResult);
    return consumedPartitionGroup;
}
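connectPointwise above also calls an overload of this method that takes a list of partition IDs, but only the single-partition variant is part of this listing. A plausible sketch of the list overload, assuming ConsumedPartitionGroup.fromMultiplePartitions is the multi-partition counterpart of fromSinglePartition:

// Sketch of the list overload used by connectPointwise; mirrors the
// single-partition variant above but builds the group from many IDs.
private static ConsumedPartitionGroup createAndRegisterConsumedPartitionGroupToEdgeManager(
        List<IntermediateResultPartitionID> consumedPartitions,
        IntermediateResult intermediateResult) {
    ConsumedPartitionGroup consumedPartitionGroup =
            ConsumedPartitionGroup.fromMultiplePartitions(consumedPartitions);
    registerConsumedPartitionGroupToEdgeManager(consumedPartitionGroup, intermediateResult);
    return consumedPartitionGroup;
}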