Use of org.apache.flink.runtime.scheduler.strategy.ConsumerVertexGroup in project flink by apache.
The class SchedulingPipelinedRegionComputeUtil, method buildOutEdgesDesc:
private static List<List<Integer>> buildOutEdgesDesc(
        final Map<SchedulingExecutionVertex, Set<SchedulingExecutionVertex>> vertexToRegion,
        final List<Set<SchedulingExecutionVertex>> regionList,
        final Function<ExecutionVertexID, ? extends SchedulingExecutionVertex>
                executionVertexRetriever) {

    final Map<Set<SchedulingExecutionVertex>, Integer> regionIndices = new IdentityHashMap<>();
    for (int i = 0; i < regionList.size(); i++) {
        regionIndices.put(regionList.get(i), i);
    }

    final List<List<Integer>> outEdges = new ArrayList<>(regionList.size());
    for (Set<SchedulingExecutionVertex> currentRegion : regionList) {
        final List<Integer> currentRegionOutEdges = new ArrayList<>();
        for (SchedulingExecutionVertex vertex : currentRegion) {
            for (SchedulingResultPartition producedResult : vertex.getProducedResults()) {
                if (!producedResult.getResultType().isReconnectable()) {
                    continue;
                }
                final Optional<ConsumerVertexGroup> consumerVertexGroup =
                        producedResult.getConsumerVertexGroup();
                if (!consumerVertexGroup.isPresent()) {
                    continue;
                }
                for (ExecutionVertexID consumerVertexId : consumerVertexGroup.get()) {
                    SchedulingExecutionVertex consumerVertex =
                            executionVertexRetriever.apply(consumerVertexId);
                    // Skip the ConsumerVertexGroup if its vertices are outside current
                    // regions and cannot be merged
                    if (!vertexToRegion.containsKey(consumerVertex)) {
                        break;
                    }
                    if (!currentRegion.contains(consumerVertex)) {
                        currentRegionOutEdges.add(
                                regionIndices.get(vertexToRegion.get(consumerVertex)));
                    }
                }
            }
        }
        outEdges.add(currentRegionOutEdges);
    }
    return outEdges;
}
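The result is an adjacency description over region indices: entry i lists the indices of the regions that region i connects to through reconnectable result partitions. Note the IdentityHashMap: regions are keyed by object identity, so the mapping stays valid even if a region set is mutated later (which would change its hashCode and break a regular HashMap lookup). A standalone sketch of this indexing, with plain String vertices standing in for SchedulingExecutionVertex:

import java.util.*;

// Hypothetical, self-contained illustration of the out-edges description:
// regions are sets of vertices (plain Strings here), and the result is an
// adjacency list of region indices, analogous to buildOutEdgesDesc above.
public class OutEdgesSketch {
    public static void main(String[] args) {
        Set<String> regionA = new HashSet<>(Arrays.asList("v1", "v2"));
        Set<String> regionB = new HashSet<>(Collections.singletonList("v3"));
        List<Set<String>> regionList = Arrays.asList(regionA, regionB);

        // Identity-based indexing: mutating a region set later does not
        // invalidate its key, unlike in a hashCode-based HashMap.
        Map<Set<String>, Integer> regionIndices = new IdentityHashMap<>();
        for (int i = 0; i < regionList.size(); i++) {
            regionIndices.put(regionList.get(i), i);
        }

        // Suppose v2 produces a reconnectable result consumed by v3: region 0
        // then records an out-edge to region 1, and region 1 has none.
        List<List<Integer>> outEdges = Arrays.asList(
                Collections.singletonList(regionIndices.get(regionB)),
                Collections.<Integer>emptyList());
        System.out.println(outEdges); // prints [[1], []]
    }
}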
Use of org.apache.flink.runtime.scheduler.strategy.ConsumerVertexGroup in project flink by apache.
The class IntermediateResultPartition, method computeNumberOfSubpartitions:
private int computeNumberOfSubpartitions() {
    if (!getProducer().getExecutionGraphAccessor().isDynamic()) {
        ConsumerVertexGroup consumerVertexGroup = getConsumerVertexGroup();
        checkState(consumerVertexGroup.size() > 0);

        // The produced data is partitioned among a number of subpartitions, one for each
        // consuming sub task.
        return consumerVertexGroup.size();
    } else {
        if (totalResult.isBroadcast()) {
            // for dynamic graph and broadcast result, we only produce one subpartition,
            // and all the downstream vertices should consume this subpartition.
            return 1;
        } else {
            return computeNumberOfMaxPossiblePartitionConsumers();
        }
    }
}
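In short: on a non-dynamic (static) execution graph the partition gets one subpartition per consuming sub task; on a dynamic graph a broadcast result needs only a single subpartition that every downstream task reads, while other results are sized for the maximum possible number of consumers. A hypothetical standalone restatement of that rule (maxPossibleConsumers stands in for computeNumberOfMaxPossiblePartitionConsumers(), which is not shown here):

// Sketch of the subpartition-count rule above; not Flink's actual API.
static int numberOfSubpartitions(
        boolean isDynamic, boolean isBroadcast, int consumerCount, int maxPossibleConsumers) {
    if (!isDynamic) {
        if (consumerCount <= 0) {
            throw new IllegalStateException("partition must have at least one consumer");
        }
        return consumerCount; // one subpartition per consuming sub task
    }
    // Dynamic graph: a broadcast result collapses to a single shared subpartition.
    return isBroadcast ? 1 : maxPossibleConsumers;
}

For example, numberOfSubpartitions(false, false, 4, 8) returns 4, while numberOfSubpartitions(true, true, 4, 8) returns 1.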
Use of org.apache.flink.runtime.scheduler.strategy.ConsumerVertexGroup in project flink by apache.
The class DefaultExecutionTopologyTest, method assertPartitionsEquals:
private static void assertPartitionsEquals(
        Iterable<IntermediateResultPartition> originalResultPartitions,
        Iterable<DefaultResultPartition> adaptedResultPartitions) {

    assertEquals(Iterables.size(originalResultPartitions), Iterables.size(adaptedResultPartitions));

    for (IntermediateResultPartition originalPartition : originalResultPartitions) {
        DefaultResultPartition adaptedPartition =
                IterableUtils.toStream(adaptedResultPartitions)
                        .filter(adapted -> adapted.getId().equals(originalPartition.getPartitionId()))
                        .findAny()
                        .orElseThrow(
                                () -> new AssertionError(
                                        "Could not find matching adapted partition for "
                                                + originalPartition));

        assertPartitionEquals(originalPartition, adaptedPartition);

        ConsumerVertexGroup consumerVertexGroup = originalPartition.getConsumerVertexGroup();
        Optional<ConsumerVertexGroup> adaptedConsumers = adaptedPartition.getConsumerVertexGroup();
        assertTrue(adaptedConsumers.isPresent());
        for (ExecutionVertexID originalId : consumerVertexGroup) {
            // it is sufficient to verify that some vertex exists with the correct ID here,
            // since deep equality is verified later in the main loop; this DOES rely on an
            // implicit assumption that the vertex objects returned by the topology are
            // identical to those stored in the partition
            assertTrue(
                    IterableUtils.toStream(adaptedConsumers.get())
                            .anyMatch(adaptedConsumer -> adaptedConsumer.equals(originalId)));
        }
    }
}
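The lookup-then-compare pattern above generalizes: match elements of two collections by a stable key, then assert deep equality only on the matched pair. A hypothetical generic helper (not part of Flink) capturing that pattern:

import java.util.function.Function;

// Find the element of 'candidates' whose key equals 'key', failing the test
// if no such element exists; deep comparison happens on the returned match.
static <T, K> T findByKey(Iterable<T> candidates, K key, Function<T, K> keyOf) {
    for (T candidate : candidates) {
        if (keyOf.apply(candidate).equals(key)) {
            return candidate;
        }
    }
    throw new AssertionError("Could not find matching element for key " + key);
}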
Use of org.apache.flink.runtime.scheduler.strategy.ConsumerVertexGroup in project flink by apache.
The class EdgeManagerBuildUtil, method connectPointwise:
private static void connectPointwise(
        ExecutionVertex[] taskVertices, IntermediateResult intermediateResult) {

    final int sourceCount = intermediateResult.getPartitions().length;
    final int targetCount = taskVertices.length;

    if (sourceCount == targetCount) {
        for (int i = 0; i < sourceCount; i++) {
            ExecutionVertex executionVertex = taskVertices[i];
            IntermediateResultPartition partition = intermediateResult.getPartitions()[i];

            ConsumerVertexGroup consumerVertexGroup =
                    ConsumerVertexGroup.fromSingleVertex(executionVertex.getID());
            partition.addConsumers(consumerVertexGroup);

            ConsumedPartitionGroup consumedPartitionGroup =
                    createAndRegisterConsumedPartitionGroupToEdgeManager(
                            partition.getPartitionId(), intermediateResult);
            executionVertex.addConsumedPartitionGroup(consumedPartitionGroup);
        }
    } else if (sourceCount > targetCount) {
        for (int index = 0; index < targetCount; index++) {
            ExecutionVertex executionVertex = taskVertices[index];
            ConsumerVertexGroup consumerVertexGroup =
                    ConsumerVertexGroup.fromSingleVertex(executionVertex.getID());

            int start = index * sourceCount / targetCount;
            int end = (index + 1) * sourceCount / targetCount;

            List<IntermediateResultPartitionID> consumedPartitions = new ArrayList<>(end - start);
            for (int i = start; i < end; i++) {
                IntermediateResultPartition partition = intermediateResult.getPartitions()[i];
                partition.addConsumers(consumerVertexGroup);
                consumedPartitions.add(partition.getPartitionId());
            }

            ConsumedPartitionGroup consumedPartitionGroup =
                    createAndRegisterConsumedPartitionGroupToEdgeManager(
                            consumedPartitions, intermediateResult);
            executionVertex.addConsumedPartitionGroup(consumedPartitionGroup);
        }
    } else {
        for (int partitionNum = 0; partitionNum < sourceCount; partitionNum++) {
            IntermediateResultPartition partition =
                    intermediateResult.getPartitions()[partitionNum];
            ConsumedPartitionGroup consumedPartitionGroup =
                    createAndRegisterConsumedPartitionGroupToEdgeManager(
                            partition.getPartitionId(), intermediateResult);

            int start = (partitionNum * targetCount + sourceCount - 1) / sourceCount;
            int end = ((partitionNum + 1) * targetCount + sourceCount - 1) / sourceCount;

            List<ExecutionVertexID> consumers = new ArrayList<>(end - start);
            for (int i = start; i < end; i++) {
                ExecutionVertex executionVertex = taskVertices[i];
                executionVertex.addConsumedPartitionGroup(consumedPartitionGroup);
                consumers.add(executionVertex.getID());
            }

            ConsumerVertexGroup consumerVertexGroup =
                    ConsumerVertexGroup.fromMultipleVertices(consumers);
            partition.addConsumers(consumerVertexGroup);
        }
    }
}
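The index arithmetic divides the larger side as evenly as possible over the smaller one. A standalone sketch using the same range formulas (the counts 5 and 2 are illustrative choices, not from Flink) prints the resulting slices:

// Hypothetical demo of the pointwise slicing formulas used above.
public class PointwiseRangesSketch {
    public static void main(String[] args) {
        // sourceCount > targetCount: each target task consumes a slice of partitions.
        int sourceCount = 5, targetCount = 2;
        for (int index = 0; index < targetCount; index++) {
            int start = index * sourceCount / targetCount;
            int end = (index + 1) * sourceCount / targetCount;
            // prints: target 0 consumes partitions [0, 2); target 1 consumes [2, 5)
            System.out.println("target " + index + " consumes partitions [" + start + ", " + end + ")");
        }

        // sourceCount < targetCount: each partition is consumed by a slice of targets.
        sourceCount = 2;
        targetCount = 5;
        for (int p = 0; p < sourceCount; p++) {
            int start = (p * targetCount + sourceCount - 1) / sourceCount;
            int end = ((p + 1) * targetCount + sourceCount - 1) / sourceCount;
            // prints: partition 0 consumed by targets [0, 3); partition 1 by [3, 5)
            System.out.println("partition " + p + " consumed by targets [" + start + ", " + end + ")");
        }
    }
}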