Use of org.apache.flink.runtime.executiongraph.InternalExecutionGraphAccessor in project flink by apache: the getConsumedResultPartitionsProducers method of the class ExecutionGraphToInputsLocationsRetrieverAdapter.
@Override
public Collection<Collection<ExecutionVertexID>> getConsumedResultPartitionsProducers(
        ExecutionVertexID executionVertexId) {
    ExecutionVertex ev = getExecutionVertex(executionVertexId);
    InternalExecutionGraphAccessor executionGraphAccessor = ev.getExecutionGraphAccessor();
    // one inner list of producer IDs per consumed partition group of the vertex
    List<Collection<ExecutionVertexID>> resultPartitionProducers =
            new ArrayList<>(ev.getNumberOfInputs());
    for (ConsumedPartitionGroup consumedPartitions : ev.getAllConsumedPartitionGroups()) {
        List<ExecutionVertexID> producers = new ArrayList<>(consumedPartitions.size());
        for (IntermediateResultPartitionID consumedPartitionId : consumedPartitions) {
            // resolve the partition through the accessor and record its producing vertex
            ExecutionVertex producer =
                    executionGraphAccessor
                            .getResultPartitionOrThrow(consumedPartitionId)
                            .getProducer();
            producers.add(producer.getID());
        }
        resultPartitionProducers.add(producers);
    }
    return resultPartitionProducers;
}
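For context, a caller could walk the nested collection returned above as in the following sketch; retriever and vertexId are illustrative placeholders, not names taken from the Flink source:

Collection<Collection<ExecutionVertexID>> producersPerGroup =
        retriever.getConsumedResultPartitionsProducers(vertexId);
for (Collection<ExecutionVertexID> producersOfGroup : producersPerGroup) {
    for (ExecutionVertexID producerId : producersOfGroup) {
        // each producerId identifies the upstream subtask that produces one of the
        // result partitions consumed by vertexId
    }
}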
Use of org.apache.flink.runtime.executiongraph.InternalExecutionGraphAccessor in project flink by apache: the hasRunningPrecedentTasks method of the class DefaultCheckpointPlanCalculator.
private boolean hasRunningPrecedentTasks(
        ExecutionVertex vertex,
        List<JobEdge> prevJobEdges,
        Map<JobVertexID, BitSet> taskRunningStatusByVertex) {
    InternalExecutionGraphAccessor executionGraphAccessor = vertex.getExecutionGraphAccessor();
    for (int i = 0; i < prevJobEdges.size(); ++i) {
        if (prevJobEdges.get(i).getDistributionPattern() == DistributionPattern.POINTWISE) {
            for (IntermediateResultPartitionID consumedPartitionId :
                    vertex.getConsumedPartitionGroup(i)) {
                // a pointwise upstream producer that is still running makes the vertex
                // have a running precedent task
                ExecutionVertex precedentTask =
                        executionGraphAccessor
                                .getResultPartitionOrThrow(consumedPartitionId)
                                .getProducer();
                BitSet precedentVertexRunningStatus =
                        taskRunningStatusByVertex.get(precedentTask.getJobvertexId());
                if (precedentVertexRunningStatus.get(precedentTask.getParallelSubtaskIndex())) {
                    return true;
                }
            }
        }
    }
    return false;
}
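The taskRunningStatusByVertex argument is expected to hold one BitSet per JobVertexID, with bit i set when subtask i of that job vertex is currently running. Below is a minimal sketch of how such a map could be assembled from the ExecutionJobVertex instances; the helper name collectRunningStatus is a hypothetical name for illustration, not the actual Flink method:

private static Map<JobVertexID, BitSet> collectRunningStatus(
        Iterable<ExecutionJobVertex> jobVertices) {
    Map<JobVertexID, BitSet> runningStatusByVertex = new HashMap<>();
    for (ExecutionJobVertex jobVertex : jobVertices) {
        BitSet runningStatus = new BitSet(jobVertex.getParallelism());
        for (ExecutionVertex task : jobVertex.getTaskVertices()) {
            // set the bit for this subtask index if its execution is currently running
            if (task.getExecutionState() == ExecutionState.RUNNING) {
                runningStatus.set(task.getParallelSubtaskIndex());
            }
        }
        runningStatusByVertex.put(jobVertex.getJobVertexId(), runningStatus);
    }
    return runningStatusByVertex;
}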
Use of org.apache.flink.runtime.executiongraph.InternalExecutionGraphAccessor in project flink by apache: the testExecutionGraphGeneration method of the class RescalePartitionerTest.
@Test
public void testExecutionGraphGeneration() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(4);
    // get input data
    DataStream<String> text =
            env.addSource(
                            new ParallelSourceFunction<String>() {
                                private static final long serialVersionUID = 7772338606389180774L;

                                @Override
                                public void run(SourceContext<String> ctx) throws Exception {}

                                @Override
                                public void cancel() {}
                            })
                    .setParallelism(2);
    DataStream<Tuple2<String, Integer>> counts =
            text.rescale()
                    .flatMap(
                            new FlatMapFunction<String, Tuple2<String, Integer>>() {
                                private static final long serialVersionUID = -5255930322161596829L;

                                @Override
                                public void flatMap(
                                        String value, Collector<Tuple2<String, Integer>> out)
                                        throws Exception {}
                            });
    counts.rescale().print().setParallelism(2);
    JobGraph jobGraph = env.getStreamGraph().getJobGraph();
    List<JobVertex> jobVertices = jobGraph.getVerticesSortedTopologicallyFromSources();
    JobVertex sourceVertex = jobVertices.get(0);
    JobVertex mapVertex = jobVertices.get(1);
    JobVertex sinkVertex = jobVertices.get(2);
    assertEquals(2, sourceVertex.getParallelism());
    assertEquals(4, mapVertex.getParallelism());
    assertEquals(2, sinkVertex.getParallelism());
    ExecutionGraph eg =
            TestingDefaultExecutionGraphBuilder.newBuilder()
                    .setVertexParallelismStore(
                            SchedulerBase.computeVertexParallelismStore(jobGraph))
                    .build();
    try {
        eg.attachJobGraph(jobVertices);
    } catch (JobException e) {
        e.printStackTrace();
        fail("Building ExecutionGraph failed: " + e.getMessage());
    }
    ExecutionJobVertex execSourceVertex = eg.getJobVertex(sourceVertex.getID());
    ExecutionJobVertex execMapVertex = eg.getJobVertex(mapVertex.getID());
    ExecutionJobVertex execSinkVertex = eg.getJobVertex(sinkVertex.getID());
    assertEquals(0, execSourceVertex.getInputs().size());
    assertEquals(1, execMapVertex.getInputs().size());
    assertEquals(4, execMapVertex.getParallelism());
    ExecutionVertex[] mapTaskVertices = execMapVertex.getTaskVertices();
    // verify that we have each parallel input partition exactly twice, i.e. that one source
    // sends to two unique mappers
    Map<Integer, Integer> mapInputPartitionCounts = new HashMap<>();
    for (ExecutionVertex mapTaskVertex : mapTaskVertices) {
        assertEquals(1, mapTaskVertex.getNumberOfInputs());
        assertEquals(1, mapTaskVertex.getConsumedPartitionGroup(0).size());
        IntermediateResultPartitionID consumedPartitionId =
                mapTaskVertex.getConsumedPartitionGroup(0).getFirst();
        assertEquals(
                sourceVertex.getID(),
                mapTaskVertex
                        .getExecutionGraphAccessor()
                        .getResultPartitionOrThrow(consumedPartitionId)
                        .getProducer()
                        .getJobvertexId());
        int inputPartition = consumedPartitionId.getPartitionNumber();
        if (!mapInputPartitionCounts.containsKey(inputPartition)) {
            mapInputPartitionCounts.put(inputPartition, 1);
        } else {
            mapInputPartitionCounts.put(
                    inputPartition, mapInputPartitionCounts.get(inputPartition) + 1);
        }
    }
    assertEquals(2, mapInputPartitionCounts.size());
    for (int count : mapInputPartitionCounts.values()) {
        assertEquals(2, count);
    }
    assertEquals(1, execSinkVertex.getInputs().size());
    assertEquals(2, execSinkVertex.getParallelism());
    ExecutionVertex[] sinkTaskVertices = execSinkVertex.getTaskVertices();
    InternalExecutionGraphAccessor executionGraphAccessor = execSinkVertex.getGraph();
    // verify each sink instance has two inputs from the map and that each map subpartition
    // only occurs in one unique input edge
    Set<Integer> mapSubpartitions = new HashSet<>();
    for (ExecutionVertex sinkTaskVertex : sinkTaskVertices) {
        assertEquals(1, sinkTaskVertex.getNumberOfInputs());
        assertEquals(2, sinkTaskVertex.getConsumedPartitionGroup(0).size());
        for (IntermediateResultPartitionID consumedPartitionId :
                sinkTaskVertex.getConsumedPartitionGroup(0)) {
            IntermediateResultPartition consumedPartition =
                    executionGraphAccessor.getResultPartitionOrThrow(consumedPartitionId);
            assertEquals(mapVertex.getID(), consumedPartition.getProducer().getJobvertexId());
            int partitionNumber = consumedPartition.getPartitionNumber();
            assertFalse(mapSubpartitions.contains(partitionNumber));
            mapSubpartitions.add(partitionNumber);
        }
    }
    assertEquals(4, mapSubpartitions.size());
}
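All three usages above rely on the same lookup: given an IntermediateResultPartitionID, the InternalExecutionGraphAccessor obtained from an ExecutionVertex resolves the IntermediateResultPartition and, from it, the producing ExecutionVertex. Stripped to its core, with vertex and consumedPartitionId as assumed placeholders:

// obtain the accessor from any execution vertex of the graph
InternalExecutionGraphAccessor accessor = vertex.getExecutionGraphAccessor();
// look up the consumed result partition, failing fast if it is unknown
IntermediateResultPartition partition = accessor.getResultPartitionOrThrow(consumedPartitionId);
// the partition knows which execution vertex produces it
ExecutionVertex producer = partition.getProducer();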