Use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in project flink by apache.
The class EdgeManagerTest, method testGetConsumedPartitionGroup.
@Test
public void testGetConsumedPartitionGroup() throws Exception {
    JobVertex v1 = new JobVertex("source");
    JobVertex v2 = new JobVertex("sink");
    v1.setParallelism(2);
    v2.setParallelism(2);
    v1.setInvokableClass(NoOpInvokable.class);
    v2.setInvokableClass(NoOpInvokable.class);
    v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

    JobGraph jobGraph = JobGraphTestUtils.batchJobGraph(v1, v2);
    SchedulerBase scheduler = SchedulerTestingUtils.createScheduler(
            jobGraph, ComponentMainThreadExecutorServiceAdapter.forMainThread());
    ExecutionGraph eg = scheduler.getExecutionGraph();

    // The same group must be reachable via the consuming ExecutionVertex, ...
    ConsumedPartitionGroup groupRetrievedByDownstreamVertex = Objects.requireNonNull(
            eg.getJobVertex(v2.getID())).getTaskVertices()[0].getAllConsumedPartitionGroups().get(0);

    // ... via the producing IntermediateResultPartition, ...
    IntermediateResultPartition consumedPartition = Objects.requireNonNull(
            eg.getJobVertex(v1.getID())).getProducedDataSets()[0].getPartitions()[0];
    ConsumedPartitionGroup groupRetrievedByIntermediateResultPartition =
            consumedPartition.getConsumedPartitionGroups().get(0);
    assertEquals(groupRetrievedByDownstreamVertex, groupRetrievedByIntermediateResultPartition);

    // ... and via the scheduling topology.
    ConsumedPartitionGroup groupRetrievedByScheduledResultPartition = scheduler
            .getExecutionGraph()
            .getSchedulingTopology()
            .getResultPartition(consumedPartition.getPartitionId())
            .getConsumedPartitionGroups()
            .get(0);
    assertEquals(groupRetrievedByDownstreamVertex, groupRetrievedByScheduledResultPartition);
}
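A hedged follow-up, not part of the original test: with ALL_TO_ALL and two sink subtasks, both subtasks are expected to share the one group, which spans both source partitions. Extra assertions along these lines could read (the variable name groupOfSecondSubtask is illustrative):

ConsumedPartitionGroup groupOfSecondSubtask = Objects.requireNonNull(
        eg.getJobVertex(v2.getID())).getTaskVertices()[1].getAllConsumedPartitionGroups().get(0);
// assumption: ALL_TO_ALL registers a single shared group for all consumer subtasks
assertEquals(groupRetrievedByDownstreamVertex, groupOfSecondSubtask);
assertEquals(2, groupRetrievedByDownstreamVertex.size());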
Use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in project flink by apache.
The class DefaultSchedulingPipelinedRegionTest, method returnsIncidentBlockingPartitions.
/**
 * Tests if the consumed inputs of the pipelined regions are computed correctly using the job
 * graph below. Blocking edges are a -> b (marked "-+-") and c -> e (marked "X"); all other
 * edges are pipelined:
 *
 * <pre>
 *          c
 *        /   X
 * a -+- b     e
 *        \   /
 *          d
 * </pre>
 *
 * <p>Pipelined regions: {a}, {b, c, d, e}
 */
@Test
public void returnsIncidentBlockingPartitions() throws Exception {
    final JobVertex a = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex b = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex c = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex d = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex e = ExecutionGraphTestUtils.createNoOpVertex(1);

    b.connectNewDataSetAsInput(a, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    c.connectNewDataSetAsInput(b, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    d.connectNewDataSetAsInput(b, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    e.connectNewDataSetAsInput(c, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    e.connectNewDataSetAsInput(d, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);

    final DefaultExecutionGraph simpleTestGraph =
            ExecutionGraphTestUtils.createSimpleTestGraph(a, b, c, d, e);
    final DefaultExecutionTopology topology =
            DefaultExecutionTopology.fromExecutionGraph(simpleTestGraph);

    final DefaultSchedulingPipelinedRegion firstPipelinedRegion =
            topology.getPipelinedRegionOfVertex(new ExecutionVertexID(a.getID(), 0));
    final DefaultSchedulingPipelinedRegion secondPipelinedRegion =
            topology.getPipelinedRegionOfVertex(new ExecutionVertexID(e.getID(), 0));

    final DefaultExecutionVertex vertexB0 =
            topology.getVertex(new ExecutionVertexID(b.getID(), 0));
    final IntermediateResultPartitionID b0ConsumedResultPartition =
            Iterables.getOnlyElement(vertexB0.getConsumedResults()).getId();

    final Set<IntermediateResultPartitionID> secondPipelinedRegionConsumedResults = new HashSet<>();
    for (ConsumedPartitionGroup consumedPartitionGroup :
            secondPipelinedRegion.getAllBlockingConsumedPartitionGroups()) {
        for (IntermediateResultPartitionID partitionId : consumedPartitionGroup) {
            if (!secondPipelinedRegion.contains(
                    topology.getResultPartition(partitionId).getProducer().getId())) {
                secondPipelinedRegionConsumedResults.add(partitionId);
            }
        }
    }

    assertThat(
            firstPipelinedRegion.getAllBlockingConsumedPartitionGroups().iterator().hasNext(),
            is(false));
    assertThat(secondPipelinedRegionConsumedResults, contains(b0ConsumedResultPartition));
}
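The nested loop above is the general pattern for computing a region's external blocking inputs. A minimal helper sketch, using only the APIs already shown; the helper name externalBlockingInputs is hypothetical, not part of Flink:

private static Set<IntermediateResultPartitionID> externalBlockingInputs(
        DefaultSchedulingPipelinedRegion region, DefaultExecutionTopology topology) {
    Set<IntermediateResultPartitionID> result = new HashSet<>();
    for (ConsumedPartitionGroup group : region.getAllBlockingConsumedPartitionGroups()) {
        for (IntermediateResultPartitionID partitionId : group) {
            // keep only partitions whose producer lies outside the region itself
            if (!region.contains(topology.getResultPartition(partitionId).getProducer().getId())) {
                result.add(partitionId);
            }
        }
    }
    return result;
}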
Use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in project flink by apache.
The class DefaultExecutionVertexTest, method setUp.
@Before
public void setUp() throws Exception {
    intermediateResultPartitionId = new IntermediateResultPartitionID();

    DefaultResultPartition schedulingResultPartition = new DefaultResultPartition(
            intermediateResultPartitionId,
            new IntermediateDataSetID(),
            BLOCKING,
            () -> ResultPartitionState.CREATED,
            () -> { throw new UnsupportedOperationException(); },
            () -> { throw new UnsupportedOperationException(); });
    producerVertex = new DefaultExecutionVertex(
            new ExecutionVertexID(new JobVertexID(), 0),
            Collections.singletonList(schedulingResultPartition),
            stateSupplier,
            Collections.emptyList(),
            partitionID -> { throw new UnsupportedOperationException(); });
    schedulingResultPartition.setProducer(producerVertex);

    List<ConsumedPartitionGroup> consumedPartitionGroups = Collections.singletonList(
            ConsumedPartitionGroup.fromSinglePartition(intermediateResultPartitionId));
    Map<IntermediateResultPartitionID, DefaultResultPartition> resultPartitionById =
            Collections.singletonMap(intermediateResultPartitionId, schedulingResultPartition);
    consumerVertex = new DefaultExecutionVertex(
            new ExecutionVertexID(new JobVertexID(), 0),
            Collections.emptyList(),
            stateSupplier,
            consumedPartitionGroups,
            resultPartitionById::get);
}
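For context, a minimal sketch of the single-partition factory used above, assuming the Flink version of this snippet where fromSinglePartition takes only the partition ID:

IntermediateResultPartitionID partitionId = new IntermediateResultPartitionID();
ConsumedPartitionGroup group = ConsumedPartitionGroup.fromSinglePartition(partitionId);
// the group is iterable and should contain exactly the partition it was created from
for (IntermediateResultPartitionID id : group) {
    assertEquals(partitionId, id);
}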
Use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in project flink by apache.
The class DefaultExecutionGraphConstructionTest, method testRegisterConsumedPartitionGroupToEdgeManager.
@Test
public void testRegisterConsumedPartitionGroupToEdgeManager() throws Exception {
    JobVertex v1 = new JobVertex("source");
    JobVertex v2 = new JobVertex("sink");
    v1.setParallelism(2);
    v2.setParallelism(2);
    v2.connectNewDataSetAsInput(v1, DistributionPattern.ALL_TO_ALL, ResultPartitionType.BLOCKING);

    List<JobVertex> ordered = new ArrayList<>(Arrays.asList(v1, v2));
    ExecutionGraph eg = createDefaultExecutionGraph(ordered);
    eg.attachJobGraph(ordered);

    IntermediateResult result =
            Objects.requireNonNull(eg.getJobVertex(v1.getID())).getProducedDataSets()[0];
    IntermediateResultPartition partition1 = result.getPartitions()[0];
    IntermediateResultPartition partition2 = result.getPartitions()[1];

    // With ALL_TO_ALL, both partitions must be registered to the same group.
    assertEquals(
            partition1.getConsumedPartitionGroups().get(0),
            partition2.getConsumedPartitionGroups().get(0));

    ConsumedPartitionGroup consumedPartitionGroup = partition1.getConsumedPartitionGroups().get(0);
    Set<IntermediateResultPartitionID> partitionIds = new HashSet<>();
    for (IntermediateResultPartitionID partitionId : consumedPartitionGroup) {
        partitionIds.add(partitionId);
    }
    assertThat(partitionIds, containsInAnyOrder(partition1.getPartitionId(), partition2.getPartitionId()));
}
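Since ConsumedPartitionGroup is iterable over IntermediateResultPartitionID, the collection loop above can be written more compactly with the default Iterable.forEach; a trivially equivalent sketch:

Set<IntermediateResultPartitionID> partitionIds = new HashSet<>();
consumedPartitionGroup.forEach(partitionIds::add);
assertThat(partitionIds, containsInAnyOrder(partition1.getPartitionId(), partition2.getPartitionId()));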
Use of org.apache.flink.runtime.scheduler.strategy.ConsumedPartitionGroup in project flink by apache.
The class ExecutionGraphTestUtils, method verifyGeneratedExecutionJobVertex.
// ------------------------------------------------------------------------
// graph vertex verifications
// ------------------------------------------------------------------------
/**
 * Verifies the generated {@link ExecutionJobVertex} for a given {@link JobVertex} in an {@link
 * ExecutionGraph}.
 *
 * @param executionGraph the generated execution graph
 * @param originJobVertex the job vertex to verify
 * @param inputJobVertices upstream vertices of the verified vertex, used to check the inputs of
 *     the generated vertex; may be null if the vertex has no inputs
 * @param outputJobVertices downstream vertices of the verified vertex, used to check the produced
 *     data sets of the generated vertex; may be null if the vertex produces no data sets
 */
public static void verifyGeneratedExecutionJobVertex(
        ExecutionGraph executionGraph,
        JobVertex originJobVertex,
        @Nullable List<JobVertex> inputJobVertices,
        @Nullable List<JobVertex> outputJobVertices) {

    ExecutionJobVertex ejv = executionGraph.getAllVertices().get(originJobVertex.getID());
    assertNotNull(ejv);

    // verify basic properties
    assertEquals(originJobVertex.getParallelism(), ejv.getParallelism());
    assertEquals(executionGraph.getJobID(), ejv.getJobId());
    assertEquals(originJobVertex.getID(), ejv.getJobVertexId());
    assertEquals(originJobVertex, ejv.getJobVertex());

    // verify produced data sets
    if (outputJobVertices == null) {
        assertEquals(0, ejv.getProducedDataSets().length);
    } else {
        assertEquals(outputJobVertices.size(), ejv.getProducedDataSets().length);
        for (int i = 0; i < outputJobVertices.size(); i++) {
            assertEquals(
                    originJobVertex.getProducedDataSets().get(i).getId(),
                    ejv.getProducedDataSets()[i].getId());
            // every produced data set has one partition per subtask
            assertEquals(
                    originJobVertex.getParallelism(),
                    ejv.getProducedDataSets()[i].getPartitions().length);
        }
    }

    // verify task vertices for their basic properties and their inputs
    assertEquals(originJobVertex.getParallelism(), ejv.getTaskVertices().length);

    int subtaskIndex = 0;
    for (ExecutionVertex ev : ejv.getTaskVertices()) {
        assertEquals(executionGraph.getJobID(), ev.getJobId());
        assertEquals(originJobVertex.getID(), ev.getJobvertexId());
        assertEquals(originJobVertex.getParallelism(), ev.getTotalNumberOfParallelSubtasks());
        assertEquals(subtaskIndex, ev.getParallelSubtaskIndex());

        if (inputJobVertices == null) {
            assertEquals(0, ev.getNumberOfInputs());
        } else {
            assertEquals(inputJobVertices.size(), ev.getNumberOfInputs());
            for (int i = 0; i < inputJobVertices.size(); i++) {
                ConsumedPartitionGroup consumedPartitionGroup = ev.getConsumedPartitionGroup(i);
                assertEquals(inputJobVertices.get(i).getParallelism(), consumedPartitionGroup.size());

                // the group is expected to list all upstream partitions in partition order
                int expectedPartitionNum = 0;
                for (IntermediateResultPartitionID consumedPartitionId : consumedPartitionGroup) {
                    assertEquals(expectedPartitionNum, consumedPartitionId.getPartitionNumber());
                    expectedPartitionNum++;
                }
            }
        }
        subtaskIndex++;
    }
}
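A hedged usage sketch for this helper, mirroring the two-vertex source/sink graphs built in the tests above (eg, v1, and v2 are illustrative names from those tests): the source has no inputs, and the sink produces no data sets.

verifyGeneratedExecutionJobVertex(eg, v1, null, Collections.singletonList(v2));
verifyGeneratedExecutionJobVertex(eg, v2, Collections.singletonList(v1), null);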