Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by apache.
From the class DefaultSchedulerTest, method restartFailedTask.
@Test
public void restartFailedTask() {
    final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();
    final JobVertex onlyJobVertex = getOnlyJobVertex(jobGraph);
    final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);
    final ArchivedExecutionVertex archivedExecutionVertex =
            Iterables.getOnlyElement(
                    scheduler.requestJob().getArchivedExecutionGraph().getAllExecutionVertices());
    final ExecutionAttemptID attemptId =
            archivedExecutionVertex.getCurrentExecutionAttempt().getAttemptId();
    scheduler.updateTaskExecutionState(createFailedTaskExecutionState(attemptId));
    taskRestartExecutor.triggerScheduledTasks();
    final List<ExecutionVertexID> deployedExecutionVertices =
            testExecutionVertexOperations.getDeployedVertices();
    final ExecutionVertexID executionVertexId = new ExecutionVertexID(onlyJobVertex.getID(), 0);
    assertThat(deployedExecutionVertices, contains(executionVertexId, executionVertexId));
}
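The double contains(executionVertexId, executionVertexId) assertion works because ExecutionVertexID has value semantics: the ID is fully determined by its JobVertexID and subtask index, so the ID rebuilt after the restart equals the one recorded for the first deployment. A minimal sketch of that contract (the equals/hashCode behavior is inferred from the assertion above, not spelled out in the snippet):

JobVertexID jobVertexId = new JobVertexID();
ExecutionVertexID first = new ExecutionVertexID(jobVertexId, 0);
ExecutionVertexID second = new ExecutionVertexID(jobVertexId, 0);
// Equal components yield equal IDs, which is what contains(...) relies on.
assertThat(first, equalTo(second));
assertThat(first.hashCode(), is(second.hashCode()));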
Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by apache.
From the class DefaultSchedulingPipelinedRegionTest, method returnsIncidentBlockingPartitions.
/**
 * Tests if the consumed inputs of the pipelined regions are computed correctly using the job
 * graph below.
 *
 * <pre>
 *          c
 *        /  X
 * a -+- b    e
 *        \  /
 *          d
 * </pre>
 *
 * <p>Pipelined regions: {a}, {b, c, d, e}
 */
@Test
public void returnsIncidentBlockingPartitions() throws Exception {
    final JobVertex a = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex b = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex c = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex d = ExecutionGraphTestUtils.createNoOpVertex(1);
    final JobVertex e = ExecutionGraphTestUtils.createNoOpVertex(1);
    b.connectNewDataSetAsInput(a, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    c.connectNewDataSetAsInput(b, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    d.connectNewDataSetAsInput(b, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    e.connectNewDataSetAsInput(c, DistributionPattern.POINTWISE, ResultPartitionType.BLOCKING);
    e.connectNewDataSetAsInput(d, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
    final DefaultExecutionGraph simpleTestGraph =
            ExecutionGraphTestUtils.createSimpleTestGraph(a, b, c, d, e);
    final DefaultExecutionTopology topology =
            DefaultExecutionTopology.fromExecutionGraph(simpleTestGraph);
    final DefaultSchedulingPipelinedRegion firstPipelinedRegion =
            topology.getPipelinedRegionOfVertex(new ExecutionVertexID(a.getID(), 0));
    final DefaultSchedulingPipelinedRegion secondPipelinedRegion =
            topology.getPipelinedRegionOfVertex(new ExecutionVertexID(e.getID(), 0));
    final DefaultExecutionVertex vertexB0 =
            topology.getVertex(new ExecutionVertexID(b.getID(), 0));
    final IntermediateResultPartitionID b0ConsumedResultPartition =
            Iterables.getOnlyElement(vertexB0.getConsumedResults()).getId();
    final Set<IntermediateResultPartitionID> secondPipelinedRegionConsumedResults = new HashSet<>();
    for (ConsumedPartitionGroup consumedPartitionGroup :
            secondPipelinedRegion.getAllBlockingConsumedPartitionGroups()) {
        for (IntermediateResultPartitionID partitionId : consumedPartitionGroup) {
            if (!secondPipelinedRegion.contains(
                    topology.getResultPartition(partitionId).getProducer().getId())) {
                secondPipelinedRegionConsumedResults.add(partitionId);
            }
        }
    }
    assertThat(
            firstPipelinedRegion.getAllBlockingConsumedPartitionGroups().iterator().hasNext(),
            is(false));
    assertThat(secondPipelinedRegionConsumedResults, contains(b0ConsumedResultPartition));
}
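The nested loop is the technique under test: a blocking partition counts as an incident input of a region exactly when its producer vertex lies outside that region. Extracted as a standalone helper, it reads as below (a hypothetical method for illustration, built only from the calls already used in the test):

private static Set<IntermediateResultPartitionID> getExternalBlockingInputs(
        final DefaultSchedulingPipelinedRegion region,
        final DefaultExecutionTopology topology) {
    final Set<IntermediateResultPartitionID> externalInputs = new HashSet<>();
    for (ConsumedPartitionGroup group : region.getAllBlockingConsumedPartitionGroups()) {
        for (IntermediateResultPartitionID partitionId : group) {
            // The producer's ExecutionVertexID decides region membership.
            if (!region.contains(topology.getResultPartition(partitionId).getProducer().getId())) {
                externalInputs.add(partitionId);
            }
        }
    }
    return externalInputs;
}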
Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by apache.
From the class DefaultExecutionTopologyTest, method testExistingRegionsAreNotAffectedDuringTopologyUpdate.
@Test
public void testExistingRegionsAreNotAffectedDuringTopologyUpdate() throws Exception {
    final JobVertex[] jobVertices = createJobVertices(BLOCKING);
    executionGraph = createDynamicGraph(jobVertices);
    adapter = DefaultExecutionTopology.fromExecutionGraph(executionGraph);
    final ExecutionJobVertex ejv1 = executionGraph.getJobVertex(jobVertices[0].getID());
    final ExecutionJobVertex ejv2 = executionGraph.getJobVertex(jobVertices[1].getID());
    executionGraph.initializeJobVertex(ejv1, 0L);
    adapter.notifyExecutionGraphUpdated(executionGraph, Collections.singletonList(ejv1));
    SchedulingPipelinedRegion regionOld =
            adapter.getPipelinedRegionOfVertex(new ExecutionVertexID(ejv1.getJobVertexId(), 0));
    executionGraph.initializeJobVertex(ejv2, 0L);
    adapter.notifyExecutionGraphUpdated(executionGraph, Collections.singletonList(ejv2));
    SchedulingPipelinedRegion regionNew =
            adapter.getPipelinedRegionOfVertex(new ExecutionVertexID(ejv1.getJobVertexId(), 0));
    assertSame(regionOld, regionNew);
}
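assertSame only passes if repeated lookups with a freshly constructed ExecutionVertexID resolve to the same region object, i.e. the ID acts as a stable, value-based key across topology updates. A short sketch of that key property (assuming the value semantics the assertion depends on):

Map<ExecutionVertexID, SchedulingPipelinedRegion> regionsByVertex = new HashMap<>();
ExecutionVertexID key = new ExecutionVertexID(ejv1.getJobVertexId(), 0);
regionsByVertex.put(key, adapter.getPipelinedRegionOfVertex(key));
// A new, equal ID built after the update still finds the cached region.
assertSame(regionOld, regionsByVertex.get(new ExecutionVertexID(ejv1.getJobVertexId(), 0)));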
Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by apache.
From the class DefaultResultPartitionTest, method testGetConsumerVertexGroup.
@Test
public void testGetConsumerVertexGroup() {
    assertFalse(resultPartition.getConsumerVertexGroup().isPresent());
    // test update consumers
    ExecutionVertexID executionVertexId = new ExecutionVertexID(new JobVertexID(), 0);
    consumerVertexGroups.put(
            resultPartition.getId(), ConsumerVertexGroup.fromSingleVertex(executionVertexId));
    assertTrue(resultPartition.getConsumerVertexGroup().isPresent());
    assertThat(resultPartition.getConsumerVertexGroup().get(), contains(executionVertexId));
}
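ConsumerVertexGroup.fromSingleVertex wraps a single ExecutionVertexID in an iterable group, which is why the Hamcrest contains matcher can be applied to it directly. A minimal sketch (only the factory method and iteration seen above are assumed):

ExecutionVertexID consumer = new ExecutionVertexID(new JobVertexID(), 0);
ConsumerVertexGroup group = ConsumerVertexGroup.fromSingleVertex(consumer);
for (ExecutionVertexID id : group) {
    // The group iterates over exactly the one vertex it was built from.
    assertThat(id, equalTo(consumer));
}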
Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink by apache.
From the class TestExecutionSlotAllocator, method createSlotVertexAssignments.
private List<SlotExecutionVertexAssignment> createSlotVertexAssignments(
        final Collection<ExecutionVertexID> executionVertexIds) {
    final List<SlotExecutionVertexAssignment> result = new ArrayList<>();
    for (ExecutionVertexID executionVertexId : executionVertexIds) {
        final CompletableFuture<LogicalSlot> logicalSlotFuture = new CompletableFuture<>();
        result.add(new SlotExecutionVertexAssignment(executionVertexId, logicalSlotFuture));
    }
    return result;
}
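Each returned assignment pairs an ExecutionVertexID with a still-incomplete slot future, leaving the test in control of when, or whether, a slot arrives. A hedged usage sketch (the two getters are assumed to mirror the constructor arguments; they do not appear in the snippet above):

for (SlotExecutionVertexAssignment assignment : createSlotVertexAssignments(executionVertexIds)) {
    // Simulate an allocation failure for the pending slot request.
    assignment.getLogicalSlotFuture()
            .completeExceptionally(
                    new Exception("no slot for " + assignment.getExecutionVertexId()));
}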