Example 71 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.

From the class DefaultSchedulerTest, the method failJobWillIncrementVertexVersions.

@Test
public void failJobWillIncrementVertexVersions() {
    final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();
    final JobVertex onlyJobVertex = getOnlyJobVertex(jobGraph);
    final ExecutionVertexID onlyExecutionVertexId = new ExecutionVertexID(onlyJobVertex.getID(), 0);
    final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);
    // capture the vertex version before the job is failed
    final ExecutionVertexVersion executionVertexVersion =
            executionVertexVersioner.getExecutionVertexVersion(onlyExecutionVertexId);
    scheduler.failJob(new FlinkException("Test failure."), System.currentTimeMillis());
    // failing the job must bump the version, so the previously captured one is now stale
    assertTrue(executionVertexVersioner.isModified(executionVertexVersion));
}
Also used: JobGraph (org.apache.flink.runtime.jobgraph.JobGraph), JobVertex (org.apache.flink.runtime.jobgraph.JobVertex), ExecutionVertexID (org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID), FlinkException (org.apache.flink.util.FlinkException), AdaptiveSchedulerTest (org.apache.flink.runtime.scheduler.adaptive.AdaptiveSchedulerTest), Test (org.junit.Test)
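
The lookup pattern above (build a fresh ExecutionVertexID, then ask executionVertexVersioner for its version) relies on the ID being a plain value object: a JobVertexID plus a subtask index with value-based equality. Below is a minimal, self-contained sketch of that assumption; it uses only the constructor shown in the test and is illustrative rather than a statement about Flink internals.

import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID;

public class ExecutionVertexIdEqualitySketch {

    public static void main(String[] args) {
        final JobVertexID jobVertexId = new JobVertexID();

        // Two IDs built independently from the same (jobVertexId, subtaskIndex) pair.
        final ExecutionVertexID first = new ExecutionVertexID(jobVertexId, 0);
        final ExecutionVertexID second = new ExecutionVertexID(jobVertexId, 0);

        // Value-based equality (an assumption here) is what lets the test build a fresh ID
        // and hand it to executionVertexVersioner.getExecutionVertexVersion(...):
        // the versioner can resolve the vertex without the original ID instance.
        System.out.println(first.equals(second)); // expected: true
        System.out.println(first.hashCode() == second.hashCode()); // expected: true
    }
}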

Example 72 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.

From the class DefaultSchedulerTest, the method failGlobalWhenRestoringStateFails.

@Test
public void failGlobalWhenRestoringStateFails() throws Exception {
    final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();
    final JobVertex onlyJobVertex = getOnlyJobVertex(jobGraph);
    enableCheckpointing(jobGraph);
    final CountDownLatch checkpointTriggeredLatch = getCheckpointTriggeredLatch();
    final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);
    final ArchivedExecutionVertex onlyExecutionVertex = Iterables.getOnlyElement(scheduler.requestJob().getArchivedExecutionGraph().getAllExecutionVertices());
    final ExecutionAttemptID attemptId = onlyExecutionVertex.getCurrentExecutionAttempt().getAttemptId();
    transitionToRunning(scheduler, attemptId);
    final CheckpointCoordinator checkpointCoordinator = getCheckpointCoordinator(scheduler);
    // register a master hook to fail state restore
    final TestMasterHook masterHook = TestMasterHook.fromId("testHook");
    masterHook.enableFailOnRestore();
    checkpointCoordinator.addMasterHook(masterHook);
    // complete one checkpoint for state restore
    checkpointCoordinator.triggerCheckpoint(false);
    checkpointTriggeredLatch.await();
    final long checkpointId = checkpointCoordinator.getPendingCheckpoints().keySet().iterator().next();
    acknowledgePendingCheckpoint(scheduler, checkpointId);
    scheduler.updateTaskExecutionState(createFailedTaskExecutionState(attemptId));
    taskRestartExecutor.triggerScheduledTasks();
    final List<ExecutionVertexID> deployedExecutionVertices = testExecutionVertexOperations.getDeployedVertices();
    // the first task failover should be skipped on state restore failure
    final ExecutionVertexID executionVertexId = new ExecutionVertexID(onlyJobVertex.getID(), 0);
    assertThat(deployedExecutionVertices, contains(executionVertexId));
    // a global failure should be triggered on state restore failure
    masterHook.disableFailOnRestore();
    taskRestartExecutor.triggerScheduledTasks();
    assertThat(deployedExecutionVertices, contains(executionVertexId, executionVertexId));
}
Also used: TestMasterHook (org.apache.flink.runtime.checkpoint.hooks.TestMasterHook), JobGraph (org.apache.flink.runtime.jobgraph.JobGraph), JobVertex (org.apache.flink.runtime.jobgraph.JobVertex), ExecutionAttemptID (org.apache.flink.runtime.executiongraph.ExecutionAttemptID), CheckpointCoordinator (org.apache.flink.runtime.checkpoint.CheckpointCoordinator), SchedulerTestingUtils.getCheckpointCoordinator (org.apache.flink.runtime.scheduler.SchedulerTestingUtils.getCheckpointCoordinator), ExecutionVertexID (org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID), ArchivedExecutionVertex (org.apache.flink.runtime.executiongraph.ArchivedExecutionVertex), CountDownLatch (java.util.concurrent.CountDownLatch), AdaptiveSchedulerTest (org.apache.flink.runtime.scheduler.adaptive.AdaptiveSchedulerTest), Test (org.junit.Test)
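
The checkpointTriggeredLatch handshake in this test is a standard java.util.concurrent.CountDownLatch pattern: the trigger side counts down once the checkpoint request has gone out, and the test thread blocks until then before reading getPendingCheckpoints(). A minimal sketch of that pattern follows; CheckpointTriggerProbe and its method names are hypothetical stand-ins for the helpers behind getCheckpointTriggeredLatch() in DefaultSchedulerTest.

import java.util.concurrent.CountDownLatch;

// Illustrative only: a hypothetical stand-in for the test infrastructure
// behind getCheckpointTriggeredLatch() in DefaultSchedulerTest.
public class CheckpointTriggerProbe {

    private final CountDownLatch checkpointTriggeredLatch = new CountDownLatch(1);

    // Invoked by the (hypothetical) checkpoint trigger hook once a checkpoint
    // request has been issued; releases any test thread blocked in awaitTrigger().
    public void onCheckpointTriggered() {
        checkpointTriggeredLatch.countDown();
    }

    // Called from the test thread after checkpointCoordinator.triggerCheckpoint(false)
    // so the test only inspects the pending checkpoints once the trigger has happened.
    public void awaitTrigger() throws InterruptedException {
        checkpointTriggeredLatch.await();
    }
}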

Example 73 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.

From the class DefaultSchedulerTest, the method startScheduling.

@Test
public void startScheduling() {
    final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();
    final JobVertex onlyJobVertex = getOnlyJobVertex(jobGraph);
    createSchedulerAndStartScheduling(jobGraph);
    final List<ExecutionVertexID> deployedExecutionVertices = testExecutionVertexOperations.getDeployedVertices();
    final ExecutionVertexID executionVertexId = new ExecutionVertexID(onlyJobVertex.getID(), 0);
    assertThat(deployedExecutionVertices, contains(executionVertexId));
}
Also used: JobGraph (org.apache.flink.runtime.jobgraph.JobGraph), JobVertex (org.apache.flink.runtime.jobgraph.JobVertex), ExecutionVertexID (org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID), AdaptiveSchedulerTest (org.apache.flink.runtime.scheduler.adaptive.AdaptiveSchedulerTest), Test (org.junit.Test)

Example 74 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.

From the class DefaultSchedulerTest, the method deployTasksOnlyWhenAllSlotRequestsAreFulfilled.

@Test
public void deployTasksOnlyWhenAllSlotRequestsAreFulfilled() throws Exception {
    final JobGraph jobGraph = singleJobVertexJobGraph(4);
    final JobVertexID onlyJobVertexId = getOnlyJobVertex(jobGraph).getID();
    testExecutionSlotAllocator.disableAutoCompletePendingRequests();
    final TestSchedulingStrategy.Factory schedulingStrategyFactory = new TestSchedulingStrategy.Factory();
    final DefaultScheduler scheduler = createScheduler(jobGraph, ComponentMainThreadExecutorServiceAdapter.forMainThread(), schedulingStrategyFactory);
    final TestSchedulingStrategy schedulingStrategy = schedulingStrategyFactory.getLastCreatedSchedulingStrategy();
    scheduler.startScheduling();
    final List<ExecutionVertexID> verticesToSchedule =
            Arrays.asList(
                    new ExecutionVertexID(onlyJobVertexId, 0),
                    new ExecutionVertexID(onlyJobVertexId, 1),
                    new ExecutionVertexID(onlyJobVertexId, 2),
                    new ExecutionVertexID(onlyJobVertexId, 3));
    schedulingStrategy.schedule(verticesToSchedule);
    assertThat(testExecutionVertexOperations.getDeployedVertices(), hasSize(0));
    testExecutionSlotAllocator.completePendingRequest(verticesToSchedule.get(0));
    assertThat(testExecutionVertexOperations.getDeployedVertices(), hasSize(0));
    testExecutionSlotAllocator.completePendingRequests();
    assertThat(testExecutionVertexOperations.getDeployedVertices(), hasSize(4));
}
Also used: JobGraph (org.apache.flink.runtime.jobgraph.JobGraph), ExecutionVertexID (org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID), JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID), TestingCheckpointRecoveryFactory (org.apache.flink.runtime.checkpoint.TestingCheckpointRecoveryFactory), SchedulingStrategyFactory (org.apache.flink.runtime.scheduler.strategy.SchedulingStrategyFactory), TestFailoverStrategyFactory (org.apache.flink.runtime.executiongraph.utils.TestFailoverStrategyFactory), CheckpointRecoveryFactory (org.apache.flink.runtime.checkpoint.CheckpointRecoveryFactory), TestSchedulingStrategy (org.apache.flink.runtime.scheduler.strategy.TestSchedulingStrategy), AdaptiveSchedulerTest (org.apache.flink.runtime.scheduler.adaptive.AdaptiveSchedulerTest), Test (org.junit.Test)
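
Listing the four IDs by hand is clear enough at parallelism 4, but the same list can be derived from the JobVertexID and the parallelism. A small sketch follows, assuming only the ExecutionVertexID(JobVertexID, int) constructor used in the test; the helper name forAllSubtasks is made up for illustration.

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID;

public final class ExecutionVertexIds {

    private ExecutionVertexIds() {
    }

    // Hypothetical helper: one ExecutionVertexID per subtask index in [0, parallelism).
    public static List<ExecutionVertexID> forAllSubtasks(JobVertexID jobVertexId, int parallelism) {
        return IntStream.range(0, parallelism)
                .mapToObj(subtaskIndex -> new ExecutionVertexID(jobVertexId, subtaskIndex))
                .collect(Collectors.toList());
    }
}

With such a helper, the list in the test would read forAllSubtasks(onlyJobVertexId, 4), equivalent to the hand-written Arrays.asList(...) above.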

Example 75 with ExecutionVertexID

Use of org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID in project flink-mirror by flink-ci.

From the class DefaultSchedulerTest, the method cancelJobWillIncrementVertexVersions.

@Test
public void cancelJobWillIncrementVertexVersions() {
    final JobGraph jobGraph = singleNonParallelJobVertexJobGraph();
    final JobVertex onlyJobVertex = getOnlyJobVertex(jobGraph);
    final ExecutionVertexID onlyExecutionVertexId = new ExecutionVertexID(onlyJobVertex.getID(), 0);
    final DefaultScheduler scheduler = createSchedulerAndStartScheduling(jobGraph);
    final ExecutionVertexVersion executionVertexVersion = executionVertexVersioner.getExecutionVertexVersion(onlyExecutionVertexId);
    scheduler.cancel();
    assertTrue(executionVertexVersioner.isModified(executionVertexVersion));
}
Also used: JobGraph (org.apache.flink.runtime.jobgraph.JobGraph), JobVertex (org.apache.flink.runtime.jobgraph.JobVertex), ExecutionVertexID (org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID), AdaptiveSchedulerTest (org.apache.flink.runtime.scheduler.adaptive.AdaptiveSchedulerTest), Test (org.junit.Test)

Aggregations

ExecutionVertexID (org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID): 231 usages
Test (org.junit.Test): 165 usages
JobVertexID (org.apache.flink.runtime.jobgraph.JobVertexID): 63 usages
JobGraph (org.apache.flink.runtime.jobgraph.JobGraph): 57 usages
JobVertex (org.apache.flink.runtime.jobgraph.JobVertex): 54 usages
SchedulingExecutionVertex (org.apache.flink.runtime.scheduler.strategy.SchedulingExecutionVertex): 51 usages
Set (java.util.Set): 48 usages
IntermediateResultPartitionID (org.apache.flink.runtime.jobgraph.IntermediateResultPartitionID): 45 usages
AdaptiveSchedulerTest (org.apache.flink.runtime.scheduler.adaptive.AdaptiveSchedulerTest): 45 usages
TestingSchedulingExecutionVertex (org.apache.flink.runtime.scheduler.strategy.TestingSchedulingExecutionVertex): 45 usages
Collection (java.util.Collection): 33 usages
TestingSchedulingTopology (org.apache.flink.runtime.scheduler.strategy.TestingSchedulingTopology): 33 usages
HashSet (java.util.HashSet): 30 usages
ExecutionVertex (org.apache.flink.runtime.executiongraph.ExecutionVertex): 30 usages
ArrayList (java.util.ArrayList): 27 usages
Map (java.util.Map): 27 usages
HashMap (java.util.HashMap): 24 usages
List (java.util.List): 24 usages
CompletableFuture (java.util.concurrent.CompletableFuture): 24 usages
TaskManagerLocation (org.apache.flink.runtime.taskmanager.TaskManagerLocation): 24 usages
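
The aggregation counts above show ExecutionVertexID appearing alongside Set, HashSet, Map and HashMap, which reflects its common role as a key type in these scheduler tests. A minimal sketch of that usage follows, assuming value-based equality/hashCode and a getJobVertexId() accessor; both are hedged assumptions here, not guaranteed by this page.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.flink.runtime.jobgraph.JobVertexID;
import org.apache.flink.runtime.scheduler.strategy.ExecutionVertexID;

public class ExecutionVertexIdGroupingSketch {

    public static void main(String[] args) {
        final JobVertexID sourceVertex = new JobVertexID();
        final JobVertexID sinkVertex = new JobVertexID();

        // Subtask-level IDs, e.g. as a test might collect them from deployed vertices.
        final Set<ExecutionVertexID> deployed = new HashSet<>();
        deployed.add(new ExecutionVertexID(sourceVertex, 0));
        deployed.add(new ExecutionVertexID(sourceVertex, 1));
        deployed.add(new ExecutionVertexID(sinkVertex, 0));

        // Group the subtask-level IDs back by their owning JobVertexID.
        final Map<JobVertexID, Set<ExecutionVertexID>> byJobVertex = new HashMap<>();
        for (ExecutionVertexID id : deployed) {
            byJobVertex.computeIfAbsent(id.getJobVertexId(), k -> new HashSet<>()).add(id);
        }

        System.out.println(byJobVertex.get(sourceVertex).size()); // expected: 2
        System.out.println(byJobVertex.get(sinkVertex).size()); // expected: 1
    }
}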