use of org.apache.flink.runtime.jobgraph.tasks.CheckpointCoordinatorConfiguration in project flink by apache.
the class ArchivedExecutionGraphTest method testCheckpointSettingsArchiving.
@Test
public void testCheckpointSettingsArchiving() {
final CheckpointCoordinatorConfiguration checkpointCoordinatorConfiguration =
        CheckpointCoordinatorConfiguration.builder().build();
final ArchivedExecutionGraph archivedGraph =
        ArchivedExecutionGraph.createSparseArchivedExecutionGraph(
                new JobID(),
                "TestJob",
                JobStatus.INITIALIZING,
                null,
                new JobCheckpointingSettings(checkpointCoordinatorConfiguration, null),
                System.currentTimeMillis());
assertContainsCheckpointSettings(archivedGraph);
}
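assertContainsCheckpointSettings is a helper of the surrounding test class and its body is not shown in this snippet. A minimal sketch of the kind of check it could perform, assuming only that the archived graph exposes the configuration through getCheckpointCoordinatorConfiguration(); the real helper in Flink's test code may assert more:
// Hedged sketch only; the actual assertContainsCheckpointSettings in ArchivedExecutionGraphTest
// may verify additional fields of the archived graph.
private static void assertContainsCheckpointSettings(ArchivedExecutionGraph archivedGraph) {
    assertNotNull(archivedGraph.getCheckpointCoordinatorConfiguration());
}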
use of org.apache.flink.runtime.jobgraph.tasks.CheckpointCoordinatorConfiguration in project flink by apache.
the class ArchivedExecutionGraphTest method setupExecutionGraph.
@BeforeClass
public static void setupExecutionGraph() throws Exception {
// -------------------------------------------------------------------------------------------------------------
// Setup
// -------------------------------------------------------------------------------------------------------------
JobVertexID v1ID = new JobVertexID();
JobVertexID v2ID = new JobVertexID();
JobVertex v1 = new JobVertex("v1", v1ID);
JobVertex v2 = new JobVertex("v2", v2ID);
v1.setParallelism(1);
v2.setParallelism(2);
v1.setInvokableClass(AbstractInvokable.class);
v2.setInvokableClass(AbstractInvokable.class);
ExecutionConfig config = new ExecutionConfig();
config.setRestartStrategy(new RestartStrategies.NoRestartStrategyConfiguration());
config.setParallelism(4);
config.enableObjectReuse();
config.setGlobalJobParameters(new TestJobParameters());
CheckpointCoordinatorConfiguration chkConfig =
        new CheckpointCoordinatorConfiguration(
                100, 100, 100, 1, CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION,
                true, false, 0, 0);
JobCheckpointingSettings checkpointingSettings = new JobCheckpointingSettings(chkConfig, null);
final JobGraph jobGraph =
        JobGraphBuilder.newStreamingJobGraphBuilder()
                .addJobVertices(asList(v1, v2))
                .setJobCheckpointingSettings(checkpointingSettings)
                .setExecutionConfig(config)
                .build();
SchedulerBase scheduler = SchedulerTestingUtils.createScheduler(jobGraph, ComponentMainThreadExecutorServiceAdapter.forMainThread());
runtimeGraph = scheduler.getExecutionGraph();
scheduler.startScheduling();
scheduler.updateTaskExecutionState(
        new TaskExecutionState(
                runtimeGraph.getAllExecutionVertices().iterator().next()
                        .getCurrentExecutionAttempt().getAttemptId(),
                ExecutionState.FAILED,
                new RuntimeException("Local failure")));
}
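The nine-argument constructor above is compact but hard to read. For comparison, a hedged sketch of the same configuration expressed with the builder used elsewhere on this page, assuming the positional arguments map to interval, timeout, min pause, max concurrent checkpoints, retention policy, exactly-once, unaligned checkpoints enabled, tolerable failure count, and a final timeout left at the builder's default (the exact parameter order can differ between Flink versions):
// Hedged builder equivalent of the positional constructor; the argument mapping is an assumption.
CheckpointCoordinatorConfiguration chkConfigViaBuilder =
        CheckpointCoordinatorConfiguration.builder()
                .setCheckpointInterval(100)
                .setCheckpointTimeout(100)
                .setMinPauseBetweenCheckpoints(100)
                .setMaxConcurrentCheckpoints(1)
                .setCheckpointRetentionPolicy(CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION)
                .setExactlyOnce(true)
                .setUnalignedCheckpointsEnabled(false)
                .setTolerableCheckpointFailureNumber(0)
                .build();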
use of org.apache.flink.runtime.jobgraph.tasks.CheckpointCoordinatorConfiguration in project flink by apache.
the class TestUtils method createJobGraphFromJobVerticesWithCheckpointing.
@Nonnull
public static JobGraph createJobGraphFromJobVerticesWithCheckpointing(SavepointRestoreSettings savepointRestoreSettings, JobVertex... jobVertices) {
// enable checkpointing which is required to resume from a savepoint
final CheckpointCoordinatorConfiguration checkpointCoordinatorConfiguration =
        CheckpointCoordinatorConfiguration.builder()
                .setCheckpointInterval(1000L)
                .setCheckpointTimeout(1000L)
                .setMinPauseBetweenCheckpoints(1000L)
                .setMaxConcurrentCheckpoints(1)
                .setCheckpointRetentionPolicy(CheckpointRetentionPolicy.NEVER_RETAIN_AFTER_TERMINATION)
                .setExactlyOnce(true)
                .setUnalignedCheckpointsEnabled(false)
                .setTolerableCheckpointFailureNumber(0)
                .build();
final JobCheckpointingSettings checkpointingSettings = new JobCheckpointingSettings(checkpointCoordinatorConfiguration, null);
return JobGraphBuilder.newStreamingJobGraphBuilder()
        .addJobVertices(Arrays.asList(jobVertices))
        .setJobCheckpointingSettings(checkpointingSettings)
        .setSavepointRestoreSettings(savepointRestoreSettings)
        .build();
}
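A hypothetical call site for this helper; the savepoint path and vertex name are placeholders made up for illustration, while SavepointRestoreSettings.forPath(...) is the standard way to request a restore from a savepoint:
// Hypothetical usage of the helper above; path and vertex are illustrative only.
JobVertex source = new JobVertex("source");
source.setInvokableClass(AbstractInvokable.class);
JobGraph jobGraph =
        createJobGraphFromJobVerticesWithCheckpointing(
                SavepointRestoreSettings.forPath("/tmp/savepoint-abc"), source);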
use of org.apache.flink.runtime.jobgraph.tasks.CheckpointCoordinatorConfiguration in project flink by apache.
the class CheckpointCoordinatorTest method testMinDelayBetweenSavepoints.
/**
* Tests that no minimum delay between savepoints is enforced.
*/
@Test
public void testMinDelayBetweenSavepoints() throws Exception {
CheckpointCoordinatorConfiguration chkConfig =
        new CheckpointCoordinatorConfiguration.CheckpointCoordinatorConfigurationBuilder()
                // very long min delay => should not affect savepoints
                .setMinPauseBetweenCheckpoints(100000000L)
                .setMaxConcurrentCheckpoints(1)
                .build();
CheckpointCoordinator checkpointCoordinator =
        new CheckpointCoordinatorBuilder()
                .setCheckpointCoordinatorConfiguration(chkConfig)
                .setCompletedCheckpointStore(new StandaloneCompletedCheckpointStore(2))
                .setTimer(manuallyTriggeredScheduledExecutor)
                .build();
String savepointDir = tmpFolder.newFolder().getAbsolutePath();
CompletableFuture<CompletedCheckpoint> savepoint0 = checkpointCoordinator.triggerSavepoint(savepointDir, SavepointFormatType.CANONICAL);
assertFalse("Did not trigger savepoint", savepoint0.isDone());
CompletableFuture<CompletedCheckpoint> savepoint1 = checkpointCoordinator.triggerSavepoint(savepointDir, SavepointFormatType.CANONICAL);
assertFalse("Did not trigger savepoint", savepoint1.isDone());
}
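Both futures stay pending here because the coordinator hands the actual trigger work to its timer, which in this test is the manually triggered executor. A hedged continuation, not part of the original test, showing how the queued trigger tasks would start to run:
// Hedged continuation (not in the original test): drain the manual timer so the queued
// savepoint trigger tasks actually execute; completion additionally requires task ACKs,
// as in testConcurrentSavepoints below.
manuallyTriggeredScheduledExecutor.triggerAll();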
use of org.apache.flink.runtime.jobgraph.tasks.CheckpointCoordinatorConfiguration in project flink by apache.
the class CheckpointCoordinatorTest method testConcurrentSavepoints.
/**
* Tests that the savepoints can be triggered concurrently.
*/
@Test
public void testConcurrentSavepoints() throws Exception {
int numSavepoints = 5;
JobVertexID jobVertexID1 = new JobVertexID();
ExecutionGraph graph =
        new CheckpointCoordinatorTestingUtils.CheckpointExecutionGraphBuilder()
                .addJobVertex(jobVertexID1)
                .build();
ExecutionVertex vertex1 = graph.getJobVertex(jobVertexID1).getTaskVertices()[0];
ExecutionAttemptID attemptID1 = vertex1.getCurrentExecutionAttempt().getAttemptId();
StandaloneCheckpointIDCounter checkpointIDCounter = new StandaloneCheckpointIDCounter();
CheckpointCoordinatorConfiguration chkConfig =
        new CheckpointCoordinatorConfiguration.CheckpointCoordinatorConfigurationBuilder()
                // max one checkpoint at a time => should not affect savepoints
                .setMaxConcurrentCheckpoints(1)
                .build();
CheckpointCoordinator checkpointCoordinator =
        new CheckpointCoordinatorBuilder()
                .setExecutionGraph(graph)
                .setCheckpointCoordinatorConfiguration(chkConfig)
                .setCheckpointIDCounter(checkpointIDCounter)
                .setCompletedCheckpointStore(new StandaloneCompletedCheckpointStore(2))
                .setTimer(manuallyTriggeredScheduledExecutor)
                .build();
List<CompletableFuture<CompletedCheckpoint>> savepointFutures = new ArrayList<>();
String savepointDir = tmpFolder.newFolder().getAbsolutePath();
// Trigger savepoints
for (int i = 0; i < numSavepoints; i++) {
savepointFutures.add(checkpointCoordinator.triggerSavepoint(savepointDir, SavepointFormatType.CANONICAL));
}
// After triggering multiple savepoints, all should be in progress
for (CompletableFuture<CompletedCheckpoint> savepointFuture : savepointFutures) {
assertFalse(savepointFuture.isDone());
}
manuallyTriggeredScheduledExecutor.triggerAll();
// ACK all savepoints
long checkpointId = checkpointIDCounter.getLast();
for (int i = 0; i < numSavepoints; i++, checkpointId--) {
checkpointCoordinator.receiveAcknowledgeMessage(
        new AcknowledgeCheckpoint(graph.getJobID(), attemptID1, checkpointId),
        TASK_MANAGER_LOCATION_INFO);
}
// After ACKs, all should be completed
for (CompletableFuture<CompletedCheckpoint> savepointFuture : savepointFutures) {
assertNotNull(savepointFuture.get());
}
}
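The ACK loop walks the checkpoint IDs downwards from checkpointIDCounter.getLast() because each of the numSavepoints trigger calls drew one ID from the counter. A hedged follow-up check, not part of the original test, that the completed savepoints carry IDs from exactly that range:
// Hedged follow-up (not in the original test): every completed savepoint ID should fall into
// the block of IDs issued for the trigger calls above.
long lastId = checkpointIDCounter.getLast();
for (CompletableFuture<CompletedCheckpoint> savepointFuture : savepointFutures) {
    long id = savepointFuture.get().getCheckpointID();
    assertTrue(id > lastId - numSavepoints && id <= lastId);
}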