Use of org.apache.flink.api.common.JobStatus in project flink by apache.
From the class AbstractOperatorRestoreTestBase, method restoreJob:
private void restoreJob(ClusterClient<?> clusterClient, Deadline deadline, String savepointPath) throws Exception {
    // Create the job in RESTORE mode and point it at the savepoint to resume from.
    JobGraph jobToRestore = createJobGraph(ExecutionMode.RESTORE);
    jobToRestore.setSavepointRestoreSettings(
            SavepointRestoreSettings.forPath(savepointPath, allowNonRestoredState));
    assertNotNull("Job doesn't have a JobID.", jobToRestore.getJobID());
    clusterClient.submitJob(jobToRestore).get();
    // Poll the job status every 50 ms until it reports FINISHED or the deadline expires.
    CompletableFuture<JobStatus> jobStatusFuture = FutureUtils.retrySuccessfulWithDelay(
            () -> clusterClient.getJobStatus(jobToRestore.getJobID()),
            Time.milliseconds(50),
            deadline,
            (jobStatus) -> jobStatus == JobStatus.FINISHED,
            TestingUtils.defaultScheduledExecutor());
    assertEquals(JobStatus.FINISHED, jobStatusFuture.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS));
}
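The same wait-until-FINISHED logic can also be written as a plain polling loop. A minimal sketch, assuming a ClusterClient and JobID are in scope; pollUntilFinished is a hypothetical helper name, not part of the test base:

import java.util.concurrent.TimeoutException;
import org.apache.flink.api.common.JobID;
import org.apache.flink.api.common.JobStatus;
import org.apache.flink.api.common.time.Deadline;
import org.apache.flink.client.program.ClusterClient;

// Hypothetical helper: poll the job status until FINISHED, failing fast if the
// job reaches any other globally terminal state or the deadline runs out.
private static void pollUntilFinished(ClusterClient<?> client, JobID jobId, Deadline deadline) throws Exception {
    while (deadline.hasTimeLeft()) {
        JobStatus status = client.getJobStatus(jobId).get();
        if (status == JobStatus.FINISHED) {
            return;
        }
        if (status.isGloballyTerminalState()) {
            throw new IllegalStateException("Job terminated in state " + status);
        }
        Thread.sleep(50);
    }
    throw new TimeoutException("Job did not reach FINISHED before the deadline");
}

FutureUtils.retrySuccessfulWithDelay packages this loop behind a CompletableFuture and a scheduled executor, which avoids blocking a thread while waiting.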
Use of org.apache.flink.api.common.JobStatus in project flink by apache.
From the class CheckpointStoreITCase, method testJobClientRemainsResponsiveDuringCompletedCheckpointStoreRecovery:
@Test
public void testJobClientRemainsResponsiveDuringCompletedCheckpointStoreRecovery() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.enableCheckpointing(10);
    // Two restart attempts with no delay: one for the failure during processing, one during recovery.
    env.setRestartStrategy(fixedDelayRestart(2, 0));
    env.addSource(emitUntil(() -> FailingMapper.failedAndProcessed))
            .map(new FailingMapper())
            .addSink(new DiscardingSink<>());
    final JobClient jobClient = env.executeAsync();

    // While recovery of the completed checkpoint store is blocked, status queries
    // must still be answered with INITIALIZING instead of hanging.
    BlockingHighAvailabilityServiceFactory.fetchRemoteCheckpointsStart.await();
    for (int i = 0; i < 10; i++) {
        final JobStatus jobStatus = jobClient.getJobStatus().get();
        assertEquals(JobStatus.INITIALIZING, jobStatus);
    }
    BlockingHighAvailabilityServiceFactory.fetchRemoteCheckpointsFinished.countDown();

    // Wait for the job to finish.
    jobClient.getJobExecutionResult().get();
    checkState(FailingMapper.failedAndProcessed);
}
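FailingMapper and emitUntil are helpers defined elsewhere in CheckpointStoreITCase. A minimal sketch of what such a mapper could look like, assuming it fails exactly once to force a restart and flags completion through static fields; the field names match the usage above, but the body is an assumption, not the test's actual code:

import org.apache.flink.api.common.functions.MapFunction;

// Sketch only: fails the first element to trigger a restart from the checkpoint,
// then records that the element was both failed on and successfully reprocessed.
public class FailingMapper implements MapFunction<Integer, Integer> {
    static volatile boolean failed = false;
    static volatile boolean failedAndProcessed = false;

    @Override
    public Integer map(Integer value) throws Exception {
        if (!failed) {
            failed = true;
            throw new RuntimeException("Intentional test failure");
        }
        failedAndProcessed = true;
        return value;
    }
}

The emitUntil source would then keep emitting elements until failedAndProcessed flips to true, at which point the pipeline can finish and checkState(FailingMapper.failedAndProcessed) passes.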