Use of org.apache.flink.runtime.scheduler.ExecutionGraphInfo in project flink by apache.
From the class DispatcherTest, method testNoHistoryServerArchiveCreatedForSuspendedJob:
@Test
public void testNoHistoryServerArchiveCreatedForSuspendedJob() throws Exception {
    final CompletableFuture<Void> archiveAttemptFuture = new CompletableFuture<>();
    final CompletableFuture<JobManagerRunnerResult> jobTerminationFuture = new CompletableFuture<>();
    dispatcher =
            createTestingDispatcherBuilder()
                    .setJobManagerRunnerFactory(
                            new FinishingJobManagerRunnerFactory(jobTerminationFuture, () -> {}))
                    .setHistoryServerArchivist(
                            executionGraphInfo -> {
                                archiveAttemptFuture.complete(null);
                                return CompletableFuture.completedFuture(null);
                            })
                    .build();
    dispatcher.start();
    jobMasterLeaderElectionService.isLeader(UUID.randomUUID());
    DispatcherGateway dispatcherGateway = dispatcher.getSelfGateway(DispatcherGateway.class);
    JobID jobId = jobGraph.getJobID();
    dispatcherGateway.submitJob(jobGraph, TIMEOUT).get();
    jobTerminationFuture.complete(
            JobManagerRunnerResult.forSuccess(
                    new ExecutionGraphInfo(
                            new ArchivedExecutionGraphBuilder()
                                    .setJobID(jobId)
                                    .setState(JobStatus.SUSPENDED)
                                    .build())));
    // wait for job to finish
    dispatcherGateway.requestJobResult(jobId, TIMEOUT).get();
    // sanity check
    assertThat(dispatcherGateway.requestJobStatus(jobId, TIMEOUT).get(), is(JobStatus.SUSPENDED));
    assertThat(archiveAttemptFuture.isDone(), is(false));
}
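For contrast, a hypothetical counterpart (not taken from the Flink repository) that reuses the fields of the test above: if the job terminated as FINISHED instead of SUSPENDED, an archive attempt would be expected, and the test would wait on the archive future rather than assert that it stayed incomplete.

// Illustrative only: complete the job as FINISHED and expect the archivist to be invoked.
jobTerminationFuture.complete(
        JobManagerRunnerResult.forSuccess(
                new ExecutionGraphInfo(
                        new ArchivedExecutionGraphBuilder()
                                .setJobID(jobId)
                                .setState(JobStatus.FINISHED)
                                .build())));
dispatcherGateway.requestJobResult(jobId, TIMEOUT).get();
archiveAttemptFuture.get(); // completes once the HistoryServerArchivist callback has run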
Use of org.apache.flink.runtime.scheduler.ExecutionGraphInfo in project flink by apache.
From the class FileExecutionGraphInfoStoreTest, method testExecutionGraphExpiration:
/**
 * Tests that an expired execution graph is removed from the execution graph store.
 */
@Test
public void testExecutionGraphExpiration() throws Exception {
    final File rootDir = temporaryFolder.newFolder();
    final Time expirationTime = Time.milliseconds(1L);
    final ManuallyTriggeredScheduledExecutor scheduledExecutor = new ManuallyTriggeredScheduledExecutor();
    final ManualTicker manualTicker = new ManualTicker();
    try (final FileExecutionGraphInfoStore executionGraphInfoStore =
            new FileExecutionGraphInfoStore(
                    rootDir, expirationTime, Integer.MAX_VALUE, 10000L, scheduledExecutor, manualTicker)) {
        final ExecutionGraphInfo executionGraphInfo =
                new ExecutionGraphInfo(
                        new ArchivedExecutionGraphBuilder().setState(JobStatus.FINISHED).build());
        executionGraphInfoStore.put(executionGraphInfo);
        // there should be one execution graph
        assertThat(executionGraphInfoStore.size(), Matchers.equalTo(1));
        manualTicker.advanceTime(expirationTime.toMilliseconds(), TimeUnit.MILLISECONDS);
        // this should trigger the cleanup after expiration
        scheduledExecutor.triggerScheduledTasks();
        assertThat(executionGraphInfoStore.size(), Matchers.equalTo(0));
        assertThat(executionGraphInfoStore.get(executionGraphInfo.getJobId()), Matchers.nullValue());
        final File storageDirectory = executionGraphInfoStore.getStorageDir();
        // check that the persisted file has been deleted
        assertThat(storageDirectory.listFiles().length, Matchers.equalTo(0));
    }
}
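A note on the two manual time controls above: the ManualTicker drives the store's expiration clock, while the ManuallyTriggeredScheduledExecutor runs the periodic cleanup task only when triggered, which makes the 1 ms expiration deterministic. The constructor arguments, with their roles as inferred from how this test drives them (an assumption, not taken from the class javadoc):

// Roles inferred from the test above; treat the per-parameter comments as assumptions.
new FileExecutionGraphInfoStore(
        rootDir,            // directory under which serialized execution graphs are stored
        expirationTime,     // how long a stored entry is retained before it expires
        Integer.MAX_VALUE,  // maximum number of retained graphs (effectively unbounded here)
        10000L,             // upper bound on the in-memory cache size
        scheduledExecutor,  // schedules the periodic cleanup, triggered manually in the test
        manualTicker);      // clock used for expiration, advanced manually in the test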
Use of org.apache.flink.runtime.scheduler.ExecutionGraphInfo in project flink by apache.
From the class FileExecutionGraphInfoStoreTest, method assertPutJobGraphWithStatus:
private void assertPutJobGraphWithStatus(JobStatus jobStatus) throws IOException {
    final ExecutionGraphInfo dummyExecutionGraphInfo =
            new ExecutionGraphInfo(new ArchivedExecutionGraphBuilder().setState(jobStatus).build());
    final File rootDir = temporaryFolder.newFolder();
    try (final FileExecutionGraphInfoStore executionGraphStore = createDefaultExecutionGraphInfoStore(rootDir)) {
        final File storageDirectory = executionGraphStore.getStorageDir();
        // check that the storage directory is empty
        assertThat(storageDirectory.listFiles().length, Matchers.equalTo(0));
        executionGraphStore.put(dummyExecutionGraphInfo);
        // check that we have persisted the given execution graph
        assertThat(storageDirectory.listFiles().length, Matchers.equalTo(1));
        assertThat(
                executionGraphStore.get(dummyExecutionGraphInfo.getJobId()),
                new ExecutionGraphInfoStoreTestUtils.PartialExecutionGraphInfoMatcher(dummyExecutionGraphInfo));
    }
}
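A hypothetical driver for the helper above (the method name and status list are illustrative, not necessarily those in the Flink test class), showing how it would be exercised for each globally terminal status:

// Illustrative only: exercises assertPutJobGraphWithStatus for the globally terminal states.
@Test
public void testPutTerminalJobStatuses() throws IOException {
    assertPutJobGraphWithStatus(JobStatus.FINISHED);
    assertPutJobGraphWithStatus(JobStatus.CANCELED);
    assertPutJobGraphWithStatus(JobStatus.FAILED);
}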
Use of org.apache.flink.runtime.scheduler.ExecutionGraphInfo in project flink by apache.
From the class FileExecutionGraphInfoStoreTest, method testStoredJobsOverview:
/**
 * Tests that we obtain the correct jobs overview.
 */
@Test
public void testStoredJobsOverview() throws IOException {
    final int numberExecutionGraphs = 10;
    final Collection<ExecutionGraphInfo> executionGraphInfos =
            generateTerminalExecutionGraphInfos(numberExecutionGraphs);
    final List<JobStatus> jobStatuses =
            executionGraphInfos.stream()
                    .map(ExecutionGraphInfo::getArchivedExecutionGraph)
                    .map(ArchivedExecutionGraph::getState)
                    .collect(Collectors.toList());
    final JobsOverview expectedJobsOverview = JobsOverview.create(jobStatuses);
    final File rootDir = temporaryFolder.newFolder();
    try (final FileExecutionGraphInfoStore executionGraphInfoStore = createDefaultExecutionGraphInfoStore(rootDir)) {
        for (ExecutionGraphInfo executionGraphInfo : executionGraphInfos) {
            executionGraphInfoStore.put(executionGraphInfo);
        }
        assertThat(executionGraphInfoStore.getStoredJobsOverview(), Matchers.equalTo(expectedJobsOverview));
    }
}
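The generateTerminalExecutionGraphInfos helper is part of the surrounding test utilities and is not shown here; a hypothetical sketch of what it has to produce (the name and the status rotation are assumptions), assuming java.util.Arrays and java.util.ArrayList are imported:

// Hypothetical sketch, not the Flink implementation: one ExecutionGraphInfo per iteration,
// cycling through the globally terminal job statuses.
private static Collection<ExecutionGraphInfo> generateTerminalExecutionGraphInfos(int number) {
    final List<JobStatus> terminalStates =
            Arrays.asList(JobStatus.FINISHED, JobStatus.CANCELED, JobStatus.FAILED);
    final Collection<ExecutionGraphInfo> result = new ArrayList<>(number);
    for (int i = 0; i < number; i++) {
        result.add(
                new ExecutionGraphInfo(
                        new ArchivedExecutionGraphBuilder()
                                .setState(terminalStates.get(i % terminalStates.size()))
                                .build()));
    }
    return result;
}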
Use of org.apache.flink.runtime.scheduler.ExecutionGraphInfo in project flink by apache.
From the class DispatcherTest, method testJobManagerRunnerInitializationFailureFailsJob:
@Test
public void testJobManagerRunnerInitializationFailureFailsJob() throws Exception {
    final TestingJobMasterServiceLeadershipRunnerFactory testingJobManagerRunnerFactory =
            new TestingJobMasterServiceLeadershipRunnerFactory();
    dispatcher = createAndStartDispatcher(heartbeatServices, haServices, testingJobManagerRunnerFactory);
    jobMasterLeaderElectionService.isLeader(UUID.randomUUID());
    DispatcherGateway dispatcherGateway = dispatcher.getSelfGateway(DispatcherGateway.class);
    final JobGraph emptyJobGraph = JobGraphBuilder.newStreamingJobGraphBuilder().setJobId(jobId).build();
    dispatcherGateway.submitJob(emptyJobGraph, TIMEOUT).get();
    final TestingJobManagerRunner testingJobManagerRunner =
            testingJobManagerRunnerFactory.takeCreatedJobManagerRunner();
    final FlinkException testFailure = new FlinkException("Test failure");
    testingJobManagerRunner.completeResultFuture(
            JobManagerRunnerResult.forInitializationFailure(
                    new ExecutionGraphInfo(
                            ArchivedExecutionGraph.createSparseArchivedExecutionGraph(
                                    jobId,
                                    jobGraph.getName(),
                                    JobStatus.FAILED,
                                    testFailure,
                                    jobGraph.getCheckpointingSettings(),
                                    1L)),
                    testFailure));
    // wait till job has failed
    dispatcherGateway.requestJobResult(jobId, TIMEOUT).get();
    // get failure cause
    ArchivedExecutionGraph execGraph = dispatcherGateway.requestJob(jobGraph.getJobID(), TIMEOUT).get();
    assertThat(execGraph.getState(), is(JobStatus.FAILED));
    Assert.assertNotNull(execGraph.getFailureInfo());
    Throwable throwable =
            execGraph.getFailureInfo().getException().deserializeError(ClassLoader.getSystemClassLoader());
    // ensure correct exception type
    assertThat(throwable, is(testFailure));
}
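Taken together, the snippets above touch only a small surface of ExecutionGraphInfo itself. A minimal sketch of that surface, using the same test-scope ArchivedExecutionGraphBuilder as the examples (not a production API):

// Minimal sketch of the ExecutionGraphInfo calls the tests above rely on.
ExecutionGraphInfo info =
        new ExecutionGraphInfo(
                new ArchivedExecutionGraphBuilder()
                        .setJobID(new JobID())
                        .setState(JobStatus.FINISHED)
                        .build());
JobID jobId = info.getJobId();                                   // id of the wrapped job
ArchivedExecutionGraph graph = info.getArchivedExecutionGraph(); // serializable snapshot of the job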