Use of org.apache.flink.runtime.scheduler.ExecutionGraphInfo in the Apache Flink project.
From the class DispatcherTest, method testCancellationOfNonCanceledTerminalJobFailsWithAppropriateException.
@Test
public void testCancellationOfNonCanceledTerminalJobFailsWithAppropriateException() throws Exception {
    // Future through which this test controls when the job manager runner reports completion.
    final CompletableFuture<JobManagerRunnerResult> jobResultFuture = new CompletableFuture<>();
    dispatcher =
            createAndStartDispatcher(
                    heartbeatServices,
                    haServices,
                    new FinishingJobManagerRunnerFactory(jobResultFuture, () -> {}));
    jobMasterLeaderElectionService.isLeader(UUID.randomUUID());

    final DispatcherGateway gateway = dispatcher.getSelfGateway(DispatcherGateway.class);
    final JobID jobID = jobGraph.getJobID();
    gateway.submitJob(jobGraph, TIMEOUT).get();

    // Drive the job into the FINISHED state.
    jobResultFuture.complete(
            JobManagerRunnerResult.forSuccess(
                    new ExecutionGraphInfo(
                            new ArchivedExecutionGraphBuilder()
                                    .setJobID(jobID)
                                    .setState(JobStatus.FINISHED)
                                    .build())));

    // wait for job to finish
    gateway.requestJobResult(jobID, TIMEOUT).get();
    // sanity check
    assertThat(gateway.requestJobStatus(jobID, TIMEOUT).get(), is(JobStatus.FINISHED));

    // Cancelling a job that terminated without being cancelled must fail with the dedicated exception.
    final CompletableFuture<Acknowledge> cancellationFuture = gateway.cancelJob(jobID, TIMEOUT);
    assertThat(
            cancellationFuture,
            FlinkMatchers.futureWillCompleteExceptionally(
                    FlinkJobTerminatedWithoutCancellationException.class, Duration.ofHours(8)));
}
Use of org.apache.flink.runtime.scheduler.ExecutionGraphInfo in the Apache Flink project.
From the class FileExecutionGraphInfoStoreTest, method testAvailableJobDetails.
/** Tests that we obtain the correct collection of available job details. */
@Test
public void testAvailableJobDetails() throws IOException {
    final int graphCount = 10;
    final Collection<ExecutionGraphInfo> graphInfos = generateTerminalExecutionGraphInfos(graphCount);
    final Collection<JobDetails> expectedDetails = generateJobDetails(graphInfos);
    final File rootDir = temporaryFolder.newFolder();
    try (final FileExecutionGraphInfoStore store = createDefaultExecutionGraphInfoStore(rootDir)) {
        // Persist every generated graph, then the store must report matching job details.
        graphInfos.forEach(store::put);
        assertThat(
                store.getAvailableJobDetails(),
                Matchers.containsInAnyOrder(expectedDetails.toArray()));
    }
}
Use of org.apache.flink.runtime.scheduler.ExecutionGraphInfo in the Apache Flink project.
From the class FileExecutionGraphInfoStoreTest, method testCloseCleansUp.
/** Tests that all persisted files are cleaned up after closing the store. */
@Test
public void testCloseCleansUp() throws IOException {
    final File rootDir = temporaryFolder.newFolder();
    assertThat(rootDir.listFiles().length, Matchers.equalTo(0));
    try (final FileExecutionGraphInfoStore store = createDefaultExecutionGraphInfoStore(rootDir)) {
        // Opening the store creates exactly one storage directory under the root.
        assertThat(rootDir.listFiles().length, Matchers.equalTo(1));
        final File storageDir = store.getStorageDir();
        assertThat(storageDir.listFiles().length, Matchers.equalTo(0));
        // Persisting one terminal graph produces exactly one file in the storage directory.
        store.put(
                new ExecutionGraphInfo(
                        new ArchivedExecutionGraphBuilder().setState(JobStatus.FINISHED).build()));
        assertThat(storageDir.listFiles().length, Matchers.equalTo(1));
    }
    // Closing the store must remove everything it persisted.
    assertThat(rootDir.listFiles().length, Matchers.equalTo(0));
}
Use of org.apache.flink.runtime.scheduler.ExecutionGraphInfo in the Apache Flink project.
From the class FileExecutionGraphInfoStoreTest, method testCacheLoading.
/** Tests that evicted {@link ExecutionGraphInfo} are loaded from disk again. */
@Test
public void testCacheLoading() throws IOException {
    final File rootDir = temporaryFolder.newFolder();
    try (final FileExecutionGraphInfoStore store =
            new FileExecutionGraphInfoStore(
                    rootDir,
                    Time.hours(1L),
                    Integer.MAX_VALUE,
                    100L << 10,
                    TestingUtils.defaultScheduledExecutor(),
                    Ticker.systemTicker())) {
        final LoadingCache<JobID, ExecutionGraphInfo> cache = store.getExecutionGraphInfoCache();
        final Collection<ExecutionGraphInfo> insertedInfos = new ArrayList<>(64);
        // insert execution graphs until the first one got evicted
        do {
            // has roughly a size of 1.4 KB
            final ExecutionGraphInfo info =
                    new ExecutionGraphInfo(
                            new ArchivedExecutionGraphBuilder().setState(JobStatus.FINISHED).build());
            store.put(info);
            insertedInfos.add(info);
        } while (cache.size() == insertedInfos.size());

        final File storageDir = store.getStorageDir();
        // every inserted graph must have been spilled to disk
        assertThat(storageDir.listFiles().length, Matchers.equalTo(insertedInfos.size()));
        for (ExecutionGraphInfo info : insertedInfos) {
            // evicted entries must be transparently re-loaded from their on-disk copy
            assertThat(store.get(info.getJobId()), matchesPartiallyWith(info));
        }
    }
}
Use of org.apache.flink.runtime.scheduler.ExecutionGraphInfo in the Apache Flink project.
From the class FileExecutionGraphInfoStoreTest, method testMaximumCapacity.
/**
 * Tests that the size of {@link FileExecutionGraphInfoStore} is no more than the configured max
 * capacity and the old execution graphs will be purged if the total added number exceeds the
 * max capacity.
 */
@Test
public void testMaximumCapacity() throws IOException {
    final File rootDir = temporaryFolder.newFolder();
    final int maxCapacity = 10;
    final int graphCount = 10;
    final Collection<ExecutionGraphInfo> firstBatch = generateTerminalExecutionGraphInfos(graphCount);
    final Collection<ExecutionGraphInfo> secondBatch = generateTerminalExecutionGraphInfos(graphCount);
    final Collection<JobDetails> expectedDetails = generateJobDetails(secondBatch);
    try (final FileExecutionGraphInfoStore store =
            new FileExecutionGraphInfoStore(
                    rootDir,
                    Time.hours(1L),
                    maxCapacity,
                    10000L,
                    TestingUtils.defaultScheduledExecutor(),
                    Ticker.systemTicker())) {
        for (ExecutionGraphInfo info : firstBatch) {
            store.put(info);
            // no more than the configured maximum capacity
            assertTrue(store.size() <= maxCapacity);
        }
        for (ExecutionGraphInfo info : secondBatch) {
            store.put(info);
            // equals to the configured maximum capacity
            assertEquals(maxCapacity, store.size());
        }
        // the older execution graphs are purged
        assertThat(
                store.getAvailableJobDetails(),
                Matchers.containsInAnyOrder(expectedDetails.toArray()));
    }
}
Aggregations