Usage of org.apache.flink.api.common.JobStatus in the Apache Flink project:
class MiniDispatcher, method jobReachedTerminalState.
/**
 * Hook invoked when the job reaches a terminal state. In addition to the
 * superclass clean-up, completes the cluster shut-down future when the job is
 * globally terminal and the mini cluster no longer needs to stay up (the job
 * was cancelled, or the cluster runs in detached mode so nobody will fetch the
 * execution result).
 */
@Override
protected CleanupJobState jobReachedTerminalState(ExecutionGraphInfo executionGraphInfo) {
    final ArchivedExecutionGraph graph = executionGraphInfo.getArchivedExecutionGraph();
    // Let the parent dispatcher perform its HA clean-up first.
    final CleanupJobState haCleanupState = super.jobReachedTerminalState(executionGraphInfo);

    final JobStatus terminalStatus =
            Objects.requireNonNull(graph.getState(), "JobStatus should not be null here.");

    final boolean shutDownCluster =
            terminalStatus.isGloballyTerminalState()
                    && (jobCancelled || executionMode == ClusterEntrypoint.ExecutionMode.DETACHED);
    if (shutDownCluster) {
        // shut down if job is cancelled or we don't have to wait for the execution result
        // retrieval
        log.info("Shutting down cluster with state {}, jobCancelled: {}, executionMode: {}", terminalStatus, jobCancelled, executionMode);
        shutDownFuture.complete(ApplicationStatus.fromJobStatus(terminalStatus));
    }
    return haCleanupState;
}
Usage of org.apache.flink.api.common.JobStatus in the Apache Flink project:
class JobDetails, method createDetailsForJob.
/**
 * Builds a {@link JobDetails} snapshot for the given execution graph: start and
 * end timestamps, duration, per-{@link ExecutionState} task counts, total task
 * count, and the timestamp of the most recent state change.
 *
 * @param job the execution graph to summarize
 * @return the job details derived from the graph's current state
 */
public static JobDetails createDetailsForJob(AccessExecutionGraph job) {
    final JobStatus currentStatus = job.getState();
    final long startTime = job.getStatusTimestamp(JobStatus.INITIALIZING);

    // A finish timestamp only exists once the job is globally terminal.
    final long endTime;
    if (currentStatus.isGloballyTerminalState()) {
        endTime = job.getStatusTimestamp(currentStatus);
    } else {
        endTime = -1L;
    }
    // For running jobs, measure the duration up to "now".
    final long duration = (endTime >= 0L ? endTime : System.currentTimeMillis()) - startTime;

    final int[] tasksPerState = new int[ExecutionState.values().length];
    long lastChanged = 0;
    int totalTasks = 0;
    for (AccessExecutionJobVertex jobVertex : job.getVerticesTopologically()) {
        final AccessExecutionVertex[] vertices = jobVertex.getTaskVertices();
        totalTasks += vertices.length;
        for (AccessExecutionVertex vertex : vertices) {
            final ExecutionState executionState = vertex.getExecutionState();
            tasksPerState[executionState.ordinal()]++;
            lastChanged = Math.max(lastChanged, vertex.getStateTimestamp(executionState));
        }
    }
    // The job-level finish time may be later than any task-level transition.
    lastChanged = Math.max(lastChanged, endTime);

    return new JobDetails(job.getJobID(), job.getJobName(), startTime, endTime, duration, currentStatus, lastChanged, tasksPerState, totalTasks);
}
Usage of org.apache.flink.api.common.JobStatus in the Apache Flink project:
class JobExecutionResultHandler, method handleRequest.
/**
 * Handles a REST request for a job's execution result. First queries the job's
 * status; if the job is globally terminal, fetches and returns the final job
 * result, otherwise responds that the job is still in progress. Failures from
 * either gateway call are translated via {@code propagateException}.
 */
@Override
protected CompletableFuture<JobExecutionResultResponseBody> handleRequest(@Nonnull final HandlerRequest<EmptyRequestBody> request, @Nonnull final RestfulGateway gateway) throws RestHandlerException {
    final JobID jobId = request.getPathParameter(JobIDPathParameter.class);
    return gateway
            .requestJobStatus(jobId, timeout)
            .thenCompose(
                    status ->
                            status.isGloballyTerminalState()
                                    // Terminal: the final result is available for retrieval.
                                    ? gateway.requestJobResult(jobId, timeout)
                                            .thenApply(JobExecutionResultResponseBody::created)
                                    // Not terminal yet: tell the client to keep polling.
                                    : CompletableFuture.completedFuture(
                                            JobExecutionResultResponseBody.inProgress()))
            .exceptionally(throwable -> {
                throw propagateException(throwable);
            });
}
Usage of org.apache.flink.api.common.JobStatus in the Apache Flink project:
class ExecutionGraphInfoStoreTestUtils, method generateTerminalExecutionGraphInfos.
/**
 * Generates the specified number of {@link ExecutionGraphInfo}s, each built in
 * a randomly chosen globally terminal job status.
 *
 * @param number how many ExecutionGraphInfos to generate
 * @return the generated ExecutionGraphInfo collection
 */
static Collection<ExecutionGraphInfo> generateTerminalExecutionGraphInfos(int number) {
    final Collection<ExecutionGraphInfo> result = new ArrayList<>(number);
    for (int i = 0; i < number; i++) {
        // Pick one of the globally terminal statuses at random.
        final int randomIndex =
                ThreadLocalRandom.current().nextInt(GLOBALLY_TERMINAL_JOB_STATUS.size());
        final JobStatus terminalState = GLOBALLY_TERMINAL_JOB_STATUS.get(randomIndex);
        result.add(
                new ExecutionGraphInfo(
                        new ArchivedExecutionGraphBuilder().setState(terminalState).build()));
    }
    return result;
}
Usage of org.apache.flink.api.common.JobStatus in the Apache Flink project:
class MemoryExecutionGraphInfoStoreTest, method testStoredJobsOverview.
/**
 * Tests that the store reports the correct overview of all stored jobs.
 */
@Test
public void testStoredJobsOverview() throws IOException {
    final int numberExecutionGraphs = 10;
    final Collection<ExecutionGraphInfo> graphInfos =
            generateTerminalExecutionGraphInfos(numberExecutionGraphs);

    // The expected overview is derived from the generated jobs' statuses.
    final List<JobStatus> statuses = new ArrayList<>();
    for (ExecutionGraphInfo graphInfo : graphInfos) {
        statuses.add(graphInfo.getArchivedExecutionGraph().getState());
    }
    final JobsOverview expectedJobsOverview = JobsOverview.create(statuses);

    try (final MemoryExecutionGraphInfoStore store = createMemoryExecutionGraphInfoStore()) {
        for (ExecutionGraphInfo graphInfo : graphInfos) {
            store.put(graphInfo);
        }
        assertThat(store.getStoredJobsOverview(), Matchers.equalTo(expectedJobsOverview));
    }
}
Aggregations