Use of org.apache.flink.runtime.client.JobExecutionException in project flink by apache: class JarRunHandler, method handleJsonRequest.
@Override
public String handleJsonRequest(Map<String, String> pathParams, Map<String, String> queryParams, ActorGateway jobManager) throws Exception {
    try {
        JarActionHandlerConfig config = JarActionHandlerConfig.fromParams(pathParams, queryParams);
        Tuple2<JobGraph, ClassLoader> graph = getJobGraphAndClassLoader(config);

        // Ship the user jars to the JobManager before submitting the job.
        try {
            graph.f0.uploadUserJars(jobManager, timeout, clientConfig);
        } catch (IOException e) {
            throw new ProgramInvocationException("Failed to upload jar files to the job manager", e);
        }

        // Submit the job without waiting for its completion.
        try {
            JobClient.submitJobDetached(jobManager, clientConfig, graph.f0, timeout, graph.f1);
        } catch (JobExecutionException e) {
            throw new ProgramInvocationException("Failed to submit the job to the job manager", e);
        }

        // Answer with a small JSON document containing only the job id.
        StringWriter writer = new StringWriter();
        JsonGenerator gen = JsonFactory.jacksonFactory.createGenerator(writer);
        gen.writeStartObject();
        gen.writeStringField("jobid", graph.f0.getJobID().toString());
        gen.writeEndObject();
        gen.close();
        return writer.toString();
    } catch (Exception e) {
        return sendError(e);
    }
}
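On success, the handler answers with a JSON object that carries only the job id. Below is a minimal, self-contained sketch of how a REST client might read that payload back, assuming Jackson on the classpath; the response value and class name are illustrative, not part of Flink:

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class JarRunResponseParsing {
    public static void main(String[] args) throws Exception {
        // Example payload in the shape produced by the handler above (made-up job id).
        String response = "{\"jobid\":\"1b5fd4bff0ab5fd9a2f04da2b1dd4c13\"}";

        // Parse the JSON and extract the job id field.
        JsonNode root = new ObjectMapper().readTree(response);
        String jobId = root.get("jobid").asText();
        System.out.println("Submitted job: " + jobId);
    }
}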
Use of org.apache.flink.runtime.client.JobExecutionException in project flink by apache: class ClusterClient, method runDetached.
/**
* Submits a JobGraph detached.
* @param jobGraph The JobGraph
* @param classLoader User code class loader to deserialize the results and errors (may contain custom classes).
* @return JobSubmissionResult
* @throws ProgramInvocationException
*/
public JobSubmissionResult runDetached(JobGraph jobGraph, ClassLoader classLoader) throws ProgramInvocationException {
    waitForClusterToBeReady();

    // Look up the JobManager; any failure is surfaced as a ProgramInvocationException.
    final ActorGateway jobManagerGateway;
    try {
        jobManagerGateway = getJobManagerGateway();
    } catch (Exception e) {
        throw new ProgramInvocationException("Failed to retrieve the JobManager gateway.", e);
    }

    try {
        logAndSysout("Submitting Job with JobID: " + jobGraph.getJobID() + ". Returning after job submission.");
        JobClient.submitJobDetached(jobManagerGateway, flinkConfig, jobGraph, timeout, classLoader);
        return new JobSubmissionResult(jobGraph.getJobID());
    } catch (JobExecutionException e) {
        throw new ProgramInvocationException("The program execution failed: " + e.getMessage(), e);
    }
}
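Callers of runDetached only ever see a ProgramInvocationException; the original JobExecutionException travels along as the cause. A minimal sketch of unwrapping it, where the helper name and the simulated failure are illustrative rather than part of Flink's API:

import org.apache.flink.api.common.JobID;
import org.apache.flink.client.program.ProgramInvocationException;
import org.apache.flink.runtime.client.JobExecutionException;

public class DetachedSubmissionErrorHandling {

    // Hypothetical helper: walk the cause chain of the wrapped exception and
    // return the first JobExecutionException found, or null if there is none.
    static JobExecutionException findJobExecutionException(Throwable t) {
        for (Throwable cur = t; cur != null; cur = cur.getCause()) {
            if (cur instanceof JobExecutionException) {
                return (JobExecutionException) cur;
            }
        }
        return null;
    }

    public static void main(String[] args) {
        // Simulate the wrapping performed in runDetached above (made-up failure).
        JobID jobId = new JobID();
        JobExecutionException original = new JobExecutionException(jobId, "submission rejected", new Exception("no slots available"));
        ProgramInvocationException wrapped = new ProgramInvocationException("The program execution failed: " + original.getMessage(), original);

        JobExecutionException unwrapped = findJobExecutionException(wrapped);
        System.out.println("Failed job: " + (unwrapped != null ? unwrapped.getJobID() : "unknown"));
    }
}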
Use of org.apache.flink.runtime.client.JobExecutionException in project flink by apache: class ClusterClient, method run.
/**
* Submits a JobGraph blocking.
* @param jobGraph The JobGraph
* @param classLoader User code class loader to deserialize the results and errors (may contain custom classes).
* @return JobExecutionResult
* @throws ProgramInvocationException
*/
public JobExecutionResult run(JobGraph jobGraph, ClassLoader classLoader) throws ProgramInvocationException {
    waitForClusterToBeReady();

    final LeaderRetrievalService leaderRetrievalService;
    try {
        leaderRetrievalService = LeaderRetrievalUtils.createLeaderRetrievalService(flinkConfig, true);
    } catch (Exception e) {
        throw new ProgramInvocationException("Could not create the leader retrieval service", e);
    }

    try {
        logAndSysout("Submitting job with JobID: " + jobGraph.getJobID() + ". Waiting for job completion.");
        // Blocks until the job has finished; failures arrive as JobExecutionException.
        this.lastJobExecutionResult = JobClient.submitJobAndWait(actorSystemLoader.get(), flinkConfig, leaderRetrievalService, jobGraph, timeout, printStatusDuringExecution, classLoader);
        return this.lastJobExecutionResult;
    } catch (JobExecutionException e) {
        throw new ProgramInvocationException("The program execution failed: " + e.getMessage(), e);
    }
}
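When the blocking path succeeds, the caller receives a JobExecutionResult carrying the job id, the net runtime, and the accumulator values. A small self-contained sketch of reading such a result; the job id, runtime, and accumulator name and value here are made up for illustration:

import java.util.Collections;
import java.util.Map;

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.common.JobID;

public class BlockingRunResult {
    public static void main(String[] args) {
        // Build a result in the same shape that a successful blocking run returns.
        Map<String, Object> accumulators = Collections.<String, Object>singletonMap("records-written", 42L);
        JobExecutionResult result = new JobExecutionResult(new JobID(), 1500L, accumulators);

        System.out.println("Job " + result.getJobID() + " finished in " + result.getNetRuntime() + " ms");
        System.out.println("records-written = " + result.getAccumulatorResult("records-written"));
    }
}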
Use of org.apache.flink.runtime.client.JobExecutionException in project flink by apache: class ElasticsearchSinkTestBase, method runTransportClientFailsTest.
/**
* Tests whether the Elasticsearch sink fails when there is no cluster to connect to.
*/
public void runTransportClientFailsTest() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStreamSource<Tuple2<Integer, String>> source = env.addSource(new SourceSinkDataTestKit.TestDataSourceFunction());

    // Configure a cluster name for which no Elasticsearch cluster is running.
    Map<String, String> userConfig = new HashMap<>();
    userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "1");
    userConfig.put("cluster.name", "my-transport-client-cluster");

    source.addSink(createElasticsearchSinkForEmbeddedNode(userConfig, new SourceSinkDataTestKit.TestElasticsearchSinkFunction("test")));

    try {
        env.execute("Elasticsearch Transport Client Test");
    } catch (JobExecutionException expectedException) {
        assertTrue(expectedException.getCause().getMessage().contains("not connected to any Elasticsearch nodes"));
        return;
    }
    fail();
}
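The assertion above relies on the expected message sitting exactly one level down in the cause chain (getCause()). If the wrapping depth were uncertain, a tiny hypothetical helper along these lines (not part of Flink or the test base) would make the check depth-independent:

public class CauseChainAssertions {

    // Walk the entire cause chain and report whether any message contains the expected text.
    public static boolean causeChainContains(Throwable t, String expectedMessage) {
        for (Throwable cur = t; cur != null; cur = cur.getCause()) {
            if (cur.getMessage() != null && cur.getMessage().contains(expectedMessage)) {
                return true;
            }
        }
        return false;
    }
}

The test could then assert causeChainContains(expectedException, "not connected to any Elasticsearch nodes") instead of inspecting getCause() directly.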
Use of org.apache.flink.runtime.client.JobExecutionException in project flink by apache: class JobMaster, method jobStatusChanged.
private void jobStatusChanged(final JobStatus newJobStatus, long timestamp, final Throwable error) {
    validateRunsInMainThread();
    final JobID jobID = executionGraph.getJobID();
    final String jobName = executionGraph.getJobName();
    log.info("Status of job {} ({}) changed to {}.", jobID, jobName, newJobStatus, error);

    if (newJobStatus.isGloballyTerminalState()) {
        switch (newJobStatus) {
            case FINISHED:
                try {
                    // TODO get correct job duration
                    // job done, let's get the accumulators
                    Map<String, Object> accumulatorResults = executionGraph.getAccumulators();
                    JobExecutionResult result = new JobExecutionResult(jobID, 0L, accumulatorResults);
                    jobCompletionActions.jobFinished(result);
                } catch (Exception e) {
                    log.error("Cannot fetch final accumulators for job {} ({})", jobName, jobID, e);
                    final JobExecutionException exception = new JobExecutionException(jobID,
                            "Failed to retrieve accumulator results. " +
                            "The job is registered as 'FINISHED (successful), but this notification describes " +
                            "a failure, since the resulting accumulators could not be fetched.", e);
                    jobCompletionActions.jobFailed(exception);
                }
                break;

            case CANCELED: {
                final JobExecutionException exception = new JobExecutionException(
                        jobID, "Job was cancelled.", new Exception("The job was cancelled"));
                jobCompletionActions.jobFailed(exception);
                break;
            }

            case FAILED: {
                final Throwable unpackedError = SerializedThrowable.get(error, userCodeLoader);
                final JobExecutionException exception = new JobExecutionException(
                        jobID, "Job execution failed.", unpackedError);
                jobCompletionActions.jobFailed(exception);
                break;
            }

            default:
                // this can happen only if the enum is buggy
                throw new IllegalStateException(newJobStatus.toString());
        }
    }
}
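All three terminal branches hand a JobExecutionException to jobCompletionActions, so a consumer can always recover the job id and the underlying cause from it. A hypothetical, standalone callback (not the real JobCompletionActions interface) illustrating that contract:

import org.apache.flink.runtime.client.JobExecutionException;

public class LoggingCompletionCallback {

    // The JobID travels with the exception; the cause carries either the real
    // user-facing failure or the synthetic "The job was cancelled" marker.
    public void jobFailed(JobExecutionException exception) {
        System.err.println("Job " + exception.getJobID() + " did not finish successfully: " + exception.getMessage());
        if (exception.getCause() != null) {
            System.err.println("Underlying cause: " + exception.getCause());
        }
    }
}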