Use of org.apache.flink.runtime.client.JobExecutionException in project flink by apache.
The class ClusterClient, method runDetached.
/**
 * Submits a JobGraph in detached mode.
 *
 * @param jobGraph The JobGraph to submit
 * @param classLoader User code class loader to deserialize the results and errors (may contain custom classes).
 * @return The result of the job submission, carrying the JobID of the submitted job
 * @throws ProgramInvocationException if the JobManager gateway cannot be retrieved or the submission fails
 */
public JobSubmissionResult runDetached(JobGraph jobGraph, ClassLoader classLoader) throws ProgramInvocationException {
    waitForClusterToBeReady();

    final ActorGateway jobManagerGateway;
    try {
        jobManagerGateway = getJobManagerGateway();
    } catch (Exception e) {
        throw new ProgramInvocationException("Failed to retrieve the JobManager gateway.", e);
    }

    try {
        logAndSysout("Submitting Job with JobID: " + jobGraph.getJobID() + ". Returning after job submission.");
        JobClient.submitJobDetached(jobManagerGateway, flinkConfig, jobGraph, timeout, classLoader);
        return new JobSubmissionResult(jobGraph.getJobID());
    } catch (JobExecutionException e) {
        // A failure during submission is rewrapped so callers only ever see ProgramInvocationException.
        throw new ProgramInvocationException("The program execution failed: " + e.getMessage(), e);
    }
}
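For reference, a caller might drive this method as in the minimal sketch below; the names client, jobGraph, and userCodeClassLoader are hypothetical stand-ins for an existing ClusterClient instance, a built JobGraph, and the user-code class loader.

try {
    // Detached submission returns as soon as the job is handed to the JobManager.
    JobSubmissionResult result = client.runDetached(jobGraph, userCodeClassLoader);
    System.out.println("Submitted job " + result.getJobID());
} catch (ProgramInvocationException e) {
    // Any JobExecutionException raised during submission arrives wrapped as the cause.
    System.err.println("Submission failed: " + e.getMessage());
}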
Use of org.apache.flink.runtime.client.JobExecutionException in project flink by apache.
The class ClusterClient, method run.
/**
 * Submits a JobGraph in blocking mode, waiting for the job to finish.
 *
 * @param jobGraph The JobGraph to submit
 * @param classLoader User code class loader to deserialize the results and errors (may contain custom classes).
 * @return The result of the job execution
 * @throws ProgramInvocationException if the leader retrieval service cannot be created or the job execution fails
 */
public JobExecutionResult run(JobGraph jobGraph, ClassLoader classLoader) throws ProgramInvocationException {
    waitForClusterToBeReady();

    final LeaderRetrievalService leaderRetrievalService;
    try {
        leaderRetrievalService = LeaderRetrievalUtils.createLeaderRetrievalService(flinkConfig, true);
    } catch (Exception e) {
        throw new ProgramInvocationException("Could not create the leader retrieval service", e);
    }

    try {
        logAndSysout("Submitting job with JobID: " + jobGraph.getJobID() + ". Waiting for job completion.");
        this.lastJobExecutionResult = JobClient.submitJobAndWait(
            actorSystemLoader.get(), flinkConfig, leaderRetrievalService, jobGraph,
            timeout, printStatusDuringExecution, classLoader);
        return this.lastJobExecutionResult;
    } catch (JobExecutionException e) {
        // A failure while the job runs is rewrapped for the caller.
        throw new ProgramInvocationException("The program execution failed: " + e.getMessage(), e);
    }
}
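The blocking variant can be driven the same way; as in the previous sketch, client, jobGraph, and userCodeClassLoader are hypothetical names for objects the caller already holds.

try {
    // Blocking submission returns only after the job has finished.
    JobExecutionResult result = client.run(jobGraph, userCodeClassLoader);
    System.out.println("Job finished in " + result.getNetRuntime() + " ms");
} catch (ProgramInvocationException e) {
    // A JobExecutionException raised while the job runs is wrapped as the cause.
    System.err.println("Execution failed: " + e.getMessage());
}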
Use of org.apache.flink.runtime.client.JobExecutionException in project flink by apache.
The class ElasticsearchSinkTestBase, method runTransportClientFailsTest.
/**
 * Tests whether the Elasticsearch sink fails when there is no cluster to connect to.
 */
public void runTransportClientFailsTest() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

    DataStreamSource<Tuple2<Integer, String>> source = env.addSource(new SourceSinkDataTestKit.TestDataSourceFunction());

    Map<String, String> userConfig = new HashMap<>();
    userConfig.put(ElasticsearchSinkBase.CONFIG_KEY_BULK_FLUSH_MAX_ACTIONS, "1");
    userConfig.put("cluster.name", "my-transport-client-cluster");

    source.addSink(createElasticsearchSinkForEmbeddedNode(userConfig, new SourceSinkDataTestKit.TestElasticsearchSinkFunction("test")));

    try {
        env.execute("Elasticsearch Transport Client Test");
    } catch (JobExecutionException expectedException) {
        assertTrue(expectedException.getCause().getMessage().contains("not connected to any Elasticsearch nodes"));
        return;
    }

    fail("Expected the job to fail because no Elasticsearch cluster is reachable.");
}
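The assertion above inspects only the immediate cause. A small helper along these lines (hypothetical, not part of the Flink test base) would make the same check robust against deeper nesting of causes:

// Hypothetical helper: returns true if any throwable in the cause chain
// carries a message containing the given fragment.
private static boolean causeChainContainsMessage(Throwable root, String fragment) {
    for (Throwable t = root; t != null; t = t.getCause()) {
        if (t.getMessage() != null && t.getMessage().contains(fragment)) {
            return true;
        }
    }
    return false;
}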
Use of org.apache.flink.runtime.client.JobExecutionException in project flink by apache.
The class KafkaConsumerTestBase, method runFailOnNoBrokerTest.
// ------------------------------------------------------------------------
//  Suite of Tests
//
//  The tests here are not activated by an @Test annotation; they need to
//  be invoked from the extending classes, which can thereby select which
//  tests to run.
// ------------------------------------------------------------------------

/**
 * Test that ensures the KafkaConsumer fails properly if the topic doesn't exist
 * and a wrong broker was specified.
 *
 * @throws Exception if the test setup fails
 */
public void runFailOnNoBrokerTest() throws Exception {
    try {
        Properties properties = new Properties();

        StreamExecutionEnvironment see = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
        see.getConfig().disableSysoutLogging();
        see.setRestartStrategy(RestartStrategies.noRestart());
        see.setParallelism(1);

        // use wrong ports for the consumers
        properties.setProperty("bootstrap.servers", "localhost:80");
        properties.setProperty("zookeeper.connect", "localhost:80");
        properties.setProperty("group.id", "test");
        // let the test fail fast
        properties.setProperty("request.timeout.ms", "3000");
        properties.setProperty("socket.timeout.ms", "3000");
        properties.setProperty("session.timeout.ms", "2000");
        properties.setProperty("fetch.max.wait.ms", "2000");
        properties.setProperty("heartbeat.interval.ms", "1000");
        properties.putAll(secureProps);

        FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer("doesntexist", new SimpleStringSchema(), properties);
        DataStream<String> stream = see.addSource(source);
        stream.print();
        see.execute("No broker test");
    } catch (ProgramInvocationException pie) {
        if (kafkaServer.getVersion().equals("0.9") || kafkaServer.getVersion().equals("0.10")) {
            // The 0.9/0.10 consumers surface the missing broker as a metadata fetch timeout.
            assertTrue(pie.getCause() instanceof JobExecutionException);
            JobExecutionException jee = (JobExecutionException) pie.getCause();
            assertTrue(jee.getCause() instanceof TimeoutException);
            TimeoutException te = (TimeoutException) jee.getCause();
            assertEquals("Timeout expired while fetching topic metadata", te.getMessage());
        } else {
            // Older consumers fail because no partitions can be retrieved for the topic.
            assertTrue(pie.getCause() instanceof JobExecutionException);
            JobExecutionException jee = (JobExecutionException) pie.getCause();
            assertTrue(jee.getCause() instanceof RuntimeException);
            RuntimeException re = (RuntimeException) jee.getCause();
            assertTrue(re.getMessage().contains("Unable to retrieve any partitions for the requested topics [doesntexist]"));
        }
    }
}
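The version switch above repeats the same instanceof-and-cast unwrapping. A small generic helper like the following (hypothetical, not part of KafkaConsumerTestBase) condenses that pattern:

// Hypothetical helper: find the first cause of the given type in the
// chain, or null if the chain contains none.
private static <T extends Throwable> T findCause(Throwable root, Class<T> type) {
    for (Throwable t = root; t != null; t = t.getCause()) {
        if (type.isInstance(t)) {
            return type.cast(t);
        }
    }
    return null;
}

// Usage: JobExecutionException jee = findCause(pie, JobExecutionException.class);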
Use of org.apache.flink.runtime.client.JobExecutionException in project flink by apache.
The class JarRunHandler, method handleJsonRequest.
@Override
public String handleJsonRequest(Map<String, String> pathParams, Map<String, String> queryParams, ActorGateway jobManager) throws Exception {
    try {
        JarActionHandlerConfig config = JarActionHandlerConfig.fromParams(pathParams, queryParams);
        Tuple2<JobGraph, ClassLoader> graph = getJobGraphAndClassLoader(config);

        try {
            graph.f0.uploadUserJars(jobManager, timeout, clientConfig);
        } catch (IOException e) {
            throw new ProgramInvocationException("Failed to upload jar files to the job manager", e);
        }

        try {
            JobClient.submitJobDetached(jobManager, clientConfig, graph.f0, timeout, graph.f1);
        } catch (JobExecutionException e) {
            throw new ProgramInvocationException("Failed to submit the job to the job manager", e);
        }

        // Answer with a small JSON object carrying the ID of the submitted job.
        StringWriter writer = new StringWriter();
        JsonGenerator gen = JsonFactory.jacksonFactory.createGenerator(writer);
        gen.writeStartObject();
        gen.writeStringField("jobid", graph.f0.getJobID().toString());
        gen.writeEndObject();
        gen.close();
        return writer.toString();
    } catch (Exception e) {
        return sendError(e);
    }
}
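On success the handler returns a one-field JSON object of the form {"jobid":"..."}. A minimal client-side sketch for reading it back, assuming plain Jackson (com.fasterxml.jackson.databind) is on the classpath and response holds the string returned by handleJsonRequest:

// Hypothetical client-side code: parse the handler's JSON response
// and extract the ID of the submitted job.
ObjectMapper mapper = new ObjectMapper();      // com.fasterxml.jackson.databind.ObjectMapper
JsonNode node = mapper.readTree(response);     // e.g. {"jobid":"a1b2c3..."}
String jobId = node.get("jobid").asText();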