Use of org.apache.flink.client.program.ProgramInvocationException in project flink by apache.
Class KafkaConsumerTestBase, method runFailOnDeployTest.
/**
 * Tests that the job fails during deployment when the requested parallelism
 * exceeds the number of task slots available in the mini cluster, and that
 * the failure surfaces as a NoResourceAvailableException.
 */
public void runFailOnDeployTest() throws Exception {
    final String topic = "failOnDeployTopic";
    createTestTopic(topic, 2, 1);
    DeserializationSchema<Integer> schema =
        new TypeInformationSerializationSchema<>(BasicTypeInfo.INT_TYPE_INFO, new ExecutionConfig());
    StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
    // needs to be more than the number of slots the mini cluster has
    env.setParallelism(12);
    env.getConfig().disableSysoutLogging();
    Properties props = new Properties();
    props.putAll(standardProps);
    props.putAll(secureProps);
    FlinkKafkaConsumerBase<Integer> kafkaSource = kafkaServer.getConsumer(topic, schema, props);
    env.addSource(kafkaSource).addSink(new DiscardingSink<Integer>());
    try {
        env.execute("test fail on deploy");
        fail("this test should fail with an exception");
    } catch (ProgramInvocationException e) {
        // validate that we failed due to a NoResourceAvailableException
        Throwable cause = e.getCause();
        int depth = 0;
        boolean foundResourceException = false;
        while (cause != null && depth++ < 20) {
            if (cause instanceof NoResourceAvailableException) {
                foundResourceException = true;
                break;
            }
            cause = cause.getCause();
        }
        assertTrue("Wrong exception", foundResourceException);
    }
    deleteTestTopic(topic);
}
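The cause-chain walk in the catch block above is a recurring pattern with ProgramInvocationException, which typically wraps the real failure several levels deep. Below is a minimal, self-contained sketch of that walk as a reusable helper; the ExceptionChains class and findCause method are illustrative names, not Flink API (newer Flink versions ship a similar helper in org.apache.flink.util.ExceptionUtils):

import java.util.Optional;

// Illustrative helper (not part of Flink): searches an exception's cause
// chain for the first cause of a given type, with a depth cap so a cyclic
// cause chain cannot loop forever.
public final class ExceptionChains {

    private ExceptionChains() {
    }

    public static <T extends Throwable> Optional<T> findCause(Throwable root, Class<T> type, int maxDepth) {
        Throwable cause = root;
        int depth = 0;
        while (cause != null && depth++ < maxDepth) {
            if (type.isInstance(cause)) {
                return Optional.of(type.cast(cause));
            }
            cause = cause.getCause();
        }
        return Optional.empty();
    }
}

With such a helper, the assertion in the test collapses to a single line: assertTrue("Wrong exception", ExceptionChains.findCause(e, NoResourceAvailableException.class, 20).isPresent()).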
Use of org.apache.flink.client.program.ProgramInvocationException in project flink by apache.
Class KafkaConsumerTestBase, method runFailOnNoBrokerTest.
// ------------------------------------------------------------------------
// Suite of Tests
//
// The tests here are all not activated (by an @Test tag), but need
// to be invoked from the extending classes. That way, the classes can
// select which tests to run.
// ------------------------------------------------------------------------
/**
 * Test that ensures the KafkaConsumer fails properly if the topic doesn't exist
 * and a wrong broker was specified.
 *
 * @throws Exception
 */
public void runFailOnNoBrokerTest() throws Exception {
    try {
        Properties properties = new Properties();
        StreamExecutionEnvironment see = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
        see.getConfig().disableSysoutLogging();
        see.setRestartStrategy(RestartStrategies.noRestart());
        see.setParallelism(1);
        // use wrong ports for the consumers
        properties.setProperty("bootstrap.servers", "localhost:80");
        properties.setProperty("zookeeper.connect", "localhost:80");
        properties.setProperty("group.id", "test");
        // let the test fail fast
        properties.setProperty("request.timeout.ms", "3000");
        properties.setProperty("socket.timeout.ms", "3000");
        properties.setProperty("session.timeout.ms", "2000");
        properties.setProperty("fetch.max.wait.ms", "2000");
        properties.setProperty("heartbeat.interval.ms", "1000");
        properties.putAll(secureProps);
        FlinkKafkaConsumerBase<String> source = kafkaServer.getConsumer("doesntexist", new SimpleStringSchema(), properties);
        DataStream<String> stream = see.addSource(source);
        stream.print();
        see.execute("No broker test");
    } catch (ProgramInvocationException pie) {
        if (kafkaServer.getVersion().equals("0.9") || kafkaServer.getVersion().equals("0.10")) {
            assertTrue(pie.getCause() instanceof JobExecutionException);
            JobExecutionException jee = (JobExecutionException) pie.getCause();
            assertTrue(jee.getCause() instanceof TimeoutException);
            TimeoutException te = (TimeoutException) jee.getCause();
            assertEquals("Timeout expired while fetching topic metadata", te.getMessage());
        } else {
            assertTrue(pie.getCause() instanceof JobExecutionException);
            JobExecutionException jee = (JobExecutionException) pie.getCause();
            assertTrue(jee.getCause() instanceof RuntimeException);
            RuntimeException re = (RuntimeException) jee.getCause();
            assertTrue(re.getMessage().contains("Unable to retrieve any partitions for the requested topics [doesntexist]"));
        }
    }
}
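The block of short timeouts above is what keeps this negative test from hanging for the Kafka client's default timeouts when no broker is reachable. If several tests need the same fail-fast behavior, those settings can be collected in one place; the FailFastKafkaProps helper below is a hypothetical sketch, but the property keys are the standard Kafka consumer settings taken verbatim from the test:

import java.util.Properties;

// Hypothetical helper (not part of Flink): bundles the fail-fast Kafka
// consumer settings used in runFailOnNoBrokerTest above.
public final class FailFastKafkaProps {

    private FailFastKafkaProps() {
    }

    public static Properties create(String bootstrapServers, String groupId) {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", bootstrapServers);
        props.setProperty("group.id", groupId);
        // short timeouts so a connection attempt against a dead broker fails quickly
        props.setProperty("request.timeout.ms", "3000");
        props.setProperty("socket.timeout.ms", "3000");
        props.setProperty("session.timeout.ms", "2000");
        props.setProperty("fetch.max.wait.ms", "2000");
        props.setProperty("heartbeat.interval.ms", "1000");
        return props;
    }
}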
Use of org.apache.flink.client.program.ProgramInvocationException in project flink by apache.
Class JarActionHandler, method getJobGraphAndClassLoader.
protected Tuple2<JobGraph, ClassLoader> getJobGraphAndClassLoader(JarActionHandlerConfig config) throws Exception {
    // generate the graph
    JobGraph graph = null;
    PackagedProgram program = new PackagedProgram(
        new File(jarDir, config.getJarFile()), config.getEntryClass(), config.getProgramArgs());
    ClassLoader classLoader = program.getUserCodeClassLoader();
    Optimizer optimizer = new Optimizer(new DataStatistics(), new DefaultCostEstimator(), new Configuration());
    FlinkPlan plan = ClusterClient.getOptimizedPlan(optimizer, program, config.getParallelism());
    if (plan instanceof StreamingPlan) {
        graph = ((StreamingPlan) plan).getJobGraph();
    } else if (plan instanceof OptimizedPlan) {
        graph = new JobGraphGenerator().compileJobGraph((OptimizedPlan) plan);
    }
    if (graph == null) {
        throw new CompilerException("A valid job graph couldn't be generated for the jar.");
    }
    // Set the savepoint settings
    graph.setSavepointRestoreSettings(config.getSavepointRestoreSettings());
    for (URL jar : program.getAllLibraries()) {
        try {
            graph.addJar(new Path(jar.toURI()));
        } catch (URISyntaxException e) {
            throw new ProgramInvocationException("Invalid jar path. Unexpected error. :(");
        }
    }
    return Tuple2.of(graph, classLoader);
}
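The jar-attachment loop is where ProgramInvocationException enters this handler. The sketch below isolates that step, assuming only the JobGraph#addJar(Path) call and the ProgramInvocationException(String, Throwable) constructor that appear elsewhere on this page (the JarAttachment class is an illustrative wrapper, not Flink API); unlike the original, it preserves the offending URL and the URISyntaxException as the cause:

import java.net.URISyntaxException;
import java.net.URL;

import org.apache.flink.client.program.ProgramInvocationException;
import org.apache.flink.core.fs.Path;
import org.apache.flink.runtime.jobgraph.JobGraph;

// Illustrative wrapper (not Flink API) around the jar-attachment step above.
public final class JarAttachment {

    private JarAttachment() {
    }

    public static void attachLibraries(JobGraph graph, Iterable<URL> libraries) throws ProgramInvocationException {
        for (URL jar : libraries) {
            try {
                graph.addJar(new Path(jar.toURI()));
            } catch (URISyntaxException e) {
                // keep the bad URL and the original cause in the error
                throw new ProgramInvocationException("Invalid jar path: " + jar, e);
            }
        }
    }
}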
Use of org.apache.flink.client.program.ProgramInvocationException in project flink by apache.
Class JarRunHandler, method handleJsonRequest.
@Override
public String handleJsonRequest(Map<String, String> pathParams, Map<String, String> queryParams, ActorGateway jobManager) throws Exception {
    try {
        JarActionHandlerConfig config = JarActionHandlerConfig.fromParams(pathParams, queryParams);
        Tuple2<JobGraph, ClassLoader> graph = getJobGraphAndClassLoader(config);
        try {
            graph.f0.uploadUserJars(jobManager, timeout, clientConfig);
        } catch (IOException e) {
            throw new ProgramInvocationException("Failed to upload jar files to the job manager", e);
        }
        try {
            JobClient.submitJobDetached(jobManager, clientConfig, graph.f0, timeout, graph.f1);
        } catch (JobExecutionException e) {
            throw new ProgramInvocationException("Failed to submit the job to the job manager", e);
        }
        StringWriter writer = new StringWriter();
        JsonGenerator gen = JsonFactory.jacksonFactory.createGenerator(writer);
        gen.writeStartObject();
        gen.writeStringField("jobid", graph.f0.getJobID().toString());
        gen.writeEndObject();
        gen.close();
        return writer.toString();
    } catch (Exception e) {
        return sendError(e);
    }
}
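The tail of the handler serializes the job ID into a tiny JSON document. The same output can be produced with the stock Jackson streaming API, shown below (JsonFactory.jacksonFactory in the handler is a Flink-internal static holder around the same Jackson classes; JobIdResponse is an illustrative name):

import java.io.IOException;
import java.io.StringWriter;

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonGenerator;

// Illustrative sketch: produces {"jobid":"<id>"} like the handler above.
public final class JobIdResponse {

    private JobIdResponse() {
    }

    public static String toJson(String jobId) throws IOException {
        StringWriter writer = new StringWriter();
        JsonGenerator gen = new JsonFactory().createGenerator(writer);
        gen.writeStartObject();
        gen.writeStringField("jobid", jobId);
        gen.writeEndObject();
        gen.close();
        return writer.toString();
    }
}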
Use of org.apache.flink.client.program.ProgramInvocationException in project flink by apache.
Class ClassLoaderITCase, method testDisposeSavepointWithCustomKvState.
/**
 * Tests disposal of a savepoint, which contains custom user code KvState.
 */
@Test
public void testDisposeSavepointWithCustomKvState() throws Exception {
    Deadline deadline = new FiniteDuration(100, TimeUnit.SECONDS).fromNow();
    int port = testCluster.getLeaderRPCPort();
    File checkpointDir = FOLDER.newFolder();
    File outputDir = FOLDER.newFolder();
    final PackagedProgram program = new PackagedProgram(
        new File(CUSTOM_KV_STATE_JAR_PATH),
        new String[] {
            CUSTOM_KV_STATE_JAR_PATH,
            "localhost",
            String.valueOf(port),
            String.valueOf(parallelism),
            checkpointDir.toURI().toString(),
            "5000",
            outputDir.toURI().toString() });
    // Execute detached
    Thread invokeThread = new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                program.invokeInteractiveModeForExecution();
            } catch (ProgramInvocationException ignored) {
                ignored.printStackTrace();
            }
        }
    });
    LOG.info("Starting program invoke thread");
    invokeThread.start();
    // The job ID
    JobID jobId = null;
    ActorGateway jm = testCluster.getLeaderGateway(deadline.timeLeft());
    LOG.info("Waiting for job status running.");
    // Wait for running job
    while (jobId == null && deadline.hasTimeLeft()) {
        Future<Object> jobsFuture = jm.ask(JobManagerMessages.getRequestRunningJobsStatus(), deadline.timeLeft());
        RunningJobsStatus runningJobs = (RunningJobsStatus) Await.result(jobsFuture, deadline.timeLeft());
        for (JobStatusMessage runningJob : runningJobs.getStatusMessages()) {
            jobId = runningJob.getJobId();
            LOG.info("Job running. ID: " + jobId);
            break;
        }
        // Retry if job is not available yet
        if (jobId == null) {
            Thread.sleep(100);
        }
    }
    LOG.info("Wait for all tasks to be running.");
    Future<Object> allRunning = jm.ask(new WaitForAllVerticesToBeRunning(jobId), deadline.timeLeft());
    Await.ready(allRunning, deadline.timeLeft());
    LOG.info("All tasks are running.");
    // Trigger savepoint
    String savepointPath = null;
    for (int i = 0; i < 20; i++) {
        LOG.info("Triggering savepoint (" + (i + 1) + "/20).");
        Future<Object> savepointFuture = jm.ask(new TriggerSavepoint(jobId, Option.<String>empty()), deadline.timeLeft());
        Object savepointResponse = Await.result(savepointFuture, deadline.timeLeft());
        if (savepointResponse.getClass() == TriggerSavepointSuccess.class) {
            savepointPath = ((TriggerSavepointSuccess) savepointResponse).savepointPath();
            LOG.info("Triggered savepoint. Path: " + savepointPath);
        } else if (savepointResponse.getClass() == JobManagerMessages.TriggerSavepointFailure.class) {
            Throwable cause = ((JobManagerMessages.TriggerSavepointFailure) savepointResponse).cause();
            LOG.info("Failed to trigger savepoint. Retrying...", cause);
            // This can fail if the operators are not opened yet
            Thread.sleep(500);
        } else {
            throw new IllegalStateException("Unexpected response to TriggerSavepoint");
        }
    }
    assertNotNull("Failed to trigger savepoint", savepointPath);
    // Upload JAR
    LOG.info("Uploading JAR " + CUSTOM_KV_STATE_JAR_PATH + " for savepoint disposal.");
    List<BlobKey> blobKeys = BlobClient.uploadJarFiles(
        jm, deadline.timeLeft(), testCluster.userConfiguration(),
        Collections.singletonList(new Path(CUSTOM_KV_STATE_JAR_PATH)));
    // Dispose savepoint
    LOG.info("Disposing savepoint at " + savepointPath);
    Future<Object> disposeFuture = jm.ask(new DisposeSavepoint(savepointPath), deadline.timeLeft());
    Object disposeResponse = Await.result(disposeFuture, deadline.timeLeft());
    if (disposeResponse.getClass() == JobManagerMessages.getDisposeSavepointSuccess().getClass()) {
        // Success :-)
        LOG.info("Disposed savepoint at " + savepointPath);
    } else if (disposeResponse instanceof DisposeSavepointFailure) {
        throw new IllegalStateException("Failed to dispose savepoint " + disposeResponse);
    } else {
        throw new IllegalStateException("Unexpected response to DisposeSavepoint");
    }
}
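The invoke thread in this test deliberately only prints any ProgramInvocationException, since the assertions run against job-manager messages on the main thread. When a test should instead fail on invocation errors, the exception can be handed back through an AtomicReference; below is a sketch under that assumption (DetachedInvoker is an illustrative name, invokeInteractiveModeForExecution() is the same PackagedProgram call used above):

import java.util.concurrent.atomic.AtomicReference;

import org.apache.flink.client.program.PackagedProgram;
import org.apache.flink.client.program.ProgramInvocationException;

// Illustrative sketch (not Flink API): runs a packaged program detached and
// captures any invocation failure for the test's main thread to inspect.
public final class DetachedInvoker {

    private DetachedInvoker() {
    }

    public static Thread invokeDetached(final PackagedProgram program, final AtomicReference<Throwable> error) {
        Thread invokeThread = new Thread(new Runnable() {
            @Override
            public void run() {
                try {
                    program.invokeInteractiveModeForExecution();
                } catch (ProgramInvocationException e) {
                    error.set(e);
                }
            }
        }, "program-invoke");
        invokeThread.start();
        return invokeThread;
    }
}

After invokeThread.join() (or a timeout), the test can assert error.get() == null, or unwrap the cause chain as in the Kafka tests earlier on this page.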