Use of org.apache.flink.client.program.ProgramInvocationException in project flink by apache.
The class RemoteExecutorHostnameResolutionTest, method testUnresolvableHostname2.
@Test
public void testUnresolvableHostname2() {
    InetSocketAddress add = new InetSocketAddress(nonExistingHostname, port);
    RemoteExecutor exec = new RemoteExecutor(add, new Configuration(), Collections.<URL>emptyList(), Collections.<URL>emptyList());
    try {
        exec.executePlan(getProgram());
        fail("This should fail with a ProgramInvocationException");
    } catch (ProgramInvocationException e) {
        // that is what we want!
        assertTrue(e.getCause() instanceof UnknownHostException);
    } catch (Exception e) {
        System.err.println("Wrong exception!");
        e.printStackTrace();
        fail(e.getMessage());
    }
}
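As an aside, this kind of resolution failure can also be detected before an executor is ever constructed: java.net.InetSocketAddress attempts the DNS lookup eagerly in its constructor, and isUnresolved() reports whether that lookup failed. A minimal sketch, with a placeholder hostname and port:

import java.net.InetSocketAddress;

public class HostResolutionCheck {
    public static void main(String[] args) {
        // The constructor performs the lookup immediately;
        // isUnresolved() tells us whether it failed.
        InetSocketAddress addr = new InetSocketAddress("this.host.does.not.exist", 6123);
        if (addr.isUnresolved()) {
            System.err.println("Cannot resolve: " + addr.getHostString());
        }
    }
}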
Use of org.apache.flink.client.program.ProgramInvocationException in project flink by apache.
The class KafkaConsumerTestBase, method runSimpleConcurrentProducerConsumerTopology.
/**
 * Ensures that Kafka is working on both the producer and the consumer side.
 * This executes a job that contains two Flink pipelines.
 *
 * <pre>
 * (generator source) --> (kafka sink)-[KAFKA-TOPIC]-(kafka source) --> (validating sink)
 * </pre>
 *
 * We need to externally retry this test. We cannot let Flink's retry mechanism do it,
 * because the Kafka producer does not guarantee exactly-once output. Hence, a recovery
 * would introduce duplicates that cause the test to fail.
 *
 * This test also ensures that FLINK-3156 does not happen again:
 *
 * The following situation caused an NPE in the FlinkKafkaConsumer:
 *
 * <pre>
 * topic-1 <-- elements are only produced into topic-1.
 * topic-2
 * </pre>
 *
 * Therefore, this test also consumes from an empty topic.
 */
@RetryOnException(times = 2, exception = kafka.common.NotLeaderForPartitionException.class)
public void runSimpleConcurrentProducerConsumerTopology() throws Exception {
    final String topic = "concurrentProducerConsumerTopic_" + UUID.randomUUID().toString();
    final String additionalEmptyTopic = "additionalEmptyTopic_" + UUID.randomUUID().toString();

    final int parallelism = 3;
    final int elementsPerPartition = 100;
    final int totalElements = parallelism * elementsPerPartition;

    createTestTopic(topic, parallelism, 2);
    // create an empty topic which will remain empty all the time
    createTestTopic(additionalEmptyTopic, parallelism, 1);

    final StreamExecutionEnvironment env = StreamExecutionEnvironment.createRemoteEnvironment("localhost", flinkPort);
    env.setParallelism(parallelism);
    env.enableCheckpointing(500);
    // fail immediately
    env.setRestartStrategy(RestartStrategies.noRestart());
    env.getConfig().disableSysoutLogging();

    TypeInformation<Tuple2<Long, String>> longStringType = TypeInfoParser.parse("Tuple2<Long, String>");
    TypeInformationSerializationSchema<Tuple2<Long, String>> sourceSchema = new TypeInformationSerializationSchema<>(longStringType, env.getConfig());
    TypeInformationSerializationSchema<Tuple2<Long, String>> sinkSchema = new TypeInformationSerializationSchema<>(longStringType, env.getConfig());

    // ----------- add producer dataflow ----------

    DataStream<Tuple2<Long, String>> stream = env.addSource(new RichParallelSourceFunction<Tuple2<Long, String>>() {

        private boolean running = true;

        @Override
        public void run(SourceContext<Tuple2<Long, String>> ctx) throws InterruptedException {
            int cnt = getRuntimeContext().getIndexOfThisSubtask() * elementsPerPartition;
            int limit = cnt + elementsPerPartition;
            while (running && cnt < limit) {
                ctx.collect(new Tuple2<>(1000L + cnt, "kafka-" + cnt));
                cnt++;
                // we delay data generation a bit so that we are sure that some checkpoints are
                // triggered (for FLINK-3156)
                Thread.sleep(50);
            }
        }

        @Override
        public void cancel() {
            running = false;
        }
    });

    Properties producerProperties = FlinkKafkaProducerBase.getPropertiesFromBrokerList(brokerConnectionStrings);
    producerProperties.setProperty("retries", "3");
    producerProperties.putAll(secureProps);
    kafkaServer.produceIntoKafka(stream, topic, new KeyedSerializationSchemaWrapper<>(sinkSchema), producerProperties, null);

    // ----------- add consumer dataflow ----------

    List<String> topics = new ArrayList<>();
    topics.add(topic);
    topics.add(additionalEmptyTopic);

    Properties props = new Properties();
    props.putAll(standardProps);
    props.putAll(secureProps);

    FlinkKafkaConsumerBase<Tuple2<Long, String>> source = kafkaServer.getConsumer(topics, sourceSchema, props);
    DataStreamSource<Tuple2<Long, String>> consuming = env.addSource(source).setParallelism(parallelism);

    consuming.addSink(new RichSinkFunction<Tuple2<Long, String>>() {

        private int elCnt = 0;

        private BitSet validator = new BitSet(totalElements);

        @Override
        public void invoke(Tuple2<Long, String> value) throws Exception {
            String[] sp = value.f1.split("-");
            int v = Integer.parseInt(sp[1]);
            assertEquals(value.f0 - 1000, (long) v);
            assertFalse("Received tuple twice", validator.get(v));
            validator.set(v);
            elCnt++;
            if (elCnt == totalElements) {
                // check if everything in the bitset is set to true
                int nc;
                if ((nc = validator.nextClearBit(0)) != totalElements) {
                    fail("The bitset was not set to 1 on all elements. Next clear: " + nc + " Set: " + validator);
                }
                throw new SuccessException();
            }
        }

        @Override
        public void close() throws Exception {
            super.close();
        }
    }).setParallelism(1);

    try {
        tryExecutePropagateExceptions(env, "runSimpleConcurrentProducerConsumerTopology");
    } catch (ProgramInvocationException | JobExecutionException e) {
        // search the cause chain for a nested NotLeaderForPartitionException,
        // which triggers the @RetryOnException rule above; the depth bound
        // guards against cyclic cause chains
        Throwable cause = e.getCause();
        int depth = 0;
        while (cause != null && depth++ < 20) {
            if (cause instanceof kafka.common.NotLeaderForPartitionException) {
                throw (Exception) cause;
            }
            cause = cause.getCause();
        }
        throw e;
    }

    deleteTestTopic(topic);
}
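The depth-bounded cause-chain walk at the end of this test is a reusable pattern. A generic sketch (the Exceptions class and findCause helper are our own names, not Flink API):

import java.util.Optional;

final class Exceptions {

    // Walks the cause chain up to maxDepth links and returns the first cause
    // of the given type. The depth bound protects against cyclic cause chains,
    // just like the counter in the test above.
    static <T extends Throwable> Optional<T> findCause(Throwable root, Class<T> type, int maxDepth) {
        Throwable cause = root.getCause();
        for (int depth = 0; cause != null && depth < maxDepth; depth++) {
            if (type.isInstance(cause)) {
                return Optional.of(type.cast(cause));
            }
            cause = cause.getCause();
        }
        return Optional.empty();
    }
}

With such a helper, the catch block reduces to checking findCause(e, kafka.common.NotLeaderForPartitionException.class, 20) and rethrowing the match if present, otherwise rethrowing e.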
Use of org.apache.flink.client.program.ProgramInvocationException in project flink by apache.
The class FlinkClient, method submitTopologyWithOpts.
/**
 * Parameter {@code uploadedJarLocation} is actually used to point to the local jar, because Flink does not support
 * uploading a jar file beforehand. Jar files are always uploaded directly when a program is submitted.
 */
public void submitTopologyWithOpts(final String name, final String uploadedJarLocation, final FlinkTopology topology) throws AlreadyAliveException, InvalidTopologyException {
    if (this.getTopologyJobId(name) != null) {
        throw new AlreadyAliveException();
    }

    final URI uploadedJarUri;
    final URL uploadedJarUrl;
    try {
        uploadedJarUri = new File(uploadedJarLocation).getAbsoluteFile().toURI();
        uploadedJarUrl = uploadedJarUri.toURL();
        JobWithJars.checkJarFile(uploadedJarUrl);
    } catch (final IOException e) {
        throw new RuntimeException("Problem with jar file " + uploadedJarLocation, e);
    }

    try {
        FlinkClient.addStormConfigToTopology(topology, conf);
    } catch (ClassNotFoundException e) {
        LOG.error("Could not register class for Kryo serialization.", e);
        throw new InvalidTopologyException("Could not register class for Kryo serialization.");
    }

    final StreamGraph streamGraph = topology.getExecutionEnvironment().getStreamGraph();
    streamGraph.setJobName(name);

    final JobGraph jobGraph = streamGraph.getJobGraph();
    jobGraph.addJar(new Path(uploadedJarUri));

    final Configuration configuration = jobGraph.getJobConfiguration();
    configuration.setString(ConfigConstants.JOB_MANAGER_IPC_ADDRESS_KEY, jobManagerHost);
    configuration.setInteger(ConfigConstants.JOB_MANAGER_IPC_PORT_KEY, jobManagerPort);

    final ClusterClient client;
    try {
        client = new StandaloneClusterClient(configuration);
    } catch (final IOException e) {
        throw new RuntimeException("Could not establish a connection to the job manager", e);
    }

    try {
        ClassLoader classLoader = JobWithJars.buildUserCodeClassLoader(Collections.<URL>singletonList(uploadedJarUrl), Collections.<URL>emptyList(), this.getClass().getClassLoader());
        client.runDetached(jobGraph, classLoader);
    } catch (final ProgramInvocationException e) {
        throw new RuntimeException("Cannot execute job due to ProgramInvocationException", e);
    }
}
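The jar handling at the top of the method can be exercised on its own: convert the local path to an absolute file URL, then let JobWithJars.checkJarFile reject anything that is not a readable jar. A sketch under the same Flink version, with a placeholder path:

import java.io.File;
import java.io.IOException;
import java.net.URL;
import org.apache.flink.client.program.JobWithJars;

File jar = new File("target/my-topology.jar").getAbsoluteFile();
try {
    URL jarUrl = jar.toURI().toURL();
    // throws IOException if the file is missing or not a valid jar
    JobWithJars.checkJarFile(jarUrl);
} catch (IOException e) {
    throw new RuntimeException("Problem with jar file " + jar, e);
}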
Use of org.apache.flink.client.program.ProgramInvocationException in project flink by apache.
The class CliFrontend, method executeProgram.
// --------------------------------------------------------------------------------------------
//  Interaction with programs and JobManager
// --------------------------------------------------------------------------------------------

protected int executeProgram(PackagedProgram program, ClusterClient client, int parallelism) {
    logAndSysout("Starting execution of program");

    JobSubmissionResult result;
    try {
        result = client.run(program, parallelism);
    } catch (ProgramParametrizationException e) {
        return handleParametrizationException(e);
    } catch (ProgramMissingJobException e) {
        return handleMissingJobException();
    } catch (ProgramInvocationException e) {
        return handleError(e);
    } finally {
        program.deleteExtractedLibraries();
    }

    if (null == result) {
        logAndSysout("No JobSubmissionResult returned, please make sure you called " + "ExecutionEnvironment.execute()");
        return 1;
    }

    if (result.isJobExecutionResult()) {
        logAndSysout("Program execution finished");
        JobExecutionResult execResult = result.getJobExecutionResult();
        System.out.println("Job with JobID " + execResult.getJobID() + " has finished.");
        System.out.println("Job Runtime: " + execResult.getNetRuntime() + " ms");
        Map<String, Object> accumulatorsResult = execResult.getAllAccumulatorResults();
        if (accumulatorsResult.size() > 0) {
            System.out.println("Accumulator Results: ");
            System.out.println(AccumulatorHelper.getResultsFormated(accumulatorsResult));
        }
    } else {
        logAndSysout("Job has been submitted with JobID " + result.getJobID());
    }

    return 0;
}
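The branch on isJobExecutionResult() is what distinguishes an attached run (the job already completed and carries results) from a detached submission (only a JobID is known). A condensed sketch of just that distinction; the report method name is ours:

import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.common.JobSubmissionResult;

// Sketch: the two shapes a JobSubmissionResult can take, as handled above.
static void report(JobSubmissionResult result) {
    if (result.isJobExecutionResult()) {
        // attached submission: the job already ran to completion
        JobExecutionResult exec = result.getJobExecutionResult();
        System.out.println("Job " + exec.getJobID() + " finished in " + exec.getNetRuntime() + " ms");
    } else {
        // detached submission: only the JobID is known at this point
        System.out.println("Job submitted with JobID " + result.getJobID());
    }
}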
Use of org.apache.flink.client.program.ProgramInvocationException in project flink by apache.
The class CliFrontendPackageProgramTest, method testFileNotJarFile.
@Test
public void testFileNotJarFile() {
    try {
        CliFrontend frontend = new CliFrontend(CliFrontendTestUtils.getConfigDir());

        ProgramOptions options = mock(ProgramOptions.class);
        when(options.getJarFilePath()).thenReturn(getNonJarFilePath());

        try {
            frontend.buildProgram(options);
            fail("should throw an exception");
        } catch (ProgramInvocationException e) {
            // that's what we want
        }
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
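On JUnit 4.13 or later, the inner try/catch/fail pattern can be written more compactly with assertThrows; a sketch under that version assumption:

import static org.junit.Assert.assertThrows;

// Equivalent expectation, assuming JUnit 4.13+:
assertThrows(ProgramInvocationException.class, () -> frontend.buildProgram(options));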