use of org.apache.flink.runtime.client.JobExecutionException in project flink by apache.
the class MiniClusterITCase, method testJobWithAFailingReceiverVertex.
@Test
public void testJobWithAFailingReceiverVertex() throws Exception {
    final int parallelism = 11;
    final MiniClusterConfiguration cfg =
            new MiniClusterConfiguration.Builder()
                    .setNumTaskManagers(1)
                    .setNumSlotsPerTaskManager(2 * parallelism)
                    .setConfiguration(getDefaultConfiguration())
                    .build();
    try (final MiniCluster miniCluster = new MiniCluster(cfg)) {
        miniCluster.start();
        // Sender -> Receiver topology; the receiver fails as soon as it runs.
        final JobVertex sender = new JobVertex("Sender");
        sender.setInvokableClass(Sender.class);
        sender.setParallelism(parallelism);
        final JobVertex receiver = new JobVertex("Receiver");
        receiver.setInvokableClass(ExceptionReceiver.class);
        receiver.setParallelism(parallelism);
        receiver.connectNewDataSetAsInput(
                sender, DistributionPattern.POINTWISE, ResultPartitionType.PIPELINED);
        final JobGraph jobGraph = JobGraphTestUtils.streamingJobGraph(sender, receiver);
        try {
            miniCluster.executeJobBlocking(jobGraph);
            fail("Job should fail.");
        } catch (JobExecutionException e) {
            // The failure cause must be discoverable in the exception's cause chain.
            assertTrue(findThrowable(e, Exception.class).isPresent());
            assertTrue(findThrowableWithMessage(e, "Test exception").isPresent());
        }
    }
}
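The receiver vertex is wired to an invokable that fails immediately, so the "Test exception" message can be asserted on the cause chain. A hypothetical sketch of such an invokable (the actual ExceptionReceiver in MiniClusterITCase is not reproduced in this excerpt):

import org.apache.flink.runtime.execution.Environment;
import org.apache.flink.runtime.jobgraph.tasks.AbstractInvokable;

// Hypothetical failing invokable; the real ExceptionReceiver lives in
// MiniClusterITCase and may differ from this sketch.
public class ExceptionReceiver extends AbstractInvokable {

    public ExceptionReceiver(Environment environment) {
        super(environment);
    }

    @Override
    public void invoke() throws Exception {
        // The runtime wraps this failure in a JobExecutionException,
        // which findThrowableWithMessage can then locate by message.
        throw new Exception("Test exception");
    }
}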
use of org.apache.flink.runtime.client.JobExecutionException in project flink by apache.
the class ElasticsearchSinkTestBase, method runInvalidElasticsearchClusterTest.
/**
 * Tests whether the Elasticsearch sink fails when there is no cluster to connect to.
 */
public void runInvalidElasticsearchClusterTest() throws Exception {
    final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    DataStreamSource<Tuple2<Integer, String>> source =
            env.addSource(new SourceSinkDataTestKit.TestDataSourceFunction());
    source.addSink(
            createElasticsearchSinkForNode(
                    1,
                    "invalid-cluster-name",
                    SourceSinkDataTestKit.getJsonSinkFunction("test"),
                    // unreachable IP address, so the sink cannot connect
                    "123.123.123.123"));
    try {
        env.execute("Elasticsearch Sink Test");
    } catch (JobExecutionException expectedException) {
        // test passes: the connection failure surfaced as a JobExecutionException
        return;
    }
    fail("Expected the job to fail with a JobExecutionException.");
}
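The catch-and-return pattern above predates JUnit's assertThrows; with JUnit 4.13+ on the classpath, the same intent can be stated more directly. A sketch only, not the form used in ElasticsearchSinkTestBase:

import static org.junit.Assert.assertThrows;

// Sketch: the same expected-failure assertion via assertThrows (JUnit 4.13+).
public void runInvalidElasticsearchClusterTestWithAssertThrows() {
    assertThrows(JobExecutionException.class, () -> {
        final StreamExecutionEnvironment env =
                StreamExecutionEnvironment.getExecutionEnvironment();
        env.addSource(new SourceSinkDataTestKit.TestDataSourceFunction())
                .addSink(createElasticsearchSinkForNode(
                        1,
                        "invalid-cluster-name",
                        SourceSinkDataTestKit.getJsonSinkFunction("test"),
                        "123.123.123.123"));
        env.execute("Elasticsearch Sink Test");
    });
}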
use of org.apache.flink.runtime.client.JobExecutionException in project flink by apache.
the class SavepointITCase, method testSubmitWithUnknownSavepointPath.
@Test
public void testSubmitWithUnknownSavepointPath() throws Exception {
    // Config
    int numTaskManagers = 1;
    int numSlotsPerTaskManager = 1;
    int parallelism = numTaskManagers * numSlotsPerTaskManager;
    final Configuration config = new Configuration();
    config.setString(CheckpointingOptions.SAVEPOINT_DIRECTORY, savepointDir.toURI().toString());
    MiniClusterWithClientResource cluster =
            new MiniClusterWithClientResource(
                    new MiniClusterResourceConfiguration.Builder()
                            .setConfiguration(config)
                            .setNumberTaskManagers(numTaskManagers)
                            .setNumberSlotsPerTaskManager(numSlotsPerTaskManager)
                            .build());
    cluster.before();
    ClusterClient<?> client = cluster.getClusterClient();
    try {
        // High retry count to ensure timeouts if restarted.
        int numberOfRetries = 1000;
        // Submit the job with a long restart delay, so the test times out
        // if the job manager tries to restart the job.
        final JobGraph jobGraph = createJobGraph(parallelism, numberOfRetries, 3600000);
        // Set a non-existing savepoint path.
        jobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath("unknown path"));
        assertEquals("unknown path", jobGraph.getSavepointRestoreSettings().getRestorePath());
        LOG.info("Submitting job " + jobGraph.getJobID() + " in detached mode.");
        try {
            submitJobAndWaitForResult(client, jobGraph, getClass().getClassLoader());
        } catch (Exception e) {
            // The unknown savepoint path must surface as a JobExecutionException
            // with a FileNotFoundException in the cause chain; rethrow anything else.
            Optional<JobExecutionException> expectedJobExecutionException =
                    findThrowable(e, JobExecutionException.class);
            Optional<FileNotFoundException> expectedFileNotFoundException =
                    findThrowable(e, FileNotFoundException.class);
            if (!(expectedJobExecutionException.isPresent()
                    && expectedFileNotFoundException.isPresent())) {
                throw e;
            }
        }
    } finally {
        cluster.after();
    }
}
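These tests lean on ExceptionUtils.findThrowable to search a failure's cause chain for an expected exception type. A simplified standalone equivalent, for illustration only; the real org.apache.flink.util.ExceptionUtils implementation is the authoritative version:

import java.util.Optional;

// Simplified illustration of a cause-chain search in the spirit of
// ExceptionUtils.findThrowable; not the actual Flink implementation.
public final class CauseChains {

    public static <T extends Throwable> Optional<T> findThrowable(
            Throwable throwable, Class<T> searchType) {
        Throwable t = throwable;
        while (t != null) {
            if (searchType.isAssignableFrom(t.getClass())) {
                // Found a match somewhere in the cause chain.
                return Optional.of(searchType.cast(t));
            }
            t = t.getCause();
        }
        return Optional.empty();
    }

    private CauseChains() {}
}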
use of org.apache.flink.runtime.client.JobExecutionException in project flink by apache.
the class RescalingITCase, method testSavepointRescalingNonPartitionedStateCausesException.
/**
 * Tests that a job cannot be restarted from a savepoint with a different parallelism if the
 * rescaled operator has non-partitioned state.
 */
@Test
public void testSavepointRescalingNonPartitionedStateCausesException() throws Exception {
    final int parallelism = numSlots / 2;
    final int parallelism2 = numSlots;
    final int maxParallelism = 13;
    Duration timeout = Duration.ofMinutes(3);
    Deadline deadline = Deadline.now().plus(timeout);
    ClusterClient<?> client = cluster.getClusterClient();
    try {
        JobGraph jobGraph =
                createJobGraphWithOperatorState(
                        parallelism, maxParallelism, OperatorCheckpointMethod.NON_PARTITIONED);
        // make sure the job does not finish before we take the savepoint
        StateSourceBase.canFinishLatch = new CountDownLatch(1);
        final JobID jobID = jobGraph.getJobID();
        client.submitJob(jobGraph).get();
        // wait until all tasks are running
        waitForAllTaskRunning(cluster.getMiniCluster(), jobGraph.getJobID(), false);
        // wait until the operator has processed some data
        StateSourceBase.workStartedLatch.await();
        CompletableFuture<String> savepointPathFuture =
                client.triggerSavepoint(jobID, null, SavepointFormatType.CANONICAL);
        final String savepointPath =
                savepointPathFuture.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
        // we took a savepoint, so the job may finish now
        StateSourceBase.canFinishLatch.countDown();
        client.cancel(jobID).get();
        while (!getRunningJobs(client).isEmpty()) {
            Thread.sleep(50);
        }
        // the job was successfully removed; resubmit with a different parallelism
        JobGraph scaledJobGraph =
                createJobGraphWithOperatorState(
                        parallelism2, maxParallelism, OperatorCheckpointMethod.NON_PARTITIONED);
        scaledJobGraph.setSavepointRestoreSettings(SavepointRestoreSettings.forPath(savepointPath));
        submitJobAndWaitForResult(client, scaledJobGraph, getClass().getClassLoader());
    } catch (JobExecutionException exception) {
        if (exception.getCause() instanceof IllegalStateException) {
            // expected: an IllegalStateException wrapped in a JobExecutionException,
            // because the job containing non-partitioned state is being rescaled
        } else {
            throw exception;
        }
    }
}
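SavepointRestoreSettings also has a two-argument forPath variant for restores that should tolerate state that cannot be mapped to the new job. A short usage sketch; note this only skips unmatched state, it does not make non-partitioned state rescalable:

// forPath(path, allowNonRestoredState): the boolean controls whether savepoint
// state that cannot be mapped to any operator of the new job is skipped
// instead of failing the restore.
scaledJobGraph.setSavepointRestoreSettings(
        SavepointRestoreSettings.forPath(savepointPath, true));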
use of org.apache.flink.runtime.client.JobExecutionException in project flink by apache.
the class CustomSerializationITCase, method testIncorrectSerializer2.
@Test
public void testIncorrectSerializer2() {
    try {
        ExecutionEnvironment env = ExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(PARLLELISM);
        env.generateSequence(1, 10 * PARLLELISM)
                .map(new MapFunction<Long, ConsumesTooMuchSpanning>() {
                    @Override
                    public ConsumesTooMuchSpanning map(Long value) throws Exception {
                        return new ConsumesTooMuchSpanning();
                    }
                })
                .rebalance()
                .output(new DiscardingOutputFormat<ConsumesTooMuchSpanning>());
        env.execute();
    } catch (JobExecutionException e) {
        // the broken custom serializer must surface as an IOException in the cause chain
        Optional<IOException> rootCause = findThrowable(e, IOException.class);
        assertTrue(rootCause.isPresent());
        assertTrue(rootCause.get().getMessage().contains("broken serialization"));
    } catch (Exception e) {
        e.printStackTrace();
        fail(e.getMessage());
    }
}
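The test depends on a type whose serialization is deliberately inconsistent. A hypothetical sketch in the spirit of ConsumesTooMuchSpanning (the actual class is not shown in this excerpt): the read side consumes more bytes than the write side produced, which the runtime reports as an IOException about broken serialization.

import java.io.IOException;
import org.apache.flink.core.memory.DataInputView;
import org.apache.flink.core.memory.DataOutputView;
import org.apache.flink.types.Value;

// Hypothetical broken-serialization type; not the actual
// ConsumesTooMuchSpanning from CustomSerializationITCase.
public class BrokenSerializationValue implements Value {

    @Override
    public void write(DataOutputView out) throws IOException {
        out.writeInt(42); // writes 4 bytes
    }

    @Override
    public void read(DataInputView in) throws IOException {
        in.readLong(); // attempts to read 8 bytes -> consumes too much
    }
}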