Example use of org.apache.flink.streaming.api.functions.source.SourceFunction in the Apache Flink project.
Taken from the method testStopWithFailingSourceInOnePipeline of the class SavepointITCase.
/**
 * FLINK-21030
 *
 * <p>Tests the handling of a failure that happened while stopping an embarrassingly parallel
 * job with a Savepoint. The test expects that the stopping action fails and all executions are
 * in state {@code RUNNING} afterwards.
 *
 * @param failingSource the failing {@link SourceFunction} used in one of the two pipelines.
 * @param savepointDir the directory into which the savepoint is written.
 * @param expectedMaximumNumberOfRestarts the maximum number of restarts allowed by the restart
 *     strategy.
 * @param exceptionAssertion asserts the client-call exception to verify that the right error
 *     was handled.
 * @see SavepointITCase#failingPipelineLatch The latch used to trigger the successful start of
 *     the later on failing pipeline.
 * @see SavepointITCase#succeedingPipelineLatch The latch that triggers the successful start of
 *     the succeeding pipeline.
 * @throws Exception if an error occurred while running the test.
 */
private static void testStopWithFailingSourceInOnePipeline(
        InfiniteTestSource failingSource,
        File savepointDir,
        int expectedMaximumNumberOfRestarts,
        BiFunction<JobID, ExecutionException, Boolean> exceptionAssertion)
        throws Exception {
    MiniClusterWithClientResource cluster =
            new MiniClusterWithClientResource(
                    new MiniClusterResourceConfiguration.Builder().build());

    failingPipelineLatch = new OneShotLatch();
    succeedingPipelineLatch = new OneShotLatch();

    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.setParallelism(1);
    env.getConfig()
            .setRestartStrategy(
                    RestartStrategies.fixedDelayRestart(expectedMaximumNumberOfRestarts, 0));

    // Pipeline 1: contains the source that is expected to fail during stop-with-savepoint.
    // The map function triggers the latch so the test knows this pipeline started processing.
    env.addSource(failingSource)
            .name("Failing Source")
            .map(
                    value -> {
                        failingPipelineLatch.trigger();
                        return value;
                    })
            .addSink(new DiscardingSink<>());

    // Pipeline 2: an independent, healthy pipeline running alongside the failing one.
    env.addSource(new InfiniteTestSource())
            .name("Succeeding Source")
            .map(
                    value -> {
                        succeedingPipelineLatch.trigger();
                        return value;
                    })
            .addSink(new DiscardingSink<>());

    final JobGraph jobGraph = env.getStreamGraph().getJobGraph();

    cluster.before();
    try {
        ClusterClient<?> client = cluster.getClusterClient();
        // submitJob returns the ID of the submitted job; use this single variable for all
        // subsequent job references instead of re-deriving it via jobGraph.getJobID().
        final JobID jobID = client.submitJob(jobGraph).get();

        // we need to wait for both pipelines to be in state RUNNING because that's the only
        // state which allows creating a savepoint
        failingPipelineLatch.await();
        succeedingPipelineLatch.await();
        waitForAllTaskRunning(cluster.getMiniCluster(), jobID, false);

        try {
            client.stopWithSavepoint(
                            jobID,
                            false,
                            savepointDir.getAbsolutePath(),
                            SavepointFormatType.CANONICAL)
                    .get();
            fail("The future should fail exceptionally.");
        } catch (ExecutionException e) {
            // The whole ExecutionException is handed to the assertion so it can inspect the
            // full cause chain of the failed stop-with-savepoint call.
            assertThrowable(e, ex -> exceptionAssertion.apply(jobID, e));
        }

        // After the failed stop, the job must have recovered: all tasks of both pipelines
        // are expected to be RUNNING again.
        waitUntilAllTasksAreRunning(cluster.getRestClusterClient(), jobID);
    } finally {
        // Always tear down the mini cluster, even when the test body throws.
        cluster.after();
    }
}
End of example (listed under "Aggregations" of SourceFunction usages).