Example usage of org.apache.flink.runtime.operators.lifecycle.graph.TestEventSource in the Apache Flink project.
From the class IncrementalStateReuseAfterFailureITCase, method createJob.
/**
 * Builds the test job: a test source followed by two chained keyed operators and a
 * discarding sink, with checkpointing and the changelog state backend enabled.
 *
 * @return a {@link TestJobWithDescription} wrapping the generated job graph together with
 *     the shared event queue and command dispatcher used to drive and observe the job
 */
private TestJobWithDescription createJob() {
    StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
    env.enableCheckpointing(200);
    // reliably fails Changelog with FLINK-25395, but might affect any incremental backend
    env.enableChangelogStateBackend(true);
    env.setRestartStrategy(RestartStrategies.fixedDelayRestart(1, 1));
    // a single key group and a single subtask keep failures easy to debug
    env.setMaxParallelism(1);
    env.setParallelism(1);

    TestEventQueue eventQueue = TestEventQueue.createShared(sharedObjects);
    TestCommandDispatcher commandDispatcher = TestCommandDispatcher.createShared(sharedObjects);

    DataStream<TestDataElement> source =
            env.addSource(new TestEventSource(UID_SRC, eventQueue, commandDispatcher))
                    .setUidHash(UID_SRC);

    SingleOutputStreamOperator<TestDataElement> firstTransform =
            source.keyBy(x -> x)
                    .transform(
                            "transform-1",
                            TypeInformation.of(TestDataElement.class),
                            new OneInputTestStreamOperatorFactory(
                                    UID_OP1, eventQueue, commandDispatcher))
                    .setUidHash(UID_OP1);

    // chain two keyed operators, so that one is checkpointed and the other one fails
    SingleOutputStreamOperator<TestDataElement> secondTransform =
            DataStreamUtils.reinterpretAsKeyedStream(firstTransform, x -> x)
                    .transform(
                            "transform-2",
                            TypeInformation.of(TestDataElement.class),
                            new OneInputTestStreamOperatorFactory(
                                    UID_OP2, eventQueue, commandDispatcher))
                    .setUidHash(UID_OP2);

    secondTransform.addSink(new DiscardingSink<>());

    return new TestJobWithDescription(
            env.getStreamGraph().getJobGraph(),
            emptySet(),
            emptySet(),
            emptySet(),
            emptyMap(),
            eventQueue,
            commandDispatcher);
}
Aggregations