Use of org.apache.flink.streaming.connectors.kafka.testutils.FailingIdentityMapper in project flink by apache.
From class KafkaShuffleExactlyOnceITCase, method testAssignedToPartitionFailureRecovery.
/**
 * Tests failure recovery with partition assignment after processing 2/3 of the data.
 *
 * <p>Schema: (key, timestamp, source instance Id). Producer parallelism = 2; Kafka
 * partitions = 3; consumer parallelism = 3.
 */
private void testAssignedToPartitionFailureRecovery(
        int numElementsPerProducer, TimeCharacteristic timeCharacteristic) throws Exception {
    String topic = topic("partition_failure_recovery", timeCharacteristic);
    final int numberOfPartitions = 3;
    final int producerParallelism = 2;
    // Fail once after 2/3 of all produced records have been processed.
    final int failAfterElements = numElementsPerProducer * producerParallelism * 2 / 3;
    createTestTopic(topic, numberOfPartitions, 1);
    final StreamExecutionEnvironment env = createEnvironment(producerParallelism, timeCharacteristic);
    KeyedStream<Tuple3<Integer, Long, Integer>, Tuple> keyedStream = createKafkaShuffle(
            env, topic, numElementsPerProducer, producerParallelism, timeCharacteristic, numberOfPartitions);
    keyedStream
            // Check that each key is consumed from the expected Kafka partition.
            .process(new PartitionValidator(keyedStream.getKeySelector(), numberOfPartitions, topic))
            .setParallelism(numberOfPartitions)
            .map(new ToInteger(producerParallelism))
            .setParallelism(numberOfPartitions)
            .map(new FailingIdentityMapper<>(failAfterElements)) // inject exactly one failure
            .setParallelism(1)
            .addSink(new ValidatingExactlyOnceSink(numElementsPerProducer * producerParallelism))
            .setParallelism(1);
    // Reset the shared flag so the mapper fails exactly once in this run.
    FailingIdentityMapper.failedBefore = false;
    tryExecute(env, topic);
    deleteTestTopic(topic);
}
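FailingIdentityMapper is the failure-injection piece of both tests: it forwards records unchanged and throws once after a configured number of elements, and the static failedBefore flag (reset before each run above) ensures the job fails only on the first attempt. A minimal sketch of that idea, assuming a simplified class name and ignoring the checkpoint coordination the real Flink utility performs:

import org.apache.flink.api.common.functions.MapFunction;

/** Simplified illustration of the FailingIdentityMapper pattern; not the actual Flink implementation. */
public class SketchFailingIdentityMapper<T> implements MapFunction<T, T> {

    // Shared across restarts of the same job so the failure is injected only once per test run.
    public static volatile boolean failedBefore = false;

    private final int failAfterElements;
    private int numElements;

    public SketchFailingIdentityMapper(int failAfterElements) {
        this.failAfterElements = failAfterElements;
    }

    @Override
    public T map(T value) throws Exception {
        numElements++;
        // Fail exactly once: after the configured element count, and only on the first attempt.
        if (!failedBefore && numElements >= failAfterElements) {
            failedBefore = true;
            throw new Exception("Artificial failure for recovery testing");
        }
        return value;
    }
}

After the induced failure, Flink restarts the job from the last checkpoint, and ValidatingExactlyOnceSink then verifies that every expected element arrives exactly once despite the restart.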
Use of org.apache.flink.streaming.connectors.kafka.testutils.FailingIdentityMapper in project flink by apache.
From class KafkaShuffleExactlyOnceITCase, method testKafkaShuffleFailureRecovery.
/**
 * Tests failure recovery after processing 2/3 of the data.
 *
 * <p>Schema: (key, timestamp, source instance Id). Producer parallelism = 1; Kafka
 * partitions = 1; consumer parallelism = 1.
 */
private void testKafkaShuffleFailureRecovery(
        int numElementsPerProducer, TimeCharacteristic timeCharacteristic) throws Exception {
    String topic = topic("failure_recovery", timeCharacteristic);
    final int numberOfPartitions = 1;
    final int producerParallelism = 1;
    // Fail once after 2/3 of all produced records have been processed.
    final int failAfterElements = numElementsPerProducer * numberOfPartitions * 2 / 3;
    createTestTopic(topic, numberOfPartitions, 1);
    final StreamExecutionEnvironment env =
            createEnvironment(producerParallelism, timeCharacteristic).enableCheckpointing(500);
    createKafkaShuffle(env, topic, numElementsPerProducer, producerParallelism, timeCharacteristic, numberOfPartitions)
            .map(new FailingIdentityMapper<>(failAfterElements)) // inject exactly one failure
            .setParallelism(1)
            .map(new ToInteger(producerParallelism))
            .setParallelism(1)
            .addSink(new ValidatingExactlyOnceSink(numElementsPerProducer * producerParallelism))
            .setParallelism(1);
    // Reset the shared flag so the mapper fails exactly once in this run.
    FailingIdentityMapper.failedBefore = false;
    tryExecute(env, topic);
    deleteTestTopic(topic);
}
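Both helpers are private and parameterized over the element count and time characteristic, so the enclosing test class would invoke them once per configuration. A hedged sketch of such callers (the method names and the element count of 1000 are assumptions for illustration, not the actual Flink test methods):

// Hypothetical JUnit entry points driving the helpers above.
@Test
public void testFailureRecoveryEventTime() throws Exception {
    testKafkaShuffleFailureRecovery(1000, TimeCharacteristic.EventTime);
}

@Test
public void testAssignedToPartitionFailureRecoveryEventTime() throws Exception {
    testAssignedToPartitionFailureRecovery(1000, TimeCharacteristic.EventTime);
}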