Use of org.apache.flink.runtime.checkpoint.OperatorSubtaskState in project flink by apache.
From the class FlinkKafkaProducerITCase, method repartitionAndExecute:
private OperatorSubtaskState repartitionAndExecute(String topic, OperatorSubtaskState inputStates, int oldParallelism, int newParallelism, int maxParallelism, Iterator<Integer> inputData) throws Exception {
List<OperatorSubtaskState> outputStates = new ArrayList<>();
List<OneInputStreamOperatorTestHarness<Integer, Object>> testHarnesses = new ArrayList<>();
for (int subtaskIndex = 0; subtaskIndex < newParallelism; subtaskIndex++) {
OperatorSubtaskState initState = AbstractStreamOperatorTestHarness.repartitionOperatorState(inputStates, maxParallelism, oldParallelism, newParallelism, subtaskIndex);
OneInputStreamOperatorTestHarness<Integer, Object> testHarness = createTestHarness(topic, maxParallelism, newParallelism, subtaskIndex, FlinkKafkaProducer.Semantic.EXACTLY_ONCE);
testHarnesses.add(testHarness);
testHarness.setup();
testHarness.initializeState(initState);
testHarness.open();
if (inputData.hasNext()) {
int nextValue = inputData.next();
testHarness.processElement(nextValue, 0);
OperatorSubtaskState snapshot = testHarness.snapshot(0, 0);
outputStates.add(snapshot);
checkState(snapshot.getRawOperatorState().isEmpty(), "Unexpected raw operator state");
checkState(snapshot.getManagedKeyedState().isEmpty(), "Unexpected managed keyed state");
checkState(snapshot.getRawKeyedState().isEmpty(), "Unexpected raw keyed state");
// Write negated (poison) values into further pending transactions; these snapshots are
// never confirmed, so the negated records must never become visible to consumers.
for (int i = 1; i < FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE - 1; i++) {
testHarness.processElement(-nextValue, 0);
testHarness.snapshot(i, 0);
}
}
}
for (OneInputStreamOperatorTestHarness<Integer, Object> testHarness : testHarnesses) {
testHarness.close();
}
return AbstractStreamOperatorTestHarness.repackageState(outputStates.toArray(new OperatorSubtaskState[outputStates.size()]));
}
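For reference, a rescaling test could chain this helper, feeding each run's repackaged state back in at a different parallelism. The sketch below is illustrative only: the topic name, parallelism values, and the empty initial state built via OperatorSubtaskState.builder() are assumptions, not code from this class (older Flink versions used a different way to create an empty state).
// Hypothetical driver (not from this class): scale from 1 subtask to 3, then down to 2.
Iterator<Integer> inputData = IntStream.range(0, 100).boxed().iterator(); // java.util.stream.IntStream
OperatorSubtaskState state = OperatorSubtaskState.builder().build(); // assumed empty-state API
state = repartitionAndExecute("rescaling-topic", state, 1, 3, 4, inputData);
state = repartitionAndExecute("rescaling-topic", state, 3, 2, 4, inputData);
// Afterwards, a read_committed consumer should see only the non-negated values.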
Use of org.apache.flink.runtime.checkpoint.OperatorSubtaskState in project flink by apache.
From the class FlinkKafkaProducerITCase, method testFailBeforeNotifyAndResumeWorkAfterwards:
/**
 * This test checks whether FlinkKafkaProducer correctly aborts lingering transactions after a
 * failure. If such transactions were left lingering, consumers would be unable to read
 * committed records created after the lingering transaction.
 */
@Test
public void testFailBeforeNotifyAndResumeWorkAfterwards() throws Exception {
String topic = "flink-kafka-producer-fail-before-notify";
OneInputStreamOperatorTestHarness<Integer, Object> testHarness1 = createTestHarness(topic);
checkProducerLeak();
testHarness1.setup();
testHarness1.open();
testHarness1.processElement(42, 0);
testHarness1.snapshot(0, 1);
testHarness1.processElement(43, 2);
OperatorSubtaskState snapshot1 = testHarness1.snapshot(1, 3);
testHarness1.processElement(44, 4);
testHarness1.snapshot(2, 5);
testHarness1.processElement(45, 6);
// Deliberately keep the previous testHarness open: recovery must not depend on close()
// cleaning anything up, because after a real failure close() might never be called.
OneInputStreamOperatorTestHarness<Integer, Object> testHarness2 = createTestHarness(topic);
testHarness2.setup();
// restore from snapshot1, transactions with records 44 and 45 should be aborted
testHarness2.initializeState(snapshot1);
testHarness2.open();
// write and commit more records, after potentially lingering transactions
testHarness2.processElement(46, 7);
testHarness2.snapshot(4, 8);
testHarness2.processElement(47, 9);
testHarness2.notifyOfCompletedCheckpoint(4);
// now we should have:
// - records 42 and 43 in committed transactions
// - aborted transactions with records 44 and 45
// - committed transaction with record 46
// - pending transaction with record 47
assertExactlyOnceForTopic(createProperties(), topic, Arrays.asList(42, 43, 46));
try {
testHarness1.close();
} catch (Exception e) {
// The only acceptable failure here is ProducerFencedException, because testHarness2
// reuses the same transactional ID.
if (!(e.getCause() instanceof ProducerFencedException)) {
fail("Received unexpected exception " + e);
}
}
testHarness2.close();
deleteTestTopic(topic);
checkProducerLeak();
}
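assertExactlyOnceForTopic is not shown in this listing; conceptually it has to read the topic with read_committed isolation so that records from aborted or still-pending transactions stay invisible. A minimal sketch of such a check against the plain Kafka consumer API (the helper name, group id, and single poll are illustrative assumptions, not this class's actual helper):
import java.time.Duration;
import java.util.*;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.serialization.ByteArrayDeserializer;

static List<byte[]> readCommittedValues(Properties baseProperties, String topic) {
    Properties props = new Properties();
    props.putAll(baseProperties);
    // Only records of committed transactions are handed out at this isolation level.
    props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "exactly-once-check");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ByteArrayDeserializer.class.getName());
    List<byte[]> values = new ArrayList<>();
    try (KafkaConsumer<byte[], byte[]> consumer = new KafkaConsumer<>(props)) {
        consumer.subscribe(Collections.singletonList(topic));
        // One poll keeps the sketch short; a real helper would poll until caught up.
        for (ConsumerRecord<byte[], byte[]> record : consumer.poll(Duration.ofSeconds(5))) {
            values.add(record.value());
        }
    }
    return values;
}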
Use of org.apache.flink.runtime.checkpoint.OperatorSubtaskState in project flink by apache.
From the class FlinkKafkaProducerITCase, method testRestoreToCheckpointAfterExceedingProducersPool:
/**
 * This test ensures that transactions reusing transactional.ids (after returning to the pool)
 * do not clash with previous transactions that used the same transactional.ids.
 */
@Test
public void testRestoreToCheckpointAfterExceedingProducersPool() throws Exception {
String topic = "flink-kafka-producer-fail-before-notify";
try (OneInputStreamOperatorTestHarness<Integer, Object> testHarness1 = createTestHarness(topic)) {
testHarness1.setup();
testHarness1.open();
testHarness1.processElement(42, 0);
OperatorSubtaskState snapshot = testHarness1.snapshot(0, 0);
testHarness1.processElement(43, 0);
testHarness1.notifyOfCompletedCheckpoint(0);
try {
// Exhaust the producer pool: every snapshot() without a matching
// notifyOfCompletedCheckpoint() keeps one pre-committed transaction (and its producer)
// in flight, so this loop must eventually fail with PRODUCERS_POOL_EMPTY.
for (int i = 0; i < FlinkKafkaProducer.DEFAULT_KAFKA_PRODUCERS_POOL_SIZE; i++) {
testHarness1.snapshot(i + 1, 0);
testHarness1.processElement(i, 0);
}
throw new IllegalStateException("This should not be reached.");
} catch (Exception ex) {
if (!isCausedBy(FlinkKafkaErrorCode.PRODUCERS_POOL_EMPTY, ex)) {
throw ex;
}
}
// Resume the lingering transactions before testHarness1 is closed (in case of failures,
// close() might never be called).
try (OneInputStreamOperatorTestHarness<Integer, Object> testHarness2 = createTestHarness(topic)) {
testHarness2.setup();
// restore from snapshot; every transaction opened after it (record 43 and the
// pool-exhausting writes above) should be aborted
testHarness2.initializeState(snapshot);
testHarness2.open();
}
assertExactlyOnceForTopic(createProperties(), topic, Arrays.asList(42));
deleteTestTopic(topic);
} catch (Exception ex) {
// testHarness1 will be fenced off after creating and closing testHarness2
if (!findThrowable(ex, ProducerFencedException.class).isPresent()) {
throw ex;
}
}
checkProducerLeak();
}
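The isCausedBy check walks the exception's cause chain looking for a matching FlinkKafkaErrorCode. A sketch of such a helper, assuming FlinkKafkaException exposes getErrorCode() (as in the flink-connector-kafka module); the actual helper in this class may differ:
private static boolean isCausedBy(FlinkKafkaErrorCode errorCode, Throwable throwable) {
    // The pool-empty failure may be wrapped by the harness, so inspect every cause.
    for (Throwable t = throwable; t != null; t = t.getCause()) {
        if (t instanceof FlinkKafkaException
                && ((FlinkKafkaException) t).getErrorCode() == errorCode) {
            return true;
        }
    }
    return false;
}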
Use of org.apache.flink.runtime.checkpoint.OperatorSubtaskState in project flink by apache.
From the class FlinkKafkaProducerITCase, method testRecoverCommittedTransaction:
@Test
public void testRecoverCommittedTransaction() throws Exception {
String topic = "flink-kafka-producer-recover-committed-transaction";
OneInputStreamOperatorTestHarness<Integer, Object> testHarness = createTestHarness(topic);
testHarness.setup();
// producerA - start transaction (txn) 0
testHarness.open();
// producerA - write 42 in txn 0
testHarness.processElement(42, 0);
// producerA - pre-commit txn 0; producerB - start txn 1
OperatorSubtaskState checkpoint0 = testHarness.snapshot(0, 1);
// producerB - write 43 in txn 1
testHarness.processElement(43, 2);
// producerA - commit txn 0 and return producerA to the pool
testHarness.notifyOfCompletedCheckpoint(0);
// producerB - pre-commit txn 1; producerA - start txn 2
testHarness.snapshot(1, 3);
// producerA - write 44 in txn 2
testHarness.processElement(44, 4);
// producerA - abort txn 2
testHarness.close();
testHarness = createTestHarness(topic);
// recover from checkpoint0 - producerA recovers and commits txn 0
testHarness.initializeState(checkpoint0);
testHarness.close();
assertExactlyOnceForTopic(createProperties(), topic, Arrays.asList(42));
deleteTestTopic(topic);
checkProducerLeak();
}
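The producerA/producerB commentary follows from FlinkKafkaProducer being a TwoPhaseCommitSinkFunction: each harness call corresponds to one phase of the protocol. A conceptual mapping (variable names below are illustrative):
// invoke(): write the element into the currently open Kafka transaction
harness.processElement(value, timestamp);
// preCommit() the open transaction, then beginTransaction() with the next producer
// from the pool - this is why a fresh producer appears at every snapshot
OperatorSubtaskState state = harness.snapshot(checkpointId, timestamp);
// commit() the transaction pre-committed for this checkpoint and return its producer
harness.notifyOfCompletedCheckpoint(checkpointId);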
Use of org.apache.flink.runtime.checkpoint.OperatorSubtaskState in project flink by apache.
From the class FlinkKafkaProducerITCase, method testFailAndRecoverSameCheckpointTwice:
@Test
public void testFailAndRecoverSameCheckpointTwice() throws Exception {
String topic = "flink-kafka-producer-fail-and-recover-same-checkpoint-twice";
OperatorSubtaskState snapshot1;
try (OneInputStreamOperatorTestHarness<Integer, Object> testHarness = createTestHarness(topic)) {
testHarness.setup();
testHarness.open();
testHarness.processElement(42, 0);
testHarness.snapshot(0, 1);
testHarness.processElement(43, 2);
snapshot1 = testHarness.snapshot(1, 3);
testHarness.processElement(44, 4);
}
try (OneInputStreamOperatorTestHarness<Integer, Object> testHarness = createTestHarness(topic)) {
testHarness.setup();
// restore from snapshot1; the lingering transaction holding record 44 should be aborted
testHarness.initializeState(snapshot1);
testHarness.open();
// write more records on top of the potentially lingering transactions; these are
// never committed before the harness closes
testHarness.processElement(44, 7);
testHarness.snapshot(2, 8);
testHarness.processElement(45, 9);
}
try (OneInputStreamOperatorTestHarness<Integer, Object> testHarness = createTestHarness(topic)) {
testHarness.setup();
// restore from snapshot1 again; the previous attempt's lingering transactions
// (records 44 and 45) should be aborted
testHarness.initializeState(snapshot1);
testHarness.open();
// write more records on top of the potentially lingering transactions; again they
// are never committed
testHarness.processElement(44, 7);
testHarness.snapshot(3, 8);
testHarness.processElement(45, 9);
}
// now we should have:
// - records 42 and 43 in committed transactions
// - aborted transactions with records 44 and 45
assertExactlyOnceForTopic(createProperties(), topic, Arrays.asList(42, 43));
deleteTestTopic(topic);
checkProducerLeak();
}
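Recovering from the same checkpoint twice is safe because Kafka fences by transactional.id: re-initializing a producer under an id aborts that id's unfinished transaction and bumps the producer epoch, so an older instance later fails with ProducerFencedException. A minimal plain-Kafka sketch of the mechanism (the broker address and transactional id are placeholders):
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.ByteArraySerializer;

Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder
props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some-transactional-id"); // placeholder
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
try (KafkaProducer<byte[], byte[]> producer = new KafkaProducer<>(props)) {
    // Aborts any unfinished transaction left under this transactional.id and
    // fences the previous producer instance that used it.
    producer.initTransactions();
}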