Use of org.apache.flink.streaming.api.connector.sink2.CommittableMessage in project flink by apache.
The class CommitterOperatorTest, method testStateRestore.
@Test
void testStateRestore() throws Exception {
    final OneInputStreamOperatorTestHarness<CommittableMessage<String>, CommittableMessage<String>>
            testHarness = createTestHarness(new TestSink.RetryOnceCommitter());
    testHarness.open();
    final CommittableSummary<String> committableSummary =
            new CommittableSummary<>(1, 1, 0L, 1, 1, 0);
    testHarness.processElement(new StreamRecord<>(committableSummary));
    final CommittableWithLineage<String> first = new CommittableWithLineage<>("1", 0L, 1);
    testHarness.processElement(new StreamRecord<>(first));
    final OperatorSubtaskState snapshot = testHarness.snapshot(0L, 2L);
    // Trigger first checkpoint but committer needs retry
    testHarness.notifyOfCompletedCheckpoint(0);
    assertThat(testHarness.getOutput()).isEmpty();
    testHarness.close();
    final ForwardingCommitter committer = new ForwardingCommitter();
    final OneInputStreamOperatorTestHarness<CommittableMessage<String>, CommittableMessage<String>>
            restored = createTestHarness(committer);
    restored.initializeState(snapshot);
    restored.open();
    // Previous committables are immediately committed if possible
    final List<StreamElement> output = fromOutput(restored.getOutput());
    assertThat(output).hasSize(2);
    assertThat(committer.getSuccessfulCommits()).isEqualTo(1);
    SinkV2Assertions.assertThat(toCommittableSummary(output.get(0)))
            .hasFailedCommittables(committableSummary.getNumberOfFailedCommittables())
            .hasOverallCommittables(committableSummary.getNumberOfCommittables())
            .hasPendingCommittables(0);
    SinkV2Assertions.assertThat(toCommittableWithLinage(output.get(1)))
            .isEqualTo(new CommittableWithLineage<>(first.getCommittable(), 1L, 0));
    restored.close();
}
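The positional constructor arguments above are easy to misread. The sketch below labels what each argument is assumed to mean, based on the constructor order of the Sink V2 messages used in this test (Flink 1.15+); the inline parameter-name comments are annotations added here, not part of the API.

    import org.apache.flink.streaming.api.connector.sink2.CommittableSummary;
    import org.apache.flink.streaming.api.connector.sink2.CommittableWithLineage;

    class CommittableMessageSketch {
        static void buildMessages() {
            // The summary announces how many committables a subtask will send for a checkpoint,
            // and how many of them are still pending or have already failed.
            CommittableSummary<String> summary =
                    new CommittableSummary<>(
                            /* subtaskId */ 1,
                            /* numberOfSubtasks */ 1,
                            /* checkpointId */ 0L,
                            /* numberOfCommittables */ 1,
                            /* numberOfPendingCommittables */ 1,
                            /* numberOfFailedCommittables */ 0);
            // Each committable follows, tagged with the same checkpoint id and subtask id;
            // the restored committer in the test re-emits it with an updated checkpoint id
            // and subtask id, which is what the final assertion checks.
            CommittableWithLineage<String> committable =
                    new CommittableWithLineage<>(
                            /* committable */ "1", /* checkpointId */ 0L, /* subtaskId */ 1);
        }
    }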
Use of org.apache.flink.streaming.api.connector.sink2.CommittableMessage in project flink by apache.
The class SinkWriterOperatorTest, method testLoadPreviousSinkState.
@ParameterizedTest
@ValueSource(booleans = {true, false})
public void testLoadPreviousSinkState(boolean stateful) throws Exception {
    // 1. Build previous sink state
    final List<String> previousSinkInputs =
            Arrays.asList(
                    "bit", "mention", "thick", "stick", "stir", "easy", "sleep", "forth",
                    "cost", "prompt");
    final OneInputStreamOperatorTestHarness<String, String> previousSink =
            new OneInputStreamOperatorTestHarness<>(
                    new DummySinkOperator(), StringSerializer.INSTANCE);
    OperatorSubtaskState previousSinkState =
            TestHarnessUtil.buildSubtaskState(previousSink, previousSinkInputs);
    // 2. Load previous sink state and verify the output
    final OneInputStreamOperatorTestHarness<Integer, CommittableMessage<Integer>>
            compatibleWriterOperator = createCompatibleStateTestHarness(stateful);
    final List<String> expectedOutput1 =
            stateful ? new ArrayList<>(previousSinkInputs) : new ArrayList<>();
    expectedOutput1.add(Tuple3.of(1, 1, Long.MIN_VALUE).toString());
    // load the state from previous sink
    compatibleWriterOperator.initializeState(previousSinkState);
    compatibleWriterOperator.open();
    compatibleWriterOperator.processElement(1, 1);
    // this will flush out the committables that were restored from previous sink
    compatibleWriterOperator.endInput();
    compatibleWriterOperator.prepareSnapshotPreBarrier(1);
    OperatorSubtaskState operatorStateWithoutPreviousState =
            compatibleWriterOperator.snapshot(1L, 1L);
    compatibleWriterOperator.close();
    assertEmitted(expectedOutput1, compatibleWriterOperator.getOutput());
    // 3. Restore the sink without previous sink's state
    final OneInputStreamOperatorTestHarness<Integer, CommittableMessage<Integer>>
            restoredSinkOperator = createCompatibleStateTestHarness(stateful);
    final List<String> expectedOutput2 =
            Arrays.asList(
                    Tuple3.of(2, 2, Long.MIN_VALUE).toString(),
                    Tuple3.of(3, 3, Long.MIN_VALUE).toString());
    restoredSinkOperator.initializeState(operatorStateWithoutPreviousState);
    restoredSinkOperator.open();
    restoredSinkOperator.processElement(2, 2);
    restoredSinkOperator.processElement(3, 3);
    // this will flush out the committables that were restored
    restoredSinkOperator.endInput();
    restoredSinkOperator.prepareSnapshotPreBarrier(2);
    assertEmitted(expectedOutput2, restoredSinkOperator.getOutput());
    restoredSinkOperator.close();
}
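TestHarnessUtil.buildSubtaskState hides the usual harness sequence of opening an operator, pushing the inputs through it, and snapshotting the result. A minimal sketch of that pattern, assuming a plain StreamMap operator instead of the DummySinkOperator used by the test:

    import java.util.List;

    import org.apache.flink.api.common.functions.MapFunction;
    import org.apache.flink.runtime.checkpoint.OperatorSubtaskState;
    import org.apache.flink.streaming.api.operators.StreamMap;
    import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
    import org.apache.flink.streaming.util.OneInputStreamOperatorTestHarness;

    class SubtaskStateSketch {

        // Rough equivalent of a buildSubtaskState-style helper: run the inputs through
        // the operator once and keep only the resulting snapshot.
        static OperatorSubtaskState buildState(List<String> inputs) throws Exception {
            OneInputStreamOperatorTestHarness<String, String> harness =
                    new OneInputStreamOperatorTestHarness<>(
                            new StreamMap<>((MapFunction<String, String>) value -> value));
            harness.setup();
            harness.open();
            for (String input : inputs) {
                harness.processElement(new StreamRecord<>(input));
            }
            OperatorSubtaskState snapshot = harness.snapshot(0L, 0L);
            harness.close();
            return snapshot;
        }

        // A fresh harness is then initialized with that snapshot before open(), which is
        // the same ordering the test uses for compatibleWriterOperator and restoredSinkOperator.
        static void restoreInto(OperatorSubtaskState snapshot) throws Exception {
            OneInputStreamOperatorTestHarness<String, String> restored =
                    new OneInputStreamOperatorTestHarness<>(
                            new StreamMap<>((MapFunction<String, String>) value -> value));
            restored.setup();
            restored.initializeState(snapshot);
            restored.open();
            restored.close();
        }
    }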
Use of org.apache.flink.streaming.api.connector.sink2.CommittableMessage in project flink by apache.
The class SinkWriterOperatorTest, method testWatermarkPropagatedToSinkWriter.
@Test
void testWatermarkPropagatedToSinkWriter() throws Exception {
    final long initialTime = 0;
    final TestSink.DefaultSinkWriter<Integer> writer = new TestSink.DefaultSinkWriter<>();
    final OneInputStreamOperatorTestHarness<Integer, CommittableMessage<Integer>> testHarness =
            new OneInputStreamOperatorTestHarness<>(
                    new SinkWriterOperatorFactory<>(
                            TestSink.newBuilder().setWriter(writer).build().asV2(),
                            STREAMING_MODE,
                            CHECKPOINTING_ENABLED));
    testHarness.open();
    testHarness.processWatermark(initialTime);
    testHarness.processWatermark(initialTime + 1);
    assertThat(testHarness.getOutput())
            .containsExactly(new Watermark(initialTime), new Watermark(initialTime + 1));
    assertThat(writer.watermarks)
            .containsExactly(
                    new org.apache.flink.api.common.eventtime.Watermark(initialTime),
                    new org.apache.flink.api.common.eventtime.Watermark(initialTime + 1));
    testHarness.close();
}
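The writer.watermarks assertion works because the sink writer operator hands every event-time watermark to the writer before forwarding it downstream. A minimal Sink V2 writer that records watermarks in the same spirit might look like the sketch below; this is an illustration only, not the actual TestSink.DefaultSinkWriter (which is a V1 writer adapted via asV2()).

    import java.util.ArrayList;
    import java.util.List;

    import org.apache.flink.api.common.eventtime.Watermark;
    import org.apache.flink.api.connector.sink2.SinkWriter;

    // Hypothetical writer used only to show where the recorded watermarks come from.
    class WatermarkRecordingWriter<InputT> implements SinkWriter<InputT> {

        final List<Watermark> watermarks = new ArrayList<>();

        @Override
        public void write(InputT element, Context context) {
            // Elements are ignored in this sketch.
        }

        @Override
        public void writeWatermark(Watermark watermark) {
            // The sink writer operator calls this for every event-time watermark
            // before emitting the watermark downstream.
            watermarks.add(watermark);
        }

        @Override
        public void flush(boolean endOfInput) {}

        @Override
        public void close() {}
    }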
Use of org.apache.flink.streaming.api.connector.sink2.CommittableMessage in project flink by apache.
The class CompactorOperatorStateHandler, method processElement.
@Override
public void processElement(
        StreamRecord<Either<CommittableMessage<FileSinkCommittable>, CompactorRequest>> element)
        throws Exception {
    Either<CommittableMessage<FileSinkCommittable>, CompactorRequest> record = element.getValue();
    if (stateDrained) {
        // all input should be committable messages to pass through
        output.collect(new StreamRecord<>(record.left()));
        return;
    }
    if (record.isRight()) {
        submit(element.getValue().right());
        return;
    }
    CommittableMessage<FileSinkCommittable> message = record.left();
    if (message instanceof CommittableSummary) {
        checkState(holdingSummary == null, "Duplicate summary before the first checkpoint.");
        holdingSummary = (CommittableSummary<FileSinkCommittable>) message;
        holdingMessages = new ArrayList<>(holdingSummary.getNumberOfCommittables());
    } else {
        boolean compacting = false;
        CommittableWithLineage<FileSinkCommittable> committableWithLineage =
                (CommittableWithLineage<FileSinkCommittable>) message;
        if (committableWithLineage.getCommittable().hasPendingFile()) {
            FileSinkCommittable committable = committableWithLineage.getCommittable();
            PendingFileRecoverable pendingFile = committable.getPendingFile();
            if (pendingFile.getPath() != null
                    && pendingFile.getPath().getName().startsWith(".")) {
                // The pending file is the in-progress file of the previous run, which
                // should be committed and compacted before sending to the committer.
                CompactorRequest request = new CompactorRequest(committable.getBucketId());
                request.addToCompact(committable);
                submit(request);
                compacting = true;
                compactingMessages.add(message);
            } else {
                // A normal file is received, indicating the writer state is drained.
                writerStateDrained = true;
                if (compactingMessages.isEmpty() && compactingRequests.isEmpty()) {
                    // No state needs to be handled, the holding summary and all committable
                    // messages can be sent eagerly
                    checkState(holdingSummary != null);
                    output.collect(new StreamRecord<>(holdingSummary));
                    holdingSummary = null;
                    this.stateDrained = true;
                    output.collect(new StreamRecord<>(committableWithLineage));
                }
            }
        }
        if (!compacting && !stateDrained) {
            // Compacting messages should not be added
            // If the state is drained, no further messages need to be added
            holdingMessages.add(message);
        }
    }
}
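The startsWith(".") check relies on the file sink's naming convention: in-progress files are written as hidden files with a leading dot (for example something like .part-0-0.inprogress.&lt;uuid&gt;), while finished part files are not hidden, so a restored pending file with a dotted name still needs to be committed and compacted. A small self-contained sketch of that check, with made-up paths:

    import org.apache.flink.core.fs.Path;

    class InProgressFileCheckSketch {

        // Same predicate as in the handler above: a non-null path whose file name is hidden.
        static boolean looksLikeInProgressFile(Path path) {
            return path != null && path.getName().startsWith(".");
        }

        public static void main(String[] args) {
            System.out.println(
                    looksLikeInProgressFile(new Path("/bucket-0/.part-0-0.inprogress.abc"))); // true
            System.out.println(looksLikeInProgressFile(new Path("/bucket-0/part-0-0"))); // false
        }
    }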