Usage example of org.apache.flink.streaming.api.connector.sink2.CommittableMessage in the Apache Flink project.
Taken from class CompactCoordinatorTest, method testStateHandler.
@Test
public void testStateHandler() throws Exception {
    FileCompactStrategy strategy = Builder.newBuilder().setSizeThreshold(10).build();
    CompactCoordinator coordinator =
            new CompactCoordinator(strategy, getTestCommittableSerializer());

    // Committables whose file names start with "." are hidden/in-progress files that the
    // coordinator buffers for compaction.
    FileSinkCommittable committable0 = committable("0", ".0", 5);
    FileSinkCommittable committable1 = committable("0", ".1", 6);
    // A committable without the "." prefix is a visible file and is passed through.
    FileSinkCommittable committable2 = committable("0", "2", 6);

    OperatorSubtaskState state;
    try (OneInputStreamOperatorTestHarness<
                    CommittableMessage<FileSinkCommittable>, CompactorRequest>
            harness = new OneInputStreamOperatorTestHarness<>(coordinator)) {
        harness.setup();
        harness.open();

        harness.processElement(message(committable0));
        // Still below the 10-byte size threshold: nothing may be emitted yet.
        Assert.assertEquals(0, harness.extractOutputValues().size());

        harness.prepareSnapshotPreBarrier(1);
        // Snapshot the coordinator with one committable buffered in state.
        state = harness.snapshot(1, 1);
    }

    CompactCoordinatorStateHandler handler =
            new CompactCoordinatorStateHandler(getTestCommittableSerializer());
    try (OneInputStreamOperatorTestHarness<
                    CommittableMessage<FileSinkCommittable>,
                    Either<CommittableMessage<FileSinkCommittable>, CompactorRequest>>
            harness = new OneInputStreamOperatorTestHarness<>(handler)) {
        harness.setup(
                new EitherSerializer<>(
                        new SimpleVersionedSerializerTypeSerializerProxy<>(
                                () ->
                                        new CommittableMessageSerializer<>(
                                                getTestCommittableSerializer())),
                        new SimpleVersionedSerializerTypeSerializerProxy<>(
                                () ->
                                        new CompactorRequestSerializer(
                                                getTestCommittableSerializer()))));
        harness.initializeState(state);
        harness.open();

        // On open, the restored coordinator state is converted into a compaction request.
        Assert.assertEquals(1, harness.extractOutputValues().size());

        harness.processElement(message(committable1));
        harness.processElement(message(committable2));

        List<Either<CommittableMessage<FileSinkCommittable>, CompactorRequest>> results =
                harness.extractOutputValues();
        Assert.assertEquals(3, results.size());

        // The restored request carries the committable buffered before the snapshot.
        Assert.assertTrue(results.get(0).isRight());
        assertToCompact(results.get(0).right(), committable0);

        // The state handler no longer buffers: a "."-prefixed committable is also passed
        // through. NOTE: JUnit's assertEquals contract is (expected, actual); the original
        // test had the arguments swapped, which yields misleading failure messages.
        Assert.assertTrue(
                results.get(1).isLeft()
                        && results.get(1).left() instanceof CommittableWithLineage);
        Assert.assertEquals(
                committable1,
                ((CommittableWithLineage<FileSinkCommittable>) results.get(1).left())
                        .getCommittable());

        // A committable without the "." prefix is passed through normally.
        Assert.assertTrue(
                results.get(2).isLeft()
                        && results.get(2).left() instanceof CommittableWithLineage);
        Assert.assertEquals(
                committable2,
                ((CommittableWithLineage<FileSinkCommittable>) results.get(2).left())
                        .getCommittable());
    }
}
Usage example of org.apache.flink.streaming.api.connector.sink2.CommittableMessage in the Apache Flink project.
Taken from class CompactCoordinatorTest, method testCompactOnCheckpoint.
@Test
public void testCompactOnCheckpoint() throws Exception {
    // Trigger compaction on every checkpoint (interval = 1), independent of file size.
    FileCompactStrategy strategy = Builder.newBuilder().enableCompactionOnCheckpoint(1).build();
    CompactCoordinator coordinator =
            new CompactCoordinator(strategy, getTestCommittableSerializer());
    try (OneInputStreamOperatorTestHarness<
                    CommittableMessage<FileSinkCommittable>, CompactorRequest>
            harness = new OneInputStreamOperatorTestHarness<>(coordinator)) {
        harness.setup();
        harness.open();

        FileSinkCommittable first = committable("0", ".0", 5);
        FileSinkCommittable second = committable("0", ".1", 6);
        FileSinkCommittable third = committable("0", ".2", 5);
        FileSinkCommittable fourth = committable("1", ".0", 5);

        harness.processElement(message(first));
        harness.processElement(message(second));
        // Nothing is emitted until a checkpoint barrier arrives.
        Assert.assertEquals(0, harness.extractOutputValues().size());

        harness.prepareSnapshotPreBarrier(1);
        harness.snapshot(1, 1);
        // Checkpoint 1 flushes the two buffered committables of bucket "0" as one request.
        Assert.assertEquals(1, harness.extractOutputValues().size());

        harness.processElement(message(third));
        harness.processElement(message(fourth));
        // Again buffered until the next checkpoint.
        Assert.assertEquals(1, harness.extractOutputValues().size());

        harness.prepareSnapshotPreBarrier(2);
        harness.snapshot(2, 2);

        List<CompactorRequest> emitted = harness.extractOutputValues();
        Assert.assertEquals(3, emitted.size());
        // Checkpoint 1: bucket "0"; checkpoint 2: one request per bucket ("0" and "1").
        assertToCompact(emitted.get(0), first, second);
        assertToCompact(emitted.get(1), third);
        assertToCompact(emitted.get(2), fourth);
    }
}
Usage example of org.apache.flink.streaming.api.connector.sink2.CommittableMessage in the Apache Flink project.
Taken from class SinkWriterOperatorTest, method testStateRestore.
@ParameterizedTest
@ValueSource(booleans = { true, false })
void testStateRestore(boolean stateful) throws Exception {
    final long baseTime = 0;
    final SnapshottingBufferingSinkWriter writer = new SnapshottingBufferingSinkWriter();
    final OneInputStreamOperatorTestHarness<Integer, CommittableMessage<Integer>> harness =
            createTestHarnessWithBufferingSinkWriter(writer, stateful);
    harness.open();

    harness.processWatermark(baseTime);
    harness.processElement(1, baseTime + 1);
    harness.processElement(2, baseTime + 2);
    harness.prepareSnapshotPreBarrier(1L);
    OperatorSubtaskState snapshot = harness.snapshot(1L, 1L);

    // Only the watermark and the committable summary are visible downstream, so the
    // buffered committables must have been stored in state instead.
    assertThat(harness.getOutput()).hasSize(2).contains(new Watermark(baseTime));
    assertThat(writer.lastCheckpointId)
            .isEqualTo(stateful ? 1L : SnapshottingBufferingSinkWriter.NOT_SNAPSHOTTED);
    harness.close();

    final OneInputStreamOperatorTestHarness<Integer, CommittableMessage<Integer>> restored =
            createTestHarnessWithBufferingSinkWriter(
                    new SnapshottingBufferingSinkWriter(), stateful);
    restored.initializeState(snapshot);
    restored.open();

    // Ending the input flushes whatever committables were recovered from state.
    restored.endInput();
    final long checkpointId = 2;
    restored.prepareSnapshotPreBarrier(checkpointId);

    if (stateful) {
        assertBasicOutput(restored.getOutput(), 2, checkpointId);
    } else {
        // A stateless restore recovers nothing: the summary must report zero committables.
        assertThat(fromOutput(restored.getOutput()).get(0).asRecord().getValue())
                .isInstanceOf(CommittableSummary.class)
                .satisfies(
                        cs ->
                                SinkV2Assertions.assertThat((CommittableSummary<?>) cs)
                                        .hasOverallCommittables(0)
                                        .hasPendingCommittables(0)
                                        .hasFailedCommittables(0));
    }
    restored.close();
}
Usage example of org.apache.flink.streaming.api.connector.sink2.CommittableMessage in the Apache Flink project.
Taken from class CompactorOperatorTest, method testCompact.
@Test
public void testCompact() throws Exception {
    // Record-wise compactor: decodes each int record and rewrites it into the target file.
    FileCompactor fileCompactor =
            new RecordWiseFileCompactor<>(new DecoderBasedReader.Factory<>(IntDecoder::new));
    CompactorOperator compactor = createTestOperator(fileCompactor);
    try (OneInputStreamOperatorTestHarness<
                    CompactorRequest, CommittableMessage<FileSinkCommittable>>
            harness = new OneInputStreamOperatorTestHarness<>(compactor)) {
        harness.setup();
        harness.open();

        harness.processElement(
                request(
                        "0",
                        Arrays.asList(committable("0", ".0", 5), committable("0", ".1", 5)),
                        null));
        // The request is only recorded; no output before its checkpoint completes.
        Assert.assertEquals(0, harness.extractOutputValues().size());

        harness.prepareSnapshotPreBarrier(1);
        harness.snapshot(1, 1L);
        harness.notifyOfCompletedCheckpoint(1);

        // Block until the asynchronous compaction task has finished.
        compactor.getAllTasksFuture().join();
        // Results are withheld until the next prepareSnapshotPreBarrier call.
        Assert.assertEquals(0, harness.extractOutputValues().size());
        harness.prepareSnapshotPreBarrier(2);

        // Expected: 1 summary + 1 compacted committable + 2 cleanup entries.
        List<CommittableMessage<FileSinkCommittable>> emitted = harness.extractOutputValues();
        Assert.assertEquals(4, emitted.size());
        SinkV2Assertions.assertThat((CommittableSummary<?>) emitted.get(0))
                .hasPendingCommittables(3);
        SinkV2Assertions.assertThat((CommittableWithLineage<?>) emitted.get(1))
                .hasCommittable(committable("0", "compacted-0", 10));
        SinkV2Assertions.assertThat((CommittableWithLineage<?>) emitted.get(2))
                .hasCommittable(cleanupPath("0", ".0"));
        SinkV2Assertions.assertThat((CommittableWithLineage<?>) emitted.get(3))
                .hasCommittable(cleanupPath("0", ".1"));
    }
}
Usage example of org.apache.flink.streaming.api.connector.sink2.CommittableMessage in the Apache Flink project.
Taken from class CompactorOperatorTest, method testStateHandler.
@Test
public void testStateHandler() throws Exception {
// Record-wise compactor: decodes each int record and rewrites it into the target file.
FileCompactor fileCompactor = new RecordWiseFileCompactor<>(new DecoderBasedReader.Factory<>(IntDecoder::new));
CompactorOperator compactor = createTestOperator(fileCompactor);
OperatorSubtaskState state;
try (OneInputStreamOperatorTestHarness<CompactorRequest, CommittableMessage<FileSinkCommittable>> harness = new OneInputStreamOperatorTestHarness<>(compactor)) {
harness.setup();
harness.open();
// First request arrives before checkpoint 1 and becomes part of that checkpoint.
harness.processElement(request("0", Arrays.asList(committable("0", ".0", 1), committable("0", ".1", 2)), null));
harness.snapshot(1, 1L);
// Second request arrives after the snapshot, so it stays pending.
harness.processElement(request("0", Arrays.asList(committable("0", ".2", 3), committable("0", ".3", 4)), null));
harness.notifyOfCompletedCheckpoint(1);
// request 1 is submitted and request 2 is pending
state = harness.snapshot(2, 2L);
}
// The state handler drains the restored compactor state during a stop-with-savepoint style
// migration away from compaction.
CompactorOperatorStateHandler handler = new CompactorOperatorStateHandler(getTestCommittableSerializer(), createTestBucketWriter());
try (OneInputStreamOperatorTestHarness<Either<CommittableMessage<FileSinkCommittable>, CompactorRequest>, CommittableMessage<FileSinkCommittable>> harness = new OneInputStreamOperatorTestHarness<>(handler)) {
harness.setup();
harness.initializeState(state);
harness.open();
// remaining requests from coordinator
harness.processElement(new StreamRecord<>(Either.Right(request("0", Collections.singletonList(committable("0", ".4", 5)), null).getValue())));
harness.processElement(new StreamRecord<>(Either.Right(request("0", Collections.singletonList(committable("0", ".5", 6)), null).getValue())));
// Summary for the two writer-side committables that follow.
harness.processElement(new StreamRecord<>(Either.Left(new CommittableSummary<>(0, 1, 3L, 2, 2, 0))));
// remaining in-progress file from file writer
harness.processElement(new StreamRecord<>(Either.Left(new CommittableWithLineage<>(committable("0", ".6", 7), 3L, 0))));
// new pending file written this time
harness.processElement(new StreamRecord<>(Either.Left(new CommittableWithLineage<>(committable("0", "7", 8), 3L, 0))));
// All writer input has been consumed, but compaction tasks are still outstanding.
Assert.assertTrue(handler.isWriterStateDrained());
Assert.assertFalse(handler.isStateDrained());
// the result should not be emitted yet, but all requests should already be submitted
Assert.assertEquals(0, harness.extractOutputValues().size());
compactor.getAllTasksFuture().join();
// state should be drained, and all results and holding messages should be emitted
harness.prepareSnapshotPreBarrier(3);
Assert.assertTrue(handler.isStateDrained());
// summary should be merged into one
// 1 summary+ 1 compacted + (1 compacted committable + 1 compacted cleanup) * 7
List<CommittableMessage<FileSinkCommittable>> results = harness.extractOutputValues();
Assert.assertEquals(16, results.size());
SinkV2Assertions.assertThat((CommittableSummary<?>) results.get(0)).hasPendingCommittables(15);
// Expected order: the pass-through pending file first, then each compacted file followed
// by the cleanup entry for its original hidden file (.0 through .6).
List<FileSinkCommittable> expectedResult = Arrays.asList(committable("0", "7", 8), committable("0", "compacted-0", 1), cleanupPath("0", ".0"), committable("0", "compacted-1", 2), cleanupPath("0", ".1"), committable("0", "compacted-2", 3), cleanupPath("0", ".2"), committable("0", "compacted-3", 4), cleanupPath("0", ".3"), committable("0", "compacted-4", 5), cleanupPath("0", ".4"), committable("0", "compacted-5", 6), cleanupPath("0", ".5"), committable("0", "compacted-6", 7), cleanupPath("0", ".6"));
for (int i = 1; i < results.size(); ++i) {
SinkV2Assertions.assertThat((CommittableWithLineage<?>) results.get(i)).hasCommittable(expectedResult.get(i - 1));
}
}
}
Aggregations