Example usage of org.apache.flink.connector.file.sink.FileSinkCommittable in the Apache Flink project: class FileCommitterTest, method testCommitMultiple.
@Test
public void testCommitMultiple() throws Exception {
    StubBucketWriter bucketWriter = new StubBucketWriter();
    FileCommitter committer = new FileCommitter(bucketWriter);

    // Five committables for bucket "0": three pending files and two
    // in-progress files, each wrapped in a mock commit request.
    Collection<CommitRequest<FileSinkCommittable>> requests =
            Stream.of(
                            new FileSinkCommittable(
                                    "0", new FileSinkTestUtils.TestPendingFileRecoverable()),
                            new FileSinkCommittable(
                                    "0", new FileSinkTestUtils.TestPendingFileRecoverable()),
                            new FileSinkCommittable(
                                    "0", new FileSinkTestUtils.TestInProgressFileRecoverable()),
                            new FileSinkCommittable(
                                    "0", new FileSinkTestUtils.TestPendingFileRecoverable()),
                            new FileSinkCommittable(
                                    "0", new FileSinkTestUtils.TestInProgressFileRecoverable()))
                    .map(MockCommitRequest::new)
                    .collect(Collectors.toList());

    committer.commit(requests);

    // The three pending files are recovered and committed; the two
    // in-progress files are cleaned up instead.
    assertEquals(3, bucketWriter.getRecoveredPendingFiles().size());
    assertEquals(2, bucketWriter.getNumCleanUp());
    bucketWriter
            .getRecoveredPendingFiles()
            .forEach(pendingFile -> assertTrue(pendingFile.isCommitted()));
    // No request should have needed a retry.
    assertTrue(requests.stream().allMatch(r -> r.getNumberOfRetries() == 0));
}
Example usage of org.apache.flink.connector.file.sink.FileSinkCommittable in the Apache Flink project: class FileCommitterTest, method testCommitPendingFile.
@Test
public void testCommitPendingFile() throws Exception {
    StubBucketWriter bucketWriter = new StubBucketWriter();
    FileCommitter committer = new FileCommitter(bucketWriter);

    // A single pending-file committable for bucket "0".
    MockCommitRequest<FileSinkCommittable> request =
            new MockCommitRequest<>(
                    new FileSinkCommittable(
                            "0", new FileSinkTestUtils.TestPendingFileRecoverable()));

    committer.commit(Collections.singletonList(request));

    // Exactly one pending file was recovered and committed, nothing was
    // cleaned up, and no retry happened.
    assertEquals(1, bucketWriter.getRecoveredPendingFiles().size());
    assertEquals(0, bucketWriter.getNumCleanUp());
    assertTrue(bucketWriter.getRecoveredPendingFiles().get(0).isCommitted());
    assertEquals(0, request.getNumberOfRetries());
}
Example usage of org.apache.flink.connector.file.sink.FileSinkCommittable in the Apache Flink project: class CompactorOperatorTest, method testStateHandler.
@Test
public void testStateHandler() throws Exception {
// Record-wise compactor reading int records via IntDecoder.
FileCompactor fileCompactor = new RecordWiseFileCompactor<>(new DecoderBasedReader.Factory<>(IntDecoder::new));
CompactorOperator compactor = createTestOperator(fileCompactor);
OperatorSubtaskState state;
// Phase 1: run the compactor operator and snapshot it with one submitted and
// one pending compaction request, so the state handler below has work to drain.
try (OneInputStreamOperatorTestHarness<CompactorRequest, CommittableMessage<FileSinkCommittable>> harness = new OneInputStreamOperatorTestHarness<>(compactor)) {
harness.setup();
harness.open();
// Request 1: compact files .0 and .1 of bucket "0"; arrives before checkpoint 1.
harness.processElement(request("0", Arrays.asList(committable("0", ".0", 1), committable("0", ".1", 2)), null));
harness.snapshot(1, 1L);
// Request 2: compact files .2 and .3; arrives after checkpoint 1's snapshot.
harness.processElement(request("0", Arrays.asList(committable("0", ".2", 3), committable("0", ".3", 4)), null));
harness.notifyOfCompletedCheckpoint(1);
// request 1 is submitted and request 2 is pending
state = harness.snapshot(2, 2L);
}
// Phase 2: restore the snapshot into a state handler, which accepts Either
// committable messages (left) or compaction requests (right) as input.
CompactorOperatorStateHandler handler = new CompactorOperatorStateHandler(getTestCommittableSerializer(), createTestBucketWriter());
try (OneInputStreamOperatorTestHarness<Either<CommittableMessage<FileSinkCommittable>, CompactorRequest>, CommittableMessage<FileSinkCommittable>> harness = new OneInputStreamOperatorTestHarness<>(handler)) {
harness.setup();
harness.initializeState(state);
harness.open();
// remaining requests from coordinator
harness.processElement(new StreamRecord<>(Either.Right(request("0", Collections.singletonList(committable("0", ".4", 5)), null).getValue())));
harness.processElement(new StreamRecord<>(Either.Right(request("0", Collections.singletonList(committable("0", ".5", 6)), null).getValue())));
// Summary for checkpoint 3 — presumably announcing the two committables
// that follow; confirm argument meaning against CommittableSummary's ctor.
harness.processElement(new StreamRecord<>(Either.Left(new CommittableSummary<>(0, 1, 3L, 2, 2, 0))));
// remaining in-progress file from file writer
harness.processElement(new StreamRecord<>(Either.Left(new CommittableWithLineage<>(committable("0", ".6", 7), 3L, 0))));
// new pending file written this time
harness.processElement(new StreamRecord<>(Either.Left(new CommittableWithLineage<>(committable("0", "7", 8), 3L, 0))));
// Writer-side input is fully consumed, but compaction results are still outstanding.
Assert.assertTrue(handler.isWriterStateDrained());
Assert.assertFalse(handler.isStateDrained());
// the result should not be emitted yet, but all requests should already be submitted
Assert.assertEquals(0, harness.extractOutputValues().size());
compactor.getAllTasksFuture().join();
// state should be drained, and all results and holding messages should be emitted
harness.prepareSnapshotPreBarrier(3);
Assert.assertTrue(handler.isStateDrained());
// summary should be merged into one
// 1 summary+ 1 compacted + (1 compacted committable + 1 compacted cleanup) * 7
List<CommittableMessage<FileSinkCommittable>> results = harness.extractOutputValues();
Assert.assertEquals(16, results.size());
SinkV2Assertions.assertThat((CommittableSummary<?>) results.get(0)).hasPendingCommittables(15);
// Expected order: the passthrough pending file "7" first, then, for each of
// the seven compacted inputs .0–.6, a compacted committable followed by the
// cleanup of its source path.
List<FileSinkCommittable> expectedResult = Arrays.asList(committable("0", "7", 8), committable("0", "compacted-0", 1), cleanupPath("0", ".0"), committable("0", "compacted-1", 2), cleanupPath("0", ".1"), committable("0", "compacted-2", 3), cleanupPath("0", ".2"), committable("0", "compacted-3", 4), cleanupPath("0", ".3"), committable("0", "compacted-4", 5), cleanupPath("0", ".4"), committable("0", "compacted-5", 6), cleanupPath("0", ".5"), committable("0", "compacted-6", 7), cleanupPath("0", ".6"));
for (int i = 1; i < results.size(); ++i) {
SinkV2Assertions.assertThat((CommittableWithLineage<?>) results.get(i)).hasCommittable(expectedResult.get(i - 1));
}
}
}
Example usage of org.apache.flink.connector.file.sink.FileSinkCommittable in the Apache Flink project: class CompactorOperatorTest, method testPassthrough.
@Test
public void testPassthrough() throws Exception {
    CompactorOperator compactor =
            createTestOperator(
                    new RecordWiseFileCompactor<>(
                            new DecoderBasedReader.Factory<>(IntDecoder::new)));
    try (OneInputStreamOperatorTestHarness<CompactorRequest, CommittableMessage<FileSinkCommittable>>
            harness = new OneInputStreamOperatorTestHarness<>(compactor)) {
        harness.setup();
        harness.open();

        // Two cleanup-only requests for bucket "0": there is nothing to
        // compact, so both committables should pass through unchanged.
        FileSinkCommittable inProgressCleanup = cleanupInprogress("0", "0", 1);
        FileSinkCommittable pathCleanup = cleanupPath("0", "1");
        harness.processElement(request("0", null, Collections.singletonList(inProgressCleanup)));
        harness.processElement(request("0", null, Collections.singletonList(pathCleanup)));

        // Nothing is emitted until the requests have been checkpointed,
        // acknowledged, and all compaction tasks have finished.
        Assert.assertEquals(0, harness.extractOutputValues().size());
        harness.prepareSnapshotPreBarrier(1);
        harness.snapshot(1, 1L);
        harness.notifyOfCompletedCheckpoint(1);
        compactor.getAllTasksFuture().join();
        Assert.assertEquals(0, harness.extractOutputValues().size());

        // The next pre-barrier flushes the output: one summary plus the two
        // untouched cleanup committables, in submission order.
        harness.prepareSnapshotPreBarrier(2);
        List<CommittableMessage<FileSinkCommittable>> emitted = harness.extractOutputValues();
        Assert.assertEquals(3, emitted.size());
        SinkV2Assertions.assertThat((CommittableSummary<?>) emitted.get(0))
                .hasPendingCommittables(2);
        SinkV2Assertions.assertThat((CommittableWithLineage<?>) emitted.get(1))
                .hasCommittable(inProgressCleanup);
        SinkV2Assertions.assertThat((CommittableWithLineage<?>) emitted.get(2))
                .hasCommittable(pathCleanup);
    }
}
Example usage of org.apache.flink.connector.file.sink.FileSinkCommittable in the Apache Flink project: class FileWriterTest, method testPreCommit.
@Test
public void testPreCommit() throws Exception {
    File outDir = TEMP_FOLDER.newFolder();
    Path basePath = new Path(outDir.toURI());

    // Roll on every checkpoint, writing files named "part-*".
    FileWriter<String> writer =
            createWriter(
                    basePath,
                    OnCheckpointRollingPolicy.build(),
                    new OutputFileConfig("part-", ""));

    // Five records across three distinct keys.
    for (String record : new String[] {"test1", "test1", "test2", "test2", "test3"}) {
        writer.write(record, new ContextImpl());
    }

    // prepareCommit yields one committable per distinct bucket.
    Collection<FileSinkCommittable> committables = writer.prepareCommit();
    assertEquals(3, committables.size());
}
Aggregations — end of the collected usage examples.