Use of org.apache.flink.connector.file.sink.FileSinkCommittable in project flink by apache.
The class FileWriterBucketTest, method testOnCheckpointRollingOnCheckpoint:
@Test
public void testOnCheckpointRollingOnCheckpoint() throws IOException {
    // One element is written and the on-checkpoint rolling policy is active:
    // prepareCommit must roll the in-progress part into a single pending file,
    // and the snapshotted state must carry no in-progress recoverable.
    File outputDirectory = TEMP_FOLDER.newFolder();
    Path bucketPath = new Path(outputDirectory.toURI());
    TestRecoverableWriter writer = getRecoverableWriter(bucketPath);
    // NOTE(review): ON_CHECKPOING_ROLLING_POLICY is a constant declared
    // elsewhere in this test class; the spelling is kept as declared.
    FileWriterBucket<String> bucket =
            createBucket(
                    writer,
                    bucketPath,
                    ON_CHECKPOING_ROLLING_POLICY,
                    OutputFileConfig.builder().build());

    bucket.write("test-element", 0);

    List<FileSinkCommittable> committables = bucket.prepareCommit(false);
    FileWriterBucketState state = bucket.snapshotState();

    // Exactly one pending committable, zero in-progress ones.
    compareNumberOfPendingAndInProgress(committables, 1, 0);
    assertEquals(BUCKET_ID, state.getBucketId());
    assertEquals(bucketPath, state.getBucketPath());
    assertNull(
            "The bucket should not have in-progress recoverable",
            state.getInProgressFileRecoverable());
}
Use of org.apache.flink.connector.file.sink.FileSinkCommittable in project flink by apache.
The class FileWriterBucketTest, method testFlush:
@Test
public void testFlush() throws IOException {
    // Calling prepareCommit with flush=true must close the in-progress part
    // and turn it into a pending committable, even under the default policy.
    File outputDirectory = TEMP_FOLDER.newFolder();
    Path bucketPath = new Path(outputDirectory.toURI());
    TestRecoverableWriter writer = getRecoverableWriter(bucketPath);
    FileWriterBucket<String> bucket =
            createBucket(
                    writer,
                    bucketPath,
                    DEFAULT_ROLLING_POLICY,
                    OutputFileConfig.builder().build());

    bucket.write("test-element", 0);
    List<FileSinkCommittable> committables = bucket.prepareCommit(true);

    // One pending file, nothing left in progress after the flush.
    compareNumberOfPendingAndInProgress(committables, 1, 0);
    assertNull(
            "The bucket should not have in-progress part after flushed",
            bucket.getInProgressPart());
}
Use of org.apache.flink.connector.file.sink.FileSinkCommittable in project flink by apache.
The class FileWriterBucketTest, method testRollingOnProcessingTime:
@Test
public void testRollingOnProcessingTime() throws IOException {
    // Verifies processing-time based rolling with a 10 ms rollover interval:
    // the part stays open while the interval has not elapsed and rolls once
    // it has.
    File outputDirectory = TEMP_FOLDER.newFolder();
    Path bucketPath = new Path(outputDirectory.toURI());
    RollingPolicy<String, String> rollingPolicy =
            DefaultRollingPolicy.builder()
                    .withRolloverInterval(Duration.ofMillis(10))
                    .build();
    TestRecoverableWriter writer = getRecoverableWriter(bucketPath);
    FileWriterBucket<String> bucket =
            createBucket(
                    writer, bucketPath, rollingPolicy, OutputFileConfig.builder().build());

    // Part is created at processing time 11.
    bucket.write("test-element", 11);
    bucket.write("test-element", 12);

    // 20 - 11 = 9 ms < 10 ms interval: the part must remain open.
    bucket.onProcessingTime(20);
    assertNotNull(
            "The bucket should not roll since interval is not reached",
            bucket.getInProgressPart());

    bucket.write("test-element", 21);

    // 21 - 11 = 10 ms >= interval: the part must be rolled.
    bucket.onProcessingTime(21);
    assertNull(
            "The bucket should roll since interval is reached",
            bucket.getInProgressPart());

    List<FileSinkCommittable> committables = bucket.prepareCommit(false);
    compareNumberOfPendingAndInProgress(committables, 1, 0);
}
Use of org.apache.flink.connector.file.sink.FileSinkCommittable in project flink by apache.
The class CompactCoordinatorTest, method testSizeThreshold:
@Test
public void testSizeThreshold() throws Exception {
    // With a 10-byte size threshold, the coordinator must hold committables
    // until their accumulated size reaches the threshold, then emit exactly
    // one compaction request for that bucket.
    FileCompactStrategy strategy = Builder.newBuilder().setSizeThreshold(10).build();
    CompactCoordinator coordinator =
            new CompactCoordinator(strategy, getTestCommittableSerializer());

    try (OneInputStreamOperatorTestHarness<CommittableMessage<FileSinkCommittable>, CompactorRequest>
            harness = new OneInputStreamOperatorTestHarness<>(coordinator)) {
        harness.setup();
        harness.open();

        FileSinkCommittable first = committable("0", ".0", 5);
        FileSinkCommittable second = committable("0", ".1", 6);

        // 5 bytes accumulated: below threshold, nothing emitted yet.
        harness.processElement(message(first));
        Assert.assertEquals(0, harness.extractOutputValues().size());

        // 5 + 6 = 11 bytes >= 10: one request covering both committables.
        harness.processElement(message(second));
        List<CompactorRequest> requests = harness.extractOutputValues();
        Assert.assertEquals(1, requests.size());
        assertToCompact(requests.get(0), first, second);

        // Fresh accumulation per bucket: neither ("0", 5) nor ("1", 5)
        // reaches the threshold, so no further request is emitted.
        harness.processElement(message(committable("0", ".2", 5)));
        harness.processElement(message(committable("1", ".0", 5)));
        Assert.assertEquals(1, harness.extractOutputValues().size());
    }
}
Use of org.apache.flink.connector.file.sink.FileSinkCommittable in project flink by apache.
The class CompactCoordinatorTest, method testCompactOverMultipleCheckpoints:
@Test
public void testCompactOverMultipleCheckpoints() throws Exception {
    // With compaction enabled every 3 checkpoints, buffered committables must
    // stay unemitted through checkpoints 1 and 2 and be released as a single
    // compaction request at checkpoint 3.
    FileCompactStrategy strategy = Builder.newBuilder().enableCompactionOnCheckpoint(3).build();
    CompactCoordinator coordinator =
            new CompactCoordinator(strategy, getTestCommittableSerializer());

    try (OneInputStreamOperatorTestHarness<CommittableMessage<FileSinkCommittable>, CompactorRequest>
            harness = new OneInputStreamOperatorTestHarness<>(coordinator)) {
        harness.setup();
        harness.open();

        FileSinkCommittable first = committable("0", ".0", 5);
        FileSinkCommittable second = committable("0", ".1", 6);
        harness.processElement(message(first));
        harness.processElement(message(second));
        Assert.assertEquals(0, harness.extractOutputValues().size());

        // Checkpoints 1 and 2: below the every-3-checkpoints trigger.
        harness.prepareSnapshotPreBarrier(1);
        harness.snapshot(1, 1);
        harness.prepareSnapshotPreBarrier(2);
        harness.snapshot(2, 2);
        Assert.assertEquals(0, harness.extractOutputValues().size());

        // Checkpoint 3 triggers compaction of everything buffered so far.
        harness.prepareSnapshotPreBarrier(3);
        harness.snapshot(3, 3);
        List<CompactorRequest> requests = harness.extractOutputValues();
        Assert.assertEquals(1, requests.size());
        assertToCompact(requests.get(0), first, second);
    }
}
Aggregations