Use of org.apache.flink.connector.file.sink.FileSinkCommittable in the Apache Flink project.
From the class CompactCoordinatorTest, method testPassthrough.
@Test
public void testPassthrough() throws Exception {
    // Committables that are cleanup requests, have an unknown size, or point at a
    // non-hidden path must bypass compaction; only the regular hidden-path file with
    // a known size at/above the threshold ends up in the request's compact list.
    FileCompactStrategy strategy = Builder.newBuilder().setSizeThreshold(10).build();
    CompactCoordinator coordinator =
            new CompactCoordinator(strategy, getTestCommittableSerializer());

    try (OneInputStreamOperatorTestHarness<CommittableMessage<FileSinkCommittable>, CompactorRequest>
            harness = new OneInputStreamOperatorTestHarness<>(coordinator)) {
        harness.setup();
        harness.open();

        FileSinkCommittable cleanupRequest = cleanupInprogress("0", ".0", 1);
        FileSinkCommittable unknownSize = committable("0", ".1", -1);
        FileSinkCommittable visiblePath = committable("0", "2", -1);
        FileSinkCommittable compactable = committable("0", ".3", 10);

        // Feed all four in the original order; only the last one is compactable.
        for (FileSinkCommittable c :
                new FileSinkCommittable[] {cleanupRequest, unknownSize, visiblePath, compactable}) {
            harness.processElement(message(c));
        }

        List<CompactorRequest> requests = harness.extractOutputValues();
        Assert.assertEquals(1, requests.size());
        assertToCompact(requests.get(0), compactable);
        assertToPassthrough(requests.get(0), cleanupRequest, unknownSize, visiblePath);
    }
}
Use of org.apache.flink.connector.file.sink.FileSinkCommittable in the Apache Flink project.
From the class CompactCoordinatorTest, method testRestore.
@Test
public void testRestore() throws Exception {
    // A coordinator restored from a snapshot must keep accumulating on top of the
    // pending committables captured in that snapshot.
    FileCompactStrategy strategy = Builder.newBuilder().setSizeThreshold(10).build();
    CompactCoordinator coordinator =
            new CompactCoordinator(strategy, getTestCommittableSerializer());

    FileSinkCommittable committable0 = committable("0", ".0", 5);
    FileSinkCommittable committable1 = committable("0", ".1", 6);
    FileSinkCommittable committable2 = committable("0", ".2", 5);
    FileSinkCommittable committable3 = committable("1", ".0", 5);

    OperatorSubtaskState snapshot;
    try (OneInputStreamOperatorTestHarness<CommittableMessage<FileSinkCommittable>, CompactorRequest>
            harness = new OneInputStreamOperatorTestHarness<>(coordinator)) {
        harness.setup();
        harness.open();

        // 5 bytes is below the 10-byte threshold: nothing emitted before the snapshot.
        harness.processElement(message(committable0));
        Assert.assertEquals(0, harness.extractOutputValues().size());

        harness.prepareSnapshotPreBarrier(1);
        snapshot = harness.snapshot(1, 1);
    }

    coordinator = new CompactCoordinator(strategy, getTestCommittableSerializer());
    try (OneInputStreamOperatorTestHarness<CommittableMessage<FileSinkCommittable>, CompactorRequest>
            harness = new OneInputStreamOperatorTestHarness<>(coordinator)) {
        harness.setup();
        harness.initializeState(snapshot);
        harness.open();

        // Restored 5 + new 6 reaches the threshold, so one request is emitted.
        harness.processElement(message(committable1));
        Assert.assertEquals(1, harness.extractOutputValues().size());

        // Each bucket stays below the threshold: no additional output yet.
        harness.processElement(message(committable2));
        harness.processElement(message(committable3));
        Assert.assertEquals(1, harness.extractOutputValues().size());

        // End of input flushes the remaining per-bucket groups.
        harness.endInput();
        List<CompactorRequest> requests = harness.extractOutputValues();
        Assert.assertEquals(3, requests.size());
        assertToCompact(requests.get(0), committable0, committable1);
        assertToCompact(requests.get(1), committable2);
        assertToCompact(requests.get(2), committable3);
    }
}
Use of org.apache.flink.connector.file.sink.FileSinkCommittable in the Apache Flink project.
From the class CompactCoordinatorTest, method testRestoreWithChangedStrategy.
@Test
public void testRestoreWithChangedStrategy() throws Exception {
    // The initial threshold is so high that nothing is emitted before the snapshot.
    FileCompactStrategy initialStrategy = Builder.newBuilder().setSizeThreshold(100).build();
    CompactCoordinator coordinator =
            new CompactCoordinator(initialStrategy, getTestCommittableSerializer());

    FileSinkCommittable committable0 = committable("0", ".0", 5);
    FileSinkCommittable committable1 = committable("0", ".1", 6);
    FileSinkCommittable committable2 = committable("0", ".2", 7);
    FileSinkCommittable committable3 = committable("0", ".3", 8);
    FileSinkCommittable committable4 = committable("0", ".4", 9);
    FileSinkCommittable committable5 = committable("0", ".5", 2);

    OperatorSubtaskState snapshot;
    try (OneInputStreamOperatorTestHarness<CommittableMessage<FileSinkCommittable>, CompactorRequest>
            harness = new OneInputStreamOperatorTestHarness<>(coordinator)) {
        harness.setup();
        harness.open();

        for (FileSinkCommittable c :
                new FileSinkCommittable[] {
                    committable0, committable1, committable2, committable3, committable4
                }) {
            harness.processElement(message(c));
        }

        harness.prepareSnapshotPreBarrier(1);
        snapshot = harness.snapshot(1, 1);
        Assert.assertEquals(0, harness.extractOutputValues().size());
    }

    // Restore with a much smaller threshold: the restored committables are regrouped
    // under the new strategy as soon as the operator opens.
    FileCompactStrategy changedStrategy = Builder.newBuilder().setSizeThreshold(10).build();
    CompactCoordinator changedCoordinator =
            new CompactCoordinator(changedStrategy, getTestCommittableSerializer());
    try (OneInputStreamOperatorTestHarness<CommittableMessage<FileSinkCommittable>, CompactorRequest>
            harness = new OneInputStreamOperatorTestHarness<>(changedCoordinator)) {
        harness.setup();
        harness.initializeState(snapshot);
        harness.open();
        Assert.assertEquals(2, harness.extractOutputValues().size());

        // The final 2-byte committable pushes the leftover 9-byte file over the threshold.
        harness.processElement(message(committable5));

        List<CompactorRequest> requests = harness.extractOutputValues();
        Assert.assertEquals(3, requests.size());
        assertToCompact(requests.get(0), committable0, committable1);
        assertToCompact(requests.get(1), committable2, committable3);
        assertToCompact(requests.get(2), committable4, committable5);
    }
}
Use of org.apache.flink.connector.file.sink.FileSinkCommittable in the Apache Flink project.
From the class FileWriterBucket, method prepareCommit.
/**
 * Collects the committables produced by this bucket since the last checkpoint.
 *
 * <p>Rolls the current in-progress part when the rolling policy demands it at checkpoint
 * time, or unconditionally at end of input; then drains all pending files and, if present,
 * the in-progress file scheduled for cleanup.
 *
 * @param endOfInput whether the input has ended, forcing the in-progress part to close
 * @return the committables to hand to the committer for this bucket
 * @throws IOException if closing the in-progress part file fails
 */
List<FileSinkCommittable> prepareCommit(boolean endOfInput) throws IOException {
    boolean shouldClosePart =
            inProgressPart != null
                    && (rollingPolicy.shouldRollOnCheckpoint(inProgressPart) || endOfInput);
    if (shouldClosePart) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Closing in-progress part file for bucket id={} on checkpoint.", bucketId);
        }
        closePartFile();
    }

    // Drain the pending files accumulated since the last commit.
    List<FileSinkCommittable> committables = new ArrayList<>(pendingFiles.size());
    pendingFiles.forEach(
            pendingFile -> committables.add(new FileSinkCommittable(bucketId, pendingFile)));
    pendingFiles.clear();

    // Forward the obsolete in-progress file (if any) so the committer can clean it up.
    if (inProgressFileToCleanup != null) {
        committables.add(new FileSinkCommittable(bucketId, inProgressFileToCleanup));
        inProgressFileToCleanup = null;
    }
    return committables;
}
Use of org.apache.flink.connector.file.sink.FileSinkCommittable in the Apache Flink project.
From the class CompactorOperatorStateHandler, method open.
/**
 * Opens the compact service and re-submits every compact request carried over in state.
 *
 * <p>Multi-file compaction is no longer available, so each file to compact from a restored
 * request is wrapped into its own single-file request; the restored request's pass-through
 * committables are forwarded together in one additional request.
 */
@Override
public void open() throws Exception {
    super.open();

    this.compactService = new CompactService(1, fileCompactor, bucketWriter);
    compactService.open();

    if (stateRemaining != null) {
        for (Map<Long, List<CompactorRequest>> checkpointRequests : stateRemaining) {
            for (Map.Entry<Long, List<CompactorRequest>> entry : checkpointRequests.entrySet()) {
                for (CompactorRequest restored : entry.getValue()) {
                    List<FileSinkCommittable> toCompact = restored.getCommittableToCompact();
                    List<FileSinkCommittable> toPassthrough = restored.getCommittableToPassthrough();

                    // Recover the bucket id from whichever list is non-empty; assumes each
                    // restored request holds at least one committable — otherwise get(0)
                    // would throw (same as the original ternary).
                    final String bucketId;
                    if (toCompact.isEmpty()) {
                        bucketId = toPassthrough.get(0).getBucketId();
                    } else {
                        bucketId = toCompact.get(0).getBucketId();
                    }

                    // One single-file request per file to compact.
                    for (FileSinkCommittable file : toCompact) {
                        CompactorRequest singleFileRequest = new CompactorRequest(bucketId);
                        singleFileRequest.addToCompact(file);
                        submit(singleFileRequest);
                    }

                    // All pass-through committables travel in one extra request
                    // (submitted even when empty, matching the original behavior).
                    CompactorRequest passthroughRequest = new CompactorRequest(bucketId);
                    for (FileSinkCommittable committable : toPassthrough) {
                        passthroughRequest.addToPassthrough(committable);
                    }
                    submit(passthroughRequest);
                }
            }
        }
    }
    stateRemaining = null;
}
Aggregations