Use of org.apache.flink.runtime.checkpoint.CheckpointOptions in project flink by apache.
From the class AlternatingCheckpointsTest, method testOutOfOrderBarrier.
@Test
public void testOutOfOrderBarrier() throws Exception {
    SingleInputGate inputGate = new SingleInputGateBuilder().setNumberOfChannels(2).build();
    TestInputChannel firstChannel = new TestInputChannel(inputGate, 0);
    TestInputChannel secondChannel = new TestInputChannel(inputGate, 1);
    inputGate.setInputChannels(firstChannel, secondChannel);
    ValidatingCheckpointHandler target = new ValidatingCheckpointHandler();
    SingleCheckpointBarrierHandler barrierHandler = getTestBarrierHandlerFactory(target).create(inputGate);
    long checkpointId = 10;
    long outOfOrderSavepointId = 5;
    // Checkpoint 10 arrives first on channel 0.
    barrierHandler.processBarrier(
            new CheckpointBarrier(checkpointId, clock.relativeTimeMillis(), new CheckpointOptions(CHECKPOINT, getDefault())),
            new InputChannelInfo(0, 0),
            false);
    secondChannel.setBlocked(true);
    // A savepoint barrier with the lower id 5 then arrives on channel 1.
    barrierHandler.processBarrier(
            new CheckpointBarrier(
                    outOfOrderSavepointId,
                    clock.relativeTimeMillis(),
                    new CheckpointOptions(SavepointType.savepoint(SavepointFormatType.CANONICAL), getDefault())),
            new InputChannelInfo(0, 1),
            false);
    // The out-of-order savepoint barrier is ignored: the latest checkpoint id stays at 10
    // and the blocked channel is released.
    assertEquals(checkpointId, barrierHandler.getLatestCheckpointId());
    assertFalse(secondChannel.isBlocked());
}
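For orientation, the sketch below shows the two CheckpointOptions flavours this test builds, an aligned checkpoint at the default storage location and a canonical-format savepoint, outside of the test harness. It is a minimal sketch, not taken from the Flink sources: the import paths and the fully qualified names for CHECKPOINT and getDefault() are assumptions based on Flink's usual package layout; only constructors that appear in the snippets on this page are used.

// Sketch only: the two CheckpointOptions flavours used in testOutOfOrderBarrier.
// Import locations are assumed; they are not shown in the snippet above.
import org.apache.flink.core.execution.SavepointFormatType;
import org.apache.flink.runtime.checkpoint.CheckpointOptions;
import org.apache.flink.runtime.checkpoint.CheckpointType;
import org.apache.flink.runtime.checkpoint.SavepointType;
import org.apache.flink.runtime.state.CheckpointStorageLocationReference;

public class CheckpointOptionsSketch {
    public static void main(String[] args) {
        CheckpointStorageLocationReference location = CheckpointStorageLocationReference.getDefault();
        // Aligned checkpoint; the test writes this with static imports as
        // new CheckpointOptions(CHECKPOINT, getDefault()).
        CheckpointOptions checkpoint = new CheckpointOptions(CheckpointType.CHECKPOINT, location);
        // Canonical-format savepoint, as carried by the out-of-order barrier in the test.
        CheckpointOptions savepoint =
                new CheckpointOptions(SavepointType.savepoint(SavepointFormatType.CANONICAL), location);
        System.out.println(checkpoint);
        System.out.println(savepoint);
    }
}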
Use of org.apache.flink.runtime.checkpoint.CheckpointOptions in project flink by apache.
From the class LocalInputChannelTest, method testCheckpointingInflightData.
@Test
public void testCheckpointingInflightData() throws Exception {
    SingleInputGate inputGate = new SingleInputGateBuilder().build();
    PipelinedResultPartition parent =
            (PipelinedResultPartition)
                    PartitionTestUtils.createPartition(
                            ResultPartitionType.PIPELINED, NoOpFileChannelManager.INSTANCE);
    ResultSubpartition subpartition = parent.getAllPartitions()[0];
    ResultSubpartitionView subpartitionView = subpartition.createReadView(() -> {});
    TestingResultPartitionManager partitionManager =
            new TestingResultPartitionManager(subpartitionView);
    final RecordingChannelStateWriter stateWriter = new RecordingChannelStateWriter();
    LocalInputChannel channel =
            createLocalInputChannel(inputGate, partitionManager, 0, 0, b -> b.setStateWriter(stateWriter));
    inputGate.setInputChannels(channel);
    channel.requestSubpartition();
    final CheckpointStorageLocationReference location = getDefault();
    CheckpointOptions options = CheckpointOptions.unaligned(CheckpointType.CHECKPOINT, location);
    stateWriter.start(0, options);
    final CheckpointBarrier barrier = new CheckpointBarrier(0, 123L, options);
    channel.checkpointStarted(barrier);
    // Add 1 buffer before the barrier and 1 buffer afterwards. Only the first buffer should be
    // written.
    subpartition.add(createFilledFinishedBufferConsumer(1));
    assertTrue(channel.getNextBuffer().isPresent());
    subpartition.add(EventSerializer.toBufferConsumer(barrier, true));
    assertTrue(channel.getNextBuffer().isPresent());
    subpartition.add(createFilledFinishedBufferConsumer(2));
    assertTrue(channel.getNextBuffer().isPresent());
    // Only the pre-barrier buffer (size 1) was captured as in-flight channel state.
    assertArrayEquals(
            stateWriter.getAddedInput().get(channel.getChannelInfo()).stream()
                    .mapToInt(Buffer::getSize)
                    .toArray(),
            new int[] {1});
}
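As a companion to the test above, here is a rough sketch of how the unaligned options and the barrier that carries them fit together. It uses only calls visible in the snippet; imports are omitted, as in the other snippets on this page, and the comment on the boolean flag is an interpretation rather than something the snippet states.

// Sketch, mirroring the setup in testCheckpointingInflightData above
// (imports omitted, as in the other snippets on this page).
static BufferConsumer buildUnalignedBarrierEvent() throws IOException {
    CheckpointStorageLocationReference location = CheckpointStorageLocationReference.getDefault();
    // Unaligned options allow the barrier to overtake in-flight buffers, which is why the
    // test expects only the buffer added before the barrier to be written as channel state.
    CheckpointOptions options = CheckpointOptions.unaligned(CheckpointType.CHECKPOINT, location);
    // Same checkpoint id (0) and timestamp (123L) as in the test.
    CheckpointBarrier barrier = new CheckpointBarrier(0, 123L, options);
    // The boolean flag is passed as true in the test; it presumably marks the event as a priority event.
    return EventSerializer.toBufferConsumer(barrier, true);
}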
Use of org.apache.flink.runtime.checkpoint.CheckpointOptions in project flink by apache.
From the class StreamTaskFinalCheckpointsTest, method testOperatorSkipLifeCycleIfFinishedOnRestore.
@Test
public void testOperatorSkipLifeCycleIfFinishedOnRestore() throws Exception {
    try (StreamTaskMailboxTestHarness<String> harness =
            new StreamTaskMailboxTestHarnessBuilder<>(
                            OneInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
                    .addInput(BasicTypeInfo.STRING_TYPE_INFO, 3)
                    .setCollectNetworkEvents()
                    .setTaskStateSnapshot(1, TaskStateSnapshot.FINISHED_ON_RESTORE)
                    .setupOperatorChain(new TestFinishedOnRestoreStreamOperator())
                    .chain(new TestFinishedOnRestoreStreamOperator(), StringSerializer.INSTANCE)
                    .finish()
                    .build()) {
        // Finish the restore, including state initialization and open.
        harness.processAll();
        // Try to trigger a checkpoint.
        harness.getTaskStateManager().getWaitForReportLatch().reset();
        CheckpointMetaData checkpointMetaData = new CheckpointMetaData(2, 2);
        CheckpointOptions checkpointOptions = new CheckpointOptions(CheckpointType.CHECKPOINT, getDefault());
        harness.streamTask.triggerCheckpointOnBarrier(
                checkpointMetaData,
                checkpointOptions,
                new CheckpointMetricsBuilder().setBytesProcessedDuringAlignment(0).setAlignmentDurationNanos(0));
        harness.getTaskStateManager().getWaitForReportLatch().await();
        assertEquals(2, harness.getTaskStateManager().getReportedCheckpointId());
        // Checkpoint notification.
        harness.streamTask.notifyCheckpointCompleteAsync(2);
        harness.streamTask.notifyCheckpointAbortAsync(3, 2);
        harness.processAll();
        // Finish & close operators.
        harness.processElement(Watermark.MAX_WATERMARK, 0, 0);
        harness.processElement(Watermark.MAX_WATERMARK, 0, 1);
        harness.processElement(Watermark.MAX_WATERMARK, 0, 2);
        harness.waitForTaskCompletion();
        harness.finishProcessing();
        assertThat(
                harness.getOutput(),
                contains(
                        new CheckpointBarrier(
                                checkpointMetaData.getCheckpointId(),
                                checkpointMetaData.getTimestamp(),
                                checkpointOptions),
                        Watermark.MAX_WATERMARK,
                        new EndOfData(StopMode.DRAIN)));
    }
}
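For readers skimming the checkpoint lifecycle, the trigger and notification calls above condense into the following sketch. Everything shown is taken from the snippet; only the wrapping method and its name are hypothetical, and the comment on the second argument of notifyCheckpointAbortAsync is an assumption.

// Hypothetical helper distilling the trigger/notify sequence used in the test above.
static void triggerAndNotify(StreamTaskMailboxTestHarness<String> harness) throws Exception {
    CheckpointMetaData metaData = new CheckpointMetaData(2, 2); // checkpoint id 2, timestamp 2
    CheckpointOptions options =
            new CheckpointOptions(CheckpointType.CHECKPOINT, CheckpointStorageLocationReference.getDefault());
    // Trigger the checkpoint as if a barrier had arrived, with zeroed alignment metrics.
    harness.streamTask.triggerCheckpointOnBarrier(
            metaData,
            options,
            new CheckpointMetricsBuilder()
                    .setBytesProcessedDuringAlignment(0)
                    .setAlignmentDurationNanos(0));
    // Asynchronous notifications: checkpoint 2 completed, checkpoint 3 aborted
    // (the second argument is presumably the latest completed checkpoint id).
    harness.streamTask.notifyCheckpointCompleteAsync(2);
    harness.streamTask.notifyCheckpointAbortAsync(3, 2);
    harness.processAll();
}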
Use of org.apache.flink.runtime.checkpoint.CheckpointOptions in project flink by apache.
From the class SourceOperatorStreamTaskTest, method executeAndWaitForCheckpoint.
private TaskStateSnapshot executeAndWaitForCheckpoint(
        long checkpointId, TaskStateSnapshot initialSnapshot, IntStream expectedRecords) throws Exception {
    try (StreamTaskMailboxTestHarness<Integer> testHarness = createTestHarness(checkpointId, initialSnapshot)) {
        // Get the split and add records to it.
        MockSourceSplit split = getAndMaybeAssignSplit(testHarness);
        addRecords(split, NUM_RECORDS);
        // Process all the records.
        testHarness.processAll();
        CheckpointOptions checkpointOptions = CheckpointOptions.forCheckpointWithDefaultLocation();
        triggerCheckpointWaitForFinish(testHarness, checkpointId, checkpointOptions);
        // Build the expected output to verify the results.
        Queue<Object> expectedOutput = new LinkedList<>();
        expectedRecords.forEach(
                r -> expectedOutput.offer(new StreamRecord<>(r, TimestampAssigner.NO_TIMESTAMP)));
        // Add the barrier to the expected output.
        expectedOutput.add(new CheckpointBarrier(checkpointId, checkpointId, checkpointOptions));
        assertEquals(checkpointId, testHarness.taskStateManager.getReportedCheckpointId());
        assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());
        return testHarness.taskStateManager.getLastJobManagerTaskStateSnapshot();
    }
}
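A small aside on the factory used here: the sketch below contrasts the shorthand with the explicit constructor seen in the other tests on this page. The equivalence of the two forms is an assumption, not something the snippet states, and imports are again omitted.

// Shorthand used in executeAndWaitForCheckpoint above.
CheckpointOptions shorthand = CheckpointOptions.forCheckpointWithDefaultLocation();
// Presumably equivalent (assumption) to the explicit form used elsewhere on this page:
CheckpointOptions explicit =
        new CheckpointOptions(CheckpointType.CHECKPOINT, CheckpointStorageLocationReference.getDefault());
// The expected barrier reuses the checkpoint id as its timestamp, exactly as the test does.
CheckpointBarrier expectedBarrier = new CheckpointBarrier(1L, 1L, shorthand);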
Use of org.apache.flink.runtime.checkpoint.CheckpointOptions in project flink by apache.
From the class SourceOperatorStreamTaskTest, method testSnapshotAndAdvanceToEndOfEventTime.
@Test
public void testSnapshotAndAdvanceToEndOfEventTime() throws Exception {
    final int checkpointId = 1;
    try (StreamTaskMailboxTestHarness<Integer> testHarness = createTestHarness(checkpointId, null)) {
        getAndMaybeAssignSplit(testHarness);
        // A terminating savepoint in canonical format.
        final CheckpointOptions checkpointOptions =
                new CheckpointOptions(
                        SavepointType.terminate(SavepointFormatType.CANONICAL),
                        CheckpointStorageLocationReference.getDefault());
        triggerCheckpointWaitForFinish(testHarness, checkpointId, checkpointOptions);
        // The task is expected to advance to the end of event time and drain
        // before emitting the barrier.
        Queue<Object> expectedOutput = new LinkedList<>();
        expectedOutput.add(Watermark.MAX_WATERMARK);
        expectedOutput.add(new EndOfData(StopMode.DRAIN));
        expectedOutput.add(new CheckpointBarrier(checkpointId, checkpointId, checkpointOptions));
        assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());
    }
}
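To close, here is a sketch contrasting the two savepoint flavours that appear on this page. It uses only the constructors shown above; the stop-with-savepoint reading of terminate() is inferred from the expected output (MAX_WATERMARK and EndOfData ahead of the barrier) rather than stated by the snippet, and imports are omitted as elsewhere.

// Plain savepoint, as carried by the out-of-order barrier near the top of this page.
CheckpointOptions plainSavepoint =
        new CheckpointOptions(
                SavepointType.savepoint(SavepointFormatType.CANONICAL),
                CheckpointStorageLocationReference.getDefault());
// Terminating savepoint, as in testSnapshotAndAdvanceToEndOfEventTime: the task drains,
// emits MAX_WATERMARK and EndOfData, and only then the barrier (stop-with-savepoint, inferred).
CheckpointOptions terminatingSavepoint =
        new CheckpointOptions(
                SavepointType.terminate(SavepointFormatType.CANONICAL),
                CheckpointStorageLocationReference.getDefault());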