Use of org.apache.flink.runtime.io.network.api.EndOfData in project flink by apache.
The class MultipleInputStreamTaskChainedSourcesCheckpointingTest, method testStopWithSavepointDrainWaitsForSourcesFinish.
/**
* In this scenario:
*
* <ul>
* <li>Network inputs are processed until the CheckpointBarriers for the synchronous
* savepoint arrive.
* <li>The stop-with-savepoint RPC arrives for the sources.
* <li>Sources keep being invoked until they return END_OF_DATA.
* <li>The synchronous savepoint is triggered.
* </ul>
*/
@Test
public void testStopWithSavepointDrainWaitsForSourcesFinish() throws Exception {
try (StreamTaskMailboxTestHarness<String> testHarness =
        new StreamTaskMailboxTestHarnessBuilder<>(
                        MultipleInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
                .setCollectNetworkEvents()
                .modifyExecutionConfig(applyObjectReuse(objectReuse))
                .modifyStreamConfig(config -> config.setCheckpointingEnabled(true))
                .addInput(BasicTypeInfo.STRING_TYPE_INFO)
                .addSourceInput(
                        new SourceOperatorFactory<>(
                                new MockSource(Boundedness.CONTINUOUS_UNBOUNDED, 1),
                                WatermarkStrategy.noWatermarks()),
                        BasicTypeInfo.INT_TYPE_INFO)
                .addSourceInput(
                        new SourceOperatorFactory<>(
                                new MockSource(Boundedness.CONTINUOUS_UNBOUNDED, 1),
                                WatermarkStrategy.noWatermarks()),
                        BasicTypeInfo.INT_TYPE_INFO)
                .addInput(BasicTypeInfo.DOUBLE_TYPE_INFO)
                .setupOutputForSingletonOperatorChain(
                        new MapToStringMultipleInputOperatorFactory(4, true))
                .build()) {
testHarness.setAutoProcess(false);
ArrayDeque<Object> expectedOutput = new ArrayDeque<>();
CheckpointBarrier barrier = createStopWithSavepointDrainBarrier();
testHarness.processElement(new StreamRecord<>("44", TimestampAssigner.NO_TIMESTAMP), 0);
testHarness.processEvent(new EndOfData(StopMode.DRAIN), 0);
testHarness.processEvent(barrier, 0);
testHarness.processElement(new StreamRecord<>(47d, TimestampAssigner.NO_TIMESTAMP), 1);
testHarness.processEvent(new EndOfData(StopMode.DRAIN), 1);
testHarness.processEvent(barrier, 1);
addSourceRecords(testHarness, 1, Boundedness.CONTINUOUS_UNBOUNDED, 1, 2);
addSourceRecords(testHarness, 2, Boundedness.CONTINUOUS_UNBOUNDED, 3, 4);
testHarness.processAll();
Future<Boolean> checkpointFuture =
        testHarness
                .getStreamTask()
                .triggerCheckpointAsync(metaData, barrier.getCheckpointOptions());
processSingleStepUntil(testHarness, checkpointFuture::isDone);
expectedOutput.add(new StreamRecord<>("3", TimestampAssigner.NO_TIMESTAMP));
expectedOutput.add(new StreamRecord<>("47.0", TimestampAssigner.NO_TIMESTAMP));
expectedOutput.add(new StreamRecord<>("44", TimestampAssigner.NO_TIMESTAMP));
expectedOutput.add(new StreamRecord<>("1", TimestampAssigner.NO_TIMESTAMP));
expectedOutput.add(new StreamRecord<>("4", TimestampAssigner.NO_TIMESTAMP));
expectedOutput.add(new StreamRecord<>("2", TimestampAssigner.NO_TIMESTAMP));
ArrayList<Object> actualOutput = new ArrayList<>(testHarness.getOutput());
assertThat(
        actualOutput.subList(0, expectedOutput.size()),
        containsInAnyOrder(expectedOutput.toArray()));
assertThat(
        actualOutput.subList(actualOutput.size() - 3, actualOutput.size()),
        contains(new StreamRecord<>("FINISH"), new EndOfData(StopMode.DRAIN), barrier));
}
}
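For context, a hedged sketch of what the createStopWithSavepointDrainBarrier() helper used above plausibly constructs: a CheckpointBarrier whose CheckpointOptions describe a synchronous, drain-style stop-with-savepoint. SavepointType.terminate(SavepointFormatType.CANONICAL) and the reuse of the metaData field are assumptions based on Flink 1.15-era APIs, not confirmed by the snippet itself.

private CheckpointBarrier createStopWithSavepointDrainBarrier() {
    // Aligned barrier carrying "terminate" (stop-with-savepoint --drain) options.
    CheckpointOptions checkpointOptions =
            CheckpointOptions.alignedNoTimeout(
                    SavepointType.terminate(SavepointFormatType.CANONICAL), // assumed
                    CheckpointStorageLocationReference.getDefault());
    return new CheckpointBarrier(
            metaData.getCheckpointId(), metaData.getTimestamp(), checkpointOptions);
}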
Use of org.apache.flink.runtime.io.network.api.EndOfData in project flink by apache.
The class MultipleInputStreamTaskChainedSourcesCheckpointingTest, method testTriggerCheckpointWithFinishedChannelsAndSourceChain.
private void testTriggerCheckpointWithFinishedChannelsAndSourceChain(CheckpointOptions checkpointOptions) throws Exception {
ResultPartition[] partitionWriters = new ResultPartition[2];
try {
for (int i = 0; i < partitionWriters.length; ++i) {
partitionWriters[i] = PartitionTestUtils.createPartition(ResultPartitionType.PIPELINED_BOUNDED);
partitionWriters[i].setup();
}
CompletingCheckpointResponder checkpointResponder = new CompletingCheckpointResponder();
try (StreamTaskMailboxTestHarness<String> testHarness =
        new StreamTaskMailboxTestHarnessBuilder<>(
                        MultipleInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
                .modifyStreamConfig(
                        config -> {
                            config.setCheckpointingEnabled(true);
                            config.setUnalignedCheckpointsEnabled(
                                    checkpointOptions.isUnalignedCheckpoint()
                                            || checkpointOptions.isTimeoutable());
                        })
                .modifyExecutionConfig(applyObjectReuse(objectReuse))
                .setCheckpointResponder(checkpointResponder)
                .addInput(BasicTypeInfo.INT_TYPE_INFO)
                .addInput(BasicTypeInfo.STRING_TYPE_INFO)
                .addSourceInput(
                        new SourceOperatorFactory<>(
                                new MultipleInputStreamTaskTest.LifeCycleTrackingMockSource(
                                        Boundedness.CONTINUOUS_UNBOUNDED, 1),
                                WatermarkStrategy.noWatermarks()),
                        BasicTypeInfo.INT_TYPE_INFO)
                .addSourceInput(
                        new SourceOperatorFactory<>(
                                new MultipleInputStreamTaskTest.LifeCycleTrackingMockSource(
                                        Boundedness.CONTINUOUS_UNBOUNDED, 1),
                                WatermarkStrategy.noWatermarks()),
                        BasicTypeInfo.INT_TYPE_INFO)
                .addAdditionalOutput(partitionWriters)
                .setupOperatorChain(new MapToStringMultipleInputOperatorFactory(4))
                .finishForSingletonOperatorChain(StringSerializer.INSTANCE)
                .build()) {
checkpointResponder.setHandlers(
        testHarness.streamTask::notifyCheckpointCompleteAsync,
        testHarness.streamTask::notifyCheckpointAbortAsync);
testHarness.getStreamTask().getCheckpointBarrierHandler().get();
CompletableFuture<Boolean> checkpointFuture = triggerCheckpoint(testHarness, 2, checkpointOptions);
testHarness.processAll();
// Checkpoint 2 can only be aligned after all the EndOfPartitionEvents have been received.
testHarness.processEvent(new EndOfData(StopMode.DRAIN), 0, 0);
testHarness.processEvent(new EndOfData(StopMode.DRAIN), 1, 0);
testHarness.processEvent(EndOfPartitionEvent.INSTANCE, 0, 0);
testHarness.processEvent(EndOfPartitionEvent.INSTANCE, 1, 0);
testHarness.getTaskStateManager().getWaitForReportLatch().await();
assertEquals(2, testHarness.getTaskStateManager().getReportedCheckpointId());
// Tests triggering a checkpoint after all the inputs have received EndOfPartition.
checkpointFuture = triggerCheckpoint(testHarness, 4, checkpointOptions);
// Notifies the result partitions that all records have been processed once the
// last checkpoint has been triggered.
checkpointFuture.thenAccept((ignored) -> {
for (ResultPartition resultPartition : partitionWriters) {
resultPartition.onSubpartitionAllDataProcessed(0);
}
});
// Checkpoint 4 should now be triggered successfully.
testHarness.processAll();
testHarness.finishProcessing();
assertTrue(checkpointFuture.isDone());
testHarness.getTaskStateManager().getWaitForReportLatch().await();
assertEquals(4, testHarness.getTaskStateManager().getReportedCheckpointId());
// Each result partition should have emitted 2 barriers and 1 EndOfUserRecordsEvent.
for (ResultPartition resultPartition : partitionWriters) {
assertEquals(3, resultPartition.getNumberOfQueuedBuffers());
}
}
} finally {
for (ResultPartitionWriter writer : partitionWriters) {
if (writer != null) {
writer.close();
}
}
}
}
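The triggerCheckpoint(...) helper used here (and again in testTriggeringCheckpointWithFinishedChannels below) is not part of the snippet. A hedged reconstruction from the calls that are visible elsewhere on this page; the timestamp value is an arbitrary choice, and the real private helper may do additional bookkeeping such as resetting the report latch:

private static CompletableFuture<Boolean> triggerCheckpoint(
        StreamTaskMailboxTestHarness<String> testHarness,
        long checkpointId,
        CheckpointOptions checkpointOptions) {
    // Mirrors the direct triggerCheckpointAsync(...) call in the first test above.
    return testHarness
            .getStreamTask()
            .triggerCheckpointAsync(
                    new CheckpointMetaData(checkpointId, checkpointId * 1000L),
                    checkpointOptions);
}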
Use of org.apache.flink.runtime.io.network.api.EndOfData in project flink by apache.
The class MultipleInputStreamTaskChainedSourcesCheckpointingTest, method testSkipExecutionsIfFinishedOnRestoreWithSourceChained.
@Test
public void testSkipExecutionsIfFinishedOnRestoreWithSourceChained() throws Exception {
OperatorID firstSourceOperatorId = new OperatorID();
OperatorID secondSourceOperatorId = new OperatorID();
OperatorID nonSourceOperatorId = new OperatorID();
List<Object> output = new ArrayList<>();
try (StreamTaskMailboxTestHarness<String> testHarness =
        new StreamTaskMailboxTestHarnessBuilder<>(
                        MultipleInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
                .modifyStreamConfig(config -> config.setCheckpointingEnabled(true))
                .modifyExecutionConfig(applyObjectReuse(objectReuse))
                .addInput(BasicTypeInfo.INT_TYPE_INFO)
                .addAdditionalOutput(
                        new RecordOrEventCollectingResultPartitionWriter<StreamElement>(
                                output,
                                new StreamElementSerializer<>(IntSerializer.INSTANCE)) {
                            @Override
                            public void notifyEndOfData(StopMode mode) throws IOException {
                                broadcastEvent(new EndOfData(mode), false);
                            }
                        })
                .addSourceInput(
                        firstSourceOperatorId,
                        new SourceOperatorFactory<>(
                                new SourceOperatorStreamTaskTest.LifeCycleMonitorSource(
                                        Boundedness.CONTINUOUS_UNBOUNDED, 1),
                                WatermarkStrategy.noWatermarks()),
                        BasicTypeInfo.INT_TYPE_INFO)
                .addSourceInput(
                        secondSourceOperatorId,
                        new SourceOperatorFactory<>(
                                new SourceOperatorStreamTaskTest.LifeCycleMonitorSource(
                                        Boundedness.CONTINUOUS_UNBOUNDED, 1),
                                WatermarkStrategy.noWatermarks()),
                        BasicTypeInfo.INT_TYPE_INFO)
                .setTaskStateSnapshot(1, TaskStateSnapshot.FINISHED_ON_RESTORE)
                .setupOperatorChain(
                        nonSourceOperatorId,
                        new LifeCycleMonitorMultipleInputOperatorFactory())
                .chain(new TestFinishedOnRestoreStreamOperator(), StringSerializer.INSTANCE)
                .finish()
                .build()) {
testHarness.processElement(Watermark.MAX_WATERMARK);
assertThat(output, is(empty()));
testHarness.waitForTaskCompletion();
assertThat(output, contains(Watermark.MAX_WATERMARK, new EndOfData(StopMode.DRAIN)));
for (StreamOperatorWrapper<?, ?> wrapper :
        testHarness.getStreamTask().operatorChain.getAllOperators()) {
if (wrapper.getStreamOperator() instanceof SourceOperator<?, ?>) {
SourceOperatorStreamTaskTest.LifeCycleMonitorSourceReader sourceReader =
        (SourceOperatorStreamTaskTest.LifeCycleMonitorSourceReader)
                ((SourceOperator<?, ?>) wrapper.getStreamOperator()).getSourceReader();
sourceReader.getLifeCycleMonitor().assertCallTimes(0, LifeCyclePhase.values());
}
}
}
}
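A note on the contains(...) assertion above: it only passes because EndOfData compares equal by its StopMode. A minimal illustrative sketch of an event type with the same equality contract (the real EndOfData extends RuntimeEvent and is serialized centrally by EventSerializer, as shown at the bottom of this page):

public final class EndOfDataLikeEvent {
    private final StopMode mode;

    public EndOfDataLikeEvent(StopMode mode) {
        this.mode = mode;
    }

    // Value semantics over the StopMode, matching what the test asserts.
    @Override
    public boolean equals(Object o) {
        return o instanceof EndOfDataLikeEvent && ((EndOfDataLikeEvent) o).mode == mode;
    }

    @Override
    public int hashCode() {
        return mode.hashCode();
    }
}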
Use of org.apache.flink.runtime.io.network.api.EndOfData in project flink by apache.
The class MultipleInputStreamTaskTest, method testTriggeringCheckpointWithFinishedChannels.
private void testTriggeringCheckpointWithFinishedChannels(CheckpointOptions checkpointOptions) throws Exception {
ResultPartition[] partitionWriters = new ResultPartition[2];
try {
for (int i = 0; i < partitionWriters.length; ++i) {
partitionWriters[i] = PartitionTestUtils.createPartition(ResultPartitionType.PIPELINED_BOUNDED);
partitionWriters[i].setup();
}
CompletingCheckpointResponder checkpointResponder = new CompletingCheckpointResponder();
try (StreamTaskMailboxTestHarness<String> testHarness =
        new StreamTaskMailboxTestHarnessBuilder<>(
                        MultipleInputStreamTask::new, BasicTypeInfo.STRING_TYPE_INFO)
                .addInput(BasicTypeInfo.STRING_TYPE_INFO)
                .addInput(BasicTypeInfo.INT_TYPE_INFO)
                .addInput(BasicTypeInfo.DOUBLE_TYPE_INFO)
                .addAdditionalOutput(partitionWriters)
                .setCheckpointResponder(checkpointResponder)
                .modifyStreamConfig(
                        config -> {
                            config.setCheckpointingEnabled(true);
                            config.setUnalignedCheckpointsEnabled(
                                    checkpointOptions.isUnalignedCheckpoint()
                                            || checkpointOptions.isTimeoutable());
                        })
                .setupOperatorChain(new MapToStringMultipleInputOperatorFactory(3))
                .finishForSingletonOperatorChain(StringSerializer.INSTANCE)
                .build()) {
checkpointResponder.setHandlers(
        testHarness.streamTask::notifyCheckpointCompleteAsync,
        testHarness.streamTask::notifyCheckpointAbortAsync);
testHarness.getStreamTask().getCheckpointBarrierHandler().get();
// Tests triggering a checkpoint when all the inputs are alive.
CompletableFuture<Boolean> checkpointFuture = triggerCheckpoint(testHarness, 2, checkpointOptions);
processMailTillCheckpointSucceeds(testHarness, checkpointFuture);
assertEquals(2, testHarness.getTaskStateManager().getReportedCheckpointId());
// Tests triggering a checkpoint after some inputs have received EndOfPartition.
testHarness.processEvent(new EndOfData(StopMode.DRAIN), 0, 0);
testHarness.processEvent(EndOfPartitionEvent.INSTANCE, 0, 0);
checkpointFuture = triggerCheckpoint(testHarness, 4, checkpointOptions);
processMailTillCheckpointSucceeds(testHarness, checkpointFuture);
assertEquals(4, testHarness.getTaskStateManager().getReportedCheckpointId());
// Tests triggering a checkpoint after all the inputs have received EndOfPartition.
testHarness.processEvent(new EndOfData(StopMode.DRAIN), 1, 0);
testHarness.processEvent(new EndOfData(StopMode.DRAIN), 2, 0);
testHarness.processEvent(EndOfPartitionEvent.INSTANCE, 1, 0);
testHarness.processEvent(EndOfPartitionEvent.INSTANCE, 2, 0);
checkpointFuture = triggerCheckpoint(testHarness, 6, checkpointOptions);
// Notifies the result partitions that all records have been processed once the
// last checkpoint has been triggered.
checkpointFuture.thenAccept((ignored) -> {
for (ResultPartition resultPartition : partitionWriters) {
resultPartition.onSubpartitionAllDataProcessed(0);
}
});
// Checkpoint 6 should now be triggered successfully.
testHarness.processAll();
testHarness.finishProcessing();
assertTrue(checkpointFuture.isDone());
testHarness.getTaskStateManager().getWaitForReportLatch().await();
assertEquals(6, testHarness.getTaskStateManager().getReportedCheckpointId());
// Each result partition should have emitted 3 barriers and 1 EndOfUserRecordsEvent.
for (ResultPartition resultPartition : partitionWriters) {
assertEquals(4, resultPartition.getNumberOfQueuedBuffers());
}
}
} finally {
for (ResultPartitionWriter writer : partitionWriters) {
if (writer != null) {
writer.close();
}
}
}
}
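processMailTillCheckpointSucceeds(...) is another helper that is not shown. A hedged reconstruction that uses only methods appearing elsewhere in these snippets; the real implementation may differ:

private static void processMailTillCheckpointSucceeds(
        StreamTaskMailboxTestHarness<String> testHarness,
        Future<Boolean> checkpointFuture) throws Exception {
    // Drain the task's mailbox until the trigger future completes, then wait
    // until the checkpoint has been reported to the task state manager.
    while (!checkpointFuture.isDone()) {
        testHarness.processAll();
    }
    testHarness.getTaskStateManager().getWaitForReportLatch().await();
}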
Use of org.apache.flink.runtime.io.network.api.EndOfData in project flink by apache.
The class EventSerializer, method fromSerializedEvent.
public static AbstractEvent fromSerializedEvent(ByteBuffer buffer, ClassLoader classLoader) throws IOException {
if (buffer.remaining() < 4) {
throw new IOException("Incomplete event");
}
final ByteOrder bufferOrder = buffer.order();
buffer.order(ByteOrder.BIG_ENDIAN);
try {
final int type = buffer.getInt();
if (type == END_OF_PARTITION_EVENT) {
return EndOfPartitionEvent.INSTANCE;
} else if (type == CHECKPOINT_BARRIER_EVENT) {
return deserializeCheckpointBarrier(buffer);
} else if (type == END_OF_SUPERSTEP_EVENT) {
return EndOfSuperstepEvent.INSTANCE;
} else if (type == END_OF_CHANNEL_STATE_EVENT) {
return EndOfChannelStateEvent.INSTANCE;
} else if (type == END_OF_USER_RECORDS_EVENT) {
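// The StopMode is encoded as a single byte holding its enum ordinal.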
return new EndOfData(StopMode.values()[buffer.get()]);
} else if (type == CANCEL_CHECKPOINT_MARKER_EVENT) {
long id = buffer.getLong();
return new CancelCheckpointMarker(id);
} else if (type == ANNOUNCEMENT_EVENT) {
int sequenceNumber = buffer.getInt();
AbstractEvent announcedEvent = fromSerializedEvent(buffer, classLoader);
return new EventAnnouncement(announcedEvent, sequenceNumber);
} else if (type == VIRTUAL_CHANNEL_SELECTOR_EVENT) {
return new SubtaskConnectionDescriptor(buffer.getInt(), buffer.getInt());
} else if (type == OTHER_EVENT) {
try {
final DataInputDeserializer deserializer = new DataInputDeserializer(buffer);
final String className = deserializer.readUTF();
final Class<? extends AbstractEvent> clazz;
try {
clazz = classLoader.loadClass(className).asSubclass(AbstractEvent.class);
} catch (ClassNotFoundException e) {
throw new IOException("Could not load event class '" + className + "'.", e);
} catch (ClassCastException e) {
throw new IOException("The class '" + className + "' is not a valid subclass of '" + AbstractEvent.class.getName() + "'.", e);
}
final AbstractEvent event = InstantiationUtil.instantiate(clazz, AbstractEvent.class);
event.read(deserializer);
return event;
} catch (Exception e) {
throw new IOException("Error while deserializing or instantiating event.", e);
}
} else {
throw new IOException("Corrupt byte stream for event");
}
} finally {
buffer.order(bufferOrder);
}
}
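For reference, this branch implies that EndOfData travels on the wire as a 4-byte big-endian type tag followed by a single StopMode ordinal byte. A hedged sketch of the matching encoder; the tag constant is private to EventSerializer, so it is taken as a parameter here rather than guessed:

static ByteBuffer serializeEndOfData(StopMode mode, int endOfUserRecordsEventTag) {
    // 4-byte event type tag + 1 byte for the StopMode ordinal.
    ByteBuffer buffer = ByteBuffer.allocate(5).order(ByteOrder.BIG_ENDIAN);
    buffer.putInt(endOfUserRecordsEventTag);
    buffer.put((byte) mode.ordinal());
    buffer.flip();
    return buffer;
}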