Use of org.apache.flink.runtime.io.network.api.CheckpointBarrier in project flink by apache.
Class OneInputStreamTaskTest, method testOvertakingCheckpointBarriers.
/**
* This test verifies that checkpoint barriers and barrier buffers work correctly with
* concurrent checkpoint barriers where one checkpoint is "overtaking" another checkpoint, i.e.
* some inputs receive barriers from an earlier checkpoint, thereby blocking, then all inputs
* receive barriers from a later checkpoint.
*/
@Test
public void testOvertakingCheckpointBarriers() throws Exception {
    final OneInputStreamTaskTestHarness<String, String> testHarness =
            new OneInputStreamTaskTestHarness<>(
                    OneInputStreamTask::new, 2, 2,
                    BasicTypeInfo.STRING_TYPE_INFO, BasicTypeInfo.STRING_TYPE_INFO);
    testHarness.setupOutputForSingletonOperatorChain();

    StreamConfig streamConfig = testHarness.getStreamConfig();
    StreamMap<String, String> mapOperator = new StreamMap<>(new IdentityMap());
    streamConfig.setStreamOperator(mapOperator);
    streamConfig.setOperatorID(new OperatorID());

    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    long initialTime = 0L;

    testHarness.invoke();
    testHarness.waitForTaskRunning();

    testHarness.processEvent(new CheckpointBarrier(0, 0, CheckpointOptions.forCheckpointWithDefaultLocation()), 0, 0);

    // These elements should be forwarded, since we did not yet receive a checkpoint barrier
    // on that input. Only add to the same input; otherwise we would not know the ordering
    // of the output, since the task might read the inputs in any order.
    testHarness.processElement(new StreamRecord<>("Hello-1-1", initialTime), 1, 1);
    testHarness.processElement(new StreamRecord<>("Ciao-1-1", initialTime), 1, 1);
    expectedOutput.add(new StreamRecord<>("Hello-1-1", initialTime));
    expectedOutput.add(new StreamRecord<>("Ciao-1-1", initialTime));

    testHarness.waitForInputProcessing();

    // We should not yet see the barrier, only the two elements from the non-blocked input.
    TestHarnessUtil.assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());

    // Now give a later barrier to all inputs; this should unblock the first channel.
    testHarness.processEvent(new CheckpointBarrier(1, 1, CheckpointOptions.forCheckpointWithDefaultLocation()), 0, 1);
    testHarness.processEvent(new CheckpointBarrier(1, 1, CheckpointOptions.forCheckpointWithDefaultLocation()), 0, 0);
    testHarness.processEvent(new CheckpointBarrier(1, 1, CheckpointOptions.forCheckpointWithDefaultLocation()), 1, 0);
    testHarness.processEvent(new CheckpointBarrier(1, 1, CheckpointOptions.forCheckpointWithDefaultLocation()), 1, 1);

    expectedOutput.add(new CancelCheckpointMarker(0));
    expectedOutput.add(new CheckpointBarrier(1, 1, CheckpointOptions.forCheckpointWithDefaultLocation()));

    testHarness.waitForInputProcessing();

    TestHarnessUtil.assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());

    // Then give the earlier barrier; these should be ignored.
    testHarness.processEvent(new CheckpointBarrier(0, 0, CheckpointOptions.forCheckpointWithDefaultLocation()), 0, 1);
    testHarness.processEvent(new CheckpointBarrier(0, 0, CheckpointOptions.forCheckpointWithDefaultLocation()), 1, 0);
    testHarness.processEvent(new CheckpointBarrier(0, 0, CheckpointOptions.forCheckpointWithDefaultLocation()), 1, 1);

    testHarness.waitForInputProcessing();
    testHarness.endInput();
    testHarness.waitForTaskCompletion();

    TestHarnessUtil.assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());
}
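The essential property in the overtaking scenario is the ordering of the emitted events: the CancelCheckpointMarker for the subsumed checkpoint 0 must precede the barrier of checkpoint 1. A hedged sketch of such an ordering check (not part of the original test; it copies the harness output into a list and relies on the events' equals() implementations, which the assertOutputEquals comparison above already depends on):

// Illustrative only: the cancel marker of the overtaken checkpoint comes before the newer barrier.
List<Object> output = new ArrayList<>(testHarness.getOutput());
int cancelIndex = output.indexOf(new CancelCheckpointMarker(0));
int barrierIndex = output.indexOf(
        new CheckpointBarrier(1, 1, CheckpointOptions.forCheckpointWithDefaultLocation()));
assertTrue(cancelIndex >= 0 && cancelIndex < barrierIndex);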
Use of org.apache.flink.runtime.io.network.api.CheckpointBarrier in project flink by apache.
Class RecordWriterTest, method testBroadcastEventNoRecords.
/**
* Tests broadcasting events when no records have been emitted yet.
*/
@Test
public void testBroadcastEventNoRecords() throws Exception {
    int numberOfChannels = 4;
    int bufferSize = 32;

    ResultPartition partition = createResultPartition(bufferSize, numberOfChannels);
    RecordWriter<ByteArrayIO> writer = createRecordWriter(partition);
    CheckpointBarrier barrier =
            new CheckpointBarrier(
                    Integer.MAX_VALUE + 919192L,
                    Integer.MAX_VALUE + 18828228L,
                    CheckpointOptions.forCheckpointWithDefaultLocation());

    // No records emitted yet, broadcast should not request a buffer.
    writer.broadcastEvent(barrier);

    assertEquals(0, partition.getBufferPool().bestEffortGetNumOfUsedBuffers());

    for (int i = 0; i < numberOfChannels; i++) {
        assertEquals(1, partition.getNumberOfQueuedBuffers(i));
        ResultSubpartitionView view =
                partition.createSubpartitionView(i, new NoOpBufferAvailablityListener());
        BufferOrEvent boe = parseBuffer(view.getNextBuffer().buffer(), i);
        assertTrue(boe.isEvent());
        assertEquals(barrier, boe.getEvent());
        assertFalse(view.getAvailabilityAndBacklog(Integer.MAX_VALUE).isAvailable());
    }
}
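The barrier's id and timestamp are presumably chosen above Integer.MAX_VALUE so that an accidental narrowing to int anywhere in the event path would change the value and break the equality check against the event read back from the subpartition. A small illustration (the local variables are only for exposition):

long checkpointId = Integer.MAX_VALUE + 919192L;  // 2148402839, does not fit in an int
long timestamp = Integer.MAX_VALUE + 18828228L;   // 2166311875, does not fit in an int
CheckpointBarrier barrier =
        new CheckpointBarrier(checkpointId, timestamp, CheckpointOptions.forCheckpointWithDefaultLocation());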
Use of org.apache.flink.runtime.io.network.api.CheckpointBarrier in project flink by apache.
Class CheckpointSerializationTest, method testCheckpointBarrierSerialization.
private void testCheckpointBarrierSerialization(CheckpointOptions options) throws IOException {
    final long checkpointId = Integer.MAX_VALUE + 123123L;
    final long timestamp = Integer.MAX_VALUE + 1228L;

    final CheckpointBarrier barrierBeforeSerialization =
            new CheckpointBarrier(checkpointId, timestamp, options);
    final CheckpointBarrier barrierAfterDeserialization =
            serializeAndDeserializeCheckpointBarrier(barrierBeforeSerialization);

    assertEquals(barrierBeforeSerialization, barrierAfterDeserialization);
}
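The concrete @Test methods that drive this helper are not part of this snippet; a minimal hypothetical caller, using the CheckpointOptions factory that appears elsewhere on this page, could look like this:

@Test
public void testCheckpointBarrierWithDefaultLocationSerialization() throws IOException {
    // Hypothetical test name; exercises the round-trip helper with default-location checkpoint options.
    testCheckpointBarrierSerialization(CheckpointOptions.forCheckpointWithDefaultLocation());
}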
Use of org.apache.flink.runtime.io.network.api.CheckpointBarrier in project flink by apache.
Class CheckpointSerializationTest, method serializeAndDeserializeCheckpointBarrier.
private CheckpointBarrier serializeAndDeserializeCheckpointBarrier(final CheckpointBarrier barrierUnderTest)
        throws IOException {
    final ClassLoader cl = Thread.currentThread().getContextClassLoader();
    final ByteBuffer serialized = EventSerializer.toSerializedEvent(barrierUnderTest);
    final CheckpointBarrier deserialized =
            (CheckpointBarrier) EventSerializer.fromSerializedEvent(serialized, cl);
    assertFalse(serialized.hasRemaining());
    return deserialized;
}
Use of org.apache.flink.runtime.io.network.api.CheckpointBarrier in project flink by apache.
Class EventSerializer, method toSerializedEvent.
// ------------------------------------------------------------------------
// Serialization Logic
// ------------------------------------------------------------------------
public static ByteBuffer toSerializedEvent(AbstractEvent event) throws IOException {
    final Class<?> eventClass = event.getClass();
    if (eventClass == EndOfPartitionEvent.class) {
        return ByteBuffer.wrap(new byte[] {0, 0, 0, END_OF_PARTITION_EVENT});
    } else if (eventClass == CheckpointBarrier.class) {
        return serializeCheckpointBarrier((CheckpointBarrier) event);
    } else if (eventClass == EndOfSuperstepEvent.class) {
        return ByteBuffer.wrap(new byte[] {0, 0, 0, END_OF_SUPERSTEP_EVENT});
    } else if (eventClass == EndOfChannelStateEvent.class) {
        return ByteBuffer.wrap(new byte[] {0, 0, 0, END_OF_CHANNEL_STATE_EVENT});
    } else if (eventClass == EndOfData.class) {
        return ByteBuffer.wrap(
                new byte[] {
                    0, 0, 0, END_OF_USER_RECORDS_EVENT, (byte) ((EndOfData) event).getStopMode().ordinal()
                });
    } else if (eventClass == CancelCheckpointMarker.class) {
        CancelCheckpointMarker marker = (CancelCheckpointMarker) event;
        ByteBuffer buf = ByteBuffer.allocate(12);
        // Absolute puts leave the buffer's position at 0, so no flip() is needed before returning.
        buf.putInt(0, CANCEL_CHECKPOINT_MARKER_EVENT);
        buf.putLong(4, marker.getCheckpointId());
        return buf;
    } else if (eventClass == EventAnnouncement.class) {
        EventAnnouncement announcement = (EventAnnouncement) event;
        ByteBuffer serializedAnnouncedEvent = toSerializedEvent(announcement.getAnnouncedEvent());
        ByteBuffer serializedAnnouncement =
                ByteBuffer.allocate(2 * Integer.BYTES + serializedAnnouncedEvent.capacity());
        serializedAnnouncement.putInt(0, ANNOUNCEMENT_EVENT);
        serializedAnnouncement.putInt(4, announcement.getSequenceNumber());
        serializedAnnouncement.position(8);
        serializedAnnouncement.put(serializedAnnouncedEvent);
        serializedAnnouncement.flip();
        return serializedAnnouncement;
    } else if (eventClass == SubtaskConnectionDescriptor.class) {
        SubtaskConnectionDescriptor selector = (SubtaskConnectionDescriptor) event;
        ByteBuffer buf = ByteBuffer.allocate(12);
        // Relative puts advance the position, so the buffer is flipped before returning.
        buf.putInt(VIRTUAL_CHANNEL_SELECTOR_EVENT);
        buf.putInt(selector.getInputSubtaskIndex());
        buf.putInt(selector.getOutputSubtaskIndex());
        buf.flip();
        return buf;
    } else {
        try {
            // Generic fallback: tag, fully qualified class name, then the event's own serialization.
            final DataOutputSerializer serializer = new DataOutputSerializer(128);
            serializer.writeInt(OTHER_EVENT);
            serializer.writeUTF(event.getClass().getName());
            event.write(serializer);
            return serializer.wrapAsByteBuffer();
        } catch (IOException e) {
            throw new IOException("Error while serializing event.", e);
        }
    }
}
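As a rough usage sketch of this dispatch (not taken from the Flink sources), any event serialized by toSerializedEvent can be restored with EventSerializer.fromSerializedEvent, the counterpart already used in serializeAndDeserializeCheckpointBarrier above; here with a CancelCheckpointMarker, whose branch writes the tag and checkpoint id shown above:

// Sketch: round-trip a CancelCheckpointMarker through the event (de)serializer.
CancelCheckpointMarker marker = new CancelCheckpointMarker(42L);
ByteBuffer serialized = EventSerializer.toSerializedEvent(marker);
AbstractEvent restored =
        EventSerializer.fromSerializedEvent(serialized, Thread.currentThread().getContextClassLoader());
assertEquals(42L, ((CancelCheckpointMarker) restored).getCheckpointId());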