Use of org.apache.flink.runtime.io.network.api.CheckpointBarrier in project flink by apache.
The class MultipleInputStreamTaskChainedSourcesCheckpointingTest, method testSourceCheckpointFirstUnaligned.
/**
 * In this scenario: 1. a checkpoint is triggered via RPC while the source is blocked, 2. an
 * unaligned checkpoint is performed, 3. all data from the network inputs is processed.
 */
@Test
public void testSourceCheckpointFirstUnaligned() throws Exception {
    try (StreamTaskMailboxTestHarness<String> testHarness =
            buildTestHarness(true, objectReuse)) {
        testHarness.setAutoProcess(false);
        ArrayDeque<Object> expectedOutput = new ArrayDeque<>();
        addRecords(testHarness);

        CheckpointBarrier barrier = createBarrier(testHarness);
        Future<Boolean> checkpointFuture =
                testHarness
                        .getStreamTask()
                        .triggerCheckpointAsync(metaData, barrier.getCheckpointOptions());
        processSingleStepUntil(testHarness, checkpointFuture::isDone);

        assertThat(testHarness.getOutput(), contains(barrier));

        testHarness.processAll();

        expectedOutput.add(new StreamRecord<>("44", TimestampAssigner.NO_TIMESTAMP));
        expectedOutput.add(new StreamRecord<>("44", TimestampAssigner.NO_TIMESTAMP));
        expectedOutput.add(new StreamRecord<>("47.0", TimestampAssigner.NO_TIMESTAMP));
        expectedOutput.add(new StreamRecord<>("47.0", TimestampAssigner.NO_TIMESTAMP));

        ArrayList<Object> actualOutput = new ArrayList<>(testHarness.getOutput());
        assertThat(
                actualOutput.subList(1, expectedOutput.size() + 1),
                containsInAnyOrder(expectedOutput.toArray()));
    }
}
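For reference, the barrier object itself is a small value class: it carries only the checkpoint id, the trigger timestamp, and the CheckpointOptions that decide whether the checkpoint is aligned or unaligned. Below is a minimal sketch outside any test harness, using only the public constructor and the accessors getId(), getTimestamp(), and getCheckpointOptions(); the class name CheckpointBarrierSketch is illustrative.

import org.apache.flink.runtime.checkpoint.CheckpointOptions;
import org.apache.flink.runtime.io.network.api.CheckpointBarrier;

public class CheckpointBarrierSketch {

    public static void main(String[] args) {
        // Build a barrier the same way the harness-based tests do, using the
        // default checkpoint location.
        CheckpointBarrier barrier =
                new CheckpointBarrier(
                        42L,
                        System.currentTimeMillis(),
                        CheckpointOptions.forCheckpointWithDefaultLocation());

        // The barrier is a plain value object; these accessors are all that
        // downstream operators (and the assertions above) rely on.
        System.out.println("checkpoint id: " + barrier.getId());
        System.out.println("timestamp:     " + barrier.getTimestamp());
        System.out.println("options:       " + barrier.getCheckpointOptions());
    }
}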
Use of org.apache.flink.runtime.io.network.api.CheckpointBarrier in project flink by apache.
The class OneInputStreamTaskTest, method testCheckpointBarriers.
/**
* This test verifies that checkpoint barriers are correctly forwarded.
*/
@Test
public void testCheckpointBarriers() throws Exception {
    final OneInputStreamTaskTestHarness<String, String> testHarness =
            new OneInputStreamTaskTestHarness<>(
                    OneInputStreamTask::new,
                    2,
                    2,
                    BasicTypeInfo.STRING_TYPE_INFO,
                    BasicTypeInfo.STRING_TYPE_INFO);
    testHarness.setupOutputForSingletonOperatorChain();

    StreamConfig streamConfig = testHarness.getStreamConfig();
    StreamMap<String, String> mapOperator = new StreamMap<>(new IdentityMap());
    streamConfig.setStreamOperator(mapOperator);
    streamConfig.setOperatorID(new OperatorID());

    ConcurrentLinkedQueue<Object> expectedOutput = new ConcurrentLinkedQueue<>();
    long initialTime = 0L;

    testHarness.invoke();
    testHarness.waitForTaskRunning();

    testHarness.processEvent(
            new CheckpointBarrier(0, 0, CheckpointOptions.forCheckpointWithDefaultLocation()), 0, 0);

    // These elements should be forwarded, since we have not yet received a checkpoint barrier
    // on that input. We only add elements to the same input; otherwise we would not know the
    // ordering of the output, since the task may read its inputs in any order.
    testHarness.processElement(new StreamRecord<>("Hello-1-1", initialTime), 1, 1);
    testHarness.processElement(new StreamRecord<>("Ciao-1-1", initialTime), 1, 1);
    expectedOutput.add(new StreamRecord<>("Hello-1-1", initialTime));
    expectedOutput.add(new StreamRecord<>("Ciao-1-1", initialTime));

    testHarness.waitForInputProcessing();
    // We should not yet see the barrier, only the two elements from the non-blocked input.
    TestHarnessUtil.assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());

    testHarness.processEvent(
            new CheckpointBarrier(0, 0, CheckpointOptions.forCheckpointWithDefaultLocation()), 0, 1);
    testHarness.processEvent(
            new CheckpointBarrier(0, 0, CheckpointOptions.forCheckpointWithDefaultLocation()), 1, 0);
    testHarness.processEvent(
            new CheckpointBarrier(0, 0, CheckpointOptions.forCheckpointWithDefaultLocation()), 1, 1);

    testHarness.waitForInputProcessing();

    // Now we should see the barrier.
    expectedOutput.add(new CheckpointBarrier(0, 0, CheckpointOptions.forCheckpointWithDefaultLocation()));

    testHarness.endInput();
    testHarness.waitForTaskCompletion();

    TestHarnessUtil.assertOutputEquals("Output was not correct.", expectedOutput, testHarness.getOutput());
}
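The assertions above depend on aligned-checkpoint semantics: with two input gates of two channels each, the barrier is forwarded downstream only after it has arrived on all four channels. The per-checkpoint counting below is a toy illustration of that idea, not Flink's actual CheckpointBarrierHandler.

import java.util.HashMap;
import java.util.Map;

/** Toy barrier aligner: forwards a checkpoint's barrier once every channel has delivered it. */
public final class ToyBarrierAligner {

    private final int totalChannels;
    private final Map<Long, Integer> seenPerCheckpoint = new HashMap<>();

    public ToyBarrierAligner(int totalChannels) {
        this.totalChannels = totalChannels;
    }

    /** Returns true when the barrier for the given checkpoint id should be forwarded downstream. */
    public boolean onBarrier(long checkpointId) {
        int seen = seenPerCheckpoint.merge(checkpointId, 1, Integer::sum);
        if (seen == totalChannels) {
            seenPerCheckpoint.remove(checkpointId);
            return true; // last channel delivered its barrier -> emit downstream
        }
        return false; // still waiting for barriers from other channels
    }
}

With four channels, onBarrier returns true only on the fourth call for a given checkpoint id, which is why the barrier shows up in the test's output only after the last processEvent call.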
Use of org.apache.flink.runtime.io.network.api.CheckpointBarrier in project flink by apache.
The class RecordWriterTest, method testBroadcastEventMixedRecords.
/**
* Tests broadcasting events when records have been emitted.
*/
@Test
public void testBroadcastEventMixedRecords() throws Exception {
    Random rand = new XORShiftRandom();
    int numberOfChannels = 4;
    int bufferSize = 32;
    // serialized length
    int lenBytes = 4;

    ResultPartition partition = createResultPartition(bufferSize, numberOfChannels);
    RecordWriter<ByteArrayIO> writer = createRecordWriter(partition);
    CheckpointBarrier barrier =
            new CheckpointBarrier(
                    Integer.MAX_VALUE + 1292L,
                    Integer.MAX_VALUE + 199L,
                    CheckpointOptions.forCheckpointWithDefaultLocation());

    // Emit records on some channels first (requesting buffers), then broadcast the event.
    // The record buffers should be emitted first, then the event. After the event, no new
    // buffer should be requested.

    // (i) Smaller than the buffer size
    byte[] bytes = new byte[bufferSize / 2];
    rand.nextBytes(bytes);
    writer.emit(new ByteArrayIO(bytes));

    // (ii) Larger than the buffer size
    bytes = new byte[bufferSize + 1];
    rand.nextBytes(bytes);
    writer.emit(new ByteArrayIO(bytes));

    // (iii) Exactly the buffer size
    bytes = new byte[bufferSize - lenBytes];
    rand.nextBytes(bytes);
    writer.emit(new ByteArrayIO(bytes));

    // (iv) Broadcast the event
    writer.broadcastEvent(barrier);

    if (isBroadcastWriter) {
        assertEquals(3, partition.getBufferPool().bestEffortGetNumOfUsedBuffers());

        for (int i = 0; i < numberOfChannels; i++) {
            // 3 buffers + 1 event
            assertEquals(4, partition.getNumberOfQueuedBuffers(i));

            ResultSubpartitionView view =
                    partition.createSubpartitionView(i, new NoOpBufferAvailablityListener());
            for (int j = 0; j < 3; j++) {
                assertTrue(parseBuffer(view.getNextBuffer().buffer(), 0).isBuffer());
            }

            BufferOrEvent boe = parseBuffer(view.getNextBuffer().buffer(), i);
            assertTrue(boe.isEvent());
            assertEquals(barrier, boe.getEvent());
        }
    } else {
        assertEquals(4, partition.getBufferPool().bestEffortGetNumOfUsedBuffers());
        ResultSubpartitionView[] views = new ResultSubpartitionView[4];

        // 1 buffer + 1 event
        assertEquals(2, partition.getNumberOfQueuedBuffers(0));
        views[0] = partition.createSubpartitionView(0, new NoOpBufferAvailablityListener());
        assertTrue(parseBuffer(views[0].getNextBuffer().buffer(), 0).isBuffer());

        // 2 buffers + 1 event
        assertEquals(3, partition.getNumberOfQueuedBuffers(1));
        views[1] = partition.createSubpartitionView(1, new NoOpBufferAvailablityListener());
        assertTrue(parseBuffer(views[1].getNextBuffer().buffer(), 1).isBuffer());
        assertTrue(parseBuffer(views[1].getNextBuffer().buffer(), 1).isBuffer());

        // 1 buffer + 1 event
        assertEquals(2, partition.getNumberOfQueuedBuffers(2));
        views[2] = partition.createSubpartitionView(2, new NoOpBufferAvailablityListener());
        assertTrue(parseBuffer(views[2].getNextBuffer().buffer(), 2).isBuffer());

        views[3] = partition.createSubpartitionView(3, new NoOpBufferAvailablityListener());
        // 0 buffers + 1 event
        assertEquals(1, partition.getNumberOfQueuedBuffers(3));

        // Every queue's last element should be the event.
        for (int i = 0; i < numberOfChannels; i++) {
            BufferOrEvent boe = parseBuffer(views[i].getNextBuffer().buffer(), i);
            assertTrue(boe.isEvent());
            assertEquals(barrier, boe.getEvent());
        }
    }
}
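The channel-by-channel assertions encode one invariant: broadcastEvent appends the serialized event to every subpartition queue behind whatever record buffers that channel already holds, so each queue ends with the event. The toy model below (a hypothetical class, not part of Flink's RecordWriter) captures just that ordering.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

/** Toy model of per-channel queues: records go to one channel, events go to all channels. */
public final class ToyChannelQueues {

    private final List<Deque<String>> queues = new ArrayList<>();

    public ToyChannelQueues(int numberOfChannels) {
        for (int i = 0; i < numberOfChannels; i++) {
            queues.add(new ArrayDeque<>());
        }
    }

    public void emitRecord(int channel, String record) {
        queues.get(channel).add(record);
    }

    public void broadcastEvent(String event) {
        // The event is appended to every queue, behind any previously emitted records.
        for (Deque<String> queue : queues) {
            queue.add(event);
        }
    }

    public String lastElement(int channel) {
        return queues.get(channel).peekLast();
    }
}

In the non-broadcast branch of the test, for example, channel 1 received two record buffers before the broadcast, so its queue reads record, record, event, while channel 3 received nothing and holds only the event.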
Use of org.apache.flink.runtime.io.network.api.CheckpointBarrier in project flink by apache.
The class EventSerializerTest, method testToBufferConsumer.
@Test
public void testToBufferConsumer() throws IOException {
    for (AbstractEvent evt : events) {
        BufferConsumer bufferConsumer = EventSerializer.toBufferConsumer(evt, false);

        assertFalse(bufferConsumer.isBuffer());
        assertTrue(bufferConsumer.isFinished());
        assertTrue(bufferConsumer.isDataAvailable());
        assertFalse(bufferConsumer.isRecycled());

        if (evt instanceof CheckpointBarrier) {
            assertTrue(bufferConsumer.build().getDataType().isBlockingUpstream());
        } else {
            assertEquals(Buffer.DataType.EVENT_BUFFER, bufferConsumer.build().getDataType());
        }
    }
}
Use of org.apache.flink.runtime.io.network.api.CheckpointBarrier in project flink by apache.
The class EventSerializerTest, method testToBuffer.
@Test
public void testToBuffer() throws IOException {
    for (AbstractEvent evt : events) {
        Buffer buffer = EventSerializer.toBuffer(evt, false);

        assertFalse(buffer.isBuffer());
        assertTrue(buffer.readableBytes() > 0);
        assertFalse(buffer.isRecycled());

        if (evt instanceof CheckpointBarrier) {
            assertTrue(buffer.getDataType().isBlockingUpstream());
        } else {
            assertEquals(Buffer.DataType.EVENT_BUFFER, buffer.getDataType());
        }
    }
}
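Both tests only exercise the serialization direction; EventSerializer can also read events back. The sketch below round-trips a CheckpointBarrier, assuming the static methods toSerializedEvent(AbstractEvent) and fromSerializedEvent(ByteBuffer, ClassLoader) in Flink's runtime (verify the signatures against the Flink version you are using); the class name EventRoundTrip is illustrative.

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.flink.runtime.checkpoint.CheckpointOptions;
import org.apache.flink.runtime.event.AbstractEvent;
import org.apache.flink.runtime.io.network.api.CheckpointBarrier;
import org.apache.flink.runtime.io.network.api.serialization.EventSerializer;

public class EventRoundTrip {

    public static void main(String[] args) throws IOException {
        AbstractEvent original =
                new CheckpointBarrier(7L, 123L, CheckpointOptions.forCheckpointWithDefaultLocation());

        // Serialize the event to a ByteBuffer and read it back with the same class loader.
        ByteBuffer serialized = EventSerializer.toSerializedEvent(original);
        AbstractEvent restored =
                EventSerializer.fromSerializedEvent(serialized, EventRoundTrip.class.getClassLoader());

        // CheckpointBarrier implements equals(), so the round trip can be checked directly
        // (the tests above rely on the same equality when asserting on the received event).
        System.out.println(original.equals(restored)); // expected: true
    }
}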