Use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
From the class SubpartitionTestBase, method testRecycleBufferAndConsumerOnFailure.
@Test
public void testRecycleBufferAndConsumerOnFailure() throws Exception {
    final ResultSubpartition subpartition = createFailingWritesSubpartition();
    try {
        final BufferConsumer consumer = BufferBuilderTestUtils.createFilledFinishedBufferConsumer(100);
        try {
            subpartition.add(consumer);
            subpartition.flush();
            fail("should fail with an exception");
        } catch (Exception ignored) {
            // expected
        }

        assertTrue(consumer.isRecycled());
    } finally {
        subpartition.release();
    }
}
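The try/fail/catch idiom above can be written more compactly with assertThrows. A minimal sketch, assuming a JUnit version that ships org.junit.Assert.assertThrows (4.13+), which the snippet above does not guarantee:

final BufferConsumer consumer = BufferBuilderTestUtils.createFilledFinishedBufferConsumer(100);
// assertThrows replaces the manual fail()/catch bookkeeping.
assertThrows(Exception.class, () -> {
    subpartition.add(consumer);
    subpartition.flush();
});
// Once add() has been called, the subpartition owns the consumer, so it must be recycled even on failure.
assertTrue(consumer.isRecycled());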
Use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
From the class PipelinedSubpartitionTest, method verifyViewReleasedAfterParentRelease.
private void verifyViewReleasedAfterParentRelease(ResultSubpartition partition) throws Exception {
    // Add a bufferConsumer
    BufferConsumer bufferConsumer = createFilledFinishedBufferConsumer(BufferBuilderTestUtils.BUFFER_SIZE);
    partition.add(bufferConsumer);
    partition.finish();

    // Create the view
    BufferAvailabilityListener listener = mock(BufferAvailabilityListener.class);
    ResultSubpartitionView view = partition.createReadView(listener);

    // The added bufferConsumer and end-of-partition event
    assertNotNull(view.getNextBuffer());
    assertNotNull(view.getNextBuffer());

    // Release the parent
    assertFalse(view.isReleased());
    partition.release();

    // Verify that parent release is reflected at partition view
    assertTrue(view.isReleased());
}
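The test only checks that getNextBuffer() returns something; a reader that actually consumes the view is responsible for recycling the Buffer carried by each returned element. A minimal sketch of that pattern, assuming the BufferAndBacklog type returned by ResultSubpartitionView#getNextBuffer in this code base:

ResultSubpartition.BufferAndBacklog next = view.getNextBuffer();
if (next != null) {
    Buffer buffer = next.buffer();
    try {
        // read the data or interpret the event ...
    } finally {
        // the reader releases its reference once it is done with the buffer
        buffer.recycleBuffer();
    }
}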
Use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
From the class RecordOrEventCollectingResultPartitionWriter, method broadcastEvent.
@Override
public void broadcastEvent(AbstractEvent event, boolean isPriorityEvent) throws IOException {
    // Deserialize the event eagerly so that it is directly visible in ITCases or end-to-end tests.
    try (BufferConsumer eventBufferConsumer = EventSerializer.toBufferConsumer(event, isPriorityEvent)) {
        Buffer buffer = eventBufferConsumer.build();
        try {
            AbstractEvent deserializedEvent = EventSerializer.fromBuffer(buffer, getClass().getClassLoader());
            output.add(deserializedEvent);
        } finally {
            buffer.recycleBuffer();
        }
    }
}
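For reference, the same serialize/deserialize round trip can be done without the BufferConsumer indirection, using only calls that already appear on this page (EventSerializer.toBuffer and EventSerializer.fromBuffer). A minimal sketch in the same class context:

Buffer eventBuffer = EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE, false);
try {
    // fromBuffer restores the event instance from its serialized form
    AbstractEvent restored = EventSerializer.fromBuffer(eventBuffer, getClass().getClassLoader());
    output.add(restored);
} finally {
    eventBuffer.recycleBuffer();
}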
Use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
From the class EventSerializerTest, method testToBufferConsumer.
@Test
public void testToBufferConsumer() throws IOException {
    for (AbstractEvent evt : events) {
        BufferConsumer bufferConsumer = EventSerializer.toBufferConsumer(evt, false);

        assertFalse(bufferConsumer.isBuffer());
        assertTrue(bufferConsumer.isFinished());
        assertTrue(bufferConsumer.isDataAvailable());
        assertFalse(bufferConsumer.isRecycled());

        if (evt instanceof CheckpointBarrier) {
            assertTrue(bufferConsumer.build().getDataType().isBlockingUpstream());
        } else {
            assertEquals(Buffer.DataType.EVENT_BUFFER, bufferConsumer.build().getDataType());
        }
    }
}
Use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
From the class StreamTestSingleInputGate, method setupInputChannels.
private TestInputChannel[] setupInputChannels() {
    TestInputChannel[] inputChannels = new TestInputChannel[numInputChannels];
    for (int i = 0; i < numInputChannels; i++) {
        final int channelIndex = i;
        final DataOutputSerializer dataOutputSerializer = new DataOutputSerializer(128);
        final SerializationDelegate<StreamElement> delegate =
                new SerializationDelegate<>(new StreamElementSerializer<T>(serializer));

        inputQueues[channelIndex] = new ConcurrentLinkedQueue<>();
        inputChannels[channelIndex] = new TestInputChannel(inputGate, i);

        final BufferAndAvailabilityProvider answer = () -> {
            ConcurrentLinkedQueue<InputValue<Object>> inputQueue = inputQueues[channelIndex];
            InputValue<Object> input;
            Buffer.DataType nextType;
            synchronized (inputQueue) {
                input = inputQueue.poll();
                nextType = !inputQueue.isEmpty() ? Buffer.DataType.DATA_BUFFER : Buffer.DataType.NONE;
            }

            if (input != null && input.isStreamEnd()) {
                inputChannels[channelIndex].setReleased();
                return Optional.of(new BufferAndAvailability(
                        EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE, false), nextType, 0, 0));
            } else if (input != null && input.isDataEnd()) {
                return Optional.of(new BufferAndAvailability(
                        EventSerializer.toBuffer(new EndOfData(StopMode.DRAIN), false), nextType, 0, 0));
            } else if (input != null && input.isStreamRecord()) {
                // Serialize the stream element and copy it into a fresh buffer.
                StreamElement inputElement = input.getStreamRecord();
                delegate.setInstance(inputElement);
                ByteBuffer serializedRecord = RecordWriter.serializeRecord(dataOutputSerializer, delegate);

                BufferBuilder bufferBuilder = createBufferBuilder(bufferSize);
                BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();
                bufferBuilder.appendAndCommit(serializedRecord);
                bufferBuilder.finish();
                bufferBuilder.close();

                // build() materializes the committed bytes into a Buffer whose size is already set.
                return Optional.of(new BufferAndAvailability(bufferConsumer.build(), nextType, 0, 0));
            } else if (input != null && input.isEvent()) {
                AbstractEvent event = input.getEvent();
                if (event instanceof EndOfPartitionEvent) {
                    inputChannels[channelIndex].setReleased();
                }
                return Optional.of(new BufferAndAvailability(
                        EventSerializer.toBuffer(event, false), nextType, 0, 0));
            } else {
                return Optional.empty();
            }
        };

        inputChannels[channelIndex].addBufferAndAvailability(answer);
    }
    return inputChannels;
}
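The heart of the stream-record branch above is the BufferBuilder/BufferConsumer pairing: the producer writes through the builder, and the matching consumer hands the committed bytes to the reader as a Buffer. A stripped-down sketch of just that pairing, assuming the statically imported createBufferBuilder helper from the test utilities and an arbitrary four-byte payload:

BufferBuilder bufferBuilder = createBufferBuilder(bufferSize);
BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();

// producer side: write the payload, then mark the builder as finished
bufferBuilder.appendAndCommit(ByteBuffer.wrap(new byte[] {1, 2, 3, 4}));
bufferBuilder.finish();
bufferBuilder.close();

// reader side: build() exposes exactly the committed bytes
Buffer buffer = bufferConsumer.build();
try {
    int committedBytes = buffer.readableBytes(); // 4, the payload written above
} finally {
    buffer.recycleBuffer();
}
bufferConsumer.close();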