use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
the class PipelinedSubpartition method pollBuffer.
@Nullable
BufferAndBacklog pollBuffer() {
    synchronized (buffers) {
        if (isBlocked) {
            return null;
        }
        Buffer buffer = null;
        if (buffers.isEmpty()) {
            flushRequested = false;
        }
        while (!buffers.isEmpty()) {
            BufferConsumerWithPartialRecordLength bufferConsumerWithPartialRecordLength =
                    buffers.peek();
            BufferConsumer bufferConsumer =
                    bufferConsumerWithPartialRecordLength.getBufferConsumer();
            buffer = buildSliceBuffer(bufferConsumerWithPartialRecordLength);
            checkState(
                    bufferConsumer.isFinished() || buffers.size() == 1,
                    "When there are multiple buffers, an unfinished bufferConsumer can not be at the head of the buffers queue.");
            if (buffers.size() == 1) {
                // turn off flushRequested flag if we drained all of the available data
                flushRequested = false;
            }
            if (bufferConsumer.isFinished()) {
                requireNonNull(buffers.poll()).getBufferConsumer().close();
                decreaseBuffersInBacklogUnsafe(bufferConsumer.isBuffer());
            }
            // if we have an empty finished buffer and the exclusive credit is 0, return the
            // empty buffer so that the downstream task can release the credit allocated for
            // it; notably, in approximate recovery mode, a partial record takes a whole
            // buffer builder
            if (receiverExclusiveBuffersPerChannel == 0 && bufferConsumer.isFinished()) {
                break;
            }
            if (buffer.readableBytes() > 0) {
                break;
            }
            buffer.recycleBuffer();
            buffer = null;
            if (!bufferConsumer.isFinished()) {
                break;
            }
        }
        if (buffer == null) {
            return null;
        }
        if (buffer.getDataType().isBlockingUpstream()) {
            isBlocked = true;
        }
        updateStatistics(buffer);
        // Do not report the last remaining buffer as available to read (assuming it is
        // unfinished). It will be reported for reading either on flush or once the queue
        // holds 2 or more buffers.
        NetworkActionsLogger.traceOutput(
                "PipelinedSubpartition#pollBuffer",
                buffer,
                parent.getOwningTaskName(),
                subpartitionInfo);
        return new BufferAndBacklog(
                buffer,
                getBuffersInBacklogUnsafe(),
                isDataAvailableUnsafe() ? getNextBufferTypeUnsafe() : Buffer.DataType.NONE,
                sequenceNumber++);
    }
}
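For orientation, here is a minimal sketch (not Flink source) of the BufferBuilder/BufferConsumer handoff that pollBuffer() drains on the consumer side. It only combines calls that appear in the snippets on this page; the 1024-byte segment size is an arbitrary choice.
MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(1024); // size is arbitrary
BufferBuilder bufferBuilder = new BufferBuilder(segment, FreeingBufferRecycler.INSTANCE);
try (BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer()) {
    // producer side: commit a few bytes into the builder
    bufferBuilder.appendAndCommit(ByteBuffer.wrap(new byte[] {1, 2, 3}));
    // consumer side: build() returns a read-only slice over the committed bytes,
    // much like buildSliceBuffer(...) does per queue entry in pollBuffer()
    Buffer slice = bufferConsumer.build();
    assert slice.readableBytes() == 3;
    slice.recycleBuffer(); // pollBuffer() likewise recycles slices that turn out empty
} finally {
    bufferBuilder.close();
}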
use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
the class DemultiplexingRecordDeserializerTest method write.
private Buffer write(BufferBuilder bufferBuilder, StreamElement... elements) throws IOException {
    try (BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer()) {
        DataOutputSerializer output = new DataOutputSerializer(128);
        final SerializationDelegate<StreamElement> delegate =
                new SerializationDelegate<>(new StreamElementSerializer<>(LongSerializer.INSTANCE));
        for (StreamElement element : elements) {
            delegate.setInstance(element);
            bufferBuilder.appendAndCommit(RecordWriter.serializeRecord(output, delegate));
        }
        return bufferConsumer.build();
    }
}
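A hypothetical call site for this helper (not shown in the test itself): since the StreamElementSerializer wraps LongSerializer, the elements would be StreamRecords of longs, possibly mixed with watermarks.
// Hypothetical usage sketch; the builder setup mirrors the other snippets on this page.
BufferBuilder bufferBuilder = new BufferBuilder(
        MemorySegmentFactory.allocateUnpooledSegment(1024), FreeingBufferRecycler.INSTANCE);
Buffer buffer = write(bufferBuilder, new StreamRecord<>(42L, 1L), new Watermark(2L));
// buffer now contains both serialized elements back to back
buffer.recycleBuffer();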
use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
the class SpanningRecordSerializationTest method setNextBufferForSerializer.
private static BufferAndSerializerResult setNextBufferForSerializer(
        ByteBuffer serializedRecord, int segmentSize) throws IOException {
    // create a bufferBuilder with some random starting offset to properly test the
    // handling of buffer slices in the deserialization code
    int startingOffset = segmentSize > 2 ? RANDOM.nextInt(segmentSize / 2) : 0;
    BufferBuilder bufferBuilder =
            createFilledBufferBuilder(segmentSize + startingOffset, startingOffset);
    BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();
    bufferConsumer.build().recycleBuffer();
    // Closing the BufferBuilder here makes sure that the Buffer is recovered once the
    // BufferConsumer is closed.
    bufferBuilder.close();
    bufferBuilder.appendAndCommit(serializedRecord);
    return new BufferAndSerializerResult(
            bufferBuilder, bufferConsumer, bufferBuilder.isFull(), !serializedRecord.hasRemaining());
}
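The priming build()-and-recycle above consumes the pre-filled startingOffset bytes, so the consumer's next build() only sees data committed afterwards. An independent sketch of that property, with illustrative sizes, might look like:
// Independent sketch (not test code): a consumer only sees bytes committed after
// everything it has already built, which is what the priming build() exploits.
BufferBuilder builder = createFilledBufferBuilder(64 + 16, 16); // hypothetical sizes
BufferConsumer consumer = builder.createBufferConsumer();
consumer.build().recycleBuffer(); // skip the 16 pre-filled offset bytes
builder.appendAndCommit(ByteBuffer.wrap(new byte[] {1, 2, 3, 4}));
Buffer slice = consumer.build(); // contains exactly the 4 bytes appended above
assert slice.readableBytes() == 4;
slice.recycleBuffer();
consumer.close();
builder.close();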
use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
the class SpanningRecordSerializationTest method appendLeftOverBytes.
private static Buffer appendLeftOverBytes(Buffer buffer, byte[] leftOverBytes) {
    try (BufferBuilder bufferBuilder =
            new BufferBuilder(
                    MemorySegmentFactory.allocateUnpooledSegment(
                            buffer.readableBytes() + leftOverBytes.length),
                    FreeingBufferRecycler.INSTANCE)) {
        try (BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer()) {
            bufferBuilder.append(buffer.getNioBufferReadable());
            bufferBuilder.appendAndCommit(ByteBuffer.wrap(leftOverBytes));
            return bufferConsumer.build();
        }
    }
}
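A hypothetical use of this helper (byte values illustrative only): appending a deserialization remainder to a previously built buffer.
// Hypothetical usage sketch: 'buffer' is some previously built Buffer.
Buffer merged = appendLeftOverBytes(buffer, new byte[] {7, 8, 9});
assert merged.readableBytes() == buffer.readableBytes() + 3;
merged.recycleBuffer();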
use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
the class BoundedBlockingSubpartitionWriteReadTest method writeLongs.
// ------------------------------------------------------------------------
// utils
// ------------------------------------------------------------------------
private static void writeLongs(BoundedBlockingSubpartition partition, long nums) throws IOException {
    final MemorySegment memory = MemorySegmentFactory.allocateUnpooledSegment(BUFFER_SIZE);
    long l = 0;
    while (nums > 0) {
        int pos = 0;
        for (; nums > 0 && pos <= memory.size() - 8; pos += 8) {
            memory.putLongBigEndian(pos, l++);
            nums--;
        }
        partition.add(
                new BufferConsumer(
                        new NetworkBuffer(memory, (ignored) -> {}, Buffer.DataType.DATA_BUFFER),
                        pos));
        // we need to flush after every buffer because the add() contract is that buffers
        // are immediately added and can be filled further after that (for low-latency
        // streaming data exchanges)
        partition.flush();
    }
}
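A hypothetical driver for this utility; the count and the finish() call are assumptions about the surrounding test flow, not part of the method shown.
// Hypothetical usage sketch: write 10,000 longs, then seal the subpartition.
writeLongs(partition, 10_000L);
partition.finish(); // no further buffers may be added once finished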