Use of org.apache.flink.runtime.io.network.buffer.BufferConsumerWithPartialRecordLength in project flink by apache.
Class PipelinedSubpartition, method processPriorityBuffer.
private boolean processPriorityBuffer(BufferConsumer bufferConsumer, int partialRecordLength) {
    buffers.addPriorityElement(new BufferConsumerWithPartialRecordLength(bufferConsumer, partialRecordLength));
    final int numPriorityElements = buffers.getNumPriorityElements();

    CheckpointBarrier barrier = parseCheckpointBarrier(bufferConsumer);
    if (barrier != null) {
        checkState(
                barrier.getCheckpointOptions().isUnalignedCheckpoint(),
                "Only unaligned checkpoints should be priority events");
        final Iterator<BufferConsumerWithPartialRecordLength> iterator = buffers.iterator();
        Iterators.advance(iterator, numPriorityElements);
        List<Buffer> inflightBuffers = new ArrayList<>();
        while (iterator.hasNext()) {
            BufferConsumer buffer = iterator.next().getBufferConsumer();
            if (buffer.isBuffer()) {
                try (BufferConsumer bc = buffer.copy()) {
                    inflightBuffers.add(bc.build());
                }
            }
        }
        if (!inflightBuffers.isEmpty()) {
            channelStateWriter.addOutputData(
                    barrier.getId(),
                    subpartitionInfo,
                    ChannelStateWriter.SEQUENCE_NUMBER_UNKNOWN,
                    inflightBuffers.toArray(new Buffer[0]));
        }
    }
    // if subpartition is blocked then downstream doesn't expect any notifications
    return numPriorityElements == 1 && !isBlocked;
}
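The method above snapshots the in-flight buffers that an unaligned checkpoint barrier overtakes: the barrier is added as a priority element at the head of the queue, and everything sitting behind the priority elements is handed to the channel state writer. The following self-contained sketch illustrates that skip-then-snapshot pattern using only plain java.util collections; the class name and data in it are hypothetical stand-ins, not Flink's PrioritizedDeque or ChannelStateWriter API.

import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.Iterator;
import java.util.List;

// Hypothetical stand-in for the prioritized buffer queue: priority elements
// (e.g. unaligned checkpoint barriers) sit at the head, regular in-flight
// data sits behind them.
public class PrioritySkipSketch {

    public static void main(String[] args) {
        Deque<String> buffers = new ArrayDeque<>();
        buffers.add("record-1");
        buffers.add("record-2");
        buffers.addFirst("barrier");      // priority element jumps the queue
        int numPriorityElements = 1;      // tracked separately, like getNumPriorityElements()

        // Skip the priority elements, then collect what remains: these are the
        // in-flight buffers the barrier overtook and that must be persisted.
        Iterator<String> it = buffers.iterator();
        for (int i = 0; i < numPriorityElements && it.hasNext(); i++) {
            it.next();
        }
        List<String> inflight = new ArrayList<>();
        it.forEachRemaining(inflight::add);

        System.out.println(inflight);     // prints [record-1, record-2]
    }
}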
Use of org.apache.flink.runtime.io.network.buffer.BufferConsumerWithPartialRecordLength in project flink by apache.
Class PipelinedSubpartition, method release.
@Override
public void release() {
    // view reference accessible outside the lock, but assigned inside the locked scope
    final PipelinedSubpartitionView view;

    synchronized (buffers) {
        if (isReleased) {
            return;
        }

        // Release all available buffers
        for (BufferConsumerWithPartialRecordLength buffer : buffers) {
            buffer.getBufferConsumer().close();
        }
        buffers.clear();

        view = readView;
        readView = null;

        // Make sure that no further buffers are added to the subpartition
        isReleased = true;
    }

    LOG.debug("{}: Released {}.", parent.getOwningTaskName(), this);

    if (view != null) {
        view.releaseAllResources();
    }
}
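release() captures the read view while holding the buffers lock but only calls releaseAllResources() after leaving the synchronized block, so the view's release callback never runs while the subpartition lock is held. A minimal, hypothetical sketch of that capture-under-lock, call-out-after-lock pattern (all names and types below are placeholders, not Flink classes):

import java.util.ArrayDeque;
import java.util.Deque;

// Hypothetical sketch: resources are closed and the "view" reference is captured
// while holding the lock, but the potentially re-entrant callback
// (releaseAllResources) is invoked only after the lock has been released.
public class ReleaseOutsideLockSketch {

    interface View { void releaseAllResources(); }

    private final Deque<AutoCloseable> buffers = new ArrayDeque<>();
    private final Object lock = new Object();
    private View readView;
    private boolean isReleased;

    void release() throws Exception {
        final View view;
        synchronized (lock) {
            if (isReleased) {
                return;
            }
            for (AutoCloseable buffer : buffers) {
                buffer.close();             // close all queued buffers
            }
            buffers.clear();
            view = readView;                // capture the reference under the lock
            readView = null;
            isReleased = true;              // no further buffers may be added
        }
        if (view != null) {
            view.releaseAllResources();     // call out without holding the lock
        }
    }
}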
Use of org.apache.flink.runtime.io.network.buffer.BufferConsumerWithPartialRecordLength in project flink by apache.
Class PipelinedSubpartition, method pollBuffer.
@Nullable
BufferAndBacklog pollBuffer() {
    synchronized (buffers) {
        if (isBlocked) {
            return null;
        }

        Buffer buffer = null;

        if (buffers.isEmpty()) {
            flushRequested = false;
        }

        while (!buffers.isEmpty()) {
            BufferConsumerWithPartialRecordLength bufferConsumerWithPartialRecordLength = buffers.peek();
            BufferConsumer bufferConsumer = bufferConsumerWithPartialRecordLength.getBufferConsumer();

            buffer = buildSliceBuffer(bufferConsumerWithPartialRecordLength);

            checkState(
                    bufferConsumer.isFinished() || buffers.size() == 1,
                    "When there are multiple buffers, an unfinished bufferConsumer can not be at the head of the buffers queue.");

            if (buffers.size() == 1) {
                // turn off flushRequested flag if we drained all of the available data
                flushRequested = false;
            }

            if (bufferConsumer.isFinished()) {
                requireNonNull(buffers.poll()).getBufferConsumer().close();
                decreaseBuffersInBacklogUnsafe(bufferConsumer.isBuffer());
            }

            // 2. in approximate recovery mode, a partial record takes a whole buffer builder
            if (receiverExclusiveBuffersPerChannel == 0 && bufferConsumer.isFinished()) {
                break;
            }

            if (buffer.readableBytes() > 0) {
                break;
            }

            buffer.recycleBuffer();
            buffer = null;
            if (!bufferConsumer.isFinished()) {
                break;
            }
        }

        if (buffer == null) {
            return null;
        }

        if (buffer.getDataType().isBlockingUpstream()) {
            isBlocked = true;
        }

        updateStatistics(buffer);
        // Do not report last remaining buffer on buffers as available to read (assuming it's unfinished).
        // It will be reported for reading either on flush or when the number of buffers in the queue
        // will be 2 or more.
        NetworkActionsLogger.traceOutput(
                "PipelinedSubpartition#pollBuffer", buffer, parent.getOwningTaskName(), subpartitionInfo);
        return new BufferAndBacklog(
                buffer,
                getBuffersInBacklogUnsafe(),
                isDataAvailableUnsafe() ? getNextBufferTypeUnsafe() : Buffer.DataType.NONE,
                sequenceNumber++);
    }
}
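pollBuffer() repeatedly peeks at the head of the queue, builds a slice from whatever data is currently readable, removes the consumer only once it is finished, and discards empty slices until it either produces data or reaches an unfinished head that may still receive more data. The sketch below reproduces that loop in a stripped-down, hypothetical form (Strings standing in for buffers, a toy Consumer standing in for BufferConsumer; none of these are Flink types).

import java.util.ArrayDeque;
import java.util.Deque;

// Hypothetical simplification of the poll loop: keep consuming from the head of
// the queue until either a non-empty slice is produced or the head is still
// unfinished (i.e. more data may be appended to it later).
public class PollLoopSketch {

    static final class Consumer {
        private final StringBuilder data;
        private final boolean finished;
        private int readPos;

        Consumer(String data, boolean finished) {
            this.data = new StringBuilder(data);
            this.finished = finished;
        }

        String buildSlice() {                      // read whatever is currently available
            String slice = data.substring(readPos);
            readPos = data.length();
            return slice;
        }

        boolean isFinished() { return finished; }
    }

    static String poll(Deque<Consumer> buffers) {
        String slice = null;
        while (!buffers.isEmpty()) {
            Consumer head = buffers.peek();
            slice = head.buildSlice();
            if (head.isFinished()) {
                buffers.poll();                    // fully consumed: drop it from the queue
            }
            if (!slice.isEmpty()) {
                break;                             // hand a non-empty slice to the reader
            }
            slice = null;
            if (!head.isFinished()) {
                break;                             // head may still grow, try again later
            }
        }
        return slice;
    }

    public static void main(String[] args) {
        Deque<Consumer> buffers = new ArrayDeque<>();
        buffers.add(new Consumer("", true));       // empty, finished buffer is skipped
        buffers.add(new Consumer("payload", true));
        System.out.println(poll(buffers));         // prints "payload"
    }
}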