Use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
The class ResultSubpartitionRecoveredStateHandler, method recover.
@Override
public void recover(ResultSubpartitionInfo subpartitionInfo, int oldSubtaskIndex, BufferWithContext<BufferBuilder> bufferWithContext) throws IOException {
    try (BufferBuilder bufferBuilder = bufferWithContext.context) {
        try (BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumerFromBeginning()) {
            // finish writing so that all recovered data becomes visible to the consumer
            bufferBuilder.finish();
            if (bufferConsumer.isDataAvailable()) {
                final List<CheckpointedResultSubpartition> channels = getMappedChannels(subpartitionInfo);
                for (final CheckpointedResultSubpartition channel : channels) {
                    // channel selector is created from the downstream's point of view: the
                    // subtask of downstream = subpartition index of recovered buffer
                    final SubtaskConnectionDescriptor channelSelector = new SubtaskConnectionDescriptor(subpartitionInfo.getSubPartitionIdx(), oldSubtaskIndex);
                    channel.addRecovered(EventSerializer.toBufferConsumer(channelSelector, false));
                    // each mapped channel receives its own copy of the recovered data
                    channel.addRecovered(bufferConsumer.copy());
                }
            }
        }
    }
}
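For illustration, here is a minimal sketch of the selector-event step used above: a SubtaskConnectionDescriptor (with hypothetical indices 0 and 1) is serialized into an event BufferConsumer via EventSerializer.toBufferConsumer, and the resulting buffer is an event rather than record data. This is an assumption-labeled sketch, not part of the Flink sources shown here.

import org.apache.flink.runtime.io.network.api.SubtaskConnectionDescriptor;
import org.apache.flink.runtime.io.network.api.serialization.EventSerializer;
import org.apache.flink.runtime.io.network.buffer.Buffer;
import org.apache.flink.runtime.io.network.buffer.BufferConsumer;

import java.io.IOException;

public class SelectorEventSketch {
    public static void main(String[] args) throws IOException {
        // hypothetical subpartition index 0 and old subtask index 1
        SubtaskConnectionDescriptor selector = new SubtaskConnectionDescriptor(0, 1);
        try (BufferConsumer eventConsumer = EventSerializer.toBufferConsumer(selector, false)) {
            Buffer buffer = eventConsumer.build();
            // a serialized event is not record data, so isBuffer() is expected to be false
            System.out.println(buffer.isBuffer());
            buffer.recycleBuffer();
        }
    }
}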
Use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
The class PipelinedSubpartition, method processPriorityBuffer.
private boolean processPriorityBuffer(BufferConsumer bufferConsumer, int partialRecordLength) {
    buffers.addPriorityElement(new BufferConsumerWithPartialRecordLength(bufferConsumer, partialRecordLength));
    final int numPriorityElements = buffers.getNumPriorityElements();

    CheckpointBarrier barrier = parseCheckpointBarrier(bufferConsumer);
    if (barrier != null) {
        checkState(barrier.getCheckpointOptions().isUnalignedCheckpoint(), "Only unaligned checkpoints should be priority events");

        // skip the priority elements and snapshot the remaining in-flight data buffers
        final Iterator<BufferConsumerWithPartialRecordLength> iterator = buffers.iterator();
        Iterators.advance(iterator, numPriorityElements);
        List<Buffer> inflightBuffers = new ArrayList<>();
        while (iterator.hasNext()) {
            BufferConsumer buffer = iterator.next().getBufferConsumer();
            if (buffer.isBuffer()) {
                try (BufferConsumer bc = buffer.copy()) {
                    inflightBuffers.add(bc.build());
                }
            }
        }
        if (!inflightBuffers.isEmpty()) {
            channelStateWriter.addOutputData(barrier.getId(), subpartitionInfo, ChannelStateWriter.SEQUENCE_NUMBER_UNKNOWN, inflightBuffers.toArray(new Buffer[0]));
        }
    }
    // if the subpartition is blocked, the downstream doesn't expect any notifications
    return numPriorityElements == 1 && !isBlocked;
}
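The copy()-then-build() step above materializes each in-flight data buffer for the channel state writer without consuming the live consumer. A minimal, hypothetical helper capturing just that pattern (the helper name and ownership comment are mine, not Flink's):

import org.apache.flink.runtime.io.network.buffer.Buffer;
import org.apache.flink.runtime.io.network.buffer.BufferConsumer;

final class BufferSnapshots {
    // Hypothetical helper: materialize the currently readable data of a consumer
    // via an independent copy, leaving the original consumer untouched.
    // The caller owns the returned Buffer and must call recycleBuffer() on it.
    static Buffer snapshot(BufferConsumer consumer) {
        try (BufferConsumer copy = consumer.copy()) {
            return copy.build();
        }
    }

    private BufferSnapshots() {}
}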
Use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
The class PipelinedSubpartition, method parseCheckpointBarrier.
@Nullable
private CheckpointBarrier parseCheckpointBarrier(BufferConsumer bufferConsumer) {
    CheckpointBarrier barrier;
    // work on a copy so that peeking at the event does not consume the original consumer
    try (BufferConsumer bc = bufferConsumer.copy()) {
        Buffer buffer = bc.build();
        try {
            final AbstractEvent event = EventSerializer.fromBuffer(buffer, getClass().getClassLoader());
            barrier = event instanceof CheckpointBarrier ? (CheckpointBarrier) event : null;
        } catch (IOException e) {
            throw new IllegalStateException("Should always be able to deserialize in-memory event", e);
        } finally {
            buffer.recycleBuffer();
        }
    }
    return barrier;
}
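parseCheckpointBarrier deserializes the buffered event with EventSerializer.fromBuffer. A hedged round-trip sketch of that serialize/deserialize pattern, using EndOfPartitionEvent purely for illustration (class name and choice of event are mine):

import org.apache.flink.runtime.event.AbstractEvent;
import org.apache.flink.runtime.io.network.api.EndOfPartitionEvent;
import org.apache.flink.runtime.io.network.api.serialization.EventSerializer;
import org.apache.flink.runtime.io.network.buffer.Buffer;
import org.apache.flink.runtime.io.network.buffer.BufferConsumer;

import java.io.IOException;

public class EventRoundTripSketch {
    public static void main(String[] args) throws IOException {
        try (BufferConsumer bc = EventSerializer.toBufferConsumer(EndOfPartitionEvent.INSTANCE, false)) {
            Buffer buffer = bc.build();
            try {
                AbstractEvent event = EventSerializer.fromBuffer(buffer, EventRoundTripSketch.class.getClassLoader());
                // expected to print true: the event survives the round trip
                System.out.println(event instanceof EndOfPartitionEvent);
            } finally {
                buffer.recycleBuffer();
            }
        }
    }
}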
Use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
The class HashBasedDataBuffer, method writeEvent.
private void writeEvent(ByteBuffer source, int targetChannel, Buffer.DataType dataType) {
    // finish and enqueue any partially filled builder for this channel first
    BufferBuilder builder = builders[targetChannel];
    if (builder != null) {
        builder.finish();
        buffers[targetChannel].add(builder.createBufferConsumerFromBeginning());
        builder.close();
        builders[targetChannel] = null;
    }

    // copy the serialized event into an unpooled off-heap segment and enqueue it as one buffer
    MemorySegment segment = MemorySegmentFactory.allocateUnpooledOffHeapMemory(source.remaining());
    segment.put(0, source, segment.size());
    BufferConsumer consumer = new BufferConsumer(new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE, dataType), segment.size());
    buffers[targetChannel].add(consumer);
}
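The core of writeEvent is wrapping the serialized event bytes in an unpooled off-heap segment and exposing them as a single, already finished BufferConsumer. A hypothetical standalone helper mirroring just that wrapping step (the helper and class names are mine):

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;
import org.apache.flink.runtime.io.network.buffer.Buffer;
import org.apache.flink.runtime.io.network.buffer.BufferConsumer;
import org.apache.flink.runtime.io.network.buffer.FreeingBufferRecycler;
import org.apache.flink.runtime.io.network.buffer.NetworkBuffer;

import java.nio.ByteBuffer;

final class EventBufferWrapper {
    // Hypothetical helper: copy the remaining bytes of `source` into a fresh
    // off-heap segment and hand them out as one ready-to-read BufferConsumer.
    static BufferConsumer wrapAsConsumer(ByteBuffer source, Buffer.DataType dataType) {
        MemorySegment segment = MemorySegmentFactory.allocateUnpooledOffHeapMemory(source.remaining());
        segment.put(0, source, segment.size());
        return new BufferConsumer(new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE, dataType), segment.size());
    }

    private EventBufferWrapper() {}
}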
Use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
The class EventSerializer, method toBufferConsumer.
public static BufferConsumer toBufferConsumer(AbstractEvent event, boolean hasPriority) throws IOException {
    final ByteBuffer serializedEvent = EventSerializer.toSerializedEvent(event);
    MemorySegment data = MemorySegmentFactory.wrap(serializedEvent.array());
    return new BufferConsumer(new NetworkBuffer(data, FreeingBufferRecycler.INSTANCE, getDataType(event, hasPriority)), data.size());
}
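toBufferConsumer picks the resulting buffer's data type via getDataType(event, hasPriority), which is how priority (unaligned-checkpoint) events are told apart from regular events downstream. A hedged sketch of that difference; the exact DataType names printed are an assumption, and the class name is mine:

import org.apache.flink.runtime.io.network.api.EndOfPartitionEvent;
import org.apache.flink.runtime.io.network.api.serialization.EventSerializer;
import org.apache.flink.runtime.io.network.buffer.Buffer;
import org.apache.flink.runtime.io.network.buffer.BufferConsumer;

import java.io.IOException;

public class EventDataTypeSketch {
    public static void main(String[] args) throws IOException {
        try (BufferConsumer regular = EventSerializer.toBufferConsumer(EndOfPartitionEvent.INSTANCE, false);
             BufferConsumer priority = EventSerializer.toBufferConsumer(EndOfPartitionEvent.INSTANCE, true)) {
            Buffer a = regular.build();
            Buffer b = priority.build();
            // assumption: the non-priority call yields a plain event data type,
            // the priority call a prioritized event data type
            System.out.println(a.getDataType());
            System.out.println(b.getDataType());
            a.recycleBuffer();
            b.recycleBuffer();
        }
    }
}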