Use of org.apache.flink.runtime.io.network.buffer.BufferBuilder in project flink by apache: class ResultSubpartitionRecoveredStateHandler, method recover.
@Override
public void recover(ResultSubpartitionInfo subpartitionInfo, int oldSubtaskIndex, BufferWithContext<BufferBuilder> bufferWithContext) throws IOException {
    try (BufferBuilder bufferBuilder = bufferWithContext.context) {
        try (BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumerFromBeginning()) {
            bufferBuilder.finish();
            if (bufferConsumer.isDataAvailable()) {
                final List<CheckpointedResultSubpartition> channels = getMappedChannels(subpartitionInfo);
                for (final CheckpointedResultSubpartition channel : channels) {
                    // channel selector is created from the downstream's point of view: the
                    // subtask of downstream = subpartition index of recovered buffer
                    final SubtaskConnectionDescriptor channelSelector =
                            new SubtaskConnectionDescriptor(subpartitionInfo.getSubPartitionIdx(), oldSubtaskIndex);
                    channel.addRecovered(EventSerializer.toBufferConsumer(channelSelector, false));
                    channel.addRecovered(bufferConsumer.copy());
                }
            }
        }
    }
}
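As a side note, here is a minimal sketch of the fan-out step in isolation: each downstream subpartition needs its own BufferConsumer instance, which is why the recovered consumer is copy()-ed per target. The class RecoveredBufferFanOut and its method addToAll are hypothetical names invented for illustration, and the import path for CheckpointedResultSubpartition is assumed; only copy() and addRecovered() are taken from the snippet above.

import java.io.IOException;
import java.util.List;

import org.apache.flink.runtime.io.network.buffer.BufferConsumer;
import org.apache.flink.runtime.io.network.partition.CheckpointedResultSubpartition;

// Hypothetical helper (not part of Flink): fan one recovered buffer out to several subpartitions.
final class RecoveredBufferFanOut {

    static void addToAll(BufferConsumer recovered, List<CheckpointedResultSubpartition> targets)
            throws IOException {
        for (CheckpointedResultSubpartition target : targets) {
            // each target gets its own consumer via copy(); the caller keeps ownership of,
            // and eventually closes, the original consumer
            target.addRecovered(recovered.copy());
        }
    }
}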
Use of org.apache.flink.runtime.io.network.buffer.BufferBuilder in project flink by apache: class ResultSubpartitionRecoveredStateHandler, method getBuffer.
@Override
public BufferWithContext<BufferBuilder> getBuffer(ResultSubpartitionInfo subpartitionInfo) throws IOException, InterruptedException {
    // request the buffer from any mapped subpartition as they all will receive the same buffer
    final List<CheckpointedResultSubpartition> channels = getMappedChannels(subpartitionInfo);
    BufferBuilder bufferBuilder = channels.get(0).requestBufferBuilderBlocking();
    return new BufferWithContext<>(wrap(bufferBuilder), bufferBuilder);
}
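getBuffer declares both IOException and InterruptedException because requestBufferBuilderBlocking() may block until a buffer becomes available. Below is a hedged sketch of how a caller that cannot propagate InterruptedException might wrap that call; BlockingBufferRequest and requestOrFail are hypothetical names, and the import path for CheckpointedResultSubpartition is assumed.

import java.io.IOException;

import org.apache.flink.runtime.io.network.buffer.BufferBuilder;
import org.apache.flink.runtime.io.network.partition.CheckpointedResultSubpartition;

// Hypothetical helper (not part of Flink): request a pooled BufferBuilder, translating interruption.
final class BlockingBufferRequest {

    static BufferBuilder requestOrFail(CheckpointedResultSubpartition subpartition) throws IOException {
        try {
            // may block until the subpartition can hand out a free buffer
            return subpartition.requestBufferBuilderBlocking();
        } catch (InterruptedException e) {
            // restore the interrupt flag before converting to a checked IOException
            Thread.currentThread().interrupt();
            throw new IOException("Interrupted while waiting for a BufferBuilder", e);
        }
    }
}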
Use of org.apache.flink.runtime.io.network.buffer.BufferBuilder in project flink by apache: class HashBasedDataBuffer, method writeRecord.
private void writeRecord(ByteBuffer source, int targetChannel) throws IOException {
    do {
        BufferBuilder builder = builders[targetChannel];
        if (builder == null) {
            builder = requestBufferFromPool();
            if (builder == null) {
                break;
            }
            ++numBuffersOccupied;
            builders[targetChannel] = builder;
        }

        builder.append(source);
        if (builder.isFull()) {
            builder.finish();
            buffers[targetChannel].add(builder.createBufferConsumerFromBeginning());
            builder.close();
            builders[targetChannel] = null;
        }
    } while (source.hasRemaining());
}
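The writeRecord loop shows the core BufferBuilder life cycle: append() until isFull(), then finish(), expose the written data as a BufferConsumer, and close() the builder. A simplified, hedged sketch of that loop follows; RecordSpiller, spill, and the Supplier-based pool stand-in are hypothetical, and unlike writeRecord above this sketch finishes the last (possibly only partially filled) builder instead of keeping it open for the next record.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.function.Supplier;

import org.apache.flink.runtime.io.network.buffer.BufferBuilder;
import org.apache.flink.runtime.io.network.buffer.BufferConsumer;

// Hypothetical helper (not part of Flink): spill one record across as many buffers as needed.
final class RecordSpiller {

    static List<BufferConsumer> spill(ByteBuffer record, Supplier<BufferBuilder> newBuilder) {
        List<BufferConsumer> finished = new ArrayList<>();
        while (record.hasRemaining()) {
            BufferBuilder builder = newBuilder.get();
            // append() copies as much of the record as fits into the builder's segment
            builder.append(record);
            // finish() seals the builder; createBufferConsumerFromBeginning() exposes all written data
            builder.finish();
            finished.add(builder.createBufferConsumerFromBeginning());
            builder.close();
        }
        return finished;
    }
}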
Use of org.apache.flink.runtime.io.network.buffer.BufferBuilder in project flink by apache: class HashBasedDataBuffer, method writeEvent.
private void writeEvent(ByteBuffer source, int targetChannel, Buffer.DataType dataType) {
    BufferBuilder builder = builders[targetChannel];
    if (builder != null) {
        builder.finish();
        buffers[targetChannel].add(builder.createBufferConsumerFromBeginning());
        builder.close();
        builders[targetChannel] = null;
    }

    MemorySegment segment = MemorySegmentFactory.allocateUnpooledOffHeapMemory(source.remaining());
    segment.put(0, source, segment.size());
    BufferConsumer consumer = new BufferConsumer(new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE, dataType), segment.size());
    buffers[targetChannel].add(consumer);
}
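writeEvent deliberately bypasses the pooled BufferBuilder path: any open builder for the channel is sealed first, and the event bytes are then wrapped in an exactly-sized, unpooled segment. Here is a minimal sketch of just that wrapping step, using only the calls already visible in the snippet; the class EventBufferFactory and method wrapEvent are hypothetical names.

import java.nio.ByteBuffer;

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;
import org.apache.flink.runtime.io.network.buffer.Buffer;
import org.apache.flink.runtime.io.network.buffer.BufferConsumer;
import org.apache.flink.runtime.io.network.buffer.FreeingBufferRecycler;
import org.apache.flink.runtime.io.network.buffer.NetworkBuffer;

// Hypothetical helper (not part of Flink): wrap already-serialized event bytes into a BufferConsumer.
final class EventBufferFactory {

    static BufferConsumer wrapEvent(ByteBuffer serializedEvent, Buffer.DataType dataType) {
        // one exactly-sized, unpooled segment per event, freed by FreeingBufferRecycler when recycled
        MemorySegment segment = MemorySegmentFactory.allocateUnpooledOffHeapMemory(serializedEvent.remaining());
        segment.put(0, serializedEvent, segment.size());
        return new BufferConsumer(
                new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE, dataType), segment.size());
    }
}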
Use of org.apache.flink.runtime.io.network.buffer.BufferBuilder in project flink by apache: class BufferWritingResultPartition, method broadcastRecord.
@Override
public void broadcastRecord(ByteBuffer record) throws IOException {
    totalWrittenBytes += ((long) record.remaining() * numSubpartitions);

    BufferBuilder buffer = appendBroadcastDataForNewRecord(record);

    while (record.hasRemaining()) {
        // full buffer, partial record
        finishBroadcastBufferBuilder();
        buffer = appendBroadcastDataForRecordContinuation(record);
    }

    if (buffer.isFull()) {
        // full buffer, full record
        finishBroadcastBufferBuilder();
    }

    // partial buffer, full record
}
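The comments in broadcastRecord mark the three possible outcomes of appending a record to a BufferBuilder. Below is a hedged sketch of that case analysis on a single builder; RecordAppendOutcome and appendAndCheckFull are hypothetical names, and appendBroadcastDataForNewRecord / finishBroadcastBufferBuilder from the snippet are private helpers of BufferWritingResultPartition, so they are not used here.

import java.nio.ByteBuffer;

import org.apache.flink.runtime.io.network.buffer.BufferBuilder;

// Hypothetical helper (not part of Flink): classify the outcome of appending one record.
final class RecordAppendOutcome {

    static boolean appendAndCheckFull(BufferBuilder builder, ByteBuffer record) {
        builder.append(record);
        // record.hasRemaining() -> full buffer, partial record: the caller needs another builder
        // builder.isFull()      -> full buffer, full record:   the caller finishes the builder
        // otherwise             -> partial buffer, full record: the builder stays open for the next record
        return builder.isFull();
    }
}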