Usage of org.apache.flink.runtime.io.network.buffer.BufferBuilder in the Apache Flink project.
From the class IteratorWrappingTestSingleInputGate, method wrapIterator.
/**
 * Wraps the given record iterator so that the test input gate serves its records as
 * network buffers, one record per buffer, followed by an end-of-partition event.
 *
 * @param iterator source of records; may produce an infinite stream
 * @return this gate, for chaining
 * @throws IOException if serialization or buffer handling fails
 * @throws InterruptedException declared for compatibility with channel setup paths
 */
private IteratorWrappingTestSingleInputGate<T> wrapIterator(MutableObjectIterator<T> iterator) throws IOException, InterruptedException {
    inputIterator = iterator;
    serializer = new DataOutputSerializer(128);

    // The input iterator can produce an infinite stream. That's why we have to serialize each
    // record on demand and cannot do it upfront.
    final BufferAndAvailabilityProvider answer = new BufferAndAvailabilityProvider() {

        // One-record lookahead: tells us whether more data follows the buffer
        // currently being handed out.
        private boolean hasData = inputIterator.next(reuse) != null;

        @Override
        public Optional<BufferAndAvailability> getBufferAvailability() throws IOException {
            if (hasData) {
                // Serialize the prefetched record and copy it into a fresh buffer.
                ByteBuffer serializedRecord = RecordWriter.serializeRecord(serializer, reuse);
                BufferBuilder bufferBuilder = createBufferBuilder(bufferSize);
                BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();
                bufferBuilder.appendAndCommit(serializedRecord);

                // Advance the lookahead; the announced type of the NEXT buffer depends on
                // whether another record exists (data) or the end event follows.
                hasData = inputIterator.next(reuse) != null;
                final Buffer.DataType nextDataType = hasData ? Buffer.DataType.DATA_BUFFER : Buffer.DataType.EVENT_BUFFER;
                return Optional.of(new BufferAndAvailability(bufferConsumer.build(), nextDataType, 0, 0));
            } else {
                // Iterator exhausted: release the channel and emit the end-of-partition event.
                inputChannel.setReleased();
                return Optional.of(new BufferAndAvailability(EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE, false), Buffer.DataType.NONE, 0, 0));
            }
        }
    };
    inputChannel.addBufferAndAvailability(answer);
    inputGate.setInputChannels(inputChannel);
    return this;
}
Usage of org.apache.flink.runtime.io.network.buffer.BufferBuilder in the Apache Flink project.
From the class BufferWritingResultPartition, method emitRecord.
@Override
public void emitRecord(ByteBuffer record, int targetSubpartition) throws IOException {
totalWrittenBytes += record.remaining();
BufferBuilder buffer = appendUnicastDataForNewRecord(record, targetSubpartition);
while (record.hasRemaining()) {
// full buffer, partial record
finishUnicastBufferBuilder(targetSubpartition);
buffer = appendUnicastDataForRecordContinuation(record, targetSubpartition);
}
if (buffer.isFull()) {
// full buffer, full record
finishUnicastBufferBuilder(targetSubpartition);
}
// partial buffer, full record
}
Usage of org.apache.flink.runtime.io.network.buffer.BufferBuilder in the Apache Flink project.
From the class HashBasedDataBuffer, method release.
/**
 * Releases all resources held by this data buffer: closes every open builder and
 * drains and closes every queued consumer. Idempotent — repeated calls are no-ops.
 */
@Override
public void release() {
    if (isReleased) {
        return;
    }
    isReleased = true;

    // Close and drop any still-open builders.
    for (int i = 0; i < builders.length; i++) {
        if (builders[i] != null) {
            builders[i].close();
            builders[i] = null;
        }
    }

    // Drain every per-channel queue, closing each pending consumer.
    for (ArrayDeque<BufferConsumer> queue : buffers) {
        BufferConsumer pending;
        while ((pending = queue.poll()) != null) {
            pending.close();
        }
    }
}
Usage of org.apache.flink.runtime.io.network.buffer.BufferBuilder in the Apache Flink project.
From the class HashBasedDataBuffer, method finish.
/**
 * Marks this data buffer as finished: every open builder is sealed and its content
 * handed over to the corresponding channel queue as a consumer.
 *
 * @throws IllegalStateException if the buffer is already full or already finished
 */
@Override
public void finish() {
    checkState(!isFull, "DataBuffer must not be full.");
    checkState(!isFinished, "DataBuffer is already finished.");

    isFull = true;
    isFinished = true;

    for (int i = 0; i < builders.length; i++) {
        BufferBuilder open = builders[i];
        if (open == null) {
            continue;
        }
        // Seal the builder, publish its full content as a consumer, then close it.
        open.finish();
        buffers[i].add(open.createBufferConsumerFromBeginning());
        open.close();
        builders[i] = null;
    }
}
Usage of org.apache.flink.runtime.io.network.buffer.BufferBuilder in the Apache Flink project.
From the class SpanningRecordSerializationTest, method setNextBufferForSerializer.
/**
 * Creates a buffer builder/consumer pair, writes the serialized record into it, and
 * returns the result together with fullness and completion flags.
 *
 * @param serializedRecord record bytes to append; partially consumed if the buffer fills
 * @param segmentSize size of the usable memory segment
 * @return the builder, its consumer, whether the buffer is full, and whether the
 *     record was written completely
 * @throws IOException if buffer handling fails
 */
private static BufferAndSerializerResult setNextBufferForSerializer(ByteBuffer serializedRecord, int segmentSize) throws IOException {
    // Start at a random offset so the deserialization code is exercised against
    // buffer slices rather than only full segments.
    int startingOffset = segmentSize > 2 ? RANDOM.nextInt(segmentSize / 2) : 0;

    BufferBuilder bufferBuilder = createFilledBufferBuilder(segmentSize + startingOffset, startingOffset);
    BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();
    bufferConsumer.build().recycleBuffer();

    // Close the builder before appending to make sure the underlying Buffer is
    // still recovered once the BufferConsumer itself is closed.
    bufferBuilder.close();
    bufferBuilder.appendAndCommit(serializedRecord);

    boolean bufferIsFull = bufferBuilder.isFull();
    boolean recordFullyWritten = !serializedRecord.hasRemaining();
    return new BufferAndSerializerResult(bufferBuilder, bufferConsumer, bufferIsFull, recordFullyWritten);
}
Aggregations