Use of org.apache.flink.runtime.io.network.buffer.BufferBuilder in project flink by splunk: class BufferWritingResultPartition, method requestNewBroadcastBufferBuilder.
private BufferBuilder requestNewBroadcastBufferBuilder() throws IOException {
    checkInProduceState();
    ensureBroadcastMode();

    // Broadcast output always draws its buffers from subpartition 0 of the pool
    // and caches the builder so subsequent broadcast writes can reuse it.
    final BufferBuilder bufferBuilder = requestNewBufferBuilderFromPool(0);
    broadcastBufferBuilder = bufferBuilder;
    return bufferBuilder;
}
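The builder is requested lazily and cached in the broadcastBufferBuilder field, so every broadcast write reuses it until it fills up. A minimal, self-contained sketch of that get-or-create shape follows; BroadcastBuilderHolder, the Supplier pool, and the ByteBuffer stand-in are hypothetical, not Flink's API.

import java.nio.ByteBuffer;
import java.util.function.Supplier;

class BroadcastBuilderHolder {
    private final Supplier<ByteBuffer> pool; // stand-in for the buffer pool
    private ByteBuffer broadcastBuilder;     // stand-in for BufferBuilder

    BroadcastBuilderHolder(Supplier<ByteBuffer> pool) {
        this.pool = pool;
    }

    // Request lazily and cache: every broadcast write reuses the same builder
    // until it fills up, mirroring the broadcastBufferBuilder field above.
    ByteBuffer getOrRequest() {
        if (broadcastBuilder == null) {
            broadcastBuilder = pool.get(); // like requestNewBufferBuilderFromPool(0)
        }
        return broadcastBuilder;
    }
}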
Use of org.apache.flink.runtime.io.network.buffer.BufferBuilder in project flink by splunk: class BufferWritingResultPartition, method requestNewBufferBuilderFromPool.
private BufferBuilder requestNewBufferBuilderFromPool(int targetSubpartition) throws IOException {
    // Fast path: try to obtain a buffer without blocking.
    BufferBuilder bufferBuilder = bufferPool.requestBufferBuilder(targetSubpartition);
    if (bufferBuilder != null) {
        return bufferBuilder;
    }

    // Slow path: the pool is exhausted, so block until a buffer is recycled
    // and charge the wait to the hard back-pressure metric.
    hardBackPressuredTimeMsPerSecond.markStart();
    try {
        bufferBuilder = bufferPool.requestBufferBuilderBlocking(targetSubpartition);
        hardBackPressuredTimeMsPerSecond.markEnd();
        return bufferBuilder;
    } catch (InterruptedException e) {
        throw new IOException("Interrupted while waiting for buffer");
    }
}
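The method implements a two-phase request: a cheap non-blocking attempt first, then a blocking request whose wait is charged to the hard back-pressure meter. Below is a self-contained sketch of the same pattern with a java.util.concurrent.BlockingQueue standing in for the Flink BufferPool; the class name and the metric field are hypothetical.

import java.io.IOException;
import java.util.concurrent.BlockingQueue;

class BackPressureAwarePool<T> {
    private final BlockingQueue<T> pool;
    private long hardBackPressuredNanos; // crude stand-in for Flink's meter

    BackPressureAwarePool(BlockingQueue<T> pool) {
        this.pool = pool;
    }

    T request() throws IOException {
        // Fast path: non-blocking attempt, no back pressure recorded.
        T item = pool.poll();
        if (item != null) {
            return item;
        }
        // Slow path: block until an item is recycled into the pool and
        // record the wait as hard back-pressured time.
        long start = System.nanoTime();
        try {
            item = pool.take();
            hardBackPressuredNanos += System.nanoTime() - start;
            return item;
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // unlike the snippet, restore the flag
            throw new IOException("Interrupted while waiting for a pooled item", e);
        }
    }
}

Unlike the Flink snippet, the sketch restores the thread's interrupt flag before translating the exception, which keeps the interruption visible to callers further up the stack.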
Use of org.apache.flink.runtime.io.network.buffer.BufferBuilder in project flink by splunk: class BufferWritingResultPartition, method appendUnicastDataForNewRecord.
private BufferBuilder appendUnicastDataForNewRecord(
        final ByteBuffer record, final int targetSubpartition) throws IOException {
    // Note the inclusive upper bound: an index equal to the array length is
    // just as invalid as a negative one.
    if (targetSubpartition < 0 || targetSubpartition >= unicastBufferBuilders.length) {
        throw new ArrayIndexOutOfBoundsException(targetSubpartition);
    }

    BufferBuilder buffer = unicastBufferBuilders[targetSubpartition];
    if (buffer == null) {
        // Lazily request a builder for this subpartition and register it
        // before any data is appended.
        buffer = requestNewUnicastBufferBuilder(targetSubpartition);
        addToSubpartition(buffer, targetSubpartition, 0, record.remaining());
    }

    // May consume only part of the record if the builder fills up; the caller
    // continues with a fresh builder in that case.
    buffer.appendAndCommit(record);

    return buffer;
}
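appendAndCommit copies at most as many bytes as the builder has capacity left, so a large record may be only partially consumed here; the surrounding emit loop keeps requesting fresh builders while record.hasRemaining(). The following self-contained sketch shows a record spanning several fixed-size buffers under that contract; FixedSizeBuilder and SpanningWriteDemo are hypothetical stand-ins, not Flink classes.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

class FixedSizeBuilder {
    private final ByteBuffer buffer;

    FixedSizeBuilder(int capacity) {
        this.buffer = ByteBuffer.allocate(capacity);
    }

    // Copies as many bytes as fit; like BufferBuilder#appendAndCommit,
    // the source may still have bytes remaining afterwards.
    int appendAndCommit(ByteBuffer source) {
        int toCopy = Math.min(buffer.remaining(), source.remaining());
        for (int i = 0; i < toCopy; i++) {
            buffer.put(source.get());
        }
        return toCopy;
    }
}

class SpanningWriteDemo {
    public static void main(String[] args) {
        ByteBuffer record = ByteBuffer.wrap(new byte[100]); // a 100-byte record
        List<FixedSizeBuilder> builders = new ArrayList<>();
        FixedSizeBuilder current = new FixedSizeBuilder(32);
        builders.add(current);
        // Like the emit loop around appendUnicastDataForNewRecord: keep
        // requesting fresh builders until the whole record is consumed.
        while (record.hasRemaining()) {
            current.appendAndCommit(record);
            if (record.hasRemaining()) {
                current = new FixedSizeBuilder(32);
                builders.add(current);
            }
        }
        System.out.println("record spanned " + builders.size() + " buffers"); // prints 4
    }
}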
Use of org.apache.flink.runtime.io.network.buffer.BufferBuilder in project flink by splunk: class HashBasedDataBuffer, method release.
@Override
public void release() {
    // Idempotent: a second release() call is a no-op.
    if (isReleased) {
        return;
    }
    isReleased = true;

    // Close any partially filled builders and drop the references.
    for (int channel = 0; channel < builders.length; ++channel) {
        BufferBuilder builder = builders[channel];
        if (builder != null) {
            builder.close();
            builders[channel] = null;
        }
    }

    // Drain every per-channel queue, closing each buffered consumer.
    for (ArrayDeque<BufferConsumer> buffer : buffers) {
        BufferConsumer consumer = buffer.poll();
        while (consumer != null) {
            consumer.close();
            consumer = buffer.poll();
        }
    }
}
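The release path combines an idempotence guard with destructive draining: poll() removes each consumer as it is closed, so nothing can be closed twice even if release races with readers of the queues. A minimal sketch of that shape, with AutoCloseable standing in for BufferConsumer; the class and its names are illustrative only.

import java.util.ArrayDeque;
import java.util.List;

class ReleasableBuffers {
    private boolean isReleased;
    private final List<ArrayDeque<AutoCloseable>> queues;

    ReleasableBuffers(List<ArrayDeque<AutoCloseable>> queues) {
        this.queues = queues;
    }

    // Idempotent release: the guard flag makes repeated calls harmless, and
    // poll() removes each element so no resource is closed twice.
    void release() {
        if (isReleased) {
            return;
        }
        isReleased = true;
        for (ArrayDeque<AutoCloseable> queue : queues) {
            AutoCloseable closeable = queue.poll();
            while (closeable != null) {
                try {
                    closeable.close();
                } catch (Exception e) {
                    // A real implementation should at least log the failure.
                }
                closeable = queue.poll();
            }
        }
    }
}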
Use of org.apache.flink.runtime.io.network.buffer.BufferBuilder in project flink by splunk: class IteratorWrappingTestSingleInputGate, method wrapIterator.
private IteratorWrappingTestSingleInputGate<T> wrapIterator(MutableObjectIterator<T> iterator)
        throws IOException, InterruptedException {
    inputIterator = iterator;
    serializer = new DataOutputSerializer(128);

    // The input iterator can produce an infinite stream. That's why we have to serialize each
    // record on demand and cannot do it upfront.
    final BufferAndAvailabilityProvider answer =
            new BufferAndAvailabilityProvider() {
                private boolean hasData = inputIterator.next(reuse) != null;

                @Override
                public Optional<BufferAndAvailability> getBufferAvailability() throws IOException {
                    if (hasData) {
                        ByteBuffer serializedRecord = RecordWriter.serializeRecord(serializer, reuse);
                        BufferBuilder bufferBuilder = createBufferBuilder(bufferSize);
                        BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();
                        bufferBuilder.appendAndCommit(serializedRecord);

                        // Look one record ahead so the data type of the *next* buffer is known
                        // when this one is handed out.
                        hasData = inputIterator.next(reuse) != null;
                        final Buffer.DataType nextDataType =
                                hasData ? Buffer.DataType.DATA_BUFFER : Buffer.DataType.EVENT_BUFFER;
                        return Optional.of(
                                new BufferAndAvailability(bufferConsumer.build(), nextDataType, 0, 0));
                    } else {
                        // Iterator exhausted: release the channel and emit the end-of-partition event.
                        inputChannel.setReleased();
                        return Optional.of(
                                new BufferAndAvailability(
                                        EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE, false),
                                        Buffer.DataType.NONE,
                                        0,
                                        0));
                    }
                }
            };

    inputChannel.addBufferAndAvailability(answer);
    inputGate.setInputChannels(inputChannel);

    return this;
}
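The test gate keeps exactly one record of lookahead: hasData is refreshed right after the current record is serialized, which is what lets each buffer be tagged with the data type of the one that will follow it. A stripped-down, self-contained sketch of that lookahead shape; LookaheadProvider and its String records are hypothetical and carry no Flink dependencies.

import java.util.Iterator;
import java.util.List;
import java.util.Optional;

class LookaheadProvider {
    private final Iterator<String> records;
    private String next; // one-record lookahead, like the hasData flag above

    LookaheadProvider(Iterator<String> records) {
        this.records = records;
        this.next = records.hasNext() ? records.next() : null;
    }

    // Hands out one record on demand and reports whether more will follow,
    // mirroring getBufferAvailability(): the lookahead decides the type of
    // the *next* buffer at the time the current one is returned.
    Optional<String> getNext() {
        if (next == null) {
            return Optional.empty(); // like the EndOfPartitionEvent branch
        }
        String current = next;
        next = records.hasNext() ? records.next() : null;
        boolean moreData = next != null;
        return Optional.of(current + (moreData ? " [DATA_BUFFER next]" : " [EVENT_BUFFER next]"));
    }

    public static void main(String[] args) {
        LookaheadProvider p = new LookaheadProvider(List.of("a", "b").iterator());
        Optional<String> r;
        while ((r = p.getNext()).isPresent()) {
            System.out.println(r.get());
        }
    }
}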