Use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
From the class SubpartitionTestBase, the method testAddAfterRelease:
@Test
public void testAddAfterRelease() throws Exception {
    final ResultSubpartition subpartition = createSubpartition();
    try {
        subpartition.release();

        BufferConsumer bufferConsumer = createFilledFinishedBufferConsumer(4096);
        assertEquals(-1, subpartition.add(bufferConsumer));
        assertTrue(bufferConsumer.isRecycled());
    } finally {
        if (subpartition != null) {
            subpartition.release();
        }
    }
}
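For orientation, here is a minimal sketch of the normal BufferBuilder/BufferConsumer handshake that the rejected add() above short-circuits. It assumes the createBufferBuilder helper from BufferBuilderTestUtils, which such tests typically import statically; everything else uses the public buffer API.

// Hedged sketch: produce data through a BufferBuilder and read it back through the
// matching BufferConsumer. createBufferBuilder is assumed to come from
// BufferBuilderTestUtils, like createFilledFinishedBufferConsumer in the test above.
BufferBuilder bufferBuilder = createBufferBuilder(4096);
BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();

bufferBuilder.appendAndCommit(ByteBuffer.wrap(new byte[1024])); // producer side
bufferBuilder.finish();                                         // no more data will be appended

Buffer buffer = bufferConsumer.build(); // consumer side: a read-only slice of the committed bytes
// ... read the buffer, then recycle it and close the consumer ...
buffer.recycleBuffer();
bufferConsumer.close();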
Use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
From the class IteratorWrappingTestSingleInputGate, the method wrapIterator:
private IteratorWrappingTestSingleInputGate<T> wrapIterator(MutableObjectIterator<T> iterator) throws IOException, InterruptedException {
    inputIterator = iterator;
    serializer = new DataOutputSerializer(128);

    // The input iterator can produce an infinite stream. That's why we have to serialize each
    // record on demand and cannot do it upfront.
    final BufferAndAvailabilityProvider answer = new BufferAndAvailabilityProvider() {

        private boolean hasData = inputIterator.next(reuse) != null;

        @Override
        public Optional<BufferAndAvailability> getBufferAvailability() throws IOException {
            if (hasData) {
                ByteBuffer serializedRecord = RecordWriter.serializeRecord(serializer, reuse);
                BufferBuilder bufferBuilder = createBufferBuilder(bufferSize);
                BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();
                bufferBuilder.appendAndCommit(serializedRecord);

                hasData = inputIterator.next(reuse) != null;

                // Announce whether more records or only the end-of-partition event will follow.
                final Buffer.DataType nextDataType =
                        hasData ? Buffer.DataType.DATA_BUFFER : Buffer.DataType.EVENT_BUFFER;
                return Optional.of(new BufferAndAvailability(bufferConsumer.build(), nextDataType, 0, 0));
            } else {
                inputChannel.setReleased();
                return Optional.of(new BufferAndAvailability(
                        EventSerializer.toBuffer(EndOfPartitionEvent.INSTANCE, false),
                        Buffer.DataType.NONE, 0, 0));
            }
        }
    };

    inputChannel.addBufferAndAvailability(answer);
    inputGate.setInputChannels(inputChannel);

    return this;
}
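The serialize-then-append step above can be looked at in isolation. The following hedged sketch shows roughly what happens per record; record stands for any IOReadableWritable (the reuse object above), and the sketch skips the length header that RecordWriter.serializeRecord additionally writes for the downstream record deserializer.

// Hedged, simplified sketch of serializing one record into a fresh network buffer.
serializer.clear();
record.write(serializer);                        // the IOReadableWritable writes itself into the serializer
ByteBuffer serializedRecord = serializer.wrapAsByteBuffer();

BufferBuilder bufferBuilder = createBufferBuilder(bufferSize);
BufferConsumer bufferConsumer = bufferBuilder.createBufferConsumer();
bufferBuilder.appendAndCommit(serializedRecord); // copy the serialized bytes into the buffer
bufferBuilder.finish();

Buffer buffer = bufferConsumer.build();          // what the test channel hands to the input gate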
Use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
From the class TestSubpartitionProducer, the method call:
@Override
public Boolean call() throws Exception {
    boolean success = false;
    try {
        BufferAndChannel bufferAndChannel;

        while ((bufferAndChannel = source.getNextBuffer()) != null) {
            MemorySegment segment = MemorySegmentFactory.wrap(bufferAndChannel.getBuffer());
            subpartition.add(new BufferConsumer(
                    new NetworkBuffer(segment, MemorySegment::free, Buffer.DataType.DATA_BUFFER),
                    segment.size()));

            // Check for interrupted flag after adding data to prevent resource leaks
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }

            if (isSlowProducer) {
                Thread.sleep(random.nextInt(MAX_SLEEP_TIME_MS + 1));
            }
        }

        subpartition.finish();
        success = true;
        return true;
    } finally {
        if (!success) {
            subpartition.release();
        }
    }
}
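The interesting part for BufferConsumer is the wrapping step inside the loop: a pre-filled byte[] from the source becomes a MemorySegment, then a NetworkBuffer, then a fixed-size BufferConsumer. A hedged sketch of that step on its own, with the byte[] content purely illustrative:

byte[] bytes = {1, 2, 3, 4};
MemorySegment segment = MemorySegmentFactory.wrap(bytes);
// MemorySegment::free acts as the recycler: the segment is freed once the last reference is gone.
Buffer networkBuffer = new NetworkBuffer(segment, MemorySegment::free, Buffer.DataType.DATA_BUFFER);
BufferConsumer consumer = new BufferConsumer(networkBuffer, segment.size());

Buffer readView = consumer.build(); // exposes the already-written bytes to the reader
readView.recycleBuffer();
consumer.close();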
Use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
From the class HashBasedDataBuffer, the method release:
@Override
public void release() {
    if (isReleased) {
        return;
    }
    isReleased = true;

    // Close any half-filled builders to release their backing segments.
    for (int channel = 0; channel < builders.length; ++channel) {
        BufferBuilder builder = builders[channel];
        if (builder != null) {
            builder.close();
            builders[channel] = null;
        }
    }

    // Drain and close the finished consumers queued for each channel.
    for (ArrayDeque<BufferConsumer> buffer : buffers) {
        BufferConsumer consumer = buffer.poll();
        while (consumer != null) {
            consumer.close();
            consumer = buffer.poll();
        }
    }
}
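Closing every queued consumer matters because BufferConsumer.close() releases the reference to the underlying buffer and hands its segment back to the recycler. A hedged illustration with a counting recycler; the lambda recycler and the unpooled segment are illustrative assumptions, not part of HashBasedDataBuffer:

AtomicInteger recycled = new AtomicInteger();
BufferRecycler countingRecycler = seg -> recycled.incrementAndGet();

MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(32 * 1024);
BufferConsumer consumer = new BufferConsumer(
        new NetworkBuffer(segment, countingRecycler, Buffer.DataType.DATA_BUFFER), 0);

consumer.close();              // same call release() makes for every queued consumer
// recycled.get() == 1: the segment has been returned to the recycler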
Use of org.apache.flink.runtime.io.network.buffer.BufferConsumer in project flink by apache.
From the class HashBasedDataBuffer, the method getNextBuffer:
@Override
public BufferWithChannel getNextBuffer(MemorySegment transitBuffer) {
    checkState(isFull, "Sort buffer is not ready to be read.");
    checkState(!isReleased, "Sort buffer is already released.");

    BufferWithChannel buffer = null;
    if (!hasRemaining() || readOrderIndex >= subpartitionReadOrder.length) {
        return null;
    }

    int targetChannel = subpartitionReadOrder[readOrderIndex];
    while (buffer == null) {
        BufferConsumer consumer = buffers[targetChannel].poll();
        if (consumer != null) {
            buffer = new BufferWithChannel(consumer.build(), targetChannel);
            numBuffersOccupied -= buffer.getBuffer().isBuffer() ? 1 : 0;
            numTotalBytesRead += buffer.getBuffer().readableBytes();
            consumer.close();
        } else {
            // The current channel is drained; advance to the next one in the read order.
            if (++readOrderIndex >= subpartitionReadOrder.length) {
                break;
            }
            targetChannel = subpartitionReadOrder[readOrderIndex];
        }
    }
    return buffer;
}
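A typical caller drains the data buffer with a loop like the hedged sketch below; dataBuffer, the transit segment size, and the BufferWithChannel accessors getBuffer()/getChannelIndex() are assumptions about the surrounding sort-shuffle code, and the hash-based implementation above does not actually copy into the transit segment.

MemorySegment transit = MemorySegmentFactory.allocateUnpooledSegment(32 * 1024);
BufferWithChannel next;
while ((next = dataBuffer.getNextBuffer(transit)) != null) {
    Buffer buffer = next.getBuffer();
    int channel = next.getChannelIndex();
    try {
        // ... write the buffer to the spill file or subpartition for this channel ...
    } finally {
        buffer.recycleBuffer();
    }
}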