Use of org.apache.flink.runtime.io.network.buffer.NetworkBuffer in project flink by apache.
From the class SyncChannelStateWriteRequestExecutor, method testBuffersRecycledOnError().
@Test(expected = TestException.class)
public void testBuffersRecycledOnError() throws Exception {
    // unwrappingError unwraps the failure produced by the failing worker so that the
    // TestException expected above reaches the JUnit runner
    unwrappingError(TestException.class, () -> {
        NetworkBuffer buffer = getBuffer();
        try (ChannelStateWriterImpl writer =
                new ChannelStateWriterImpl(TASK_NAME, new ConcurrentHashMap<>(), failingWorker(), 5)) {
            writer.open();
            callAddInputData(writer, buffer);
        } finally {
            // the buffer must be recycled even though the worker fails
            assertTrue(buffer.isRecycled());
        }
    });
}
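The recycling guarantee this test checks can also be observed directly on a NetworkBuffer, without the channel state writer. Below is a minimal sketch, not taken from the test class above, that plugs a custom BufferRecycler lambda into the same two-argument NetworkBuffer constructor used elsewhere on this page; the variable names and the AtomicBoolean flag are purely illustrative, and the usual imports for MemorySegmentFactory, NetworkBuffer and AtomicBoolean are assumed.

AtomicBoolean recycled = new AtomicBoolean(false);
MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(1024);
// the recycler is invoked once the buffer's reference count drops to zero
NetworkBuffer buffer = new NetworkBuffer(segment, seg -> recycled.set(true));

buffer.recycleBuffer();          // releases the last reference
assertTrue(buffer.isRecycled()); // the buffer reports itself as recycled
assertTrue(recycled.get());      // and the recycler has been called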
Use of org.apache.flink.runtime.io.network.buffer.NetworkBuffer in project flink by apache.
From the class SyncChannelStateWriteRequestExecutor, method testAddEventBuffer().
@Test(expected = IllegalArgumentException.class)
public void testAddEventBuffer() throws Exception {
    NetworkBuffer dataBuf = getBuffer();
    NetworkBuffer eventBuf = getBuffer();
    eventBuf.setDataType(Buffer.DataType.EVENT_BUFFER);
    try {
        runWithSyncWorker(writer -> {
            callStart(writer);
            // addInputData accepts only data buffers, so passing the event buffer must fail
            writer.addInputData(
                    CHECKPOINT_ID,
                    new InputChannelInfo(1, 1),
                    1,
                    ofElements(Buffer::recycleBuffer, eventBuf, dataBuf));
        });
    } finally {
        // even though the call fails, the data buffer handed in alongside must be recycled
        assertTrue(dataBuf.isRecycled());
    }
}
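For reference, the data/event distinction that makes addInputData reject eventBuf can be set and inspected on a NetworkBuffer directly. The following is a minimal sketch and not part of the test class; it assumes an unpooled segment with a no-op recycler and that the two-argument constructor defaults to DATA_BUFFER, as the other snippets on this page suggest.

MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(128);
NetworkBuffer buf = new NetworkBuffer(segment, seg -> {}); // starts out as a data buffer

buf.setDataType(Buffer.DataType.EVENT_BUFFER);
assertTrue(buf.getDataType().isEvent());
assertFalse(buf.getDataType().isBuffer());

buf.recycleBuffer();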
Use of org.apache.flink.runtime.io.network.buffer.NetworkBuffer in project flink by apache.
From the class PartitionedFileWriteReadTest, method testWriteFinishedPartitionedFile().
@Test(expected = IllegalStateException.class)
public void testWriteFinishedPartitionedFile() throws Exception {
    PartitionedFileWriter partitionedFileWriter = createAndFinishPartitionedFileWriter();

    MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(1024);
    NetworkBuffer buffer = new NetworkBuffer(segment, (buf) -> {});

    // writing to a writer that has already been finished must fail
    partitionedFileWriter.writeBuffers(getBufferWithChannels(buffer, 0));
}
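The NetworkBuffer construction above, an unpooled segment wrapped with a no-op recycler, is all the test needs; createAndFinishPartitionedFileWriter and getBufferWithChannels are test helpers and are not shown here. A minimal sketch of filling such a buffer with data, with illustrative values only and assuming the usual Flink memory imports:

MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(1024);
NetworkBuffer buffer = new NetworkBuffer(segment, (buf) -> {}); // no-op recycler

segment.putInt(0, 42);           // write directly into the backing memory
buffer.setSize(Integer.BYTES);   // expose the written bytes to readers
assertEquals(4, buffer.readableBytes());

buffer.recycleBuffer();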
Use of org.apache.flink.runtime.io.network.buffer.NetworkBuffer in project flink by apache.
From the class TestSubpartitionProducer, method call().
@Override
public Boolean call() throws Exception {
    boolean success = false;
    try {
        BufferAndChannel bufferAndChannel;
        while ((bufferAndChannel = source.getNextBuffer()) != null) {
            MemorySegment segment = MemorySegmentFactory.wrap(bufferAndChannel.getBuffer());
            subpartition.add(
                    new BufferConsumer(
                            new NetworkBuffer(segment, MemorySegment::free, Buffer.DataType.DATA_BUFFER),
                            segment.size()));

            // Check for interrupted flag after adding data to prevent resource leaks
            if (Thread.interrupted()) {
                throw new InterruptedException();
            }

            if (isSlowProducer) {
                Thread.sleep(random.nextInt(MAX_SLEEP_TIME_MS + 1));
            }
        }
        subpartition.finish();
        success = true;
        return true;
    } finally {
        if (!success) {
            subpartition.release();
        }
    }
}
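The NetworkBuffer detail worth noting in this producer is the recycler: MemorySegment::free releases the wrapped segment once the buffer is recycled. A minimal sketch of that wrapping pattern on its own, outside the producer; the byte values are illustrative and the four-argument constructor mirrors the one used in the SortBasedDataBuffer snippet below.

byte[] bytes = new byte[] {1, 2, 3, 4};
MemorySegment segment = MemorySegmentFactory.wrap(bytes);
NetworkBuffer networkBuffer =
        new NetworkBuffer(segment, MemorySegment::free, Buffer.DataType.DATA_BUFFER, bytes.length);

assertEquals(bytes.length, networkBuffer.getSize());
networkBuffer.recycleBuffer(); // frees the wrapped segment via MemorySegment::free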
Use of org.apache.flink.runtime.io.network.buffer.NetworkBuffer in project flink by apache.
From the class SortBasedDataBuffer, method getNextBuffer().
@Override
public BufferWithChannel getNextBuffer(MemorySegment transitBuffer) {
    checkState(isFull, "Sort buffer is not ready to be read.");
    checkState(!isReleased, "Sort buffer is already released.");

    if (!hasRemaining()) {
        return null;
    }

    int numBytesCopied = 0;
    DataType bufferDataType = DataType.DATA_BUFFER;
    int channelIndex = subpartitionReadOrder[readOrderIndex];

    do {
        int sourceSegmentIndex = getSegmentIndexFromPointer(readIndexEntryAddress);
        int sourceSegmentOffset = getSegmentOffsetFromPointer(readIndexEntryAddress);
        MemorySegment sourceSegment = segments.get(sourceSegmentIndex);

        long lengthAndDataType = sourceSegment.getLong(sourceSegmentOffset);
        int length = getSegmentIndexFromPointer(lengthAndDataType);
        DataType dataType = DataType.values()[getSegmentOffsetFromPointer(lengthAndDataType)];

        // return the data read directly if the next to read is an event
        if (dataType.isEvent() && numBytesCopied > 0) {
            break;
        }
        bufferDataType = dataType;

        // get the next index entry address and move the read position forward
        long nextReadIndexEntryAddress = sourceSegment.getLong(sourceSegmentOffset + 8);
        sourceSegmentOffset += INDEX_ENTRY_SIZE;

        // allocate a temp buffer for the event if the target buffer is not big enough
        if (bufferDataType.isEvent() && transitBuffer.size() < length) {
            transitBuffer = MemorySegmentFactory.allocateUnpooledSegment(length);
        }

        numBytesCopied +=
                copyRecordOrEvent(transitBuffer, numBytesCopied, sourceSegmentIndex, sourceSegmentOffset, length);

        if (recordRemainingBytes == 0) {
            // move to next channel if the current channel has been finished
            if (readIndexEntryAddress == lastIndexEntryAddresses[channelIndex]) {
                updateReadChannelAndIndexEntryAddress();
                break;
            }
            readIndexEntryAddress = nextReadIndexEntryAddress;
        }
    } while (numBytesCopied < transitBuffer.size() && bufferDataType.isBuffer());

    numTotalBytesRead += numBytesCopied;
    Buffer buffer = new NetworkBuffer(transitBuffer, (buf) -> {}, bufferDataType, numBytesCopied);
    return new BufferWithChannel(buffer, channelIndex);
}
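The reader above depends on index entries that pack two 32-bit values into one long: getSegmentIndexFromPointer is used here to extract the high half (a record length or a segment index) and getSegmentOffsetFromPointer the low half (a DataType ordinal or a segment offset). The sketch below illustrates that packing scheme with plain bit operations; it is an assumption drawn from how the helpers are used in this method, not a copy of their actual implementation.

long indexEntry = (256L << 32) | 1L; // e.g. length 256 in the high half, DataType ordinal 1 in the low half

int high = (int) (indexEntry >>> 32); // what getSegmentIndexFromPointer would yield: 256
int low = (int) indexEntry;           // what getSegmentOffsetFromPointer would yield: 1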