Use of org.apache.flink.runtime.io.network.buffer.NetworkBuffer in project flink by apache.
The class SingleInputGateTest, method testGetCompressedBuffer.
/**
 * Tests that the compressed buffer will be decompressed after calling {@link
 * SingleInputGate#getNext()}.
 */
@Test
public void testGetCompressedBuffer() throws Exception {
    int bufferSize = 1024;
    String compressionCodec = "LZ4";
    BufferCompressor compressor = new BufferCompressor(bufferSize, compressionCodec);
    BufferDecompressor decompressor = new BufferDecompressor(bufferSize, compressionCodec);

    try (SingleInputGate inputGate =
            new SingleInputGateBuilder().setBufferDecompressor(decompressor).build()) {
        TestInputChannel inputChannel = new TestInputChannel(inputGate, 0);

        // Fill a buffer with a predictable little-endian pattern and compress it.
        MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(bufferSize);
        for (int i = 0; i < bufferSize; i += 8) {
            segment.putLongLittleEndian(i, i);
        }
        Buffer uncompressedBuffer = new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE);
        uncompressedBuffer.setSize(bufferSize);
        Buffer compressedBuffer = compressor.compressToOriginalBuffer(uncompressedBuffer);
        assertTrue(compressedBuffer.isCompressed());

        // Feed the compressed buffer through the input gate; the gate's
        // decompressor should hand back readable, uncompressed data.
        inputChannel.read(compressedBuffer);
        inputGate.setInputChannels(inputChannel);
        inputGate.notifyChannelNonEmpty(inputChannel);
        Optional<BufferOrEvent> bufferOrEvent = inputGate.getNext();
        assertTrue(bufferOrEvent.isPresent());
        assertTrue(bufferOrEvent.get().isBuffer());

        // Verify that the decompressed content matches the original pattern.
        ByteBuffer buffer =
                bufferOrEvent.get().getBuffer().getNioBufferReadable().order(ByteOrder.LITTLE_ENDIAN);
        for (int i = 0; i < bufferSize; i += 8) {
            assertEquals(i, buffer.getLong());
        }
    }
}
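For reference, a minimal standalone sketch of the same compress/decompress round trip without the input-gate machinery, using only the APIs visible above. The isCompressed() guard reflects that compressToOriginalBuffer may return the input unchanged when compression would not shrink the data; treat this as an illustrative sketch, not Flink test code.

// Illustrative round trip: fill, compress, decompress, verify.
int bufferSize = 1024;
BufferCompressor compressor = new BufferCompressor(bufferSize, "LZ4");
BufferDecompressor decompressor = new BufferDecompressor(bufferSize, "LZ4");

MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(bufferSize);
for (int i = 0; i < bufferSize; i += 8) {
    segment.putLongLittleEndian(i, i);       // highly compressible pattern
}
Buffer original = new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE);
original.setSize(bufferSize);

Buffer compressed = compressor.compressToOriginalBuffer(original);
// compressToOriginalBuffer may skip compression when it would not help,
// so only decompress if the buffer is actually marked compressed.
Buffer restored =
        compressed.isCompressed()
                ? decompressor.decompressToOriginalBuffer(compressed)
                : compressed;

ByteBuffer view = restored.getNioBufferReadable().order(ByteOrder.LITTLE_ENDIAN);
for (int i = 0; i < bufferSize; i += 8) {
    if (view.getLong() != i) {
        throw new AssertionError("round trip corrupted data at offset " + i);
    }
}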
Use of org.apache.flink.runtime.io.network.buffer.NetworkBuffer in project flink by apache.
The class BufferFileWriterFileSegmentReaderTest, method testWriteRead.
@Test
public void testWriteRead() throws IOException, InterruptedException {
    int numBuffers = 1024;
    int currentNumber = 0;
    final int minBufferSize = BUFFER_SIZE / 4;

    // Write buffers filled with ascending numbers...
    for (int i = 0; i < numBuffers; i++) {
        final Buffer buffer = createBuffer();
        int size = getNextMultipleOf(getRandomNumberInRange(minBufferSize, BUFFER_SIZE), 4);
        currentNumber = fillBufferWithAscendingNumbers(buffer, currentNumber, size);
        writer.writeBlock(buffer);
    }

    // Make sure that the writes are finished
    writer.close();

    // Read buffers back in...
    for (int i = 0; i < numBuffers; i++) {
        assertFalse(reader.hasReachedEndOfFile());
        reader.read();
    }

    // Wait for all requests to be finished
    final CountDownLatch sync = new CountDownLatch(1);
    final NotificationListener listener = new NotificationListener() {
        @Override
        public void onNotification() {
            sync.countDown();
        }
    };
    if (reader.registerAllRequestsProcessedListener(listener)) {
        sync.await();
    }
    assertTrue(reader.hasReachedEndOfFile());

    // Verify that the content is the same
    assertEquals("Read less buffers than written.", numBuffers, returnedFileSegments.size());
    currentNumber = 0;
    FileSegment fileSegment;
    ByteBuffer buffer = ByteBuffer.allocate(BUFFER_SIZE);
    while ((fileSegment = returnedFileSegments.poll()) != null) {
        buffer.position(0);
        buffer.limit(fileSegment.getLength());
        fileSegment.getFileChannel().read(buffer, fileSegment.getPosition());
        Buffer buffer1 = new NetworkBuffer(MemorySegmentFactory.wrap(buffer.array()), BUFFER_RECYCLER);
        buffer1.setSize(fileSegment.getLength());
        currentNumber = verifyBufferFilledWithAscendingNumbers(buffer1, currentNumber);
    }
    reader.close();
}
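The verification above leans on two fill/verify helpers. A plausible reconstruction follows, assuming the convention of writing 4-byte ints at ascending offsets (which is why sizes are rounded to multiples of 4); the names match the calls above, but the bodies are a sketch, not the exact Flink source.

// Reconstructed sketch of the helpers used in testWriteRead.
static int fillBufferWithAscendingNumbers(Buffer buffer, int currentNumber, int size) {
    MemorySegment segment = buffer.getMemorySegment();
    for (int i = 0; i < size; i += 4) {
        segment.putInt(i, currentNumber++);  // one ascending int per 4 bytes
    }
    buffer.setSize(size);                    // readable region = bytes filled
    return currentNumber;
}

static int verifyBufferFilledWithAscendingNumbers(Buffer buffer, int currentNumber) {
    MemorySegment segment = buffer.getMemorySegment();
    for (int i = 0; i < buffer.getSize(); i += 4) {
        if (segment.getInt(i) != currentNumber++) {
            throw new IllegalStateException("Read unexpected number at offset " + i);
        }
    }
    return currentNumber;
}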
Use of org.apache.flink.runtime.io.network.buffer.NetworkBuffer in project flink by apache.
The class SortMergeResultPartitionTest, method testWriteAndRead.
@Test
public void testWriteAndRead() throws Exception {
    int numBuffers = useHashDataBuffer ? 100 : 15;
    int numSubpartitions = 10;
    int numRecords = 1000;
    Random random = new Random();

    BufferPool bufferPool = globalPool.createBufferPool(numBuffers, numBuffers);
    SortMergeResultPartition partition = createSortMergedPartition(numSubpartitions, bufferPool);

    // Per-subpartition bookkeeping of what was written and what is read back.
    Queue<DataBufferTest.DataAndType>[] dataWritten = new Queue[numSubpartitions];
    Queue<Buffer>[] buffersRead = new Queue[numSubpartitions];
    for (int i = 0; i < numSubpartitions; ++i) {
        dataWritten[i] = new ArrayDeque<>();
        buffersRead[i] = new ArrayDeque<>();
    }
    int[] numBytesWritten = new int[numSubpartitions];
    int[] numBytesRead = new int[numSubpartitions];
    Arrays.fill(numBytesWritten, 0);
    Arrays.fill(numBytesRead, 0);

    // Emit a mix of broadcast and single-subpartition records of random size.
    for (int i = 0; i < numRecords; ++i) {
        ByteBuffer record = generateRandomData(random.nextInt(2 * bufferSize) + 1, random);
        boolean isBroadCast = random.nextBoolean();
        if (isBroadCast) {
            partition.broadcastRecord(record);
            for (int subpartition = 0; subpartition < numSubpartitions; ++subpartition) {
                recordDataWritten(record, dataWritten, subpartition, numBytesWritten, DataType.DATA_BUFFER);
            }
        } else {
            int subpartition = random.nextInt(numSubpartitions);
            partition.emitRecord(record, subpartition);
            recordDataWritten(record, dataWritten, subpartition, numBytesWritten, DataType.DATA_BUFFER);
        }
    }
    partition.finish();
    partition.close();

    for (int subpartition = 0; subpartition < numSubpartitions; ++subpartition) {
        ByteBuffer record = EventSerializer.toSerializedEvent(EndOfPartitionEvent.INSTANCE);
        recordDataWritten(record, dataWritten, subpartition, numBytesWritten, DataType.EVENT_BUFFER);
    }

    // Read everything back through subpartition views, copying each buffer
    // into an unpooled NetworkBuffer for the final comparison.
    ResultSubpartitionView[] views = createSubpartitionViews(partition, numSubpartitions);
    readData(views, bufferWithChannel -> {
        Buffer buffer = bufferWithChannel.getBuffer();
        int subpartition = bufferWithChannel.getChannelIndex();
        int numBytes = buffer.readableBytes();
        numBytesRead[subpartition] += numBytes;
        MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(numBytes);
        segment.put(0, buffer.getNioBufferReadable(), numBytes);
        buffersRead[subpartition].add(new NetworkBuffer(segment, (buf) -> {}, buffer.getDataType(), numBytes));
    });

    DataBufferTest.checkWriteReadResult(numSubpartitions, numBytesWritten, numBytesRead, dataWritten, buffersRead);
}
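Two of the helpers above carry the bookkeeping that makes the final check work. A hedged reconstruction follows, in the style of Flink's DataBufferTest; the signatures and the DataAndType constructor are assumptions, not the exact source.

// Sketch of the data-generation and bookkeeping helpers (reconstructed).
public static ByteBuffer generateRandomData(int dataSize, Random random) {
    byte[] bytes = new byte[dataSize];
    random.nextBytes(bytes);                  // arbitrary payload of dataSize bytes
    return ByteBuffer.wrap(bytes);
}

private static void recordDataWritten(
        ByteBuffer record,
        Queue<DataBufferTest.DataAndType>[] dataWritten,
        int subpartition,
        int[] numBytesWritten,
        Buffer.DataType dataType) {
    record.rewind();                          // emitRecord/broadcastRecord consumed it
    dataWritten[subpartition].add(new DataBufferTest.DataAndType(record, dataType));
    numBytesWritten[subpartition] += record.remaining();
}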
Use of org.apache.flink.runtime.io.network.buffer.NetworkBuffer in project flink by apache.
The class AbstractCollectingResultPartitionWriter, method deserializeRecord.
private void deserializeRecord(ByteBuffer serializedRecord) throws IOException {
    checkArgument(serializedRecord.hasArray());

    // Wrap the backing array as a memory segment; only the remaining bytes
    // are made readable via setSize().
    MemorySegment segment = MemorySegmentFactory.wrap(serializedRecord.array());
    NetworkBuffer buffer = new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE);
    buffer.setSize(serializedRecord.remaining());

    deserializeBuffer(buffer);
}
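A short usage sketch: the method expects a heap ByteBuffer whose backing array starts at the record, because MemorySegmentFactory.wrap() wraps the whole array while setSize() only counts remaining() bytes. The payload below is purely illustrative.

// Hypothetical caller: wrap stand-in serialized bytes at array offset 0.
byte[] payload = new byte[] {1, 2, 3, 4};                // stand-in serialized record
ByteBuffer serializedRecord = ByteBuffer.wrap(payload);  // hasArray() == true, position 0
deserializeRecord(serializedRecord);                     // remaining() == payload.length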
Use of org.apache.flink.runtime.io.network.buffer.NetworkBuffer in project flink by apache.
The class NettyMessageClientSideSerializationTest, method decompress.
private Buffer decompress(Buffer buffer) {
    MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(BUFFER_SIZE);
    Buffer compressedBuffer = new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE);

    // Copy the readable bytes into a fresh buffer, mark it compressed, and
    // let the decompressor restore the original content.
    buffer.asByteBuf().readBytes(compressedBuffer.asByteBuf(), buffer.readableBytes());
    compressedBuffer.setCompressed(true);
    return DECOMPRESSOR.decompressToOriginalBuffer(compressedBuffer);
}
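For context, the helper assumes surrounding constants along these lines (the real test declares its own; the values here are illustrative), and it only accepts buffers produced by a codec-matched BufferCompressor.

// Assumed setup for the decompress helper (illustrative values).
private static final int BUFFER_SIZE = 1024;
private static final BufferDecompressor DECOMPRESSOR =
        new BufferDecompressor(BUFFER_SIZE, "LZ4");

// Usage sketch: a codec-matched compressor produces the input.
// BufferCompressor compressor = new BufferCompressor(BUFFER_SIZE, "LZ4");
// Buffer compressed = compressor.compressToOriginalBuffer(someBuffer);
// Buffer restored = decompress(compressed);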