Use of org.apache.flink.runtime.io.network.buffer.BufferCompressor in project flink by apache.
From the class ResultPartitionFactory, method create:
@VisibleForTesting
public ResultPartition create(
        String taskNameWithSubtaskAndId,
        int partitionIndex,
        ResultPartitionID id,
        ResultPartitionType type,
        int numberOfSubpartitions,
        int maxParallelism,
        SupplierWithException<BufferPool, IOException> bufferPoolFactory) {

    // Compression is only applied to blocking shuffles, and only when enabled.
    BufferCompressor bufferCompressor = null;
    if (type.isBlocking() && blockingShuffleCompressionEnabled) {
        bufferCompressor = new BufferCompressor(networkBufferSize, compressionCodec);
    }

    ResultSubpartition[] subpartitions = new ResultSubpartition[numberOfSubpartitions];

    final ResultPartition partition;
    if (type == ResultPartitionType.PIPELINED
            || type == ResultPartitionType.PIPELINED_BOUNDED
            || type == ResultPartitionType.PIPELINED_APPROXIMATE) {
        final PipelinedResultPartition pipelinedPartition = new PipelinedResultPartition(
                taskNameWithSubtaskAndId, partitionIndex, id, type, subpartitions,
                maxParallelism, partitionManager, bufferCompressor, bufferPoolFactory);

        for (int i = 0; i < subpartitions.length; i++) {
            if (type == ResultPartitionType.PIPELINED_APPROXIMATE) {
                subpartitions[i] = new PipelinedApproximateSubpartition(
                        i, configuredNetworkBuffersPerChannel, pipelinedPartition);
            } else {
                subpartitions[i] = new PipelinedSubpartition(
                        i, configuredNetworkBuffersPerChannel, pipelinedPartition);
            }
        }
        partition = pipelinedPartition;
    } else if (type == ResultPartitionType.BLOCKING
            || type == ResultPartitionType.BLOCKING_PERSISTENT) {
        // High-parallelism blocking shuffles go through the sort-merge path.
        if (numberOfSubpartitions >= sortShuffleMinParallelism) {
            partition = new SortMergeResultPartition(
                    taskNameWithSubtaskAndId, partitionIndex, id, type,
                    subpartitions.length, maxParallelism, batchShuffleReadBufferPool,
                    batchShuffleReadIOExecutor, partitionManager,
                    channelManager.createChannel().getPath(), bufferCompressor,
                    bufferPoolFactory);
        } else {
            final BoundedBlockingResultPartition blockingPartition =
                    new BoundedBlockingResultPartition(
                            taskNameWithSubtaskAndId, partitionIndex, id, type,
                            subpartitions, maxParallelism, partitionManager,
                            bufferCompressor, bufferPoolFactory);
            initializeBoundedBlockingPartitions(
                    subpartitions, blockingPartition, blockingSubpartitionType,
                    networkBufferSize, channelManager, sslEnabled);
            partition = blockingPartition;
        }
    } else {
        throw new IllegalArgumentException("Unrecognized ResultPartitionType: " + type);
    }

    LOG.debug("{}: Initialized {}", taskNameWithSubtaskAndId, this);
    return partition;
}
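The factory only creates a BufferCompressor for blocking shuffles with compression enabled; the consuming side pairs it with a BufferDecompressor built from the same buffer size and codec. Below is a minimal round-trip sketch, not taken from the factory above: it assumes BufferDecompressor#decompressToOriginalBuffer mirrors BufferCompressor#compressToOriginalBuffer, and the buffer size is an illustrative value.

// Hedged sketch: compress a network buffer in place, then restore it.
// Assumes decompressToOriginalBuffer is the mirror of compressToOriginalBuffer;
// the 32 KB buffer size is illustrative.
int bufferSize = 32 * 1024;
BufferCompressor compressor = new BufferCompressor(bufferSize, "LZ4");
BufferDecompressor decompressor = new BufferDecompressor(bufferSize, "LZ4");

MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(bufferSize);
Buffer original = new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE);
original.setSize(bufferSize);

// compressToOriginalBuffer writes the compressed bytes back into the original
// segment; if compression does not shrink the data, the buffer may come back
// unchanged, so check isCompressed() before decompressing.
Buffer compressed = compressor.compressToOriginalBuffer(original);
if (compressed.isCompressed()) {
    Buffer restored = decompressor.decompressToOriginalBuffer(compressed);
}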
Use of org.apache.flink.runtime.io.network.buffer.BufferCompressor in project flink by apache.
From the class SingleInputGateTest, method testGetCompressedBuffer:
/**
 * Tests that the compressed buffer will be decompressed after calling {@link
 * SingleInputGate#getNext()}.
 */
@Test
public void testGetCompressedBuffer() throws Exception {
    int bufferSize = 1024;
    String compressionCodec = "LZ4";
    BufferCompressor compressor = new BufferCompressor(bufferSize, compressionCodec);
    BufferDecompressor decompressor = new BufferDecompressor(bufferSize, compressionCodec);

    try (SingleInputGate inputGate =
            new SingleInputGateBuilder().setBufferDecompressor(decompressor).build()) {
        TestInputChannel inputChannel = new TestInputChannel(inputGate, 0);

        // Fill a buffer with a deterministic little-endian pattern.
        MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(bufferSize);
        for (int i = 0; i < bufferSize; i += 8) {
            segment.putLongLittleEndian(i, i);
        }
        Buffer uncompressedBuffer = new NetworkBuffer(segment, FreeingBufferRecycler.INSTANCE);
        uncompressedBuffer.setSize(bufferSize);
        Buffer compressedBuffer = compressor.compressToOriginalBuffer(uncompressedBuffer);
        assertTrue(compressedBuffer.isCompressed());

        inputChannel.read(compressedBuffer);
        inputGate.setInputChannels(inputChannel);
        inputGate.notifyChannelNonEmpty(inputChannel);

        // getNext() should hand back the decompressed payload.
        Optional<BufferOrEvent> bufferOrEvent = inputGate.getNext();
        assertTrue(bufferOrEvent.isPresent());
        assertTrue(bufferOrEvent.get().isBuffer());
        ByteBuffer buffer =
                bufferOrEvent.get().getBuffer().getNioBufferReadable().order(ByteOrder.LITTLE_ENDIAN);
        for (int i = 0; i < bufferSize; i += 8) {
            assertEquals(i, buffer.getLong());
        }
    }
}
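Since the gate was built with setBufferDecompressor, the buffer returned by getNext() is already decompressed. As an illustration, a couple of extra assertions one could add in the same test context (these are assumptions layered on the original, not part of it):

// Hypothetical extra checks, reusing bufferOrEvent and bufferSize from above.
Buffer received = bufferOrEvent.get().getBuffer();
assertFalse(received.isCompressed()); // the gate decompressed it transparently
assertEquals(bufferSize, received.getSize()); // full original payload restored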
Use of org.apache.flink.runtime.io.network.buffer.BufferCompressor in project flink by apache.
From the class CreditBasedPartitionRequestClientHandlerTest, method testReceiveCompressedBuffer:
/**
* Verifies that {@link BufferResponse} of compressed {@link Buffer} can be handled correctly.
*/
@Test
public void testReceiveCompressedBuffer() throws Exception {
    int bufferSize = 1024;
    String compressionCodec = "LZ4";
    BufferCompressor compressor = new BufferCompressor(bufferSize, compressionCodec);
    BufferDecompressor decompressor = new BufferDecompressor(bufferSize, compressionCodec);
    NetworkBufferPool networkBufferPool = new NetworkBufferPool(10, bufferSize);
    SingleInputGate inputGate =
            new SingleInputGateBuilder()
                    .setBufferDecompressor(decompressor)
                    .setSegmentProvider(networkBufferPool)
                    .build();
    RemoteInputChannel inputChannel = createRemoteInputChannel(inputGate, null);
    inputGate.setInputChannels(inputChannel);

    try {
        BufferPool bufferPool = networkBufferPool.createBufferPool(8, 8);
        inputGate.setBufferPool(bufferPool);
        inputGate.setupChannels();

        CreditBasedPartitionRequestClientHandler handler =
                new CreditBasedPartitionRequestClientHandler();
        handler.addInputChannel(inputChannel);

        // Ship a compressed buffer through the client handler.
        Buffer buffer =
                compressor.compressToOriginalBuffer(TestBufferFactory.createBuffer(bufferSize));
        BufferResponse bufferResponse = createBufferResponse(
                buffer, 0, inputChannel.getInputChannelId(), 2, new NetworkBufferAllocator(handler));
        assertTrue(bufferResponse.isCompressed);
        handler.channelRead(null, bufferResponse);

        // The handler stages the buffer still compressed; decompression happens
        // later, when the gate hands the data to the consumer.
        Buffer receivedBuffer = inputChannel.getNextReceivedBuffer();
        assertNotNull(receivedBuffer);
        assertTrue(receivedBuffer.isCompressed());
        receivedBuffer.recycleBuffer();
    } finally {
        releaseResource(inputGate, networkBufferPool);
    }
}
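Note the contrast with the previous test: the netty handler stages the buffer still compressed, and decompression only happens once the input gate consumes it. A rough sketch of that later step follows; treat it as an assumption about where the gate applies the decompressor, using decompressToIntermediateBuffer, which decompresses into a segment owned by the decompressor itself.

// Hedged sketch of the consumer-side step; not part of the test above.
// The gate applies its BufferDecompressor roughly like this before handing
// the data to the task:
if (receivedBuffer.isCompressed()) {
    Buffer uncompressed = decompressor.decompressToIntermediateBuffer(receivedBuffer);
    // ... pass `uncompressed` to the record deserializer, then recycle both buffers.
}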