Usage of org.apache.flink.runtime.io.network.partition.NoOpBufferAvailablityListener in the Apache Flink project: class SequentialChannelStateReaderImplTest, method collectBuffers.
/**
 * Drains every subpartition of the given result partitions and groups the data
 * buffers (events are skipped) by their {@link ResultSubpartitionInfo}.
 */
private Map<ResultSubpartitionInfo, List<Buffer>> collectBuffers(BufferWritingResultPartition[] resultPartitions) throws IOException {
    Map<ResultSubpartitionInfo, List<Buffer>> collected = new HashMap<>();
    for (BufferWritingResultPartition partition : resultPartitions) {
        int subpartitionCount = partition.getNumberOfSubpartitions();
        for (int subIdx = 0; subIdx < subpartitionCount; subIdx++) {
            ResultSubpartitionInfo subpartitionInfo = partition.getAllPartitions()[subIdx].getSubpartitionInfo();
            ResultSubpartitionView reader = partition.createSubpartitionView(subpartitionInfo.getSubPartitionIdx(), new NoOpBufferAvailablityListener());
            // Pull until the view is exhausted; only keep real data buffers, not events.
            BufferAndBacklog next;
            while ((next = reader.getNextBuffer()) != null) {
                if (next.buffer().isBuffer()) {
                    collected.computeIfAbsent(subpartitionInfo, key -> new ArrayList<>()).add(next.buffer());
                }
            }
        }
    }
    return collected;
}
Usage of org.apache.flink.runtime.io.network.partition.NoOpBufferAvailablityListener in the Apache Flink project: class PartitionRequestQueueTest, method testNotifyNewBufferSize.
@Test
public void testNotifyNewBufferSize() throws Exception {
    // given: a result partition and a reader attached to subpartition 0.
    ResultPartition partition = createResultPartition();
    BufferAvailabilityListener listener = new NoOpBufferAvailablityListener();
    ResultSubpartitionView subpartitionView = partition.createSubpartitionView(0, listener);
    ResultPartitionProvider provider = (partitionId, index, availabilityListener) -> subpartitionView;
    InputChannelID channelId = new InputChannelID();
    PartitionRequestQueue requestQueue = new PartitionRequestQueue();
    CreditBasedSequenceNumberingViewReader viewReader = new CreditBasedSequenceNumberingViewReader(channelId, 2, requestQueue);
    EmbeddedChannel embeddedChannel = new EmbeddedChannel(requestQueue);
    viewReader.requestSubpartitionView(provider, new ResultPartitionID(), 0);
    requestQueue.notifyReaderCreated(viewReader);

    // when: the receiver announces a smaller buffer size ...
    requestQueue.notifyNewBufferSize(channelId, 65);

    // ... and new records are emitted afterwards.
    partition.emitRecord(ByteBuffer.allocate(128), 0);
    partition.emitRecord(ByteBuffer.allocate(10), 0);
    partition.emitRecord(ByteBuffer.allocate(60), 0);
    viewReader.notifyDataAvailable();
    embeddedChannel.runPendingTasks();

    // then: outbound buffers reflect the announced size.
    Object first = embeddedChannel.readOutbound();
    // A buffer can never be smaller than the first record written into it.
    assertEquals(128, ((NettyMessage.BufferResponse) first).buffer.getSize());
    Object second = embeddedChannel.readOutbound();
    // Subsequent buffers shrink to the notified size.
    assertEquals(65, ((NettyMessage.BufferResponse) second).buffer.getSize());
}
Usage of org.apache.flink.runtime.io.network.partition.NoOpBufferAvailablityListener in the Apache Flink project: class BroadcastRecordWriterTest, method testBroadcastMixedRandomEmitRecord.
/**
 * Tests that the number of requested buffers and the emitted results are correct when switching
 * between {@link BroadcastRecordWriter#broadcastEmit(IOReadableWritable)} and {@link
 * BroadcastRecordWriter#randomEmit(IOReadableWritable)} modes.
 */
@Test
public void testBroadcastMixedRandomEmitRecord() throws Exception {
    final int channelCount = 8;
    final int recordCount = 8;
    final int bufferSize = 32;
    final ResultPartition partition = createResultPartition(bufferSize, channelCount);
    final BroadcastRecordWriter<SerializationTestType> writer = new BroadcastRecordWriter<>(partition, -1, "test");
    final RecordDeserializer<SerializationTestType> deserializer = new SpillingAdaptiveSpanningRecordDeserializer<>(new String[] { tempFolder.getRoot().getAbsolutePath() });

    // Generate the configured number of random int values as the global record set.
    final Iterable<SerializationTestType> records = Util.randomRecords(recordCount, SerializationTestTypeFactory.INT);

    // Expected record sequence per channel, filled in as we emit below.
    final Map<Integer, ArrayDeque<SerializationTestType>> expectedPerChannel = new HashMap<>();
    for (int channel = 0; channel < channelCount; channel++) {
        expectedPerChannel.put(channel, new ArrayDeque<>());
    }

    // Each record is first emitted to one round-robin channel, then broadcast to all channels.
    int recordIndex = 0;
    for (SerializationTestType record : records) {
        int targetChannel = recordIndex++ % channelCount;
        writer.emit(record, targetChannel);
        expectedPerChannel.get(targetChannel).add(record);
        writer.broadcastEmit(record);
        for (int channel = 0; channel < channelCount; channel++) {
            expectedPerChannel.get(channel).add(record);
        }
    }

    // Verify the expected number of requested buffers: each random emit always
    // requests a fresh buffer, so two buffers are used per record overall.
    final int usedBuffers = partition.getBufferPool().bestEffortGetNumOfUsedBuffers();
    assertEquals(2 * recordCount, usedBuffers);

    for (int channel = 0; channel < channelCount; channel++) {
        // Every channel queues the number of buffers created above.
        assertEquals(recordCount + 1, partition.getNumberOfQueuedBuffers(channel));
        final int extraRandomRecord = channel < recordCount % channelCount ? 1 : 0;
        final int randomRecordCount = recordCount / channelCount + extraRandomRecord;
        final int totalRecordCount = recordCount + randomRecordCount;
        // Verify the data correctness of every channel queue.
        verifyDeserializationResults(partition.createSubpartitionView(channel, new NoOpBufferAvailablityListener()), deserializer, expectedPerChannel.get(channel), recordCount + 1, totalRecordCount);
    }
}
Usage of org.apache.flink.runtime.io.network.partition.NoOpBufferAvailablityListener in the Apache Flink project: class BroadcastRecordWriterTest, method testRandomEmitAndBufferRecycling.
/**
 * FLINK-17780: Tests that a shared buffer (or memory segment) of a buffer builder is only freed
 * once all of its consumers are closed.
 */
@Test
public void testRandomEmitAndBufferRecycling() throws Exception {
    int recordSize = 8;
    int channelCount = 2;
    ResultPartition partition = createResultPartition(2 * recordSize, channelCount);
    BufferPool pool = partition.getBufferPool();
    BroadcastRecordWriter<SerializationTestType> writer = new BroadcastRecordWriter<>(partition, -1, "test");

    // Materialize both buffers up front so the availability checks below are deterministic.
    List<Buffer> prefetched = Arrays.asList(pool.requestBuffer(), pool.requestBuffer());
    prefetched.forEach(Buffer::recycleBuffer);
    assertEquals(3, pool.getNumberOfAvailableMemorySegments());

    // Fill the first buffer.
    writer.broadcastEmit(new IntType(1));
    writer.broadcastEmit(new IntType(2));
    assertEquals(2, pool.getNumberOfAvailableMemorySegments());

    // Consuming subpartition 0 alone must not free the shared buffer.
    assertEquals(1, partition.getNumberOfQueuedBuffers(0));
    ResultSubpartitionView firstView = partition.createSubpartitionView(0, new NoOpBufferAvailablityListener());
    closeConsumer(firstView, 2 * recordSize);
    assertEquals(2, pool.getNumberOfAvailableMemorySegments());

    // Start using the second buffer.
    writer.emit(new IntType(3), 0);
    assertEquals(1, pool.getNumberOfAvailableMemorySegments());

    // Closing the remaining consumer fully frees the first buffer.
    assertEquals(1, partition.getNumberOfQueuedBuffers(1));
    ResultSubpartitionView secondView = partition.createSubpartitionView(1, new NoOpBufferAvailablityListener());
    closeConsumer(secondView, 2 * recordSize);
    assertEquals(2, pool.getNumberOfAvailableMemorySegments());
}
Usage of org.apache.flink.runtime.io.network.partition.NoOpBufferAvailablityListener in the Apache Flink project: class RecordWriterDelegateTest, method verifyBroadcastEvent.
/**
 * Broadcasts a checkpoint-cancellation marker through the delegate and asserts that every
 * subpartition of every given partition queued exactly that one event.
 */
private void verifyBroadcastEvent(RecordWriterDelegate writerDelegate, List<ResultPartition> partitions) throws Exception {
    final CancelCheckpointMarker marker = new CancelCheckpointMarker(1);
    writerDelegate.broadcastEvent(marker);
    // Verify the broadcast message landed in every subpartition queue.
    for (ResultPartition partition : partitions) {
        for (int subpartition = 0; subpartition < partition.getNumberOfSubpartitions(); subpartition++) {
            assertEquals(1, partition.getNumberOfQueuedBuffers(subpartition));
            ResultSubpartitionView view = partition.createSubpartitionView(subpartition, new NoOpBufferAvailablityListener());
            BufferOrEvent bufferOrEvent = RecordWriterTest.parseBuffer(view.getNextBuffer().buffer(), subpartition);
            assertTrue(bufferOrEvent.isEvent());
            assertEquals(marker, bufferOrEvent.getEvent());
        }
    }
}
Aggregations