Use of org.apache.flink.runtime.io.network.partition.ResultPartitionProvider in project flink by apache.
In class PartitionRequestQueueTest, method testEnqueueReaderByResumingConsumption:
/**
 * Tests {@link PartitionRequestQueue#enqueueAvailableReader(NetworkSequenceViewReader)},
 * verifying that the reader is enqueued in the pipeline after data consumption is resumed,
 * provided credit and data are available.
*/
@Test
public void testEnqueueReaderByResumingConsumption() throws Exception {
    PipelinedSubpartition subpartition =
            PipelinedSubpartitionTest.createPipelinedSubpartition();
    Buffer.DataType dataType1 = Buffer.DataType.ALIGNED_CHECKPOINT_BARRIER;
    Buffer.DataType dataType2 = Buffer.DataType.DATA_BUFFER;
    subpartition.add(createEventBufferConsumer(4096, dataType1));
    subpartition.add(createEventBufferConsumer(4096, dataType2));

    BufferAvailabilityListener bufferAvailabilityListener = new NoOpBufferAvailablityListener();
    PipelinedSubpartitionView view = subpartition.createReadView(bufferAvailabilityListener);
    ResultPartitionProvider partitionProvider =
            (partitionId, index, availabilityListener) -> view;

    InputChannelID receiverId = new InputChannelID();
    PartitionRequestQueue queue = new PartitionRequestQueue();
    CreditBasedSequenceNumberingViewReader reader =
            new CreditBasedSequenceNumberingViewReader(receiverId, 2, queue);
    EmbeddedChannel channel = new EmbeddedChannel(queue);

    reader.requestSubpartitionView(partitionProvider, new ResultPartitionID(), 0);
    queue.notifyReaderCreated(reader);
    assertTrue(reader.getAvailabilityAndBacklog().isAvailable());

    // Emitting the aligned checkpoint barrier pauses data consumption, so one buffer stays queued.
    reader.notifyDataAvailable();
    channel.runPendingTasks();
    assertFalse(reader.getAvailabilityAndBacklog().isAvailable());
    assertEquals(1, subpartition.unsynchronizedGetNumberOfQueuedBuffers());

    // Resuming consumption re-enqueues the reader and drains the remaining data buffer.
    queue.addCreditOrResumeConsumption(receiverId, NetworkSequenceViewReader::resumeConsumption);
    assertFalse(reader.getAvailabilityAndBacklog().isAvailable());
    assertEquals(0, subpartition.unsynchronizedGetNumberOfQueuedBuffers());

    // Both responses are on the wire: first the barrier, then the data buffer.
    Object data1 = channel.readOutbound();
    assertEquals(dataType1, ((NettyMessage.BufferResponse) data1).buffer.getDataType());
    Object data2 = channel.readOutbound();
    assertEquals(dataType2, ((NettyMessage.BufferResponse) data2).buffer.getDataType());
}
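In these snippets, the ResultPartitionProvider is supplied as a lambda, which only compiles because the interface has a single abstract method. As a point of reference, here is a rough sketch of its shape in the Flink version these tests appear to target; the exact signature and throws clause are an assumption and may differ between releases:

// Sketch only: the functional interface the test lambdas on this page implement.
// The parameter list mirrors how the lambdas are written in these snippets.
public interface ResultPartitionProvider {

    ResultSubpartitionView createSubpartitionView(
            ResultPartitionID partitionId,
            int index,
            BufferAvailabilityListener availabilityListener)
            throws IOException;
}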
Use of org.apache.flink.runtime.io.network.partition.ResultPartitionProvider in project flink by apache.
In class PartitionRequestQueueTest, method testAnnounceBacklog:
@Test
public void testAnnounceBacklog() throws Exception {
    PipelinedSubpartition subpartition =
            PipelinedSubpartitionTest.createPipelinedSubpartition();
    subpartition.add(createEventBufferConsumer(4096, Buffer.DataType.DATA_BUFFER));
    subpartition.add(createEventBufferConsumer(4096, Buffer.DataType.DATA_BUFFER));

    PipelinedSubpartitionView view =
            subpartition.createReadView(new NoOpBufferAvailablityListener());
    ResultPartitionProvider partitionProvider =
            (partitionId, index, availabilityListener) -> view;
    PartitionRequestQueue queue = new PartitionRequestQueue();
    InputChannelID receiverId = new InputChannelID();
    // The reader is created with zero credit, so it cannot send any buffer yet.
    CreditBasedSequenceNumberingViewReader reader =
            new CreditBasedSequenceNumberingViewReader(receiverId, 0, queue);
    EmbeddedChannel channel = new EmbeddedChannel(queue);

    reader.requestSubpartitionView(partitionProvider, new ResultPartitionID(), 0);
    queue.notifyReaderCreated(reader);

    // Without credit, the queue announces the current backlog instead of sending a buffer.
    reader.notifyDataAvailable();
    channel.runPendingTasks();
    Object data = channel.readOutbound();
    assertTrue(data instanceof NettyMessage.BacklogAnnouncement);
    NettyMessage.BacklogAnnouncement announcement = (NettyMessage.BacklogAnnouncement) data;
    assertEquals(receiverId, announcement.receiverId);
    assertEquals(subpartition.getBuffersInBacklogUnsafe(), announcement.backlog);

    // After the subpartition is released, notifying availability still produces an outbound message.
    subpartition.release();
    reader.notifyDataAvailable();
    channel.runPendingTasks();
    assertNotNull(channel.readOutbound());
}
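Both queue tests above rely on a createEventBufferConsumer(int, Buffer.DataType) helper that this page does not show. A minimal sketch of what such a helper could look like, assuming the NetworkBuffer and BufferConsumer constructors used below exist in the targeted Flink version:

// Hypothetical helper (not shown on this page): wraps a freshly allocated unpooled
// segment into an already-finished BufferConsumer of the given size and data type.
private static BufferConsumer createEventBufferConsumer(int size, Buffer.DataType dataType) {
    return new BufferConsumer(
            new NetworkBuffer(
                    MemorySegmentFactory.allocateUnpooledSegment(size),
                    FreeingBufferRecycler.INSTANCE,
                    dataType),
            size);
}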
Use of org.apache.flink.runtime.io.network.partition.ResultPartitionProvider in project flink by apache.
In class PartitionRequestServerHandlerTest, method testAcknowledgeAllRecordsProcessed:
@Test
public void testAcknowledgeAllRecordsProcessed() throws IOException {
    InputChannelID inputChannelID = new InputChannelID();
    ResultPartition resultPartition =
            PartitionTestUtils.createPartition(ResultPartitionType.PIPELINED_BOUNDED);
    ResultPartitionProvider partitionProvider =
            (partitionId, index, availabilityListener) ->
                    resultPartition.createSubpartitionView(index, availabilityListener);

    // Creates the netty network handler stack.
    PartitionRequestQueue partitionRequestQueue = new PartitionRequestQueue();
    final PartitionRequestServerHandler serverHandler =
            new PartitionRequestServerHandler(
                    new ResultPartitionManager(), new TaskEventDispatcher(), partitionRequestQueue);
    final EmbeddedChannel channel = new EmbeddedChannel(serverHandler, partitionRequestQueue);

    // Creates and registers the view to netty.
    NetworkSequenceViewReader viewReader =
            new CreditBasedSequenceNumberingViewReader(inputChannelID, 2, partitionRequestQueue);
    viewReader.requestSubpartitionView(partitionProvider, resultPartition.getPartitionId(), 0);
    partitionRequestQueue.notifyReaderCreated(viewReader);

    // Signal the end of data; the partition must be set up before the event can be emitted.
    resultPartition.setup();
    resultPartition.notifyEndOfData(StopMode.DRAIN);
    CompletableFuture<Void> allRecordsProcessedFuture = resultPartition.getAllDataProcessedFuture();
    assertFalse(allRecordsProcessedFuture.isDone());

    // Write the message acknowledging that all records have been processed to the server.
    channel.writeInbound(new NettyMessage.AckAllUserRecordsProcessed(inputChannelID));
    channel.runPendingTasks();

    // The acknowledgement from the downstream task completes the partition's future.
    assertTrue(allRecordsProcessedFuture.isDone());
    assertFalse(allRecordsProcessedFuture.isCompletedExceptionally());
}
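Outside of tests, the provider handed to requestSubpartitionView is usually not an ad-hoc lambda: ResultPartitionManager itself implements ResultPartitionProvider, so produced partitions are registered with the manager and views are resolved through it. A brief sketch under that assumption (the registration call shown here is assumed and should be checked against the Flink version in use):

// Sketch: using the ResultPartitionManager as the ResultPartitionProvider
// instead of the ad-hoc lambdas in the tests above.
ResultPartitionManager partitionManager = new ResultPartitionManager();
ResultPartition resultPartition =
        PartitionTestUtils.createPartition(ResultPartitionType.PIPELINED_BOUNDED);
// registerResultPartition(...) is assumed here; verify the exact registration API.
partitionManager.registerResultPartition(resultPartition);
ResultPartitionProvider partitionProvider = partitionManager;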