Use of org.apache.flink.runtime.io.network.NetworkSequenceViewReader in project flink by apache.
The class PartitionRequestServerHandler, method channelRead0.
@Override
protected void channelRead0(ChannelHandlerContext ctx, NettyMessage msg) throws Exception {
    try {
        Class<?> msgClazz = msg.getClass();

        // ----------------------------------------------------------------
        if (msgClazz == PartitionRequest.class) {
            PartitionRequest request = (PartitionRequest) msg;
            LOG.debug("Read channel on {}: {}.", ctx.channel().localAddress(), request);

            try {
                NetworkSequenceViewReader reader =
                        new CreditBasedSequenceNumberingViewReader(request.receiverId, request.credit, outboundQueue);
                reader.requestSubpartitionView(partitionProvider, request.partitionId, request.queueIndex);
                outboundQueue.notifyReaderCreated(reader);
            } catch (PartitionNotFoundException notFound) {
                respondWithError(ctx, notFound, request.receiverId);
            }
        }
        // ----------------------------------------------------------------
        else if (msgClazz == TaskEventRequest.class) {
            TaskEventRequest request = (TaskEventRequest) msg;

            if (!taskEventPublisher.publish(request.partitionId, request.event)) {
                respondWithError(ctx, new IllegalArgumentException("Task event receiver not found."), request.receiverId);
            }
        } else if (msgClazz == CancelPartitionRequest.class) {
            CancelPartitionRequest request = (CancelPartitionRequest) msg;
            outboundQueue.cancel(request.receiverId);
        } else if (msgClazz == CloseRequest.class) {
            outboundQueue.close();
        } else if (msgClazz == AddCredit.class) {
            AddCredit request = (AddCredit) msg;
            outboundQueue.addCreditOrResumeConsumption(request.receiverId, reader -> reader.addCredit(request.credit));
        } else if (msgClazz == ResumeConsumption.class) {
            ResumeConsumption request = (ResumeConsumption) msg;
            outboundQueue.addCreditOrResumeConsumption(request.receiverId, NetworkSequenceViewReader::resumeConsumption);
        } else if (msgClazz == AckAllUserRecordsProcessed.class) {
            AckAllUserRecordsProcessed request = (AckAllUserRecordsProcessed) msg;
            outboundQueue.acknowledgeAllRecordsProcessed(request.receiverId);
        } else if (msgClazz == NewBufferSize.class) {
            NewBufferSize request = (NewBufferSize) msg;
            outboundQueue.notifyNewBufferSize(request.receiverId, request.bufferSize);
        } else {
            LOG.warn("Received unexpected client request: {}", msg);
        }
    } catch (Throwable t) {
        respondWithError(ctx, t);
    }
}
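Both error paths above go through respondWithError overloads that are not part of this excerpt. A minimal sketch of what they could look like, assuming NettyMessage.ErrorResponse offers constructors with and without a receiver id (the names mirror the Flink codebase, but treat the bodies as illustrative rather than the exact implementation):

    // Hedged sketch: wrap the Throwable in an ErrorResponse and flush it back to the client.
    private void respondWithError(ChannelHandlerContext ctx, Throwable error) {
        ctx.writeAndFlush(new NettyMessage.ErrorResponse(error));
    }

    // Hedged sketch: same idea, but tagged with the receiver id so the client can route the
    // error to the affected input channel instead of failing the whole connection.
    private void respondWithError(ChannelHandlerContext ctx, Throwable error, InputChannelID sourceId) {
        LOG.debug("Responding with error: {}.", error.getClass());
        ctx.writeAndFlush(new NettyMessage.ErrorResponse(error, sourceId));
    }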
Use of org.apache.flink.runtime.io.network.NetworkSequenceViewReader in project flink by apache.
The class PartitionRequestQueue, method addCreditOrResumeConsumption.
/**
 * Adds unannounced credits from the consumer or resumes data consumption after an exactly-once
 * checkpoint and enqueues the corresponding reader for this consumer (if not enqueued yet).
 *
 * @param receiverId The input channel id to identify the consumer.
 * @param operation The operation to be performed (add credit or resume data consumption).
 */
void addCreditOrResumeConsumption(InputChannelID receiverId, Consumer<NetworkSequenceViewReader> operation) throws Exception {
    if (fatalError) {
        return;
    }

    NetworkSequenceViewReader reader = obtainReader(receiverId);

    operation.accept(reader);
    enqueueAvailableReader(reader);
}
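After the operation has been applied, enqueueAvailableReader decides whether the reader actually joins the queue of readers allowed to write to the channel. That method is not shown in this excerpt; the sketch below illustrates the idea under the assumption that the reader can report "has both credit and data" via an isAvailable-style check (newer Flink versions expose this through an availability-and-backlog accessor) and that availableReaders, registerAvailableReader and writeAndFlushNextMessageIfPossible are the members used elsewhere in these snippets.

    // Hedged sketch: enqueue a reader only if it is not queued yet and has both credit and data;
    // trigger a write ourselves only if the queue was empty, otherwise the write listener
    // will pick the reader up after the in-flight write completes.
    private void enqueueAvailableReader(final NetworkSequenceViewReader reader) throws Exception {
        if (reader.isRegisteredAsAvailable() || !reader.isAvailable()) {
            return;
        }

        boolean triggerWrite = availableReaders.isEmpty();
        registerAvailableReader(reader);

        if (triggerWrite) {
            writeAndFlushNextMessageIfPossible(ctx.channel());
        }
    }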
Use of org.apache.flink.runtime.io.network.NetworkSequenceViewReader in project flink by apache.
The class PartitionRequestQueueTest, method testEnqueueReaderByNotifyingBufferAndCredit.
/**
 * Tests {@link PartitionRequestQueue#enqueueAvailableReader(NetworkSequenceViewReader)},
 * verifying the reader would be enqueued in the pipeline iff it has both available credits and
 * buffers.
 */
@Test
public void testEnqueueReaderByNotifyingBufferAndCredit() throws Exception {
    // setup
    final ResultSubpartitionView view = new DefaultBufferResultSubpartitionView(10);
    ResultPartitionProvider partitionProvider = (partitionId, index, availabilityListener) -> view;

    final InputChannelID receiverId = new InputChannelID();
    final PartitionRequestQueue queue = new PartitionRequestQueue();
    final CreditBasedSequenceNumberingViewReader reader = new CreditBasedSequenceNumberingViewReader(receiverId, 2, queue);
    final EmbeddedChannel channel = new EmbeddedChannel(queue);

    reader.addCredit(-2);
    reader.requestSubpartitionView(partitionProvider, new ResultPartitionID(), 0);
    queue.notifyReaderCreated(reader);

    // block the channel so that we see an intermediate state in the test
    ByteBuf channelBlockingBuffer = blockChannel(channel);
    assertNull(channel.readOutbound());

    // Notify available buffers to trigger enqueuing the reader
    final int notifyNumBuffers = 5;
    for (int i = 0; i < notifyNumBuffers; i++) {
        reader.notifyDataAvailable();
    }
    channel.runPendingTasks();

    // the reader is not enqueued in the pipeline because no credits are available
    // -> it should still have the same number of pending buffers
    assertEquals(0, queue.getAvailableReaders().size());
    assertTrue(reader.hasBuffersAvailable().isAvailable());
    assertFalse(reader.isRegisteredAsAvailable());
    assertEquals(0, reader.getNumCreditsAvailable());

    // Notify available credits to trigger enqueuing the reader again
    final int notifyNumCredits = 3;
    for (int i = 1; i <= notifyNumCredits; i++) {
        queue.addCreditOrResumeConsumption(receiverId, viewReader -> viewReader.addCredit(1));

        // the reader is enqueued in the pipeline because it has both available buffers and credits;
        // since the channel is blocked, though, nothing is processed and the reader is only enqueued once
        assertTrue(reader.isRegisteredAsAvailable());
        // contains only (this) one!
        assertThat(queue.getAvailableReaders(), contains(reader));
        assertEquals(i, reader.getNumCreditsAvailable());
        assertTrue(reader.hasBuffersAvailable().isAvailable());
    }

    // Flush the buffer to make the channel writable again and see the final results
    channel.flush();
    assertSame(channelBlockingBuffer, channel.readOutbound());

    assertEquals(0, queue.getAvailableReaders().size());
    assertEquals(0, reader.getNumCreditsAvailable());
    assertTrue(reader.hasBuffersAvailable().isAvailable());
    assertFalse(reader.isRegisteredAsAvailable());
    for (int i = 1; i <= notifyNumCredits; i++) {
        assertThat(channel.readOutbound(), instanceOf(NettyMessage.BufferResponse.class));
    }
    assertNull(channel.readOutbound());
}
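The blockChannel helper used above comes from the surrounding test class and is not shown in this excerpt. One way such a helper could be implemented (a sketch, assuming that filling the outbound buffer up to the channel's high-water mark is enough to make the EmbeddedChannel report itself as non-writable):

    // Hedged sketch: write a single buffer sized to the channel's high-water mark so the
    // channel becomes non-writable and subsequent writes stay queued until flush().
    private static ByteBuf blockChannel(EmbeddedChannel channel) {
        final int highWaterMark = channel.config().getWriteBufferHighWaterMark();
        // a buffer whose writer index sits at capacity, i.e. it "contains" highWaterMark bytes
        ByteBuf channelBlockingBuffer = Unpooled.buffer(highWaterMark).writerIndex(highWaterMark);
        channel.write(channelBlockingBuffer);
        assertFalse(channel.isWritable());
        return channelBlockingBuffer;
    }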
Use of org.apache.flink.runtime.io.network.NetworkSequenceViewReader in project flink by apache.
The class PartitionRequestQueue, method writeAndFlushNextMessageIfPossible.
private void writeAndFlushNextMessageIfPossible(final Channel channel) throws IOException {
    if (fatalError || !channel.isWritable()) {
        return;
    }

    // The logic here is very similar to the combined input gate and local
    // input channel logic. You can think of this class acting as the input
    // gate and the consumed views as the local input channels.
    BufferAndAvailability next = null;
    try {
        while (true) {
            NetworkSequenceViewReader reader = pollAvailableReader();
            // No reader with available data may be left in the queue; this is expected because
            // of the write callbacks that are executed after each write.
            if (reader == null) {
                return;
            }

            next = reader.getNextBuffer();
            if (next == null) {
                if (!reader.isReleased()) {
                    continue;
                }

                Throwable cause = reader.getFailureCause();
                if (cause != null) {
                    ErrorResponse msg = new ErrorResponse(cause, reader.getReceiverId());
                    ctx.writeAndFlush(msg);
                }
            } else {
                // We re-add it into the queue if it is still available
                if (next.moreAvailable()) {
                    registerAvailableReader(reader);
                }

                BufferResponse msg = new BufferResponse(next.buffer(), next.getSequenceNumber(), reader.getReceiverId(), next.buffersInBacklog());

                // Write and flush and wait until this is done before
                // trying to continue with the next buffer.
                channel.writeAndFlush(msg).addListener(writeListener);

                return;
            }
        }
    } catch (Throwable t) {
        if (next != null) {
            next.buffer().recycleBuffer();
        }
        throw new IOException(t.getMessage(), t);
    }
}
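The writeListener passed to writeAndFlush above is what keeps this loop going: once Netty reports the write as complete, the next available buffer is attempted. A minimal sketch of such a listener, assuming a handleException(Channel, Throwable) helper on the same class (the class name mirrors Flink's private inner listener, but the body is illustrative rather than a verbatim copy):

    // Hedged sketch: after each successful write, try to send the next buffer; otherwise route
    // the failure through the queue's regular error handling.
    private class WriteAndFlushNextMessageIfPossibleListener implements ChannelFutureListener {

        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            try {
                if (future.isSuccess()) {
                    writeAndFlushNextMessageIfPossible(future.channel());
                } else if (future.cause() != null) {
                    handleException(future.channel(), future.cause());
                } else {
                    handleException(future.channel(), new IllegalStateException("Sending was cancelled."));
                }
            } catch (Throwable t) {
                handleException(future.channel(), t);
            }
        }
    }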
Use of org.apache.flink.runtime.io.network.NetworkSequenceViewReader in project flink by apache.
The class PartitionRequestQueue, method releaseAllResources.
private void releaseAllResources() throws IOException {
    // note: this is only ever executed by one thread: the Netty IO thread!
    for (NetworkSequenceViewReader reader : allReaders.values()) {
        releaseViewReader(reader);
    }

    availableReaders.clear();
    allReaders.clear();
}
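releaseViewReader itself is not part of this excerpt. A plausible per-reader sketch, assuming the NetworkSequenceViewReader interface exposes a setter for the "registered as available" flag (the getter appears in the test above) and a releaseAllResources method for dropping its subpartition view (illustrative, not a verbatim copy):

    // Hedged sketch: mark the reader as no longer queued and release its subpartition view.
    private void releaseViewReader(NetworkSequenceViewReader reader) throws IOException {
        reader.setRegisteredAsAvailable(false);
        reader.releaseAllResources();
    }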