Use of org.apache.flink.runtime.io.network.partition.ResultSubpartitionView in project flink by apache.
In the class LocalInputChannelTest, the method testGetBufferFromLocalChannelWhenCompressionEnabled:
/**
 * Verifies that the buffer is not compressed when it is read from a {@link LocalInputChannel}.
 */
@Test
public void testGetBufferFromLocalChannelWhenCompressionEnabled() throws Exception {
    ResultSubpartitionView subpartitionView = InputChannelTestUtils.createResultSubpartitionView(true);
    TestingResultPartitionManager partitionManager = new TestingResultPartitionManager(subpartitionView);
    LocalInputChannel channel = createLocalInputChannel(new SingleInputGateBuilder().build(), partitionManager);
    // request the subpartition and read the next buffer
    channel.requestSubpartition();
    Optional<InputChannel.BufferAndAvailability> bufferAndAvailability = channel.getNextBuffer();
    assertTrue(bufferAndAvailability.isPresent());
    assertFalse(bufferAndAvailability.get().buffer().isCompressed());
}
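The test above requests a subpartition through a LocalInputChannel and asserts that the buffer it reads is not compressed. A minimal complementary sketch, assuming the boolean passed to InputChannelTestUtils.createResultSubpartitionView controls whether a filled buffer is added to the view (an assumption inferred from the call above, not confirmed here), would check that a view without data yields no buffer at all:

// Sketch only: assumes createResultSubpartitionView(false) builds a view with no data added.
ResultSubpartitionView emptyView = InputChannelTestUtils.createResultSubpartitionView(false);
TestingResultPartitionManager emptyManager = new TestingResultPartitionManager(emptyView);
LocalInputChannel emptyChannel = createLocalInputChannel(new SingleInputGateBuilder().build(), emptyManager);
emptyChannel.requestSubpartition();
// Nothing was written to the subpartition, so polling the channel yields no buffer.
assertFalse(emptyChannel.getNextBuffer().isPresent());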
Use of org.apache.flink.runtime.io.network.partition.ResultSubpartitionView in project flink by apache.
In the class PartitionRequestQueueTest, the method testProducerFailedException:
@Test
public void testProducerFailedException() throws Exception {
    PartitionRequestQueue queue = new PartitionRequestQueue();
    ResultPartitionProvider partitionProvider = mock(ResultPartitionProvider.class);
    ResultPartitionID rpid = new ResultPartitionID();
    BufferProvider bufferProvider = mock(BufferProvider.class);
    ResultSubpartitionView view = mock(ResultSubpartitionView.class);
    when(view.isReleased()).thenReturn(true);
    when(view.getFailureCause()).thenReturn(new RuntimeException("Expected test exception"));
    when(partitionProvider.createSubpartitionView(eq(rpid), eq(0), eq(bufferProvider), any(BufferAvailabilityListener.class))).thenReturn(view);
    EmbeddedChannel ch = new EmbeddedChannel(queue);
    SequenceNumberingViewReader seqView = new SequenceNumberingViewReader(new InputChannelID(), queue);
    seqView.requestSubpartitionView(partitionProvider, rpid, 0, bufferProvider);
    // Enqueue the erroneous view
    queue.notifyReaderNonEmpty(seqView);
    ch.runPendingTasks();
    // Read the enqueued message
    Object msg = ch.readOutbound();
    assertEquals(NettyMessage.ErrorResponse.class, msg.getClass());
    NettyMessage.ErrorResponse err = (NettyMessage.ErrorResponse) msg;
    assertTrue(err.cause instanceof CancelTaskException);
}
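The essential arrangement here is a subpartition view that reports itself as released and carries a failure cause; the PartitionRequestQueue then emits a NettyMessage.ErrorResponse whose cause cancels the consuming task. A hedged sketch of that stubbing, pulled into a hypothetical helper (the helper name is illustrative and not part of the test class):

// Hypothetical helper, sketch only: stub a view that has failed on the producer side.
private static ResultSubpartitionView failedView(Throwable producerFailure) {
    ResultSubpartitionView view = mock(ResultSubpartitionView.class);
    // A released view with a non-null failure cause is what makes the queue answer with an ErrorResponse.
    when(view.isReleased()).thenReturn(true);
    when(view.getFailureCause()).thenReturn(producerFailure);
    return view;
}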
Use of org.apache.flink.runtime.io.network.partition.ResultSubpartitionView in project flink by apache.
In the class LocalInputChannelTest, the method testEnqueueAvailableChannelWhenResuming:
@Test
public void testEnqueueAvailableChannelWhenResuming() throws IOException, InterruptedException {
    PipelinedResultPartition parent = (PipelinedResultPartition) PartitionTestUtils.createPartition(ResultPartitionType.PIPELINED, NoOpFileChannelManager.INSTANCE);
    ResultSubpartition subpartition = parent.getAllPartitions()[0];
    ResultSubpartitionView subpartitionView = subpartition.createReadView(() -> {});
    TestingResultPartitionManager partitionManager = new TestingResultPartitionManager(subpartitionView);
    LocalInputChannel channel = createLocalInputChannel(new SingleInputGateBuilder().build(), partitionManager);
    channel.requestSubpartition();
    // Block the subpartition with a checkpoint barrier
    subpartition.add(EventSerializer.toBufferConsumer(new CheckpointBarrier(1, 1, CheckpointOptions.forCheckpointWithDefaultLocation()), false));
    assertTrue(channel.getNextBuffer().isPresent());
    // Add more data
    subpartition.add(createFilledFinishedBufferConsumer(4096));
    subpartition.flush();
    // No buffer since the subpartition is blocked.
    assertFalse(channel.inputGate.pollNext().isPresent());
    // Resumption makes the subpartition available again.
    channel.resumeConsumption();
    Optional<BufferOrEvent> nextBuffer = channel.inputGate.pollNext();
    assertTrue(nextBuffer.isPresent());
    assertTrue(nextBuffer.get().isBuffer());
}
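After resumeConsumption() the channel is re-enqueued into its input gate and the data buffered behind the barrier becomes pollable again. A hedged sketch of a hypothetical drain helper (not part of the test above; the name is illustrative) that polls the gate until it is momentarily empty and recycles every data buffer it sees:

// Hypothetical helper, sketch only: count and recycle the data buffers currently pollable from the gate.
private static int drainDataBuffers(SingleInputGate gate) throws Exception {
    int dataBuffers = 0;
    Optional<BufferOrEvent> polled;
    while ((polled = gate.pollNext()).isPresent()) {
        if (polled.get().isBuffer()) {
            polled.get().getBuffer().recycleBuffer();
            dataBuffers++;
        }
    }
    return dataBuffers;
}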
Use of org.apache.flink.runtime.io.network.partition.ResultSubpartitionView in project flink by apache.
In the class LocalInputChannel, the method getNextBuffer:
@Override
Optional<BufferAndAvailability> getNextBuffer() throws IOException {
    checkError();
    ResultSubpartitionView subpartitionView = this.subpartitionView;
    if (subpartitionView == null) {
        // There is a possible race between the sender writing an EndOfPartitionEvent (1) and
        // flushing (3), and this channel reading the EndOfPartitionEvent (2) and processing the
        // flush notification (4). If they happen in the order 1 - 2 - 3 - 4, the flush
        // notification can re-enqueue this channel after (or during) it was released during
        // reading the EndOfPartitionEvent (2).
        if (isReleased) {
            return Optional.empty();
        }
        // This can happen if the request for the partition was triggered asynchronously
        // by the time trigger.
        // It would be good to avoid that, by guaranteeing that requestPartition() and
        // getNextBuffer() always come from the same thread.
        // We could do that by letting the timer insert a special "requesting channel" into the
        // input gate's queue.
        subpartitionView = checkAndWaitForSubpartitionView();
    }
    BufferAndBacklog next = subpartitionView.getNextBuffer();
    // Skip empty buffers directly.
    while (next != null && next.buffer().readableBytes() == 0) {
        next.buffer().recycleBuffer();
        next = subpartitionView.getNextBuffer();
        numBuffersIn.inc();
    }
    if (next == null) {
        if (subpartitionView.isReleased()) {
            throw new CancelTaskException("Consumed partition " + subpartitionView + " has been released.");
        } else {
            return Optional.empty();
        }
    }
    Buffer buffer = next.buffer();
    if (buffer instanceof FileRegionBuffer) {
        buffer = ((FileRegionBuffer) buffer).readInto(inputGate.getUnpooledSegment());
    }
    numBytesIn.inc(buffer.getSize());
    numBuffersIn.inc();
    channelStatePersister.checkForBarrier(buffer);
    channelStatePersister.maybePersist(buffer);
    NetworkActionsLogger.traceInput("LocalInputChannel#getNextBuffer", buffer, inputGate.getOwningTaskName(), channelInfo, channelStatePersister, next.getSequenceNumber());
    return Optional.of(new BufferAndAvailability(buffer, next.getNextDataType(), next.buffersInBacklog(), next.getSequenceNumber()));
}
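Callers receive a BufferAndAvailability that wraps the buffer together with the next data type, the backlog, and the sequence number. A hedged sketch of a hypothetical consumption loop (the process(...) step is a placeholder, not Flink API) that polls the channel and recycles each buffer after use:

// Sketch only: illustrative caller loop; process(...) stands in for downstream handling.
Optional<InputChannel.BufferAndAvailability> polled;
while ((polled = channel.getNextBuffer()).isPresent()) {
    Buffer buffer = polled.get().buffer();
    try {
        process(buffer); // hypothetical downstream step
    } finally {
        // The consumer owns the buffer once it is returned and must recycle it.
        buffer.recycleBuffer();
    }
    if (!polled.get().moreAvailable()) {
        // Stop polling and wait for the next non-empty notification instead of busy-looping.
        break;
    }
}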
Use of org.apache.flink.runtime.io.network.partition.ResultSubpartitionView in project flink by apache.
In the class LocalInputChannel, the method resumeConsumption:
@Override
public void resumeConsumption() {
    checkState(!isReleased, "Channel released.");
    ResultSubpartitionView subpartitionView = checkNotNull(this.subpartitionView);
    subpartitionView.resumeConsumption();
    if (subpartitionView.getAvailabilityAndBacklog(Integer.MAX_VALUE).isAvailable()) {
        notifyChannelNonEmpty();
    }
}