Use of org.apache.flink.runtime.io.network.partition.ResultSubpartitionView in project flink by apache.
From the class ChannelPersistenceITCase, method testReadWritten.
@Test
public void testReadWritten() throws Exception {
    byte[] inputChannelInfoData = randomBytes(1024);
    byte[] resultSubpartitionInfoData = randomBytes(1024);
    int partitionIndex = 0;
    SequentialChannelStateReader reader =
            new SequentialChannelStateReaderImpl(
                    toTaskStateSnapshot(
                            write(
                                    1L,
                                    singletonMap(new InputChannelInfo(0, 0), inputChannelInfoData),
                                    singletonMap(
                                            new ResultSubpartitionInfo(partitionIndex, 0),
                                            resultSubpartitionInfoData))));
    NetworkBufferPool networkBufferPool = new NetworkBufferPool(4, 1024);
    try {
        int numChannels = 1;
        InputGate gate = buildGate(networkBufferPool, numChannels);
        reader.readInputData(new InputGate[] {gate});
        assertArrayEquals(
                inputChannelInfoData,
                collectBytes(gate::pollNext, BufferOrEvent::getBuffer));
        BufferWritingResultPartition resultPartition =
                buildResultPartition(
                        networkBufferPool,
                        ResultPartitionType.PIPELINED,
                        partitionIndex,
                        numChannels);
        reader.readOutputData(new BufferWritingResultPartition[] {resultPartition}, false);
        ResultSubpartitionView view =
                resultPartition.createSubpartitionView(0, new NoOpBufferAvailablityListener());
        assertArrayEquals(
                resultSubpartitionInfoData,
                collectBytes(
                        () -> Optional.ofNullable(view.getNextBuffer()),
                        BufferAndBacklog::buffer));
    } finally {
        networkBufferPool.destroy();
    }
}
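The collectBytes helper used above is private to ChannelPersistenceITCase and not shown here. A minimal sketch of such a drain loop, assuming the real Buffer methods getNioBufferReadable() and recycleBuffer() and Flink's SupplierWithException utility; the hypothetical name collectBytesSketch and the omission of event handling are this sketch's simplifications, not the actual helper:

// Hypothetical drain loop in the spirit of collectBytes, not the actual
// implementation. Assumed imports: java.io.ByteArrayOutputStream,
// java.nio.ByteBuffer, java.util.Optional, java.util.function.Function,
// org.apache.flink.runtime.io.network.buffer.Buffer,
// org.apache.flink.util.function.SupplierWithException.
private static <T> byte[] collectBytesSketch(
        SupplierWithException<Optional<T>, Exception> poll, Function<T, Buffer> getBuffer)
        throws Exception {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    Optional<T> next;
    while ((next = poll.get()).isPresent()) {
        Buffer buffer = getBuffer.apply(next.get());
        ByteBuffer nio = buffer.getNioBufferReadable(); // read-only view of the payload
        byte[] chunk = new byte[nio.remaining()];
        nio.get(chunk);
        out.write(chunk, 0, chunk.length);
        buffer.recycleBuffer(); // hand the buffer back to its pool
    }
    return out.toByteArray();
}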
Use of org.apache.flink.runtime.io.network.partition.ResultSubpartitionView in project flink by apache.
From the class CancelPartitionRequestTest, method testCancelPartitionRequest.
/**
 * Verifies that requests for non-existing (failed/cancelled) input channels are properly
 * cancelled. The receiver receives data, but there is no input channel to receive the data.
 * This should cancel the request.
 */
@Test
public void testCancelPartitionRequest() throws Exception {
    NettyServerAndClient serverAndClient = null;
    try {
        TestPooledBufferProvider outboundBuffers = new TestPooledBufferProvider(16);
        ResultPartitionManager partitions = mock(ResultPartitionManager.class);
        ResultPartitionID pid = new ResultPartitionID();
        CountDownLatch sync = new CountDownLatch(1);
        final ResultSubpartitionView view =
                spy(new InfiniteSubpartitionView(outboundBuffers, sync));
        // Return infinite subpartition
        when(partitions.createSubpartitionView(
                        eq(pid), eq(0), any(BufferAvailabilityListener.class)))
                .thenAnswer(
                        new Answer<ResultSubpartitionView>() {
                            @Override
                            public ResultSubpartitionView answer(InvocationOnMock invocationOnMock)
                                    throws Throwable {
                                BufferAvailabilityListener listener =
                                        (BufferAvailabilityListener)
                                                invocationOnMock.getArguments()[2];
                                listener.notifyDataAvailable();
                                return view;
                            }
                        });
        NettyProtocol protocol = new NettyProtocol(partitions, mock(TaskEventDispatcher.class));
        serverAndClient = initServerAndClient(protocol);
        Channel ch = connect(serverAndClient);
        // Request for non-existing input channel => results in cancel request
        ch.writeAndFlush(new PartitionRequest(pid, 0, new InputChannelID(), Integer.MAX_VALUE))
                .await();
        // Wait for the notification
        if (!sync.await(TestingUtils.TESTING_DURATION.toMillis(), TimeUnit.MILLISECONDS)) {
            fail(
                    "Timed out after waiting for "
                            + TestingUtils.TESTING_DURATION.toMillis()
                            + " ms to be notified about cancelled partition.");
        }
        verify(view, times(1)).releaseAllResources();
    } finally {
        shutdown(serverAndClient);
    }
}
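Since Answer is a functional interface, the same stubbing can be written more compactly with a lambda; a sketch assuming Mockito 2+, where InvocationOnMock.getArgument(int) replaces indexing into getArguments():

// Equivalent stubbing as a lambda (assumes Mockito 2+):
when(partitions.createSubpartitionView(eq(pid), eq(0), any(BufferAvailabilityListener.class)))
        .thenAnswer(
                invocation -> {
                    BufferAvailabilityListener listener = invocation.getArgument(2);
                    listener.notifyDataAvailable();
                    return view;
                });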
Use of org.apache.flink.runtime.io.network.partition.ResultSubpartitionView in project flink by apache.
From the class RecordWriterTest, method testBroadcastEmitRecord.
/**
 * Tests that records are broadcast via {@link RecordWriter#broadcastEmit(IOReadableWritable)}.
 */
@Test
public void testBroadcastEmitRecord() throws Exception {
    final int numberOfChannels = 4;
    final int bufferSize = 32;
    final int numValues = 8;
    final int serializationLength = 4;
    final ResultPartition partition = createResultPartition(bufferSize, numberOfChannels);
    final RecordWriter<SerializationTestType> writer = createRecordWriter(partition);
    final RecordDeserializer<SerializationTestType> deserializer =
            new SpillingAdaptiveSpanningRecordDeserializer<>(
                    new String[] {tempFolder.getRoot().getAbsolutePath()});
    final ArrayDeque<SerializationTestType> serializedRecords = new ArrayDeque<>();
    final Iterable<SerializationTestType> records =
            Util.randomRecords(numValues, SerializationTestTypeFactory.INT);
    for (SerializationTestType record : records) {
        serializedRecords.add(record);
        writer.broadcastEmit(record);
    }
    final int numRequiredBuffers = numValues / (bufferSize / (4 + serializationLength));
    if (isBroadcastWriter) {
        assertEquals(
                numRequiredBuffers, partition.getBufferPool().bestEffortGetNumOfUsedBuffers());
    } else {
        assertEquals(
                numRequiredBuffers * numberOfChannels,
                partition.getBufferPool().bestEffortGetNumOfUsedBuffers());
    }
    for (int i = 0; i < numberOfChannels; i++) {
        assertEquals(numRequiredBuffers, partition.getNumberOfQueuedBuffers(i));
        ResultSubpartitionView view =
                partition.createSubpartitionView(i, new NoOpBufferAvailablityListener());
        verifyDeserializationResults(
                view, deserializer, serializedRecords.clone(), numRequiredBuffers, numValues);
    }
}
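The expected buffer count falls out of the constants: the 4 in the formula is the serializer's per-record length prefix, so each INT record occupies 4 + serializationLength = 8 bytes, a 32-byte buffer holds 32 / 8 = 4 records, and 8 values need numRequiredBuffers = 8 / 4 = 2 buffers per channel. A broadcast writer serializes the record once and shares those two buffers across all four subpartitions (hence 2 used buffers in the pool), while a non-broadcast writer copies the record into per-channel buffers (hence 2 * 4 = 8); either way, each subpartition ends up with two queued buffers.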
Use of org.apache.flink.runtime.io.network.partition.ResultSubpartitionView in project flink by apache.
From the class RecordWriterTest, method testBroadcastEventNoRecords.
// ---------------------------------------------------------------------------------------------
// Resource release tests
// ---------------------------------------------------------------------------------------------

/**
 * Tests broadcasting events when no records have been emitted yet.
 */
@Test
public void testBroadcastEventNoRecords() throws Exception {
    int numberOfChannels = 4;
    int bufferSize = 32;
    ResultPartition partition = createResultPartition(bufferSize, numberOfChannels);
    RecordWriter<ByteArrayIO> writer = createRecordWriter(partition);
    CheckpointBarrier barrier =
            new CheckpointBarrier(
                    Integer.MAX_VALUE + 919192L,
                    Integer.MAX_VALUE + 18828228L,
                    CheckpointOptions.forCheckpointWithDefaultLocation());
    // No records emitted yet, broadcast should not request a buffer
    writer.broadcastEvent(barrier);
    assertEquals(0, partition.getBufferPool().bestEffortGetNumOfUsedBuffers());
    for (int i = 0; i < numberOfChannels; i++) {
        assertEquals(1, partition.getNumberOfQueuedBuffers(i));
        ResultSubpartitionView view =
                partition.createSubpartitionView(i, new NoOpBufferAvailablityListener());
        BufferOrEvent boe = parseBuffer(view.getNextBuffer().buffer(), i);
        assertTrue(boe.isEvent());
        assertEquals(barrier, boe.getEvent());
        assertFalse(view.getAvailabilityAndBacklog(Integer.MAX_VALUE).isAvailable());
    }
}
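The parseBuffer helper is local to RecordWriterTest; the event deserialization it performs can also be done directly with Flink's EventSerializer. A sketch of checking one channel's event buffer that way, assuming the same view as in the loop above and leaving buffer recycling to the caller:

// Deserializing the barrier by hand with EventSerializer.fromBuffer;
// parseBuffer presumably performs a similar conversion internally,
// together with channel bookkeeping.
Buffer eventBuffer = view.getNextBuffer().buffer();
AbstractEvent event =
        EventSerializer.fromBuffer(eventBuffer, Thread.currentThread().getContextClassLoader());
assertTrue(event instanceof CheckpointBarrier);
eventBuffer.recycleBuffer();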
Use of org.apache.flink.runtime.io.network.partition.ResultSubpartitionView in project flink by apache.
From the class PartitionRequestQueueTest, method testEnqueueReaderByNotifyingEventBuffer.
/**
 * Tests {@link PartitionRequestQueue#enqueueAvailableReader(NetworkSequenceViewReader)},
 * verifying that the reader is enqueued in the pipeline if the next buffer to send is an
 * event, even though no credits are available.
 */
@Test
public void testEnqueueReaderByNotifyingEventBuffer() throws Exception {
    // setup
    final ResultSubpartitionView view = new NextIsEventResultSubpartitionView();
    ResultPartitionProvider partitionProvider =
            (partitionId, index, availabilityListener) -> view;
    final InputChannelID receiverId = new InputChannelID();
    final PartitionRequestQueue queue = new PartitionRequestQueue();
    final CreditBasedSequenceNumberingViewReader reader =
            new CreditBasedSequenceNumberingViewReader(receiverId, 0, queue);
    final EmbeddedChannel channel = new EmbeddedChannel(queue);
    reader.requestSubpartitionView(partitionProvider, new ResultPartitionID(), 0);
    // Block the channel so that we see an intermediate state in the test
    ByteBuf channelBlockingBuffer = blockChannel(channel);
    assertNull(channel.readOutbound());
    // Notify about an available event buffer to trigger enqueueing the reader
    reader.notifyDataAvailable();
    channel.runPendingTasks();
    // The reader is enqueued in the pipeline because the next buffer is an event, even
    // though no credits are available; the queue contains only this one reader
    assertThat(queue.getAvailableReaders(), contains(reader));
    assertEquals(0, reader.getNumCreditsAvailable());
    // Flush the blocking buffer to make the channel writable again and observe the final state
    channel.flush();
    assertSame(channelBlockingBuffer, channel.readOutbound());
    assertEquals(0, queue.getAvailableReaders().size());
    assertEquals(0, reader.getNumCreditsAvailable());
    assertNull(channel.readOutbound());
}
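NextIsEventResultSubpartitionView is a private stub in PartitionRequestQueueTest; its defining trait is that the availability check reports the view as available regardless of credits, since events do not consume credit. A hypothetical sketch of such a stub, assuming Flink's NoOpResultSubpartitionView test base and the AvailabilityWithBacklog type seen in testBroadcastEventNoRecords above; the class name and base class are this sketch's assumptions, not the actual test code:

// Hypothetical stand-in for NextIsEventResultSubpartitionView: report the
// view as available even with zero credits, because the next buffer is an
// event and events do not consume credit; data-buffer backlog stays zero.
private static class NextIsEventViewSketch extends NoOpResultSubpartitionView {
    @Override
    public AvailabilityWithBacklog getAvailabilityAndBacklog(int numCreditsAvailable) {
        return new AvailabilityWithBacklog(true, 0);
    }
}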