Use of org.apache.flink.runtime.io.network.buffer.BufferProvider in project flink by apache.
From the class LocalInputChannelTest, method testProducerFailedException.
@Test(expected = CancelTaskException.class)
public void testProducerFailedException() throws Exception {
    ResultSubpartitionView view = mock(ResultSubpartitionView.class);
    when(view.isReleased()).thenReturn(true);
    when(view.getFailureCause()).thenReturn(new Exception("Expected test exception"));

    ResultPartitionManager partitionManager = mock(ResultPartitionManager.class);
    when(partitionManager.createSubpartitionView(
            any(ResultPartitionID.class),
            anyInt(),
            any(BufferProvider.class),
            any(BufferAvailabilityListener.class)))
        .thenReturn(view);

    SingleInputGate inputGate = mock(SingleInputGate.class);
    BufferProvider bufferProvider = mock(BufferProvider.class);
    when(inputGate.getBufferProvider()).thenReturn(bufferProvider);

    LocalInputChannel ch = createLocalInputChannel(inputGate, partitionManager, new Tuple2<>(0, 0));
    ch.requestSubpartition(0);

    // Should throw an instance of CancelTaskException.
    ch.getNextBuffer();
}
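Both LocalInputChannelTest examples rely on a createLocalInputChannel helper that is not reproduced on this page. The contract the test above pins down can be sketched roughly as follows; this is an illustrative assumption, not Flink's actual LocalInputChannel code, and the helper name checkProducerFailure is hypothetical:

// Illustrative sketch only: if the consumed subpartition view has been released
// because the producer failed, reading from the channel must cancel the task.
private static void checkProducerFailure(ResultSubpartitionView view) {
    if (view.isReleased() && view.getFailureCause() != null) {
        // CancelTaskException signals the runtime to cancel the consuming task.
        throw new CancelTaskException(view.getFailureCause());
    }
}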
Use of org.apache.flink.runtime.io.network.buffer.BufferProvider in project flink by apache.
From the class LocalInputChannelTest, method testPartitionRequestExponentialBackoff.
@Test
public void testPartitionRequestExponentialBackoff() throws Exception {
    // Config: initial backoff of 500 ms, maximum backoff of 3000 ms.
    Tuple2<Integer, Integer> backoff = new Tuple2<>(500, 3000);

    // Start with the initial backoff, then keep doubling, and cap at the maximum.
    int[] expectedDelays = { backoff._1(), 1000, 2000, backoff._2() };

    // Setup
    SingleInputGate inputGate = mock(SingleInputGate.class);
    BufferProvider bufferProvider = mock(BufferProvider.class);
    when(inputGate.getBufferProvider()).thenReturn(bufferProvider);

    ResultPartitionManager partitionManager = mock(ResultPartitionManager.class);

    LocalInputChannel ch = createLocalInputChannel(inputGate, partitionManager, backoff);

    when(partitionManager.createSubpartitionView(
            eq(ch.partitionId),
            eq(0),
            eq(bufferProvider),
            any(BufferAvailabilityListener.class)))
        .thenThrow(new PartitionNotFoundException(ch.partitionId));

    Timer timer = mock(Timer.class);
    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            // Run the scheduled retry immediately instead of waiting for the delay.
            ((TimerTask) invocation.getArguments()[0]).run();
            return null;
        }
    }).when(timer).schedule(any(TimerTask.class), anyLong());

    // Initial request
    ch.requestSubpartition(0);
    verify(partitionManager).createSubpartitionView(
        eq(ch.partitionId), eq(0), eq(bufferProvider), any(BufferAvailabilityListener.class));

    // Retrigger the subpartition request and verify that each retry is scheduled
    // with the expected, exponentially growing delay.
    for (long expected : expectedDelays) {
        ch.retriggerSubpartitionRequest(timer, 0);
        verify(timer).schedule(any(TimerTask.class), eq(expected));
    }

    // Once the backoff has exceeded the maximum, the next retry must fail.
    try {
        ch.retriggerSubpartitionRequest(timer, 0);
        ch.getNextBuffer();
        fail("Did not throw expected exception.");
    } catch (Exception expected) {
        // expected
    }
}
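The expected delays follow directly from the configured backoff: starting at 500 ms, each retry doubles the delay until it is capped at 3000 ms, giving 500, 1000, 2000, 3000. A minimal, self-contained sketch of that arithmetic (ours, not Flink's implementation):

// Illustrative helper, not Flink code: compute the exponential backoff sequence
// that the test above expects (initial backoff, doubled per retry, capped at max).
private static int[] expectedBackoffs(int initial, int max, int retries) {
    int[] delays = new int[retries];
    int current = initial;
    for (int i = 0; i < retries; i++) {
        delays[i] = current;
        current = Math.min(current * 2, max);
    }
    return delays; // expectedBackoffs(500, 3000, 4) -> [500, 1000, 2000, 3000]
}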
Use of org.apache.flink.runtime.io.network.buffer.BufferProvider in project flink by apache.
From the class PartitionRequestClientHandlerTest, method testReceiveEmptyBuffer.
/**
 * Tests a fix for FLINK-1761.
 *
 * <p>FLINK-1761 reported an IndexOutOfBoundsException when receiving buffers of size 0.
 */
@Test
public void testReceiveEmptyBuffer() throws Exception {
    // Minimal mock of a remote input channel
    final BufferProvider bufferProvider = mock(BufferProvider.class);
    when(bufferProvider.requestBuffer()).thenReturn(TestBufferFactory.createBuffer());

    final RemoteInputChannel inputChannel = mock(RemoteInputChannel.class);
    when(inputChannel.getInputChannelId()).thenReturn(new InputChannelID());
    when(inputChannel.getBufferProvider()).thenReturn(bufferProvider);

    // An empty buffer of size 0
    final Buffer emptyBuffer = TestBufferFactory.createBuffer();
    emptyBuffer.setSize(0);

    final BufferResponse receivedBuffer = createBufferResponse(emptyBuffer, 0, inputChannel.getInputChannelId());

    final PartitionRequestClientHandler client = new PartitionRequestClientHandler();
    client.addInputChannel(inputChannel);

    // Read the empty buffer
    client.channelRead(mock(ChannelHandlerContext.class), receivedBuffer);

    // The read must not have reported an error to the input channel.
    verify(inputChannel, never()).onError(any(Throwable.class));
}
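createBufferResponse(...) and TestBufferFactory.createBuffer() are test utilities that are not shown on this page. The behavior the test protects can be sketched roughly as follows; this is an illustrative assumption about the handler's contract rather than its actual code, and the method name onBufferResponse plus the parameters receivedSize and sequenceNumber are hypothetical:

// Illustrative sketch only (not the real PartitionRequestClientHandler): a
// zero-length payload must still become a valid, empty Buffer handed to the
// channel instead of triggering an out-of-bounds read.
private static void onBufferResponse(
        RemoteInputChannel inputChannel,
        BufferProvider bufferProvider,
        int receivedSize,
        int sequenceNumber) throws IOException {

    Buffer buffer = bufferProvider.requestBuffer();
    if (buffer != null) {
        // Set the size first; receivedSize may legitimately be 0 (the FLINK-1761 case).
        buffer.setSize(receivedSize);
        // (The real handler copies the received payload into the buffer here when receivedSize > 0.)
        inputChannel.onBuffer(buffer, sequenceNumber);
    }
}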
Use of org.apache.flink.runtime.io.network.buffer.BufferProvider in project flink by apache.
From the class RecordWriterTest, method testBroadcastEventMixedRecords.
/**
 * Tests broadcasting events when records have been emitted. The emitted
 * records cover all three {@link SerializationResult} types.
 */
@Test
public void testBroadcastEventMixedRecords() throws Exception {
    Random rand = new XORShiftRandom();
    int numChannels = 4;
    int bufferSize = 32;
    // Size of the length field that is written before each serialized record.
    int lenBytes = 4;

    @SuppressWarnings("unchecked")
    Queue<BufferOrEvent>[] queues = new Queue[numChannels];
    for (int i = 0; i < numChannels; i++) {
        queues[i] = new ArrayDeque<>();
    }

    BufferProvider bufferProvider = createBufferProvider(bufferSize);
    ResultPartitionWriter partitionWriter = createCollectingPartitionWriter(queues, bufferProvider);
    RecordWriter<ByteArrayIO> writer = new RecordWriter<>(partitionWriter, new RoundRobin<ByteArrayIO>());
    CheckpointBarrier barrier = new CheckpointBarrier(
        Integer.MAX_VALUE + 1292L, Integer.MAX_VALUE + 199L, CheckpointOptions.forFullCheckpoint());

    // Emit records on some channels first (requesting buffers), then
    // broadcast the event. The record buffers should be emitted first, then
    // the event. After the event, no new buffer should be requested.

    // (i) Smaller than the buffer size (single buffer request => 1)
    byte[] bytes = new byte[bufferSize / 2];
    rand.nextBytes(bytes);
    writer.emit(new ByteArrayIO(bytes));

    // (ii) Larger than the buffer size (two buffer requests => 1 + 2)
    bytes = new byte[bufferSize + 1];
    rand.nextBytes(bytes);
    writer.emit(new ByteArrayIO(bytes));

    // (iii) Exactly the buffer size (single buffer request => 1 + 2 + 1)
    bytes = new byte[bufferSize - lenBytes];
    rand.nextBytes(bytes);
    writer.emit(new ByteArrayIO(bytes));

    // (iv) Nothing on the 4th channel (no buffer request => 1 + 2 + 1 + 0 = 4)

    // (v) Broadcast the event
    writer.broadcastEvent(barrier);
    verify(bufferProvider, times(4)).requestBufferBlocking();

    // 1 buffer + 1 event
    assertEquals(2, queues[0].size());
    // 2 buffers + 1 event
    assertEquals(3, queues[1].size());
    // 1 buffer + 1 event
    assertEquals(2, queues[2].size());
    // 0 buffers + 1 event
    assertEquals(1, queues[3].size());
}
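ByteArrayIO and RoundRobin are small helper types defined in RecordWriterTest and not shown on this page. A minimal sketch of what the ByteArrayIO record could look like, assuming it is a plain IOReadableWritable wrapper around a byte array (the actual test class may differ in detail):

// Assumed sketch of the test record type emitted above: it simply round-trips a
// fixed byte array through Flink's IOReadableWritable interface.
private static class ByteArrayIO implements IOReadableWritable {

    private final byte[] bytes;

    public ByteArrayIO(byte[] bytes) {
        this.bytes = bytes;
    }

    @Override
    public void write(DataOutputView out) throws IOException {
        out.write(bytes);
    }

    @Override
    public void read(DataInputView in) throws IOException {
        in.readFully(bytes);
    }
}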
Use of org.apache.flink.runtime.io.network.buffer.BufferProvider in project flink by apache.
From the class RecordWriterTest, method createBufferProvider.
private BufferProvider createBufferProvider(final int bufferSize) throws IOException, InterruptedException {
    BufferProvider bufferProvider = mock(BufferProvider.class);
    when(bufferProvider.requestBufferBlocking()).thenAnswer(new Answer<Buffer>() {
        @Override
        public Buffer answer(InvocationOnMock invocationOnMock) throws Throwable {
            // Hand out a fresh unpooled segment of the requested size on every call.
            MemorySegment segment = MemorySegmentFactory.allocateUnpooledSegment(bufferSize);
            return new Buffer(segment, DiscardingRecycler.INSTANCE);
        }
    });
    return bufferProvider;
}
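For illustration, a test could exercise the helper above like this; the assertions are ours and not part of the original test:

// Every call to requestBufferBlocking() on the mocked provider yields a fresh,
// unpooled buffer of the requested size.
BufferProvider provider = createBufferProvider(32);
Buffer first = provider.requestBufferBlocking();
Buffer second = provider.requestBufferBlocking();

assertEquals(32, first.getMemorySegment().size());
assertNotSame(first, second);                       // a new segment is allocated per request
verify(provider, times(2)).requestBufferBlocking(); // works because the provider is a Mockito mock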