Use of org.apache.flink.runtime.io.network.netty.NettyTestUtil.NettyServerAndClient in project flink by apache.
From class CancelPartitionRequestTest, method testDuplicateCancel.
@Test
public void testDuplicateCancel() throws Exception {
    NettyServerAndClient serverAndClient = null;

    try {
        final TestPooledBufferProvider outboundBuffers = new TestPooledBufferProvider(16);

        ResultPartitionManager partitions = mock(ResultPartitionManager.class);
        ResultPartitionID pid = new ResultPartitionID();

        final CountDownLatch sync = new CountDownLatch(1);
        final ResultSubpartitionView view = spy(new InfiniteSubpartitionView(outboundBuffers, sync));

        // Return infinite subpartition
        when(partitions.createSubpartitionView(eq(pid), eq(0), any(BufferProvider.class), any(BufferAvailabilityListener.class)))
                .thenAnswer(new Answer<ResultSubpartitionView>() {
                    @Override
                    public ResultSubpartitionView answer(InvocationOnMock invocationOnMock) throws Throwable {
                        BufferAvailabilityListener listener = (BufferAvailabilityListener) invocationOnMock.getArguments()[3];
                        listener.notifyBuffersAvailable(Long.MAX_VALUE);
                        return view;
                    }
                });

        PartitionRequestProtocol protocol = new PartitionRequestProtocol(
                partitions, mock(TaskEventDispatcher.class), mock(NetworkBufferPool.class));

        serverAndClient = initServerAndClient(protocol);

        Channel ch = connect(serverAndClient);

        // Request for non-existing input channel => results in cancel request
        InputChannelID inputChannelId = new InputChannelID();

        ch.writeAndFlush(new PartitionRequest(pid, 0, inputChannelId)).await();

        // Wait for the notification
        if (!sync.await(TestingUtils.TESTING_DURATION().toMillis(), TimeUnit.MILLISECONDS)) {
            fail("Timed out after waiting for " + TestingUtils.TESTING_DURATION().toMillis()
                    + " ms to be notified about cancelled partition.");
        }

        ch.writeAndFlush(new CancelPartitionRequest(inputChannelId)).await();

        ch.close();
        NettyTestUtil.awaitClose(ch);

        verify(view, times(1)).releaseAllResources();
        verify(view, times(0)).notifySubpartitionConsumed();
    } finally {
        shutdown(serverAndClient);
    }
}
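The core trick in this test is the thenAnswer stub: rather than returning a canned value, it pulls the BufferAvailabilityListener out of the invocation arguments and fires it immediately, so the mocked partition manager behaves as if data were always available. A minimal, self-contained sketch of that stubbing pattern, with hypothetical Source and Callback types standing in for the Flink interfaces:

    import static org.mockito.Mockito.*;

    import org.mockito.invocation.InvocationOnMock;
    import org.mockito.stubbing.Answer;

    public class AnswerStubSketch {

        interface Callback { void onDataAvailable(); }

        interface Source { void subscribe(Callback callback); }

        public static void main(String[] args) {
            Source source = mock(Source.class);

            // Instead of a fixed return value, inspect the actual arguments
            // of the call and invoke the captured callback right away.
            doAnswer(new Answer<Void>() {
                @Override
                public Void answer(InvocationOnMock invocation) {
                    Callback cb = (Callback) invocation.getArguments()[0];
                    cb.onDataAvailable(); // fire immediately, like notifyBuffersAvailable(...)
                    return null;
                }
            }).when(source).subscribe(any(Callback.class));

            // The callback runs synchronously inside the stubbed call.
            source.subscribe(() -> System.out.println("notified synchronously"));
        }
    }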
Use of org.apache.flink.runtime.io.network.netty.NettyTestUtil.NettyServerAndClient in project flink by apache.
From class ClientTransportErrorHandlingTest, method testExceptionOnWrite.
/**
 * Verifies that failed client requests via {@link PartitionRequestClient} are correctly
 * attributed to the respective {@link RemoteInputChannel}.
 */
@Test
public void testExceptionOnWrite() throws Exception {
    NettyProtocol protocol = new NettyProtocol() {
        @Override
        public ChannelHandler[] getServerChannelHandlers() {
            return new ChannelHandler[0];
        }

        @Override
        public ChannelHandler[] getClientChannelHandlers() {
            return new PartitionRequestProtocol(
                    mock(ResultPartitionProvider.class),
                    mock(TaskEventDispatcher.class),
                    mock(NetworkBufferPool.class)).getClientChannelHandlers();
        }
    };

    // We need a real server and client in this test, because Netty's EmbeddedChannel
    // does not fail the ChannelPromise of failed writes.
    NettyServerAndClient serverAndClient = initServerAndClient(protocol, createConfig());

    Channel ch = connect(serverAndClient);

    PartitionRequestClientHandler handler = getClientHandler(ch);

    // Last outbound handler throws an Exception after the first write
    ch.pipeline().addFirst(new ChannelOutboundHandlerAdapter() {
        int writeNum = 0;

        @Override
        public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
            if (writeNum >= 1) {
                throw new RuntimeException("Expected test exception.");
            }

            writeNum++;
            ctx.write(msg, promise);
        }
    });

    PartitionRequestClient requestClient = new PartitionRequestClient(
            ch, handler, mock(ConnectionID.class), mock(PartitionRequestClientFactory.class));

    // Create input channels
    RemoteInputChannel[] rich = new RemoteInputChannel[] { createRemoteInputChannel(), createRemoteInputChannel() };

    final CountDownLatch sync = new CountDownLatch(1);

    // Do this with explicit synchronization. Otherwise this is not robust against slow timings
    // of the callback (e.g. we cannot just verify that it was called once, because there is
    // a chance that we do this too early).
    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            sync.countDown();
            return null;
        }
    }).when(rich[1]).onError(isA(LocalTransportException.class));

    // The first request is successful
    ChannelFuture f = requestClient.requestSubpartition(new ResultPartitionID(), 0, rich[0], 0);
    assertTrue(f.await().isSuccess());

    // The second request is *not* successful
    f = requestClient.requestSubpartition(new ResultPartitionID(), 0, rich[1], 0);
    assertFalse(f.await().isSuccess());

    // Only the second channel should be notified about the error
    verify(rich[0], times(0)).onError(any(LocalTransportException.class));

    // Wait for the notification
    if (!sync.await(TestingUtils.TESTING_DURATION().toMillis(), TimeUnit.MILLISECONDS)) {
        fail("Timed out after waiting for " + TestingUtils.TESTING_DURATION().toMillis()
                + " ms to be notified about the channel error.");
    }

    shutdown(serverAndClient);
}
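The injected outbound handler above simply throws, relying on Netty to fail the write's ChannelPromise on its behalf. An equivalent, more explicit variant completes the promise exceptionally itself, which also makes the release of the dropped message visible. A sketch under that assumption (the handler name is illustrative, not Flink code):

    import io.netty.channel.ChannelHandlerContext;
    import io.netty.channel.ChannelOutboundHandlerAdapter;
    import io.netty.channel.ChannelPromise;
    import io.netty.util.ReferenceCountUtil;

    /**
     * Fails every write after the first one by completing the promise
     * exceptionally instead of throwing from write(...). Callers observing
     * the returned ChannelFuture see isSuccess() == false either way.
     */
    public class FailingWriteHandler extends ChannelOutboundHandlerAdapter {

        private int writeNum = 0;

        @Override
        public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
            if (writeNum >= 1) {
                ReferenceCountUtil.release(msg); // don't leak the buffer we drop
                promise.setFailure(new RuntimeException("Expected test exception."));
                return;
            }

            writeNum++;
            ctx.write(msg, promise);
        }
    }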
Use of org.apache.flink.runtime.io.network.netty.NettyTestUtil.NettyServerAndClient in project flink by apache.
From class NettyServerLowAndHighWatermarkTest, method testLowAndHighWatermarks.
/**
 * Verifies that the high and low watermarks are set in relation to the page size.
 *
 * <p>The high and low watermarks control the data flow to the wire. If the size of the Netty
 * write buffer is greater than or equal to the high watermark, the channel state becomes
 * not-writable. Only when the size falls below the low watermark again does the state change
 * back to writable.
 *
 * <p>The channel's writability state needs to be checked by the handler when writing to the
 * channel; it is not enforced, i.e. it is still possible to write to a channel that is in the
 * not-writable state.
 */
@Test
public void testLowAndHighWatermarks() throws Throwable {
    final AtomicReference<Throwable> error = new AtomicReference<Throwable>();

    final NettyProtocol protocol = new NettyProtocol() {
        @Override
        public ChannelHandler[] getServerChannelHandlers() {
            // The channel handler implements the test
            return new ChannelHandler[] { new TestLowAndHighWatermarkHandler(error) };
        }

        @Override
        public ChannelHandler[] getClientChannelHandlers() {
            return new ChannelHandler[0];
        }
    };

    final NettyConfig conf = createConfig(PageSize);

    final NettyServerAndClient serverAndClient = initServerAndClient(protocol, conf);

    try {
        // We can't just check the config of this channel, as it is the client's channel. We
        // need to check the server channel, because it is doing the data transfers.
        final Channel ch = connect(serverAndClient);

        // Wait for the channel to be closed
        awaitClose(ch);

        final Throwable t = error.get();
        if (t != null) {
            throw t;
        }
    } finally {
        shutdown(serverAndClient);
    }
}
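For reference, the watermarks this test exercises are ordinary Netty channel options, and a handler observes transitions via channelWritabilityChanged. A minimal sketch of both sides, with illustrative byte values in place of Flink's page-size-derived ones:

    import io.netty.bootstrap.ServerBootstrap;
    import io.netty.channel.ChannelHandlerContext;
    import io.netty.channel.ChannelInboundHandlerAdapter;
    import io.netty.channel.ChannelOption;

    public class WatermarkSketch {

        // A well-behaved handler reacts to writability transitions instead of
        // blindly queueing more data: Netty only reports the state, it does
        // not prevent writes to a non-writable channel.
        static class BackpressureAwareHandler extends ChannelInboundHandlerAdapter {
            @Override
            public void channelWritabilityChanged(ChannelHandlerContext ctx) {
                if (ctx.channel().isWritable()) {
                    // buffer drained below the low watermark: resume producing
                } else {
                    // buffer reached the high watermark: pause producing
                }
                ctx.fireChannelWritabilityChanged();
            }
        }

        // Configure the write-buffer watermarks on accepted (child) channels.
        static ServerBootstrap configure(ServerBootstrap bootstrap) {
            return bootstrap
                    .childOption(ChannelOption.WRITE_BUFFER_LOW_WATER_MARK, 16 * 1024)
                    .childOption(ChannelOption.WRITE_BUFFER_HIGH_WATER_MARK, 64 * 1024);
        }
    }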
Use of org.apache.flink.runtime.io.network.netty.NettyTestUtil.NettyServerAndClient in project flink by apache.
From class CancelPartitionRequestTest, method testCancelPartitionRequest.
/**
 * Verifies that requests for non-existing (failed/cancelled) input channels are properly
 * cancelled. Data is sent to the receiver, but there is no input channel to consume it,
 * so the request should be cancelled.
 */
@Test
public void testCancelPartitionRequest() throws Exception {
    NettyServerAndClient serverAndClient = null;

    try {
        TestPooledBufferProvider outboundBuffers = new TestPooledBufferProvider(16);

        ResultPartitionManager partitions = mock(ResultPartitionManager.class);
        ResultPartitionID pid = new ResultPartitionID();

        CountDownLatch sync = new CountDownLatch(1);
        final ResultSubpartitionView view = spy(new InfiniteSubpartitionView(outboundBuffers, sync));

        // Return infinite subpartition
        when(partitions.createSubpartitionView(eq(pid), eq(0), any(BufferProvider.class), any(BufferAvailabilityListener.class)))
                .thenAnswer(new Answer<ResultSubpartitionView>() {
                    @Override
                    public ResultSubpartitionView answer(InvocationOnMock invocationOnMock) throws Throwable {
                        BufferAvailabilityListener listener = (BufferAvailabilityListener) invocationOnMock.getArguments()[3];
                        listener.notifyBuffersAvailable(Long.MAX_VALUE);
                        return view;
                    }
                });

        PartitionRequestProtocol protocol = new PartitionRequestProtocol(
                partitions, mock(TaskEventDispatcher.class), mock(NetworkBufferPool.class));

        serverAndClient = initServerAndClient(protocol);

        Channel ch = connect(serverAndClient);

        // Request for non-existing input channel => results in cancel request
        ch.writeAndFlush(new PartitionRequest(pid, 0, new InputChannelID())).await();

        // Wait for the notification
        if (!sync.await(TestingUtils.TESTING_DURATION().toMillis(), TimeUnit.MILLISECONDS)) {
            fail("Timed out after waiting for " + TestingUtils.TESTING_DURATION().toMillis()
                    + " ms to be notified about cancelled partition.");
        }

        verify(view, times(1)).releaseAllResources();
        verify(view, times(0)).notifySubpartitionConsumed();
    } finally {
        shutdown(serverAndClient);
    }
}
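Note that the view is wrapped with spy(...) rather than replaced by mock(...): the real InfiniteSubpartitionView keeps serving buffers while Mockito records the interactions for the verify(...) assertions at the end. A minimal, self-contained sketch of that difference, using a plain java.util.List instead of the Flink classes:

    import static org.mockito.Mockito.*;

    import java.util.ArrayList;
    import java.util.List;

    public class SpySketch {
        public static void main(String[] args) {
            // A spy wraps a real object: calls run the real logic AND are recorded.
            List<String> spied = spy(new ArrayList<String>());
            spied.add("a");
            System.out.println(spied.size()); // 1 -- the real add() executed

            verify(spied, times(1)).add("a"); // the interaction was recorded
            verify(spied, never()).clear();   // absent interactions can be asserted too

            // A plain mock has no real behavior: every method is a no-op stub.
            @SuppressWarnings("unchecked")
            List<String> mocked = mock(List.class);
            mocked.add("a");
            System.out.println(mocked.size()); // 0 -- nothing actually happened
        }
    }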
Use of org.apache.flink.runtime.io.network.netty.NettyTestUtil.NettyServerAndClient in project flink by apache.
From class ClientTransportErrorHandlingTest, method testExceptionOnRemoteClose.
/**
 * Verifies that unexpected remote closes are reported as an instance of
 * {@link RemoteTransportException}.
 */
@Test
public void testExceptionOnRemoteClose() throws Exception {
    NettyProtocol protocol = new NettyProtocol() {
        @Override
        public ChannelHandler[] getServerChannelHandlers() {
            return new ChannelHandler[] {
                    // Close on read
                    new ChannelInboundHandlerAdapter() {
                        @Override
                        public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                            ctx.channel().close();
                        }
                    }
            };
        }

        @Override
        public ChannelHandler[] getClientChannelHandlers() {
            return new PartitionRequestProtocol(
                    mock(ResultPartitionProvider.class),
                    mock(TaskEventDispatcher.class),
                    mock(NetworkBufferPool.class)).getClientChannelHandlers();
        }
    };

    NettyServerAndClient serverAndClient = initServerAndClient(protocol, createConfig());

    Channel ch = connect(serverAndClient);

    PartitionRequestClientHandler handler = getClientHandler(ch);

    // Create input channels
    RemoteInputChannel[] rich = new RemoteInputChannel[] { createRemoteInputChannel(), createRemoteInputChannel() };

    final CountDownLatch sync = new CountDownLatch(rich.length);

    Answer<Void> countDownLatch = new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            sync.countDown();
            return null;
        }
    };

    for (RemoteInputChannel r : rich) {
        doAnswer(countDownLatch).when(r).onError(any(Throwable.class));
        handler.addInputChannel(r);
    }

    // Write something to trigger close by server
    ch.writeAndFlush(Unpooled.buffer().writerIndex(16));

    // Wait for the notification
    if (!sync.await(TestingUtils.TESTING_DURATION().toMillis(), TimeUnit.MILLISECONDS)) {
        fail("Timed out after waiting for " + TestingUtils.TESTING_DURATION().toMillis()
                + " ms to be notified about remote connection close.");
    }

    // All the registered channels should be notified.
    for (RemoteInputChannel r : rich) {
        verify(r).onError(isA(RemoteTransportException.class));
    }

    shutdown(serverAndClient);
}
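The translation from an unexpected remote close into a per-channel error happens in the client handler's channelInactive callback. The following is only a sketch of that idea with a hypothetical listener interface, not the actual PartitionRequestClientHandler, which wraps the cause in a RemoteTransportException:

    import java.util.List;
    import java.util.concurrent.CopyOnWriteArrayList;

    import io.netty.channel.ChannelHandlerContext;
    import io.netty.channel.ChannelInboundHandlerAdapter;

    public class RemoteCloseSketch extends ChannelInboundHandlerAdapter {

        /** Hypothetical stand-in for the registered input channels. */
        public interface ErrorListener {
            void onError(Throwable cause);
        }

        private final List<ErrorListener> listeners = new CopyOnWriteArrayList<>();

        public void addListener(ErrorListener listener) {
            listeners.add(listener);
        }

        @Override
        public void channelInactive(ChannelHandlerContext ctx) throws Exception {
            // The connection went away without us closing it: surface this as
            // an error to everyone still waiting for data on this channel.
            Exception cause = new IllegalStateException(
                    "Connection to " + ctx.channel().remoteAddress() + " closed unexpectedly");
            for (ErrorListener listener : listeners) {
                listener.onError(cause);
            }
            super.channelInactive(ctx);
        }
    }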