Use of io.netty.channel.ChannelInboundHandlerAdapter in project netty by netty.
The class LocalChannelTest, method testPeerWriteInWritePromiseCompleteSameEventLoopPreservesOrder.
@Test
public void testPeerWriteInWritePromiseCompleteSameEventLoopPreservesOrder() throws InterruptedException {
    Bootstrap cb = new Bootstrap();
    ServerBootstrap sb = new ServerBootstrap();
    final CountDownLatch messageLatch = new CountDownLatch(2);
    final ByteBuf data = Unpooled.wrappedBuffer(new byte[1024]);
    final ByteBuf data2 = Unpooled.wrappedBuffer(new byte[512]);
    final CountDownLatch serverChannelLatch = new CountDownLatch(1);
    final AtomicReference<Channel> serverChannelRef = new AtomicReference<Channel>();
    try {
        cb.group(sharedGroup).channel(LocalChannel.class).handler(new ChannelInboundHandlerAdapter() {
            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                if (data2.equals(msg) && messageLatch.getCount() == 1) {
                    ReferenceCountUtil.safeRelease(msg);
                    messageLatch.countDown();
                } else {
                    super.channelRead(ctx, msg);
                }
            }
        });
        sb.group(sharedGroup).channel(LocalServerChannel.class).childHandler(new ChannelInitializer<LocalChannel>() {
            @Override
            public void initChannel(LocalChannel ch) throws Exception {
                ch.pipeline().addLast(new ChannelInboundHandlerAdapter() {
                    @Override
                    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                        if (data.equals(msg) && messageLatch.getCount() == 2) {
                            ReferenceCountUtil.safeRelease(msg);
                            messageLatch.countDown();
                        } else {
                            super.channelRead(ctx, msg);
                        }
                    }
                });
                serverChannelRef.set(ch);
                serverChannelLatch.countDown();
            }
        });
        Channel sc = null;
        Channel cc = null;
        try {
            // Start server
            sc = sb.bind(TEST_ADDRESS).syncUninterruptibly().channel();
            // Connect to the server
            cc = cb.connect(sc.localAddress()).syncUninterruptibly().channel();
            assertTrue(serverChannelLatch.await(5, SECONDS));
            final Channel ccCpy = cc;
            // Make sure a write operation is executed in the eventloop
            cc.pipeline().lastContext().executor().execute(new Runnable() {
                @Override
                public void run() {
                    ChannelPromise promise = ccCpy.newPromise();
                    promise.addListener(new ChannelFutureListener() {
                        @Override
                        public void operationComplete(ChannelFuture future) throws Exception {
                            Channel serverChannelCpy = serverChannelRef.get();
                            serverChannelCpy.writeAndFlush(data2.retainedDuplicate(), serverChannelCpy.newPromise());
                        }
                    });
                    ccCpy.writeAndFlush(data.retainedDuplicate(), promise);
                }
            });
            assertTrue(messageLatch.await(5, SECONDS));
        } finally {
            closeChannel(cc);
            closeChannel(sc);
        }
    } finally {
        data.release();
        data2.release();
    }
}
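The closeChannel(...) calls in the finally block refer to a private helper of LocalChannelTest that is not part of this excerpt. A minimal sketch of what such a helper plausibly looks like, assuming it only needs to tolerate null and block until the close completes:

    // Hypothetical reconstruction of the closeChannel helper used above.
    private static void closeChannel(Channel cc) {
        if (cc != null) {
            // Block until the channel is fully closed.
            cc.close().syncUninterruptibly();
        }
    }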
Use of io.netty.channel.ChannelInboundHandlerAdapter in project netty by netty.
The class SimpleChannelPoolTest, method testBoundedChannelPoolSegment.
@Test
public void testBoundedChannelPoolSegment() throws Exception {
    EventLoopGroup group = new LocalEventLoopGroup();
    LocalAddress addr = new LocalAddress(LOCAL_ADDR_ID);
    Bootstrap cb = new Bootstrap();
    cb.remoteAddress(addr);
    cb.group(group).channel(LocalChannel.class);
    ServerBootstrap sb = new ServerBootstrap();
    sb.group(group).channel(LocalServerChannel.class).childHandler(new ChannelInitializer<LocalChannel>() {
        @Override
        public void initChannel(LocalChannel ch) throws Exception {
            ch.pipeline().addLast(new ChannelInboundHandlerAdapter());
        }
    });
    // Start server
    Channel sc = sb.bind(addr).sync().channel();
    CountingChannelPoolHandler handler = new CountingChannelPoolHandler();
    ChannelPool pool = new SimpleChannelPool(cb, handler, ChannelHealthChecker.ACTIVE) {
        private final Queue<Channel> queue = new LinkedBlockingQueue<Channel>(1);

        @Override
        protected Channel pollChannel() {
            return queue.poll();
        }

        @Override
        protected boolean offerChannel(Channel ch) {
            return queue.offer(ch);
        }
    };
    Channel channel = pool.acquire().sync().getNow();
    Channel channel2 = pool.acquire().sync().getNow();
    pool.release(channel).syncUninterruptibly().getNow();
    try {
        pool.release(channel2).syncUninterruptibly();
        fail();
    } catch (IllegalStateException e) {
        // expected
    }
    channel2.close().sync();
    assertEquals(2, handler.channelCount());
    assertEquals(0, handler.acquiredCount());
    assertEquals(1, handler.releasedCount());
    sc.close().sync();
    channel.close().sync();
    channel2.close().sync();
    group.shutdownGracefully();
}
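CountingChannelPoolHandler is a netty test utility that is not shown in this excerpt. A plausible sketch, assuming it does nothing more than count the three callbacks of the real io.netty.channel.pool.ChannelPoolHandler contract, with accessor names matching the assertions above:

    // Hypothetical counting handler; only the ChannelPoolHandler interface is given.
    final class CountingChannelPoolHandler implements ChannelPoolHandler {
        private final AtomicInteger created = new AtomicInteger();
        private final AtomicInteger acquired = new AtomicInteger();
        private final AtomicInteger released = new AtomicInteger();

        @Override
        public void channelCreated(Channel ch) {
            created.incrementAndGet();
        }

        @Override
        public void channelAcquired(Channel ch) {
            acquired.incrementAndGet();
        }

        @Override
        public void channelReleased(Channel ch) {
            released.incrementAndGet();
        }

        int channelCount()  { return created.get(); }
        int acquiredCount() { return acquired.get(); }
        int releasedCount() { return released.get(); }
    }

This also explains the assertions: both acquire() calls create fresh channels (so channelCount is 2 and acquiredCount stays 0, since channelAcquired only fires for channels reused from the pool), and only the first release fits the bounded queue of size 1, so the second release fails with IllegalStateException and releasedCount stays 1.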
Use of io.netty.channel.ChannelInboundHandlerAdapter in project flink by apache.
The class ClientTransportErrorHandlingTest, method testExceptionOnRemoteClose.
/**
 * Verifies that unexpected remote closes are reported as an instance of
 * {@link RemoteTransportException}.
 */
@Test
public void testExceptionOnRemoteClose() throws Exception {
    NettyProtocol protocol = new NettyProtocol() {

        @Override
        public ChannelHandler[] getServerChannelHandlers() {
            return new ChannelHandler[] { // Close on read
                new ChannelInboundHandlerAdapter() {
                    @Override
                    public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                        ctx.channel().close();
                    }
                }
            };
        }

        @Override
        public ChannelHandler[] getClientChannelHandlers() {
            return new PartitionRequestProtocol(
                    mock(ResultPartitionProvider.class),
                    mock(TaskEventDispatcher.class),
                    mock(NetworkBufferPool.class)).getClientChannelHandlers();
        }
    };
    NettyServerAndClient serverAndClient = initServerAndClient(protocol, createConfig());
    Channel ch = connect(serverAndClient);
    PartitionRequestClientHandler handler = getClientHandler(ch);
    // Create input channels
    RemoteInputChannel[] rich = new RemoteInputChannel[] { createRemoteInputChannel(), createRemoteInputChannel() };
    final CountDownLatch sync = new CountDownLatch(rich.length);
    Answer<Void> countDownLatch = new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            sync.countDown();
            return null;
        }
    };
    for (RemoteInputChannel r : rich) {
        doAnswer(countDownLatch).when(r).onError(any(Throwable.class));
        handler.addInputChannel(r);
    }
    // Write something to trigger close by server
    ch.writeAndFlush(Unpooled.buffer().writerIndex(16));
    // Wait for the notification
    if (!sync.await(TestingUtils.TESTING_DURATION().toMillis(), TimeUnit.MILLISECONDS)) {
        fail("Timed out after waiting for " + TestingUtils.TESTING_DURATION().toMillis()
                + " ms to be notified about remote connection close.");
    }
    // All the registered channels should be notified.
    for (RemoteInputChannel r : rich) {
        verify(r).onError(isA(RemoteTransportException.class));
    }
    shutdown(serverAndClient);
}
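createRemoteInputChannel() is a helper of this test class, not shown in the excerpt. Since the loop stubs its result with doAnswer(...) and later calls verify(...) on it, the helper must hand back Mockito mocks; a minimal sketch under that assumption:

    // Hypothetical helper: a plain Mockito mock is all the doAnswer/verify
    // interactions above require.
    private static RemoteInputChannel createRemoteInputChannel() {
        return mock(RemoteInputChannel.class);
    }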
Use of io.netty.channel.ChannelInboundHandlerAdapter in project flink by apache.
The class KvStateClientTest, method testConcurrentQueries.
/**
 * Multiple threads concurrently fire queries.
 */
@Test
public void testConcurrentQueries() throws Exception {
    Deadline deadline = TEST_TIMEOUT.fromNow();
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
    ExecutorService executor = null;
    KvStateClient client = null;
    Channel serverChannel = null;
    final byte[] serializedResult = new byte[1024];
    ThreadLocalRandom.current().nextBytes(serializedResult);
    try {
        int numQueryTasks = 4;
        final int numQueriesPerTask = 1024;
        executor = Executors.newFixedThreadPool(numQueryTasks);
        client = new KvStateClient(1, stats);
        serverChannel = createServerChannel(new ChannelInboundHandlerAdapter() {
            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                ByteBuf buf = (ByteBuf) msg;
                assertEquals(KvStateRequestType.REQUEST, KvStateRequestSerializer.deserializeHeader(buf));
                KvStateRequest request = KvStateRequestSerializer.deserializeKvStateRequest(buf);
                buf.release();
                ByteBuf response = KvStateRequestSerializer.serializeKvStateRequestResult(
                        ctx.alloc(), request.getRequestId(), serializedResult);
                ctx.channel().writeAndFlush(response);
            }
        });
        final KvStateServerAddress serverAddress = getKvStateServerAddress(serverChannel);
        final KvStateClient finalClient = client;
        Callable<List<Future<byte[]>>> queryTask = new Callable<List<Future<byte[]>>>() {
            @Override
            public List<Future<byte[]>> call() throws Exception {
                List<Future<byte[]>> results = new ArrayList<>(numQueriesPerTask);
                for (int i = 0; i < numQueriesPerTask; i++) {
                    results.add(finalClient.getKvState(serverAddress, new KvStateID(), new byte[0]));
                }
                return results;
            }
        };
        // Submit query tasks
        List<java.util.concurrent.Future<List<Future<byte[]>>>> futures = new ArrayList<>();
        for (int i = 0; i < numQueryTasks; i++) {
            futures.add(executor.submit(queryTask));
        }
        // Verify results
        for (java.util.concurrent.Future<List<Future<byte[]>>> future : futures) {
            List<Future<byte[]>> results = future.get(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
            for (Future<byte[]> result : results) {
                byte[] actual = Await.result(result, deadline.timeLeft());
                assertArrayEquals(serializedResult, actual);
            }
        }
        int totalQueries = numQueryTasks * numQueriesPerTask;
        // Counts can take some time to propagate
        while (deadline.hasTimeLeft() && stats.getNumSuccessful() != totalQueries) {
            Thread.sleep(100);
        }
        assertEquals(totalQueries, stats.getNumRequests());
        assertEquals(totalQueries, stats.getNumSuccessful());
    } finally {
        if (executor != null) {
            executor.shutdown();
        }
        if (serverChannel != null) {
            serverChannel.close();
        }
        if (client != null) {
            client.shutDown();
        }
        assertEquals("Channel leak", 0, stats.getNumConnections());
    }
}
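createServerChannel(...) is a KvStateClientTest helper that is not included above. A rough sketch, assuming it binds a NIO server on an ephemeral local port and prepends a LengthFieldBasedFrameDecoder before the supplied handler (the 4-byte length prefix is an assumption suggested by the KvStateRequestSerializer framing; the event loop group and port choice here are illustrative, not the test's actual values):

    // Hypothetical helper: bind a local server whose pipeline strips a 4-byte
    // length prefix and then delivers whole frames to the given handler.
    private Channel createServerChannel(final ChannelHandler handler) throws Exception {
        ServerBootstrap bootstrap = new ServerBootstrap()
                .group(new NioEventLoopGroup(1))
                .channel(NioServerSocketChannel.class)
                .childHandler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    protected void initChannel(SocketChannel ch) throws Exception {
                        ch.pipeline()
                                .addLast(new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4))
                                .addLast(handler);
                    }
                });
        return bootstrap.bind("localhost", 0).sync().channel();
    }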
Use of io.netty.channel.ChannelInboundHandlerAdapter in project flink by apache.
The class KvStateClientTest, method testSimpleRequests.
/**
 * Tests simple queries, of which half succeed and half fail.
 */
@Test
public void testSimpleRequests() throws Exception {
    Deadline deadline = TEST_TIMEOUT.fromNow();
    AtomicKvStateRequestStats stats = new AtomicKvStateRequestStats();
    KvStateClient client = null;
    Channel serverChannel = null;
    try {
        client = new KvStateClient(1, stats);
        // Random result
        final byte[] expected = new byte[1024];
        ThreadLocalRandom.current().nextBytes(expected);
        final LinkedBlockingQueue<ByteBuf> received = new LinkedBlockingQueue<>();
        final AtomicReference<Channel> channel = new AtomicReference<>();
        serverChannel = createServerChannel(new ChannelInboundHandlerAdapter() {
            @Override
            public void channelActive(ChannelHandlerContext ctx) throws Exception {
                channel.set(ctx.channel());
            }

            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                received.add((ByteBuf) msg);
            }
        });
        KvStateServerAddress serverAddress = getKvStateServerAddress(serverChannel);
        List<Future<byte[]>> futures = new ArrayList<>();
        int numQueries = 1024;
        for (int i = 0; i < numQueries; i++) {
            futures.add(client.getKvState(serverAddress, new KvStateID(), new byte[0]));
        }
        // Respond to messages
        Exception testException = new RuntimeException("Expected test Exception");
        for (int i = 0; i < numQueries; i++) {
            ByteBuf buf = received.poll(deadline.timeLeft().toMillis(), TimeUnit.MILLISECONDS);
            assertNotNull("Receive timed out", buf);
            Channel ch = channel.get();
            assertNotNull("Channel not active", ch);
            assertEquals(KvStateRequestType.REQUEST, KvStateRequestSerializer.deserializeHeader(buf));
            KvStateRequest request = KvStateRequestSerializer.deserializeKvStateRequest(buf);
            buf.release();
            if (i % 2 == 0) {
                ByteBuf response = KvStateRequestSerializer.serializeKvStateRequestResult(
                        serverChannel.alloc(), request.getRequestId(), expected);
                ch.writeAndFlush(response);
            } else {
                ByteBuf response = KvStateRequestSerializer.serializeKvStateRequestFailure(
                        serverChannel.alloc(), request.getRequestId(), testException);
                ch.writeAndFlush(response);
            }
        }
        for (int i = 0; i < numQueries; i++) {
            if (i % 2 == 0) {
                byte[] serializedResult = Await.result(futures.get(i), deadline.timeLeft());
                assertArrayEquals(expected, serializedResult);
            } else {
                try {
                    Await.result(futures.get(i), deadline.timeLeft());
                    fail("Did not throw expected Exception");
                } catch (RuntimeException ignored) {
                    // Expected
                }
            }
        }
        assertEquals(numQueries, stats.getNumRequests());
        int expectedRequests = numQueries / 2;
        // Counts can take some time to propagate
        while (deadline.hasTimeLeft()
                && (stats.getNumSuccessful() != expectedRequests || stats.getNumFailed() != expectedRequests)) {
            Thread.sleep(100);
        }
        assertEquals(expectedRequests, stats.getNumSuccessful());
        assertEquals(expectedRequests, stats.getNumFailed());
    } finally {
        if (client != null) {
            client.shutDown();
        }
        if (serverChannel != null) {
            serverChannel.close();
        }
        assertEquals("Channel leak", 0, stats.getNumConnections());
    }
}
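getKvStateServerAddress(...), used in both KvStateClientTest methods above, converts the bound server channel into the address type the client expects. A minimal sketch, assuming KvStateServerAddress is constructed from a host address and port:

    // Hypothetical helper: read back the ephemeral address the server bound to.
    private KvStateServerAddress getKvStateServerAddress(Channel serverChannel) {
        InetSocketAddress localAddress = (InetSocketAddress) serverChannel.localAddress();
        return new KvStateServerAddress(localAddress.getAddress(), localAddress.getPort());
    }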