Use of io.netty.channel.ChannelHandler in project netty by netty.
From class ApplicationProtocolNegotiationHandlerTest, method testBufferMessagesUntilHandshakeComplete:
private void testBufferMessagesUntilHandshakeComplete(final Consumer<ChannelHandlerContext> pipelineConfigurator) throws Exception {
    final AtomicReference<byte[]> channelReadData = new AtomicReference<byte[]>();
    final AtomicBoolean channelReadCompleteCalled = new AtomicBoolean(false);
    ChannelHandler alpnHandler = new ApplicationProtocolNegotiationHandler(ApplicationProtocolNames.HTTP_1_1) {
        @Override
        protected void configurePipeline(ChannelHandlerContext ctx, String protocol) {
            assertEquals(ApplicationProtocolNames.HTTP_1_1, protocol);
            ctx.pipeline().addLast(new ChannelInboundHandlerAdapter() {
                @Override
                public void channelRead(ChannelHandlerContext ctx, Object msg) {
                    channelReadData.set((byte[]) msg);
                }

                @Override
                public void channelReadComplete(ChannelHandlerContext ctx) {
                    channelReadCompleteCalled.set(true);
                }
            });
            if (pipelineConfigurator != null) {
                pipelineConfigurator.consume(ctx);
            }
        }
    };
    SSLEngine engine = SSLContext.getDefault().createSSLEngine();
    // This test is mocked/simulated and doesn't go through a full TLS handshake. Currently only the JDK
    // SSLEngineImpl in client mode will generate a close_notify.
    engine.setUseClientMode(true);
    final byte[] someBytes = new byte[1024];
    EmbeddedChannel channel = new EmbeddedChannel(new SslHandler(engine), new ChannelInboundHandlerAdapter() {
        @Override
        public void userEventTriggered(ChannelHandlerContext ctx, Object evt) {
            if (evt == SslHandshakeCompletionEvent.SUCCESS) {
                ctx.fireChannelRead(someBytes);
            }
            ctx.fireUserEventTriggered(evt);
        }
    }, alpnHandler);
    channel.pipeline().fireUserEventTriggered(SslHandshakeCompletionEvent.SUCCESS);
    assertNull(channel.pipeline().context(alpnHandler));
    assertArrayEquals(someBytes, channelReadData.get());
    assertTrue(channelReadCompleteCalled.get());
    assertNull(channel.readInbound());
    assertTrue(channel.finishAndReleaseAll());
}
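For context, a minimal sketch, not taken from the test above, of how an ApplicationProtocolNegotiationHandler is typically installed behind an SslHandler in a real pipeline; sslCtx is assumed to be an SslContext already configured for ALPN:

// Illustrative sketch only; sslCtx and the protocol branches are assumptions.
void configureAlpnPipeline(SocketChannel ch, SslContext sslCtx) {
    // TLS first, so the handshake (and ALPN) completes before protocol-specific handlers are chosen.
    ch.pipeline().addLast(sslCtx.newHandler(ch.alloc()));
    ch.pipeline().addLast(new ApplicationProtocolNegotiationHandler(ApplicationProtocolNames.HTTP_1_1) {
        @Override
        protected void configurePipeline(ChannelHandlerContext ctx, String protocol) {
            if (ApplicationProtocolNames.HTTP_2.equals(protocol)) {
                // add HTTP/2 handlers here
            } else {
                // fall back to HTTP/1.1 handlers here
            }
        }
    });
}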
Use of io.netty.channel.ChannelHandler in project netty by netty.
From class ApplicationProtocolNegotiationHandlerTest, method testHandshakeFailure:
@Test
public void testHandshakeFailure() {
    ChannelHandler alpnHandler = new ApplicationProtocolNegotiationHandler(ApplicationProtocolNames.HTTP_1_1) {
        @Override
        protected void configurePipeline(ChannelHandlerContext ctx, String protocol) {
            fail();
        }
    };
    EmbeddedChannel channel = new EmbeddedChannel(alpnHandler);
    SSLHandshakeException exception = new SSLHandshakeException("error");
    SslHandshakeCompletionEvent completionEvent = new SslHandshakeCompletionEvent(exception);
    channel.pipeline().fireUserEventTriggered(completionEvent);
    channel.pipeline().fireExceptionCaught(new DecoderException(exception));
    assertNull(channel.pipeline().context(alpnHandler));
    assertFalse(channel.finishAndReleaseAll());
}
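The failure path this test drives is handled by ApplicationProtocolNegotiationHandler#handshakeFailure, which by default logs the failure and closes the channel. A purely illustrative sketch of overriding it:

// Sketch only: customizing failure handling instead of the default log-and-close behavior.
ChannelHandler alpnHandler = new ApplicationProtocolNegotiationHandler(ApplicationProtocolNames.HTTP_1_1) {
    @Override
    protected void configurePipeline(ChannelHandlerContext ctx, String protocol) {
        // configure handlers for the negotiated protocol
    }

    @Override
    protected void handshakeFailure(ChannelHandlerContext ctx, Throwable cause) throws Exception {
        // e.g. record a metric or emit a custom event before the channel is closed
        super.handshakeFailure(ctx, cause);
    }
};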
Use of io.netty.channel.ChannelHandler in project netty by netty.
From class LocalTransportThreadModelTest3, method testConcurrentAddRemove:
private static void testConcurrentAddRemove(boolean inbound) throws Exception {
    EventLoopGroup l = new DefaultEventLoopGroup(4, new DefaultThreadFactory("l"));
    EventExecutorGroup e1 = new DefaultEventExecutorGroup(4, new DefaultThreadFactory("e1"));
    EventExecutorGroup e2 = new DefaultEventExecutorGroup(4, new DefaultThreadFactory("e2"));
    EventExecutorGroup e3 = new DefaultEventExecutorGroup(4, new DefaultThreadFactory("e3"));
    EventExecutorGroup e4 = new DefaultEventExecutorGroup(4, new DefaultThreadFactory("e4"));
    EventExecutorGroup e5 = new DefaultEventExecutorGroup(4, new DefaultThreadFactory("e5"));
    final EventExecutorGroup[] groups = { e1, e2, e3, e4, e5 };
    try {
        Deque<EventType> events = new ConcurrentLinkedDeque<EventType>();
        final EventForwarder h1 = new EventForwarder();
        final EventForwarder h2 = new EventForwarder();
        final EventForwarder h3 = new EventForwarder();
        final EventForwarder h4 = new EventForwarder();
        final EventForwarder h5 = new EventForwarder();
        final EventRecorder h6 = new EventRecorder(events, inbound);
        final Channel ch = new LocalChannel();
        if (!inbound) {
            ch.config().setAutoRead(false);
        }
        ch.pipeline().addLast(e1, h1).addLast(e1, h2).addLast(e1, h3).addLast(e1, h4).addLast(e1, h5)
                .addLast(e1, "recorder", h6);
        l.register(ch).sync().channel().connect(localAddr).sync();
        final LinkedList<EventType> expectedEvents = events(inbound, 8192);
        Throwable cause = new Throwable();
        Thread pipelineModifier = new Thread(new Runnable() {
            @Override
            public void run() {
                Random random = new Random();
                while (true) {
                    try {
                        Thread.sleep(100);
                    } catch (InterruptedException e) {
                        return;
                    }
                    if (!ch.isRegistered()) {
                        continue;
                    }
                    // EventForwardHandler forwardHandler = forwarders[random.nextInt(forwarders.length)];
                    ChannelHandler handler = ch.pipeline().removeFirst();
                    ch.pipeline().addBefore(groups[random.nextInt(groups.length)], "recorder",
                            UUID.randomUUID().toString(), handler);
                }
            }
        });
        pipelineModifier.setDaemon(true);
        pipelineModifier.start();
        for (EventType event : expectedEvents) {
            switch (event) {
                case EXCEPTION_CAUGHT:
                    ch.pipeline().fireExceptionCaught(cause);
                    break;
                case MESSAGE_RECEIVED:
                    ch.pipeline().fireChannelRead("");
                    break;
                case MESSAGE_RECEIVED_LAST:
                    ch.pipeline().fireChannelReadComplete();
                    break;
                case USER_EVENT:
                    ch.pipeline().fireUserEventTriggered("");
                    break;
                case WRITE:
                    ch.pipeline().write("");
                    break;
                case READ:
                    ch.pipeline().read();
                    break;
            }
        }
        ch.close().sync();
        while (events.peekLast() != EventType.UNREGISTERED) {
            Thread.sleep(10);
        }
        expectedEvents.addFirst(EventType.ACTIVE);
        expectedEvents.addFirst(EventType.REGISTERED);
        expectedEvents.addLast(EventType.INACTIVE);
        expectedEvents.addLast(EventType.UNREGISTERED);
        for (;;) {
            EventType event = events.poll();
            if (event == null) {
                assertTrue(expectedEvents.isEmpty(), "Missing events:" + expectedEvents);
                break;
            }
            assertEquals(event, expectedEvents.poll());
        }
    } finally {
        l.shutdownGracefully();
        e1.shutdownGracefully();
        e2.shutdownGracefully();
        e3.shutdownGracefully();
        e4.shutdownGracefully();
        e5.shutdownGracefully();
        l.terminationFuture().sync();
        e1.terminationFuture().sync();
        e2.terminationFuture().sync();
        e3.terminationFuture().sync();
        e4.terminationFuture().sync();
        e5.terminationFuture().sync();
    }
}
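For reference, a minimal sketch of the pipeline facility this test stresses: binding a handler to a separate EventExecutorGroup so its callbacks run off the channel's event loop. The channel variable, handler name, and group size here are illustrative, not taken from the test:

// Sketch only: offload a handler's callbacks to a dedicated executor group.
EventExecutorGroup offLoopGroup = new DefaultEventExecutorGroup(4);
channel.pipeline().addLast(offLoopGroup, "off-loop-handler", new ChannelInboundHandlerAdapter() {
    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) {
        // potentially slow work can run here without stalling the event loop
        ctx.fireChannelRead(msg);
    }
});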
Use of io.netty.channel.ChannelHandler in project grpc-java by grpc.
From class NettyClientTransport, method start:
@SuppressWarnings("unchecked")
@Override
public Runnable start(Listener transportListener) {
    lifecycleManager = new ClientTransportLifecycleManager(
            Preconditions.checkNotNull(transportListener, "listener"));
    EventLoop eventLoop = group.next();
    if (keepAliveTimeNanos != KEEPALIVE_TIME_NANOS_DISABLED) {
        keepAliveManager = new KeepAliveManager(new ClientKeepAlivePinger(this), eventLoop,
                keepAliveTimeNanos, keepAliveTimeoutNanos, keepAliveWithoutCalls);
    }
    handler = NettyClientHandler.newHandler(lifecycleManager, keepAliveManager, autoFlowControl,
            flowControlWindow, maxHeaderListSize, GrpcUtil.STOPWATCH_SUPPLIER, tooManyPingsRunnable,
            transportTracer, eagAttributes, authorityString, channelLogger);
    ChannelHandler negotiationHandler = negotiator.newHandler(handler);
    Bootstrap b = new Bootstrap();
    b.option(ALLOCATOR, Utils.getByteBufAllocator(false));
    b.group(eventLoop);
    b.channelFactory(channelFactory);
    // For a non-socket-based channel, the option will be ignored.
    b.option(SO_KEEPALIVE, true);
    // For a non-epoll-based channel, the option will be ignored.
    if (keepAliveTimeNanos != KEEPALIVE_TIME_NANOS_DISABLED) {
        ChannelOption<Integer> tcpUserTimeout = Utils.maybeGetTcpUserTimeoutOption();
        if (tcpUserTimeout != null) {
            b.option(tcpUserTimeout, (int) TimeUnit.NANOSECONDS.toMillis(keepAliveTimeoutNanos));
        }
    }
    for (Map.Entry<ChannelOption<?>, ?> entry : channelOptions.entrySet()) {
        // Every entry in the map is obtained from
        // NettyChannelBuilder#withOption(ChannelOption<T> option, T value)
        // so it is safe to pass the key-value pair to b.option().
        b.option((ChannelOption<Object>) entry.getKey(), entry.getValue());
    }
    ChannelHandler bufferingHandler = new WriteBufferingAndExceptionHandler(negotiationHandler);
    /**
     * We don't use a ChannelInitializer in the client bootstrap because its "initChannel" method
     * is executed in the event loop and we need this handler to be in the pipeline immediately so
     * that it may begin buffering writes.
     */
    b.handler(bufferingHandler);
    ChannelFuture regFuture = b.register();
    if (regFuture.isDone() && !regFuture.isSuccess()) {
        channel = null;
        // Initialization has failed badly. All new streams should be made to fail.
        Throwable t = regFuture.cause();
        if (t == null) {
            t = new IllegalStateException("Channel is null, but future doesn't have a cause");
        }
        statusExplainingWhyTheChannelIsNull = Utils.statusFromThrowable(t);
        // Use a Runnable since lifecycleManager calls transportListener
        return new Runnable() {
            @Override
            public void run() {
                // NOTICE: we are not calling lifecycleManager from the event loop. But there isn't really
                // an event loop in this case, so nothing should be accessing the lifecycleManager. We
                // could use GlobalEventExecutor (which is what regFuture would use for notifying
                // listeners in this case), but avoiding on-demand thread creation in an error case seems
                // a good idea and is probably clearer threading.
                lifecycleManager.notifyTerminated(statusExplainingWhyTheChannelIsNull);
            }
        };
    }
    channel = regFuture.channel();
    // Start the write queue as soon as the channel is constructed
    handler.startWriteQueue(channel);
    // This write will have no effect, yet it will only complete once the negotiationHandler
    // flushes any pending writes. We need it to be staged *before* the `connect` so that
    // the channel can't have been closed yet, removing all handlers. This write will sit in the
    // AbstractBufferingHandler's buffer, and will either be flushed on a successful connection,
    // or failed if the connection fails.
    channel.writeAndFlush(NettyClientHandler.NOOP_MESSAGE).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (!future.isSuccess()) {
                // Need to notify of this failure, because NettyClientHandler may not have been added to
                // the pipeline before the error occurred.
                lifecycleManager.notifyTerminated(Utils.statusFromThrowable(future.cause()));
            }
        }
    });
    // Start the connection operation to the server.
    SocketAddress localAddress = localSocketPicker.createSocketAddress(remoteAddress, eagAttributes);
    if (localAddress != null) {
        channel.connect(remoteAddress, localAddress);
    } else {
        channel.connect(remoteAddress);
    }
    if (keepAliveManager != null) {
        keepAliveManager.onTransportStarted();
    }
    return null;
}
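For comparison, a generic Netty sketch (not grpc-java's exact code; the channel and remoteAddress variables are assumed to exist) of reacting to a connect outcome with a ChannelFutureListener, the same pattern the NOOP write above relies on:

// Sketch only: observe the connect result and surface failures to the caller.
channel.connect(remoteAddress).addListener(new ChannelFutureListener() {
    @Override
    public void operationComplete(ChannelFuture future) {
        if (!future.isSuccess()) {
            // propagate future.cause(), e.g. by failing pending operations or logging it
        }
    }
});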
Use of io.netty.channel.ChannelHandler in project grpc-java by grpc.
From class AltsProtocolNegotiatorTest, method setup:
@Before
public void setup() throws Exception {
    ChannelHandler uncaughtExceptionHandler = new ChannelDuplexHandler() {
        @Override
        public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception {
            caughtException = cause;
            super.exceptionCaught(ctx, cause);
            ctx.close();
        }
    };
    TsiHandshakerFactory handshakerFactory =
            new DelegatingTsiHandshakerFactory(FakeTsiHandshaker.clientHandshakerFactory()) {
        @Override
        public TsiHandshaker newHandshaker(String authority, ChannelLogger logger) {
            return new DelegatingTsiHandshaker(super.newHandshaker(authority, logger)) {
                @Override
                public TsiPeer extractPeer() throws GeneralSecurityException {
                    return mockedTsiPeer;
                }

                @Override
                public Object extractPeerObject() throws GeneralSecurityException {
                    return mockedAltsContext;
                }
            };
        }
    };
    ManagedChannel fakeChannel = NettyChannelBuilder.forTarget("localhost:8080").build();
    ObjectPool<Channel> fakeChannelPool = new FixedObjectPool<Channel>(fakeChannel);
    LazyChannel lazyFakeChannel = new LazyChannel(fakeChannelPool);
    ChannelHandler altsServerHandler =
            new ServerAltsProtocolNegotiator(handshakerFactory, lazyFakeChannel).newHandler(grpcHandler);
    // On real server, WBAEH fires default ProtocolNegotiationEvent. KickNH provides this behavior.
    ChannelHandler handler = new KickNegotiationHandler(altsServerHandler);
    channel = new EmbeddedChannel(uncaughtExceptionHandler, handler);
}
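For reference, a minimal sketch of the EmbeddedChannel idiom this setup relies on; the handler and message here are placeholders, not the ALTS handlers above:

// Sketch only: EmbeddedChannel lets tests drive handlers without real I/O.
EmbeddedChannel testChannel = new EmbeddedChannel(new ChannelInboundHandlerAdapter());
testChannel.writeInbound("hello");             // exercise the inbound path
Object forwarded = testChannel.readInbound();  // inspect whatever reached the tail of the pipeline
testChannel.finishAndReleaseAll();             // release any remaining buffered messages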