Use of org.apache.flink.shaded.netty4.io.netty.channel.SimpleChannelInboundHandler in project netty by netty.
The class DatagramUnicastTest, method testSimpleSend0.
@SuppressWarnings("deprecation")
private void testSimpleSend0(Bootstrap sb, Bootstrap cb, ByteBuf buf, boolean bindClient, final byte[] bytes, int count) throws Throwable {
final CountDownLatch latch = new CountDownLatch(count);
sb.handler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) throws Exception {
ch.pipeline().addLast(new SimpleChannelInboundHandler<DatagramPacket>() {
@Override
public void channelRead0(ChannelHandlerContext ctx, DatagramPacket msg) throws Exception {
ByteBuf buf = msg.content();
assertEquals(bytes.length, buf.readableBytes());
for (byte b : bytes) {
assertEquals(b, buf.readByte());
}
latch.countDown();
}
});
}
});
cb.handler(new SimpleChannelInboundHandler<Object>() {
@Override
public void channelRead0(ChannelHandlerContext ctx, Object msgs) throws Exception {
// Nothing will be sent.
}
});
Channel sc = null;
BindException bindFailureCause = null;
for (int i = 0; i < 3; i++) {
try {
sc = sb.bind().sync().channel();
break;
} catch (Exception e) {
if (e instanceof BindException) {
logger.warn("Failed to bind to a free port; trying again", e);
bindFailureCause = (BindException) e;
refreshLocalAddress(sb);
} else {
throw e;
}
}
}
if (sc == null) {
throw bindFailureCause;
}
Channel cc;
if (bindClient) {
cc = cb.bind().sync().channel();
} else {
cb.option(ChannelOption.DATAGRAM_CHANNEL_ACTIVE_ON_REGISTRATION, true);
cc = cb.register().sync().channel();
}
for (int i = 0; i < count; i++) {
cc.write(new DatagramPacket(buf.retain().duplicate(), addr));
}
// release as we used buf.retain() before
buf.release();
cc.flush();
assertTrue(latch.await(10, TimeUnit.SECONDS));
sc.close().sync();
cc.close().sync();
}
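The server-side handler above is the heart of the pattern, and it leans on one property of SimpleChannelInboundHandler worth calling out: the handler releases the inbound message after channelRead0 returns (autoRelease defaults to true), which is why the test can read msg.content() without an explicit release. Below is a minimal distilled sketch; it uses plain io.netty imports for readability (the shaded package behaves the same), and the class and field names are illustrative rather than taken from the test.

import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.channel.socket.DatagramPacket;
import java.util.concurrent.CountDownLatch;

// Illustrative handler: counts down a latch once for every datagram whose payload matches `expected`.
class ExpectedPayloadHandler extends SimpleChannelInboundHandler<DatagramPacket> {

    private final byte[] expected;
    private final CountDownLatch latch;

    ExpectedPayloadHandler(byte[] expected, CountDownLatch latch) {
        this.expected = expected;
        this.latch = latch;
    }

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, DatagramPacket msg) {
        ByteBuf content = msg.content();
        boolean matches = content.readableBytes() == expected.length;
        for (int i = 0; matches && i < expected.length; i++) {
            matches = content.getByte(content.readerIndex() + i) == expected[i];
        }
        if (matches) {
            latch.countDown();
        }
        // No msg.release() here: SimpleChannelInboundHandler releases the message
        // after channelRead0 returns because autoRelease defaults to true.
    }
}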
Use of org.apache.flink.shaded.netty4.io.netty.channel.SimpleChannelInboundHandler in project netty by netty.
The class NioSocketChannelTest, method testChannelReRegisterRead.
private static void testChannelReRegisterRead(final boolean sameEventLoop) throws Exception {
final EventLoopGroup group = new NioEventLoopGroup(2);
final CountDownLatch latch = new CountDownLatch(1);
// Just some random bytes
byte[] bytes = new byte[1024];
PlatformDependent.threadLocalRandom().nextBytes(bytes);
Channel sc = null;
Channel cc = null;
ServerBootstrap b = new ServerBootstrap();
try {
b.group(group).channel(NioServerSocketChannel.class).childOption(ChannelOption.SO_KEEPALIVE, true).childHandler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) throws Exception {
ChannelPipeline pipeline = ch.pipeline();
pipeline.addLast(new SimpleChannelInboundHandler<ByteBuf>() {
@Override
protected void channelRead0(ChannelHandlerContext ctx, ByteBuf byteBuf) {
// We were able to read something from the Channel after re-registering it.
latch.countDown();
}
@Override
public void channelActive(final ChannelHandlerContext ctx) throws Exception {
final EventLoop loop = group.next();
if (sameEventLoop) {
deregister(ctx, loop);
} else {
loop.execute(new Runnable() {
@Override
public void run() {
deregister(ctx, loop);
}
});
}
}
private void deregister(ChannelHandlerContext ctx, final EventLoop loop) {
// As soon as the channel becomes active re-register it to another
// EventLoop. After this is done we should still receive the data that
// was written to the channel.
ctx.deregister().addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture cf) {
Channel channel = cf.channel();
assertNotSame(loop, channel.eventLoop());
group.next().register(channel);
}
});
}
});
}
});
sc = b.bind(0).syncUninterruptibly().channel();
Bootstrap bootstrap = new Bootstrap();
bootstrap.group(group).channel(NioSocketChannel.class);
bootstrap.handler(new ChannelInboundHandlerAdapter());
cc = bootstrap.connect(sc.localAddress()).syncUninterruptibly().channel();
cc.writeAndFlush(Unpooled.wrappedBuffer(bytes)).syncUninterruptibly();
latch.await();
} finally {
if (cc != null) {
cc.close();
}
if (sc != null) {
sc.close();
}
group.shutdownGracefully();
}
}
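The essential move in this test is deregistering a live channel from its current EventLoop and registering it with another one, after which the same pipeline keeps receiving data. A minimal sketch of that step on its own (illustrative names; error handling reduced to closing the channel):

import io.netty.channel.Channel;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.EventLoopGroup;

// Illustrative helper: take a live channel off its current EventLoop and, once the
// deregistration completes, register it with a loop picked from targetGroup. The channel's
// pipeline is preserved, so data that arrives afterwards is still delivered to it.
final class EventLoopMover {

    static void moveToAnotherEventLoop(Channel channel, EventLoopGroup targetGroup) {
        channel.deregister().addListener((ChannelFutureListener) future -> {
            if (future.isSuccess()) {
                targetGroup.next().register(future.channel());
            } else {
                future.channel().close();
            }
        });
    }

    private EventLoopMover() {
    }
}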
Use of org.apache.flink.shaded.netty4.io.netty.channel.SimpleChannelInboundHandler in project riposte by Nike-Inc.
The class StreamingAsyncHttpClient, method prepChannelForDownstreamCall.
protected void prepChannelForDownstreamCall(ChannelPool pool, Channel ch, StreamingCallback callback, Deque<Span> distributedSpanStackToUse, Map<String, String> mdcContextToUse, boolean isSecureHttpsCall, boolean relaxedHttpsValidation, boolean performSubSpanAroundDownstreamCalls, long downstreamCallTimeoutMillis, ObjectHolder<Boolean> callActiveHolder, ObjectHolder<Boolean> lastChunkSentDownstreamHolder) throws SSLException, NoSuchAlgorithmException, KeyStoreException {
ChannelHandler chunkSenderHandler = new SimpleChannelInboundHandler<HttpObject>() {
@Override
protected void channelRead0(ChannelHandlerContext downstreamCallCtx, HttpObject msg) throws Exception {
try {
// Only do the normal message handling while the call is still active; messages that arrive after
// the call is fully processed should not trigger the behavior a second time.
if (callActiveHolder.heldObject) {
if (msg instanceof LastHttpContent) {
lastChunkSentDownstreamHolder.heldObject = true;
if (performSubSpanAroundDownstreamCalls) {
// Complete the subspan.
runnableWithTracingAndMdc(() -> {
if (distributedSpanStackToUse == null || distributedSpanStackToUse.size() < 2)
Tracer.getInstance().completeRequestSpan();
else
Tracer.getInstance().completeSubSpan();
}, distributedSpanStackToUse, mdcContextToUse).run();
}
}
HttpObject msgToPass = msg;
if (msg instanceof HttpResponse) {
// We can't pass the original HttpResponse back to the callback due to intricacies of how
// Netty handles determining the last chunk. If we do, and the callback ends up writing
// the message out to the client (which happens during proxy routing for example), then
// msg's headers might get modified - potentially causing this channel pipeline to
// never send a LastHttpContent, which will in turn cause an indefinite hang.
HttpResponse origHttpResponse = (HttpResponse) msg;
HttpResponse httpResponse = (msg instanceof FullHttpResponse) ? new DefaultFullHttpResponse(origHttpResponse.getProtocolVersion(), origHttpResponse.getStatus(), ((FullHttpResponse) msg).content()) : new DefaultHttpResponse(origHttpResponse.getProtocolVersion(), origHttpResponse.getStatus());
httpResponse.headers().add(origHttpResponse.headers());
msgToPass = httpResponse;
}
callback.messageReceived(msgToPass);
} else {
if (shouldLogBadMessagesAfterRequestFinishes) {
runnableWithTracingAndMdc(() -> logger.warn("Received HttpObject msg when call was not active: {}", String.valueOf(msg)), distributedSpanStackToUse, mdcContextToUse).run();
}
}
} finally {
if (msg instanceof LastHttpContent) {
releaseChannelBackToPoolIfCallIsActive(ch, pool, callActiveHolder, "last content chunk sent", distributedSpanStackToUse, mdcContextToUse);
}
}
}
};
Consumer<Throwable> doErrorHandlingConsumer = (cause) -> {
Pair<Deque<Span>, Map<String, String>> originalThreadInfo = null;
try {
// Set up tracing and MDC so our log messages have the correct distributed trace info, etc.
originalThreadInfo = linkTracingAndMdcToCurrentThread(distributedSpanStackToUse, mdcContextToUse);
// Only do the error handling while the call is still active; errors that arrive after the
// call is fully processed should not trigger the behavior a second time.
if (callActiveHolder.heldObject) {
if (performSubSpanAroundDownstreamCalls) {
if (distributedSpanStackToUse == null || distributedSpanStackToUse.size() < 2)
Tracer.getInstance().completeRequestSpan();
else
Tracer.getInstance().completeSubSpan();
}
Tracer.getInstance().unregisterFromThread();
if (cause instanceof Errors.NativeIoException) {
// NativeIoExceptions are often set up without stack traces, which is bad for debugging.
// Wrap it in a NativeIoExceptionWrapper that maps to a 503 since this is likely a busted
// connection and a second attempt should work.
cause = new NativeIoExceptionWrapper("Caught a NativeIoException in the downstream streaming call pipeline. Wrapped it in a " + "NativeIoExceptionWrapper so that it maps to a 503 and provides a usable stack trace " + "in the logs.", (Errors.NativeIoException) cause);
}
callback.unrecoverableErrorOccurred(cause, true);
} else {
if (cause instanceof DownstreamIdleChannelTimeoutException) {
logger.debug("A channel used for downstream calls will be closed because it was idle too long. " + "This is normal behavior and does not indicate a downstream call failure: {}", cause.toString());
} else {
logger.warn("Received exception in downstream call pipeline after the call was finished. " + "Not necessarily anything to worry about but in case it helps debugging the " + "exception was: {}", cause.toString());
}
}
} finally {
// Mark the channel as broken so it will be closed and removed from the pool when it is returned.
markChannelAsBroken(ch);
// Release it back to the pool if possible/necessary so the pool can do its usual cleanup.
releaseChannelBackToPoolIfCallIsActive(ch, pool, callActiveHolder, "error received in downstream pipeline: " + cause.toString(), distributedSpanStackToUse, mdcContextToUse);
// No matter what the cause is we want to make sure the channel is closed. Doing this raw ch.close()
// here will catch the cases where this channel does not have an active call but still needs to be
// closed (e.g. an idle channel timeout that happens in-between calls).
ch.close();
// Unhook the tracing and MDC stuff from this thread now that we're done.
unlinkTracingAndMdcFromCurrentThread(originalThreadInfo);
}
};
ChannelHandler errorHandler = new ChannelInboundHandlerAdapter() {
@Override
public void exceptionCaught(ChannelHandlerContext downstreamCallCtx, Throwable cause) throws Exception {
doErrorHandlingConsumer.accept(cause);
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
if (logger.isDebugEnabled()) {
runnableWithTracingAndMdc(() -> logger.debug("Downstream channel closing. call_active={}, last_chunk_sent_downstream={}, channel_id={}", callActiveHolder.heldObject, lastChunkSentDownstreamHolder.heldObject, ctx.channel().toString()), distributedSpanStackToUse, mdcContextToUse).run();
}
// We only care if the channel was closed while the call was active.
if (callActiveHolder.heldObject)
doErrorHandlingConsumer.accept(new DownstreamChannelClosedUnexpectedlyException(ch));
super.channelInactive(ctx);
}
};
// Set up the HTTP client pipeline.
ChannelPipeline p = ch.pipeline();
List<String> registeredHandlerNames = p.names();
// Remove any leftover idle channel timeout handler that a previous call added but
// couldn't be removed at that time because it wasn't running in the channel's eventLoop.
if (registeredHandlerNames.contains(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME)) {
ChannelHandler idleHandler = p.get(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME);
if (idleHandler != null)
p.remove(idleHandler);
}
if (debugChannelLifecycleLoggingEnabled && !registeredHandlerNames.contains(DEBUG_LOGGER_HANDLER_NAME)) {
// Add the channel debug logger if desired.
p.addFirst(DEBUG_LOGGER_HANDLER_NAME, new LoggingHandler(DOWNSTREAM_CLIENT_CHANNEL_DEBUG_LOGGER_NAME, LogLevel.DEBUG));
}
// Add/replace a downstream call timeout detector.
addOrReplacePipelineHandler(new DownstreamIdleChannelTimeoutHandler(downstreamCallTimeoutMillis, () -> callActiveHolder.heldObject, true, "StreamingAsyncHttpClientChannel-call-timeout", distributedSpanStackToUse, mdcContextToUse), DOWNSTREAM_CALL_TIMEOUT_HANDLER_NAME, p, registeredHandlerNames);
if (isSecureHttpsCall) {
// SSL call. Make sure we add the SSL handler if necessary.
if (!registeredHandlerNames.contains(SSL_HANDLER_NAME)) {
if (clientSslCtx == null) {
if (relaxedHttpsValidation) {
clientSslCtx = SslContextBuilder.forClient().trustManager(InsecureTrustManagerFactory.INSTANCE).build();
} else {
TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
tmf.init((KeyStore) null);
clientSslCtx = SslContextBuilder.forClient().trustManager(tmf).build();
}
}
p.addAfter(DOWNSTREAM_CALL_TIMEOUT_HANDLER_NAME, SSL_HANDLER_NAME, clientSslCtx.newHandler(ch.alloc()));
}
} else {
// Not an SSL call. Remove the SSL handler if it's there.
if (registeredHandlerNames.contains(SSL_HANDLER_NAME))
p.remove(SSL_HANDLER_NAME);
}
// The HttpClientCodec handler deals with HTTP codec concerns so you don't have to. Set it up if it hasn't
// already been set up, and inspect it to make sure it's in a "ready to handle a new request" state. Some rare
// and currently unknown edge cases can cause us to hit this point with the HttpClientCodec in an unclean
// state, and if we barrel forward without cleaning this up the call will fail.
boolean pipelineContainsHttpClientCodec = registeredHandlerNames.contains(HTTP_CLIENT_CODEC_HANDLER_NAME);
boolean existingHttpClientCodecIsInBadState = false;
if (pipelineContainsHttpClientCodec) {
HttpClientCodec currentCodec = (HttpClientCodec) p.get(HTTP_CLIENT_CODEC_HANDLER_NAME);
int currentHttpClientCodecInboundState = determineHttpClientCodecInboundState(currentCodec);
if (currentHttpClientCodecInboundState != 0) {
runnableWithTracingAndMdc(() -> logger.warn("HttpClientCodec inbound state was not 0. It will be replaced with a fresh HttpClientCodec. " + "bad_httpclientcodec_inbound_state={}", currentHttpClientCodecInboundState), distributedSpanStackToUse, mdcContextToUse).run();
existingHttpClientCodecIsInBadState = true;
} else {
int currentHttpClientCodecOutboundState = determineHttpClientCodecOutboundState(currentCodec);
if (currentHttpClientCodecOutboundState != 0) {
runnableWithTracingAndMdc(() -> logger.warn("HttpClientCodec outbound state was not 0. It will be replaced with a fresh HttpClientCodec. " + "bad_httpclientcodec_outbound_state={}", currentHttpClientCodecOutboundState), distributedSpanStackToUse, mdcContextToUse).run();
existingHttpClientCodecIsInBadState = true;
}
}
}
// Add the HttpClientCodec if the pipeline doesn't already contain one, or replace it if it was in a bad state.
if (!pipelineContainsHttpClientCodec || existingHttpClientCodecIsInBadState) {
addOrReplacePipelineHandler(new HttpClientCodec(4096, 8192, 8192, true), HTTP_CLIENT_CODEC_HANDLER_NAME, p, registeredHandlerNames);
}
// Update the chunk sender handler and error handler to the newly created versions that know about the correct
// callback, dtrace info, etc to use for this request.
addOrReplacePipelineHandler(chunkSenderHandler, CHUNK_SENDER_HANDLER_NAME, p, registeredHandlerNames);
addOrReplacePipelineHandler(errorHandler, ERROR_HANDLER_NAME, p, registeredHandlerNames);
}
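The addOrReplacePipelineHandler(...) helper used repeatedly above is not shown in this snippet. A plausible reconstruction, offered purely as an assumption about its shape (the real riposte implementation may differ, for example in where it positions new handlers), is:

import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelPipeline;
import java.util.List;

// Hypothetical add-or-replace helper: replace the handler when one with the same name is
// already registered, otherwise append it to the end of the pipeline.
final class PipelineHelpers {

    static void addOrReplaceHandler(ChannelHandler handler, String handlerName,
                                    ChannelPipeline pipeline, List<String> registeredHandlerNames) {
        if (registeredHandlerNames.contains(handlerName)) {
            pipeline.replace(handlerName, handlerName, handler);
        } else {
            pipeline.addLast(handlerName, handler);
        }
    }

    private PipelineHelpers() {
    }
}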
Use of org.apache.flink.shaded.netty4.io.netty.channel.SimpleChannelInboundHandler in project alibaba-mom by younfor.
The class Broker, method bind.
/**
* Netty server setup performed before the broker starts.
* @param port
* @throws Exception
*/
public void bind(int port) throws Exception {
// Start the consumption-progress scheduled task
// storeSubscribe(true, 5000);
// scanSendInfotMap();
EventLoopGroup bossGroup = new NioEventLoopGroup();
EventLoopGroup workerGroup = new NioEventLoopGroup(Runtime.getRuntime().availableProcessors() * 3);
try {
ServerBootstrap serverBootstrap = new ServerBootstrap();
serverBootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class).option(ChannelOption.SO_REUSEADDR, true).childOption(ChannelOption.TCP_NODELAY, true).option(ChannelOption.SO_BACKLOG, 1024 * 1024).childOption(ChannelOption.SO_KEEPALIVE, true).childHandler(new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel ch) throws Exception {
ch.pipeline().addLast(new RpcDecoder()).addLast(new RpcEncoder()).addLast(new SimpleChannelInboundHandler<Object>() {
@Override
protected void channelRead0(ChannelHandlerContext ctx, Object info) throws Exception {
if (InfoBodyConsumer.class.isInstance(info)) {
// Handle a consumer subscription request
processConsumer((InfoBodyConsumer) info, ctx);
} else if (ConsumeResult.class.isInstance(info)) {
// Received a ConsumeResult from a consumer
logger.debug("Received a ConsumeResult from a consumer");
ConsumerGroup.confirmConsumer((ConsumeResult) info, ctx);
// confirmConsumer((ConsumeResult) info, ctx);
} else if (MessageSend.class.isInstance(info)) {
// System.out.println("Received a message");
MessageManager.recieveMsg((MessageSend) info, ctx);
// MessageManager.recieveMsg((LinkedBlockingQueue)info,ctx);
}
}
@Override
public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
logger.error("broker 异常:");
// logger.error(cause.getMessage());
cause.printStackTrace();
ctx.close();
}
});
}
});
//
ChannelFuture future = serverBootstrap.bind(port).sync();
logger.debug("mom服务启动成功...... 绑定端口" + port);
// 等待服务端监听端口关闭
future.channel().closeFuture().sync();
} catch (InterruptedException e) {
logger.error("smom服务抛出异常 " + e.getMessage());
} finally {
// Graceful shutdown: release thread pool resources
bossGroup.shutdownGracefully();
workerGroup.shutdownGracefully();
logger.debug("mom服务优雅的释放了线程资源...");
}
}
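The RpcDecoder and RpcEncoder referenced in the pipeline are project-specific classes that are not shown here. As a rough, assumed stand-in for what such a codec pair typically looks like — a length-prefixed frame wrapping a serialized object — consider the following sketch (Java serialization is used only to keep the example self-contained; the real classes may use a different wire format):

import io.netty.buffer.ByteBuf;
import io.netty.channel.ChannelHandlerContext;
import io.netty.handler.codec.ByteToMessageDecoder;
import io.netty.handler.codec.MessageToByteEncoder;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.util.List;

// Assumed stand-in for RpcEncoder: writes a 4-byte length prefix followed by a Java-serialized object.
class SimpleRpcEncoder extends MessageToByteEncoder<Serializable> {
    @Override
    protected void encode(ChannelHandlerContext ctx, Serializable msg, ByteBuf out) throws Exception {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(bos)) {
            oos.writeObject(msg);
        }
        byte[] body = bos.toByteArray();
        out.writeInt(body.length);  // 4-byte length prefix
        out.writeBytes(body);
    }
}

// Assumed stand-in for RpcDecoder: waits for a complete frame, then deserializes the payload.
class SimpleRpcDecoder extends ByteToMessageDecoder {
    @Override
    protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
        if (in.readableBytes() < 4) {
            return;                    // length prefix not yet available
        }
        in.markReaderIndex();
        int length = in.readInt();
        if (in.readableBytes() < length) {
            in.resetReaderIndex();     // whole frame not yet available
            return;
        }
        byte[] body = new byte[length];
        in.readBytes(body);
        try (ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(body))) {
            out.add(ois.readObject());
        }
    }
}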
Use of org.apache.flink.shaded.netty4.io.netty.channel.SimpleChannelInboundHandler in project pinpoint by naver.
The class NettyIT, method writeTest.
@Test
public void writeTest() throws Exception {
final CountDownLatch awaitLatch = new CountDownLatch(1);
EventLoopGroup workerGroup = new NioEventLoopGroup(2);
Bootstrap bootstrap = client(workerGroup);
final ChannelFuture connect = bootstrap.connect(webServer.getHostname(), webServer.getListeningPort());
connect.addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) throws Exception {
if (future.isSuccess()) {
Channel channel = future.channel();
channel.pipeline().addLast(new SimpleChannelInboundHandler() {
@Override
protected void channelRead0(ChannelHandlerContext ctx, Object msg) {
awaitLatch.countDown();
}
});
HttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
future.channel().writeAndFlush(request);
}
}
});
boolean await = awaitLatch.await(3000, TimeUnit.MILLISECONDS);
Assert.assertTrue(await);
final Channel channel = connect.channel();
try {
PluginTestVerifier verifier = PluginTestVerifierHolder.getInstance();
verifier.printCache();
verifier.verifyTrace(event("NETTY", Bootstrap.class.getMethod("connect", SocketAddress.class), annotation("netty.address", webServer.getHostAndPort())));
verifier.verifyTrace(event("NETTY", "io.netty.channel.DefaultChannelPromise.addListener(io.netty.util.concurrent.GenericFutureListener)"));
verifier.verifyTrace(event("ASYNC", "Asynchronous Invocation"));
verifier.verifyTrace(event("NETTY_INTERNAL", "io.netty.util.concurrent.DefaultPromise.notifyListenersNow()"));
verifier.verifyTrace(event("NETTY_INTERNAL", "io.netty.util.concurrent.DefaultPromise.notifyListener0(io.netty.util.concurrent.Future, io.netty.util.concurrent.GenericFutureListener)"));
verifier.verifyTrace(event("NETTY", "io.netty.channel.DefaultChannelPipeline.writeAndFlush(java.lang.Object)"));
verifier.verifyTrace(event("NETTY_HTTP", "io.netty.handler.codec.http.HttpObjectEncoder.encode(io.netty.channel.ChannelHandlerContext, java.lang.Object, java.util.List)", annotation("http.url", "/")));
} finally {
channel.close().sync();
workerGroup.shutdown();
}
}
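The client(workerGroup) factory method is not included in this excerpt. A hypothetical reconstruction consistent with the rest of the test — the trace verification expects HttpObjectEncoder.encode(...) to fire, so the client pipeline must contain an HTTP codec — might look like the sketch below (an assumption, not the actual pinpoint helper):

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.handler.codec.http.HttpClientCodec;

// Hypothetical client(...) helper as it might appear inside the test class: an NIO client
// bootstrap whose pipeline carries an HTTP codec, which is what lets the later
// SimpleChannelInboundHandler see decoded responses and the verifier observe
// HttpObjectEncoder.encode(...).
private static Bootstrap client(EventLoopGroup workerGroup) {
    Bootstrap bootstrap = new Bootstrap();
    bootstrap.group(workerGroup)
             .channel(NioSocketChannel.class)
             .handler(new ChannelInitializer<SocketChannel>() {
                 @Override
                 protected void initChannel(SocketChannel ch) {
                     ch.pipeline().addLast(new HttpClientCodec());
                 }
             });
    return bootstrap;
}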