Use of io.netty.handler.logging.LoggingHandler in project apn-proxy by apn-proxy.
From the class ApnProxyServerChannelInitializer, method initChannel:
@Override
public void initChannel(SocketChannel channel) throws Exception {
    ChannelPipeline pipeline = channel.pipeline();
    pipeline.addLast("idlestate", new IdleStateHandler(0, 0, 3, TimeUnit.MINUTES));
    pipeline.addLast("idlehandler", new ApnProxyIdleHandler());
    pipeline.addLast("datalog", new LoggingHandler("PRE_BYTE_LOGGER", LogLevel.DEBUG));
    if (ApnProxyConfig.getConfig().getListenType() == ApnProxyListenType.SSL) {
        SSLEngine engine = ApnProxySSLContextFactory.createServerSSLSSLEngine();
        pipeline.addLast("apnproxy.encrypt", new SslHandler(engine));
    } else if (ApnProxyConfig.getConfig().getListenType() == ApnProxyListenType.AES) {
        byte[] key = ApnProxyConfig.getConfig().getKey();
        byte[] iv = ApnProxyConfig.getConfig().getIv();
        pipeline.addLast("apnproxy.encrypt", new ApnProxyAESEncoder(key, iv));
        pipeline.addLast("apnproxy.decrypt", new ApnProxyAESDecoder(key, iv));
    }
    pipeline.addLast("log", new LoggingHandler("BYTE_LOGGER", LogLevel.INFO));
    pipeline.addLast("codec", new HttpServerCodec());
    pipeline.addLast(ApnProxyPreHandler.HANDLER_NAME, new ApnProxyPreHandler());
    pipeline.addLast(ApnProxySchemaHandler.HANDLER_NAME, new ApnProxySchemaHandler());
}
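The snippet above only shows how the pipeline is assembled for each accepted connection; it does not show how the initializer is attached to a server. Below is a minimal sketch of the usual Netty wiring, assuming ApnProxyServerChannelInitializer has a no-arg constructor - the port number and thread counts are illustrative and not taken from apn-proxy.

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;

public class ApnProxyServerSketch {
    public static void main(String[] args) throws InterruptedException {
        EventLoopGroup bossGroup = new NioEventLoopGroup(1);   // accepts incoming connections
        EventLoopGroup workerGroup = new NioEventLoopGroup();  // services accepted channels
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                // Each accepted SocketChannel gets the pipeline built by initChannel(...) above.
                .childHandler(new ApnProxyServerChannelInitializer());
            b.bind(9999).sync().channel().closeFuture().sync();  // 9999 is an illustrative port
        } finally {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }
    }
}

Every channel accepted by this bootstrap then runs initChannel, including the two LoggingHandler instances that log byte-level traffic at DEBUG and INFO.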
Use of io.netty.handler.logging.LoggingHandler in project apn-proxy by apn-proxy.
From the class TestHttpClientChannelInitializer, method initChannel:
@Override
public void initChannel(SocketChannel ch) throws Exception {
    // Create a default pipeline implementation.
    ChannelPipeline p = ch.pipeline();
    p.addLast("log", new LoggingHandler(LogLevel.INFO));
    // if (ssl) {
    //     SSLContext sslcontext = SSLContext.getInstance("TLS");
    //
    //     sslcontext.init(null, null, null);
    //
    //     SSLEngine engine = sslcontext.createSSLEngine();
    //     engine.setUseClientMode(true);
    //
    //     p.addLast("ssl", new SslHandler(engine));
    // }
    p.addLast("codec", new HttpClientCodec());
    // Remove the following line if you don't want automatic content decompression.
    p.addLast("inflater", new HttpContentDecompressor());
    p.addLast("handler", new TestHttpClientHandler());
}
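This client-side initializer is only half the picture; a minimal sketch of driving it with a Bootstrap follows. The host, port, and request are illustrative assumptions, and TestHttpClientChannelInitializer is assumed to have a no-arg constructor.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.handler.codec.http.DefaultFullHttpRequest;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpVersion;

public class TestHttpClientSketch {
    public static void main(String[] args) throws InterruptedException {
        EventLoopGroup group = new NioEventLoopGroup();
        try {
            Bootstrap b = new Bootstrap();
            b.group(group)
                .channel(NioSocketChannel.class)
                .handler(new TestHttpClientChannelInitializer());  // pipeline from the snippet above
            Channel ch = b.connect("127.0.0.1", 8080).sync().channel();  // illustrative host/port
            // HttpClientCodec in the pipeline encodes this request object to bytes on the wire.
            HttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
            request.headers().set("Host", "127.0.0.1");
            ch.writeAndFlush(request);
            ch.closeFuture().sync();  // assumes TestHttpClientHandler closes the channel when done
        } finally {
            group.shutdownGracefully();
        }
    }
}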
Use of io.netty.handler.logging.LoggingHandler in project riposte by Nike-Inc.
From the class StreamingAsyncHttpClient, method prepChannelForDownstreamCall:
protected void prepChannelForDownstreamCall(ChannelPool pool, Channel ch, StreamingCallback callback, Deque<Span> distributedSpanStackToUse, Map<String, String> mdcContextToUse, boolean isSecureHttpsCall, boolean relaxedHttpsValidation, boolean performSubSpanAroundDownstreamCalls, long downstreamCallTimeoutMillis, ObjectHolder<Boolean> callActiveHolder, ObjectHolder<Boolean> lastChunkSentDownstreamHolder) throws SSLException, NoSuchAlgorithmException, KeyStoreException {
ChannelHandler chunkSenderHandler = new SimpleChannelInboundHandler<HttpObject>() {
@Override
protected void channelRead0(ChannelHandlerContext downstreamCallCtx, HttpObject msg) throws Exception {
try {
// Only do the chunk-forwarding work if the call is still active - messages that arrive after
// the call is fully processed should not trigger the behavior a second time.
if (callActiveHolder.heldObject) {
if (msg instanceof LastHttpContent) {
lastChunkSentDownstreamHolder.heldObject = true;
if (performSubSpanAroundDownstreamCalls) {
// Complete the subspan.
runnableWithTracingAndMdc(() -> {
if (distributedSpanStackToUse == null || distributedSpanStackToUse.size() < 2)
Tracer.getInstance().completeRequestSpan();
else
Tracer.getInstance().completeSubSpan();
}, distributedSpanStackToUse, mdcContextToUse).run();
}
}
HttpObject msgToPass = msg;
if (msg instanceof HttpResponse) {
// We can't pass the original HttpResponse back to the callback due to intricacies of how
// Netty handles determining the last chunk. If we do, and the callback ends up writing
// the message out to the client (which happens during proxy routing for example), then
// msg's headers might get modified - potentially causing this channel pipeline to
// never send a LastHttpContent, which will in turn cause an indefinite hang.
HttpResponse origHttpResponse = (HttpResponse) msg;
HttpResponse httpResponse = (msg instanceof FullHttpResponse) ? new DefaultFullHttpResponse(origHttpResponse.getProtocolVersion(), origHttpResponse.getStatus(), ((FullHttpResponse) msg).content()) : new DefaultHttpResponse(origHttpResponse.getProtocolVersion(), origHttpResponse.getStatus());
httpResponse.headers().add(origHttpResponse.headers());
msgToPass = httpResponse;
}
callback.messageReceived(msgToPass);
} else {
if (shouldLogBadMessagesAfterRequestFinishes) {
runnableWithTracingAndMdc(() -> logger.warn("Received HttpObject msg when call was not active: {}", String.valueOf(msg)), distributedSpanStackToUse, mdcContextToUse).run();
}
}
} finally {
if (msg instanceof LastHttpContent) {
releaseChannelBackToPoolIfCallIsActive(ch, pool, callActiveHolder, "last content chunk sent", distributedSpanStackToUse, mdcContextToUse);
}
}
}
};
Consumer<Throwable> doErrorHandlingConsumer = (cause) -> {
Pair<Deque<Span>, Map<String, String>> originalThreadInfo = null;
try {
// Setup tracing and MDC so our log messages have the correct distributed trace info, etc.
originalThreadInfo = linkTracingAndMdcToCurrentThread(distributedSpanStackToUse, mdcContextToUse);
// Only do the error-handling work if the call is still active - errors that arrive after the
// call is fully processed should not trigger the behavior a second time.
if (callActiveHolder.heldObject) {
if (performSubSpanAroundDownstreamCalls) {
if (distributedSpanStackToUse == null || distributedSpanStackToUse.size() < 2)
Tracer.getInstance().completeRequestSpan();
else
Tracer.getInstance().completeSubSpan();
}
Tracer.getInstance().unregisterFromThread();
if (cause instanceof Errors.NativeIoException) {
// NativeIoExceptions are often set up to not have stack traces, which is bad for debugging.
// Wrap it in a NativeIoExceptionWrapper that maps to a 503 since this is likely a busted
// connection and a second attempt should work.
cause = new NativeIoExceptionWrapper("Caught a NativeIoException in the downstream streaming call pipeline. Wrapped it in a " + "NativeIoExceptionWrapper so that it maps to a 503 and provides a usable stack trace " + "in the logs.", (Errors.NativeIoException) cause);
}
callback.unrecoverableErrorOccurred(cause);
} else {
if (cause instanceof DownstreamIdleChannelTimeoutException) {
logger.debug("A channel used for downstream calls will be closed because it was idle too long. " + "This is normal behavior and does not indicate a downstream call failure: {}", cause.toString());
} else {
logger.warn("Received exception in downstream call pipeline after the call was finished. " + "Not necessarily anything to worry about but in case it helps debugging the " + "exception was: {}", cause.toString());
}
}
} finally {
// Mark the channel as broken so it will be closed and removed from the pool when it is returned.
markChannelAsBroken(ch);
// Release it back to the pool if possible/necessary so the pool can do its usual cleanup.
releaseChannelBackToPoolIfCallIsActive(ch, pool, callActiveHolder, "error received in downstream pipeline: " + cause.toString(), distributedSpanStackToUse, mdcContextToUse);
// No matter what the cause is we want to make sure the channel is closed. Doing this raw ch.close()
// here will catch the cases where this channel does not have an active call but still needs to be
// closed (e.g. an idle channel timeout that happens in-between calls).
ch.close();
// Unhook the tracing and MDC stuff from this thread now that we're done.
unlinkTracingAndMdcFromCurrentThread(originalThreadInfo);
}
};
ChannelHandler errorHandler = new ChannelInboundHandlerAdapter() {
@Override
public void exceptionCaught(ChannelHandlerContext downstreamCallCtx, Throwable cause) throws Exception {
doErrorHandlingConsumer.accept(cause);
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
if (logger.isDebugEnabled()) {
runnableWithTracingAndMdc(() -> logger.debug("Downstream channel closing. call_active={}, last_chunk_sent_downstream={}, channel_id={}", callActiveHolder.heldObject, lastChunkSentDownstreamHolder.heldObject, ctx.channel().toString()), distributedSpanStackToUse, mdcContextToUse).run();
}
// We only care if the channel was closed while the call was active.
if (callActiveHolder.heldObject)
doErrorHandlingConsumer.accept(new DownstreamChannelClosedUnexpectedlyException(ch));
super.channelInactive(ctx);
}
};
// Set up the HTTP client pipeline.
ChannelPipeline p = ch.pipeline();
List<String> registeredHandlerNames = p.names();
// Remove any idle channel timeout handler left over from a previous call - it
// couldn't be removed at that time because the cleanup wasn't running in the channel's eventLoop.
if (registeredHandlerNames.contains(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME)) {
ChannelHandler idleHandler = p.get(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME);
if (idleHandler != null)
p.remove(idleHandler);
}
if (debugChannelLifecycleLoggingEnabled && !registeredHandlerNames.contains(DEBUG_LOGGER_HANDLER_NAME)) {
// Add the channel debug logger if desired.
p.addFirst(DEBUG_LOGGER_HANDLER_NAME, new LoggingHandler(DOWNSTREAM_CLIENT_CHANNEL_DEBUG_LOGGER_NAME, LogLevel.DEBUG));
}
// Add/replace a downstream call timeout detector.
addOrReplacePipelineHandler(new DownstreamIdleChannelTimeoutHandler(downstreamCallTimeoutMillis, () -> callActiveHolder.heldObject, true, "StreamingAsyncHttpClientChannel-call-timeout", distributedSpanStackToUse, mdcContextToUse), DOWNSTREAM_CALL_TIMEOUT_HANDLER_NAME, p, registeredHandlerNames);
if (isSecureHttpsCall) {
// SSL call. Make sure we add the SSL handler if necessary.
if (!registeredHandlerNames.contains(SSL_HANDLER_NAME)) {
if (clientSslCtx == null) {
if (relaxedHttpsValidation) {
clientSslCtx = SslContextBuilder.forClient().trustManager(InsecureTrustManagerFactory.INSTANCE).build();
} else {
TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
tmf.init((KeyStore) null);
clientSslCtx = SslContextBuilder.forClient().trustManager(tmf).build();
}
}
p.addAfter(DOWNSTREAM_CALL_TIMEOUT_HANDLER_NAME, SSL_HANDLER_NAME, clientSslCtx.newHandler(ch.alloc()));
}
} else {
// Not an SSL call. Remove the SSL handler if it's there.
if (registeredHandlerNames.contains(SSL_HANDLER_NAME))
p.remove(SSL_HANDLER_NAME);
}
// The HttpClientCodec handler deals with HTTP codec stuff so you don't have to. Set it up if it hasn't already
been set up, and inspect it to make sure it's in a "ready to handle a new request" state. Some rare
and currently unknown edge cases can cause us to hit this point with the HttpClientCodec in an unclean
// state, and if we barrel forward without cleaning this up the call will fail.
boolean pipelineContainsHttpClientCodec = registeredHandlerNames.contains(HTTP_CLIENT_CODEC_HANDLER_NAME);
boolean existingHttpClientCodecIsInBadState = false;
if (pipelineContainsHttpClientCodec) {
HttpClientCodec currentCodec = (HttpClientCodec) p.get(HTTP_CLIENT_CODEC_HANDLER_NAME);
int currentHttpClientCodecInboundState = determineHttpClientCodecInboundState(currentCodec);
if (currentHttpClientCodecInboundState != 0) {
runnableWithTracingAndMdc(() -> logger.warn("HttpClientCodec inbound state was not 0. It will be replaced with a fresh HttpClientCodec. " + "bad_httpclientcodec_inbound_state={}", currentHttpClientCodecInboundState), distributedSpanStackToUse, mdcContextToUse).run();
existingHttpClientCodecIsInBadState = true;
} else {
int currentHttpClientCodecOutboundState = determineHttpClientCodecOutboundState(currentCodec);
if (currentHttpClientCodecOutboundState != 0) {
runnableWithTracingAndMdc(() -> logger.warn("HttpClientCodec outbound state was not 0. It will be replaced with a fresh HttpClientCodec. " + "bad_httpclientcodec_outbound_state={}", currentHttpClientCodecOutboundState), distributedSpanStackToUse, mdcContextToUse).run();
existingHttpClientCodecIsInBadState = true;
}
}
}
// Add the HttpClientCodec if the pipeline doesn't already contain one, or replace it if the existing one was in a bad state.
if (!pipelineContainsHttpClientCodec || existingHttpClientCodecIsInBadState) {
addOrReplacePipelineHandler(new HttpClientCodec(4096, 8192, 8192, true), HTTP_CLIENT_CODEC_HANDLER_NAME, p, registeredHandlerNames);
}
// Update the chunk sender handler and error handler to the newly created versions that know about the correct
// callback, dtrace info, etc to use for this request.
addOrReplacePipelineHandler(chunkSenderHandler, CHUNK_SENDER_HANDLER_NAME, p, registeredHandlerNames);
addOrReplacePipelineHandler(errorHandler, ERROR_HANDLER_NAME, p, registeredHandlerNames);
}
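The method above relies on an addOrReplacePipelineHandler(...) helper that isn't shown in this snippet. Inferring from the call sites, a hypothetical sketch of such a helper could look like the following (the actual riposte implementation may differ):

import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelPipeline;
import java.util.List;

class PipelineHelperSketch {
    // Hypothetical helper inferred from the call sites above: if the pipeline already contains a
    // handler registered under handlerName, swap it for the new instance; otherwise append it.
    static void addOrReplacePipelineHandler(ChannelHandler newHandler, String handlerName,
                                            ChannelPipeline p, List<String> registeredHandlerNames) {
        if (registeredHandlerNames.contains(handlerName)) {
            p.replace(handlerName, handlerName, newHandler);
        } else {
            p.addLast(handlerName, newHandler);
        }
    }
}

Keeping handler names stable this way means a pooled channel that is reused for a new downstream call gets its per-request handlers swapped out rather than duplicated.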
Use of io.netty.handler.logging.LoggingHandler in project riposte by Nike-Inc.
From the class Server, method startup:
public void startup() throws CertificateException, IOException, InterruptedException {
if (startedUp) {
throw new IllegalArgumentException("This Server instance has already started. " + "You can only call startup() once");
}
// Figure out what port to bind to.
int port = Integer.parseInt(System.getProperty("endpointsPort", serverConfig.isEndpointsUseSsl() ? String.valueOf(serverConfig.endpointsSslPort()) : String.valueOf(serverConfig.endpointsPort())));
// Configure SSL if desired.
final SslContext sslCtx;
if (serverConfig.isEndpointsUseSsl()) {
sslCtx = serverConfig.createSslContext();
} else {
sslCtx = null;
}
// Configure the server
EventLoopGroup bossGroup;
EventLoopGroup workerGroup;
Class<? extends ServerChannel> channelClass;
// Use the native epoll transport when it's available, otherwise fall back to the standard NIO event loop group.
if (Epoll.isAvailable()) {
logger.info("The epoll native transport is available. Using epoll instead of NIO. " + "riposte_server_using_native_epoll_transport=true");
bossGroup = (serverConfig.bossThreadFactory() == null) ? new EpollEventLoopGroup(serverConfig.numBossThreads()) : new EpollEventLoopGroup(serverConfig.numBossThreads(), serverConfig.bossThreadFactory());
workerGroup = (serverConfig.workerThreadFactory() == null) ? new EpollEventLoopGroup(serverConfig.numWorkerThreads()) : new EpollEventLoopGroup(serverConfig.numWorkerThreads(), serverConfig.workerThreadFactory());
channelClass = EpollServerSocketChannel.class;
} else {
logger.info("The epoll native transport is NOT available or you are not running on a compatible " + "OS/architecture. Using NIO. riposte_server_using_native_epoll_transport=false");
bossGroup = (serverConfig.bossThreadFactory() == null) ? new NioEventLoopGroup(serverConfig.numBossThreads()) : new NioEventLoopGroup(serverConfig.numBossThreads(), serverConfig.bossThreadFactory());
workerGroup = (serverConfig.workerThreadFactory() == null) ? new NioEventLoopGroup(serverConfig.numWorkerThreads()) : new NioEventLoopGroup(serverConfig.numWorkerThreads(), serverConfig.workerThreadFactory());
channelClass = NioServerSocketChannel.class;
}
eventLoopGroups.add(bossGroup);
eventLoopGroups.add(workerGroup);
// Figure out which channel initializer should set up the channel pipelines for new channels.
ChannelInitializer<SocketChannel> channelInitializer = serverConfig.customChannelInitializer();
if (channelInitializer == null) {
// No custom channel initializer, so use the default
channelInitializer = new HttpChannelInitializer(sslCtx, serverConfig.maxRequestSizeInBytes(), serverConfig.appEndpoints(), serverConfig.requestAndResponseFilters(), serverConfig.longRunningTaskExecutor(), serverConfig.riposteErrorHandler(), serverConfig.riposteUnhandledErrorHandler(), serverConfig.requestContentValidationService(), serverConfig.defaultRequestContentDeserializer(), new ResponseSender(serverConfig.defaultResponseContentSerializer(), serverConfig.errorResponseBodySerializer()), serverConfig.metricsListener(), serverConfig.defaultCompletableFutureTimeoutInMillisForNonblockingEndpoints(), serverConfig.accessLogger(), serverConfig.pipelineCreateHooks(), serverConfig.requestSecurityValidator(), serverConfig.workerChannelIdleTimeoutMillis(), serverConfig.proxyRouterConnectTimeoutMillis(), serverConfig.incompleteHttpCallTimeoutMillis(), serverConfig.maxOpenIncomingServerChannels(), serverConfig.isDebugChannelLifecycleLoggingEnabled(), serverConfig.userIdHeaderKeys());
}
// Create the server bootstrap
ServerBootstrap b = new ServerBootstrap();
b.group(bossGroup, workerGroup).channel(channelClass).childHandler(channelInitializer);
// execute pre startup hooks
if (serverConfig.preServerStartupHooks() != null) {
for (PreServerStartupHook hook : serverConfig.preServerStartupHooks()) {
hook.executePreServerStartupHook(b);
}
}
if (serverConfig.isDebugChannelLifecycleLoggingEnabled())
b.handler(new LoggingHandler(SERVER_BOSS_CHANNEL_DEBUG_LOGGER_NAME, LogLevel.DEBUG));
// Bind the server to the desired port and start it up so it is ready to receive requests
Channel ch = b.bind(port).sync().channel();
// execute post startup hooks
if (serverConfig.postServerStartupHooks() != null) {
for (PostServerStartupHook hook : serverConfig.postServerStartupHooks()) {
hook.executePostServerStartupHook(serverConfig, ch);
}
}
channels.add(ch);
logger.info("Server channel open and accepting " + (serverConfig.isEndpointsUseSsl() ? "https" : "http") + " requests on port " + port);
startedUp = true;
// Add a shutdown hook so we can gracefully stop the server when the JVM is going down
Runtime.getRuntime().addShutdownHook(new Thread() {
@Override
public void run() {
try {
shutdown();
} catch (Exception e) {
logger.warn("Error shutting down Riposte", e);
}
}
});
}
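The epoll-versus-NIO branch near the top of startup() follows the standard Netty transport-selection pattern. A stripped-down, self-contained sketch of the same idea is shown below; the thread counts are illustrative, not riposte's configuration values.

import io.netty.channel.EventLoopGroup;
import io.netty.channel.ServerChannel;
import io.netty.channel.epoll.Epoll;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.epoll.EpollServerSocketChannel;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;

class TransportSelectionSketch {
    EventLoopGroup bossGroup;
    EventLoopGroup workerGroup;
    Class<? extends ServerChannel> channelClass;

    void chooseTransport() {
        if (Epoll.isAvailable()) {
            // Native epoll transport (Linux only): lower latency and less GC pressure.
            bossGroup = new EpollEventLoopGroup(1);
            workerGroup = new EpollEventLoopGroup(0);  // 0 = Netty's default thread count (2 * cores)
            channelClass = EpollServerSocketChannel.class;
        } else {
            // Portable fallback that works on any JVM/OS.
            bossGroup = new NioEventLoopGroup(1);
            workerGroup = new NioEventLoopGroup(0);
            channelClass = NioServerSocketChannel.class;
        }
    }
}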
Use of io.netty.handler.logging.LoggingHandler in project asterixdb by apache.
From the class HttpServer, method doStart:
protected void doStart() throws InterruptedException {
    /*
     * This is a hacky way to ensure that IServlets with more specific paths are checked first.
     * For example:
     * "/path/to/resource/"
     * is checked before
     * "/path/to/"
     * which in turn is checked before
     * "/path/"
     * Note that it doesn't work for the case where multiple paths map to a single IServlet
     */
    Collections.sort(servlets, (l1, l2) -> l2.getPaths()[0].length() - l1.getPaths()[0].length());
    ServerBootstrap b = new ServerBootstrap();
    b.group(bossGroup, workerGroup)
        .channel(NioServerSocketChannel.class)
        .childOption(ChannelOption.WRITE_BUFFER_WATER_MARK, WRITE_BUFFER_WATER_MARK)
        .handler(new LoggingHandler(LogLevel.DEBUG))
        .childHandler(new HttpServerInitializer(this));
    channel = b.bind(port).sync().channel();
}
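The sort at the top of doStart() orders servlets so that longer (more specific) path prefixes are checked before shorter ones. A small self-contained sketch with a hypothetical stand-in for IServlet shows the effect of that comparator:

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class ServletSortSketch {
    // Minimal stand-in for IServlet: only the first registered path matters to the sort.
    static class FakeServlet {
        private final String[] paths;
        FakeServlet(String... paths) { this.paths = paths; }
        String[] getPaths() { return paths; }
    }

    public static void main(String[] args) {
        List<FakeServlet> servlets = new ArrayList<>();
        servlets.add(new FakeServlet("/path/"));
        servlets.add(new FakeServlet("/path/to/resource/"));
        servlets.add(new FakeServlet("/path/to/"));
        // Same comparator as doStart(): a longer first path sorts earlier in the list.
        Collections.sort(servlets, (l1, l2) -> l2.getPaths()[0].length() - l1.getPaths()[0].length());
        // Prints: /path/to/resource/, /path/to/, /path/
        servlets.forEach(s -> System.out.println(s.getPaths()[0]));
    }
}

As the block comment in doStart() notes, this heuristic only looks at each servlet's first registered path, so it doesn't help when multiple paths map to a single IServlet.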