use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelPipeline in project cxf by apache.
the class NettyHttpServletPipelineFactory method getDefaulHttpChannelPipeline.
protected ChannelPipeline getDefaulHttpChannelPipeline(Channel channel) throws Exception {
// Create a default pipeline implementation.
ChannelPipeline pipeline = channel.pipeline();
SslHandler sslHandler = configureServerSSLOnDemand();
if (sslHandler != null) {
LOG.log(Level.FINE, "Server SSL handler configured and added as an interceptor against the ChannelPipeline: {}", sslHandler);
pipeline.addLast("ssl", sslHandler);
}
pipeline.addLast("decoder", new HttpRequestDecoder());
pipeline.addLast("encoder", new HttpResponseEncoder());
pipeline.addLast("aggregator", new HttpObjectAggregator(maxChunkContentSize));
// Remove the following line if you don't want automatic content
// compression.
pipeline.addLast("deflater", new HttpContentCompressor());
// Set up the idle handler
pipeline.addLast("idle", new IdleStateHandler(nettyHttpServerEngine.getReadIdleTime(), nettyHttpServerEngine.getWriteIdleTime(), 0));
return pipeline;
}
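For comparison, the same handler ordering can be expressed as a standalone Netty ChannelInitializer. This is only a minimal sketch, not CXF code: the handler names, the 1 MiB aggregator limit, and the 60-second idle times are illustrative assumptions, and it uses the vanilla io.netty packages rather than the flink-shaded ones.
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelPipeline;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.codec.http.HttpContentCompressor;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.HttpRequestDecoder;
import io.netty.handler.codec.http.HttpResponseEncoder;
import io.netty.handler.timeout.IdleStateHandler;

public class HttpServerInitializer extends ChannelInitializer<SocketChannel> {
    @Override
    protected void initChannel(SocketChannel ch) {
        ChannelPipeline pipeline = ch.pipeline();
        // Decode inbound bytes into HttpRequest/HttpContent objects.
        pipeline.addLast("decoder", new HttpRequestDecoder());
        // Encode outbound HttpResponse objects back into bytes.
        pipeline.addLast("encoder", new HttpResponseEncoder());
        // Merge request chunks into a single FullHttpRequest (1 MiB cap, assumed).
        pipeline.addLast("aggregator", new HttpObjectAggregator(1024 * 1024));
        // Remove this line if automatic response compression is not wanted.
        pipeline.addLast("deflater", new HttpContentCompressor());
        // Fire IdleStateEvents after 60 seconds of read or write inactivity (assumed values).
        pipeline.addLast("idle", new IdleStateHandler(60, 60, 0));
    }
}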
use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelPipeline in project riposte by Nike-Inc.
the class StreamingAsyncHttpClient method prepChannelForDownstreamCall.
protected void prepChannelForDownstreamCall(ChannelPool pool, Channel ch, StreamingCallback callback, Deque<Span> distributedSpanStackToUse, Map<String, String> mdcContextToUse, boolean isSecureHttpsCall, boolean relaxedHttpsValidation, boolean performSubSpanAroundDownstreamCalls, long downstreamCallTimeoutMillis, ObjectHolder<Boolean> callActiveHolder, ObjectHolder<Boolean> lastChunkSentDownstreamHolder) throws SSLException, NoSuchAlgorithmException, KeyStoreException {
ChannelHandler chunkSenderHandler = new SimpleChannelInboundHandler<HttpObject>() {
@Override
protected void channelRead0(ChannelHandlerContext downstreamCallCtx, HttpObject msg) throws Exception {
try {
// Only do anything if the call is still active; messages received after the call is fully processed should not trigger the behavior a second time.
if (callActiveHolder.heldObject) {
if (msg instanceof LastHttpContent) {
lastChunkSentDownstreamHolder.heldObject = true;
if (performSubSpanAroundDownstreamCalls) {
// Complete the subspan.
runnableWithTracingAndMdc(() -> {
if (distributedSpanStackToUse == null || distributedSpanStackToUse.size() < 2)
Tracer.getInstance().completeRequestSpan();
else
Tracer.getInstance().completeSubSpan();
}, distributedSpanStackToUse, mdcContextToUse).run();
}
}
HttpObject msgToPass = msg;
if (msg instanceof HttpResponse) {
// We can't pass the original HttpResponse back to the callback due to intricacies of how
// Netty handles determining the last chunk. If we do, and the callback ends up writing
// the message out to the client (which happens during proxy routing for example), then
// msg's headers might get modified - potentially causing this channel pipeline to
// never send a LastHttpContent, which will in turn cause an indefinite hang.
HttpResponse origHttpResponse = (HttpResponse) msg;
HttpResponse httpResponse = (msg instanceof FullHttpResponse)
    ? new DefaultFullHttpResponse(origHttpResponse.getProtocolVersion(), origHttpResponse.getStatus(), ((FullHttpResponse) msg).content())
    : new DefaultHttpResponse(origHttpResponse.getProtocolVersion(), origHttpResponse.getStatus());
httpResponse.headers().add(origHttpResponse.headers());
msgToPass = httpResponse;
}
callback.messageReceived(msgToPass);
} else {
if (shouldLogBadMessagesAfterRequestFinishes) {
runnableWithTracingAndMdc(() -> logger.warn("Received HttpObject msg when call was not active: {}", String.valueOf(msg)), distributedSpanStackToUse, mdcContextToUse).run();
}
}
} finally {
if (msg instanceof LastHttpContent) {
releaseChannelBackToPoolIfCallIsActive(ch, pool, callActiveHolder, "last content chunk sent", distributedSpanStackToUse, mdcContextToUse);
}
}
}
};
Consumer<Throwable> doErrorHandlingConsumer = (cause) -> {
Pair<Deque<Span>, Map<String, String>> originalThreadInfo = null;
try {
// Setup tracing and MDC so our log messages have the correct distributed trace info, etc.
originalThreadInfo = linkTracingAndMdcToCurrentThread(distributedSpanStackToUse, mdcContextToUse);
// Only do error handling if the call is still active; errors received after the call is fully processed should not trigger the behavior a second time.
if (callActiveHolder.heldObject) {
if (performSubSpanAroundDownstreamCalls) {
if (distributedSpanStackToUse == null || distributedSpanStackToUse.size() < 2)
Tracer.getInstance().completeRequestSpan();
else
Tracer.getInstance().completeSubSpan();
}
Tracer.getInstance().unregisterFromThread();
if (cause instanceof Errors.NativeIoException) {
// NativeIoExceptions are often set up to not have stack traces, which is bad for debugging.
// Wrap it in a NativeIoExceptionWrapper that maps to a 503 since this is likely a busted
// connection and a second attempt should work.
cause = new NativeIoExceptionWrapper("Caught a NativeIoException in the downstream streaming call pipeline. Wrapped it in a " + "NativeIoExceptionWrapper so that it maps to a 503 and provides a usable stack trace " + "in the logs.", (Errors.NativeIoException) cause);
}
callback.unrecoverableErrorOccurred(cause, true);
} else {
if (cause instanceof DownstreamIdleChannelTimeoutException) {
logger.debug("A channel used for downstream calls will be closed because it was idle too long. " + "This is normal behavior and does not indicate a downstream call failure: {}", cause.toString());
} else {
logger.warn("Received exception in downstream call pipeline after the call was finished. " + "Not necessarily anything to worry about but in case it helps debugging the " + "exception was: {}", cause.toString());
}
}
} finally {
// Mark the channel as broken so it will be closed and removed from the pool when it is returned.
markChannelAsBroken(ch);
// Release it back to the pool if possible/necessary so the pool can do its usual cleanup.
releaseChannelBackToPoolIfCallIsActive(ch, pool, callActiveHolder, "error received in downstream pipeline: " + cause.toString(), distributedSpanStackToUse, mdcContextToUse);
// No matter what the cause is we want to make sure the channel is closed. Doing this raw ch.close()
// here will catch the cases where this channel does not have an active call but still needs to be
// closed (e.g. an idle channel timeout that happens in-between calls).
ch.close();
// Unhook the tracing and MDC stuff from this thread now that we're done.
unlinkTracingAndMdcFromCurrentThread(originalThreadInfo);
}
};
ChannelHandler errorHandler = new ChannelInboundHandlerAdapter() {
@Override
public void exceptionCaught(ChannelHandlerContext downstreamCallCtx, Throwable cause) throws Exception {
doErrorHandlingConsumer.accept(cause);
}
@Override
public void channelInactive(ChannelHandlerContext ctx) throws Exception {
if (logger.isDebugEnabled()) {
runnableWithTracingAndMdc(() -> logger.debug("Downstream channel closing. call_active={}, last_chunk_sent_downstream={}, channel_id={}", callActiveHolder.heldObject, lastChunkSentDownstreamHolder.heldObject, ctx.channel().toString()), distributedSpanStackToUse, mdcContextToUse).run();
}
// We only care if the channel was closed while the call was active.
if (callActiveHolder.heldObject)
doErrorHandlingConsumer.accept(new DownstreamChannelClosedUnexpectedlyException(ch));
super.channelInactive(ctx);
}
};
// Set up the HTTP client pipeline.
ChannelPipeline p = ch.pipeline();
List<String> registeredHandlerNames = p.names();
// Remove any idle channel timeout handler left over from a previous call; it couldn't be removed at that time because the caller wasn't in the channel's eventLoop.
if (registeredHandlerNames.contains(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME)) {
ChannelHandler idleHandler = p.get(DOWNSTREAM_IDLE_CHANNEL_TIMEOUT_HANDLER_NAME);
if (idleHandler != null)
p.remove(idleHandler);
}
if (debugChannelLifecycleLoggingEnabled && !registeredHandlerNames.contains(DEBUG_LOGGER_HANDLER_NAME)) {
// Add the channel debug logger if desired.
p.addFirst(DEBUG_LOGGER_HANDLER_NAME, new LoggingHandler(DOWNSTREAM_CLIENT_CHANNEL_DEBUG_LOGGER_NAME, LogLevel.DEBUG));
}
// Add/replace a downstream call timeout detector.
addOrReplacePipelineHandler(new DownstreamIdleChannelTimeoutHandler(downstreamCallTimeoutMillis, () -> callActiveHolder.heldObject, true, "StreamingAsyncHttpClientChannel-call-timeout", distributedSpanStackToUse, mdcContextToUse), DOWNSTREAM_CALL_TIMEOUT_HANDLER_NAME, p, registeredHandlerNames);
if (isSecureHttpsCall) {
// SSL call. Make sure we add the SSL handler if necessary.
if (!registeredHandlerNames.contains(SSL_HANDLER_NAME)) {
if (clientSslCtx == null) {
if (relaxedHttpsValidation) {
clientSslCtx = SslContextBuilder.forClient().trustManager(InsecureTrustManagerFactory.INSTANCE).build();
} else {
TrustManagerFactory tmf = TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm());
tmf.init((KeyStore) null);
clientSslCtx = SslContextBuilder.forClient().trustManager(tmf).build();
}
}
p.addAfter(DOWNSTREAM_CALL_TIMEOUT_HANDLER_NAME, SSL_HANDLER_NAME, clientSslCtx.newHandler(ch.alloc()));
}
} else {
// Not an SSL call. Remove the SSL handler if it's there.
if (registeredHandlerNames.contains(SSL_HANDLER_NAME))
p.remove(SSL_HANDLER_NAME);
}
// The HttpClientCodec handler deals with HTTP codec stuff so you don't have to. Set it up if it hasn't already
// been set up, and inspect it to make sure it's in a "ready to handle a new request" state. Some rare
// and currently unknown edge cases can cause us to hit this point with the HttpClientCodec in an unclean
// state, and if we barrel forward without cleaning this up the call will fail.
boolean pipelineContainsHttpClientCodec = registeredHandlerNames.contains(HTTP_CLIENT_CODEC_HANDLER_NAME);
boolean existingHttpClientCodecIsInBadState = false;
if (pipelineContainsHttpClientCodec) {
HttpClientCodec currentCodec = (HttpClientCodec) p.get(HTTP_CLIENT_CODEC_HANDLER_NAME);
int currentHttpClientCodecInboundState = determineHttpClientCodecInboundState(currentCodec);
if (currentHttpClientCodecInboundState != 0) {
runnableWithTracingAndMdc(() -> logger.warn("HttpClientCodec inbound state was not 0. It will be replaced with a fresh HttpClientCodec. " + "bad_httpclientcodec_inbound_state={}", currentHttpClientCodecInboundState), distributedSpanStackToUse, mdcContextToUse).run();
existingHttpClientCodecIsInBadState = true;
} else {
int currentHttpClientCodecOutboundState = determineHttpClientCodecOutboundState(currentCodec);
if (currentHttpClientCodecOutboundState != 0) {
runnableWithTracingAndMdc(() -> logger.warn("HttpClientCodec outbound state was not 0. It will be replaced with a fresh HttpClientCodec. " + "bad_httpclientcodec_outbound_state={}", currentHttpClientCodecOutboundState), distributedSpanStackToUse, mdcContextToUse).run();
existingHttpClientCodecIsInBadState = true;
}
}
}
// Add the HttpClientCodec if the pipeline doesn't already contain one, or replace it if it was in a bad state.
if (!pipelineContainsHttpClientCodec || existingHttpClientCodecIsInBadState) {
addOrReplacePipelineHandler(new HttpClientCodec(4096, 8192, 8192, true), HTTP_CLIENT_CODEC_HANDLER_NAME, p, registeredHandlerNames);
}
// Update the chunk sender handler and error handler to the newly created versions that know about the correct
// callback, dtrace info, etc to use for this request.
addOrReplacePipelineHandler(chunkSenderHandler, CHUNK_SENDER_HANDLER_NAME, p, registeredHandlerNames);
addOrReplacePipelineHandler(errorHandler, ERROR_HANDLER_NAME, p, registeredHandlerNames);
}
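The addOrReplacePipelineHandler helper called throughout this method is not shown on this page. A minimal sketch of what such an add-or-replace helper could look like, using only the Netty ChannelPipeline API (a hypothetical stand-in, not riposte's actual implementation):
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelPipeline;
import java.util.List;

// If a handler is already registered under the given name, swap it out in place;
// otherwise append the new handler to the end of the pipeline.
static void addOrReplacePipelineHandler(ChannelHandler handler, String handlerName, ChannelPipeline p, List<String> registeredHandlerNames) {
    if (registeredHandlerNames.contains(handlerName)) {
        p.replace(handlerName, handlerName, handler);
    } else {
        p.addLast(handlerName, handler);
    }
}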
use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelPipeline in project pravega by pravega.
the class ConnectionFactoryImpl method establishConnection.
@Override
public CompletableFuture<ClientConnection> establishConnection(PravegaNodeUri location, ReplyProcessor rp) {
Preconditions.checkNotNull(location);
Exceptions.checkNotClosed(closed.get(), this);
final SslContext sslCtx;
if (clientConfig.isEnableTls()) {
try {
SslContextBuilder sslCtxFactory = SslContextBuilder.forClient();
if (Strings.isNullOrEmpty(clientConfig.getTrustStore())) {
sslCtxFactory = sslCtxFactory.trustManager(FingerprintTrustManagerFactory.getInstance(FingerprintTrustManagerFactory.getDefaultAlgorithm()));
} else {
sslCtxFactory = SslContextBuilder.forClient().trustManager(new File(clientConfig.getTrustStore()));
}
sslCtx = sslCtxFactory.build();
} catch (SSLException | NoSuchAlgorithmException e) {
throw new RuntimeException(e);
}
} else {
sslCtx = null;
}
AppendBatchSizeTracker batchSizeTracker = new AppendBatchSizeTrackerImpl();
ClientConnectionInboundHandler handler = new ClientConnectionInboundHandler(location.getEndpoint(), rp, batchSizeTracker);
Bootstrap b = new Bootstrap();
b.group(group)
 .channel(nio ? NioSocketChannel.class : EpollSocketChannel.class)
 .option(ChannelOption.TCP_NODELAY, true)
 .handler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel ch) throws Exception {
ChannelPipeline p = ch.pipeline();
if (sslCtx != null) {
SslHandler sslHandler = sslCtx.newHandler(ch.alloc(), location.getEndpoint(), location.getPort());
if (clientConfig.isValidateHostName()) {
SSLEngine sslEngine = sslHandler.engine();
SSLParameters sslParameters = sslEngine.getSSLParameters();
sslParameters.setEndpointIdentificationAlgorithm("HTTPS");
sslEngine.setSSLParameters(sslParameters);
}
p.addLast(sslHandler);
}
// p.addLast(new LoggingHandler(LogLevel.INFO));
p.addLast(new ExceptionLoggingHandler(location.getEndpoint()), new CommandEncoder(batchSizeTracker), new LengthFieldBasedFrameDecoder(WireCommands.MAX_WIRECOMMAND_SIZE, 4, 4), new CommandDecoder(), handler);
}
});
// Start the client.
CompletableFuture<ClientConnection> connectionComplete = new CompletableFuture<>();
try {
b.connect(location.getEndpoint(), location.getPort()).addListener(new ChannelFutureListener() {
@Override
public void operationComplete(ChannelFuture future) {
if (future.isSuccess()) {
// Since the ChannelFuture is complete, future.channel() is not a blocking call.
Channel ch = future.channel();
log.debug("Connect operation completed for channel:{}, local address:{}, remote address:{}", ch.id(), ch.localAddress(), ch.remoteAddress());
// Once a channel is closed the channel group implementation removes it.
allChannels.add(ch);
connectionComplete.complete(handler);
} else {
connectionComplete.completeExceptionally(new ConnectionFailedException(future.cause()));
}
}
});
} catch (Exception e) {
connectionComplete.completeExceptionally(new ConnectionFailedException(e));
}
// check if channel is registered.
CompletableFuture<Void> channelRegisteredFuture = new CompletableFuture<>();
handler.completeWhenRegistered(channelRegisteredFuture);
return connectionComplete.thenCombine(channelRegisteredFuture, (clientConnection, v) -> clientConnection);
}
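The returned future only completes once both the connect attempt and the channel registration have finished: the ChannelFutureListener completes connectionComplete, and the inbound handler completes channelRegisteredFuture, with the thenCombine call at the end gating on both. A self-contained illustration of that gating pattern with plain CompletableFutures (names and values here are illustrative, not Pravega's):
import java.util.concurrent.CompletableFuture;

public class ThenCombineDemo {
    public static void main(String[] args) {
        CompletableFuture<String> connected = new CompletableFuture<>();   // completed by the connect listener
        CompletableFuture<Void> registered = new CompletableFuture<>();    // completed once the channel registers

        // Yields the connection only after BOTH futures complete; a failure in either one
        // propagates to the combined future.
        CompletableFuture<String> ready = connected.thenCombine(registered, (conn, v) -> conn);

        connected.complete("client-connection");
        registered.complete(null);
        System.out.println(ready.join());   // prints "client-connection"
    }
}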
use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelPipeline in project pravega by pravega.
the class ConnectionFactoryImplTest method setUp.
@Before
public void setUp() throws Exception {
// Configure SSL.
port = TestUtils.getAvailableListenPort();
final SslContext sslCtx;
if (ssl) {
try {
sslCtx = SslContextBuilder.forServer(new File("../config/cert.pem"), new File("../config/key.pem")).build();
} catch (SSLException e) {
throw new RuntimeException(e);
}
} else {
sslCtx = null;
}
boolean nio = false;
EventLoopGroup bossGroup;
EventLoopGroup workerGroup;
try {
bossGroup = new EpollEventLoopGroup(1);
workerGroup = new EpollEventLoopGroup();
} catch (ExceptionInInitializerError | UnsatisfiedLinkError | NoClassDefFoundError e) {
nio = true;
bossGroup = new NioEventLoopGroup(1);
workerGroup = new NioEventLoopGroup();
}
ServerBootstrap b = new ServerBootstrap();
b.group(bossGroup, workerGroup)
 .channel(nio ? NioServerSocketChannel.class : EpollServerSocketChannel.class)
 .option(ChannelOption.SO_BACKLOG, 100)
 .handler(new LoggingHandler(LogLevel.INFO))
 .childHandler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel ch) throws Exception {
ChannelPipeline p = ch.pipeline();
if (sslCtx != null) {
SslHandler handler = sslCtx.newHandler(ch.alloc());
SSLEngine sslEngine = handler.engine();
SSLParameters sslParameters = sslEngine.getSSLParameters();
sslParameters.setEndpointIdentificationAlgorithm("LDAPS");
sslEngine.setSSLParameters(sslParameters);
p.addLast(handler);
}
}
});
// Start the server.
serverChannel = b.bind("localhost", port).awaitUninterruptibly().channel();
}
use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelPipeline in project transporter by wang4ever.
the class WSChildHandlerInitializer method initChannel.
@Override
public void initChannel(SocketChannel ch) throws Exception {
WSConfig conf = this.config.getWsConfig();
if (logger.isDebugEnabled())
logger.debug("Initital channel handler...\twebsocketPath={}", conf.getWebsocketPath());
ChannelPipeline p = ch.pipeline();
// Configure SSL.
if (conf.getSslEnable()) {
/*
* SelfSignedCertificate ssc = new SelfSignedCertificate();
* SslContext sslCtx =
* SslContextBuilder.forServer(ssc.certificate(),
* ssc.privateKey()).build();
*/
File keyCertChainFile = new File(Resources.getResource(conf.getKeyCertChainFile()).getFile());
File keyFile = new File(Resources.getResource(conf.getKeyFile()).getFile());
SslContext sslCtx = SslContextBuilder.forServer(keyCertChainFile, keyFile).build();
p.addLast(sslCtx.newHandler(ch.alloc()));
if (logger.isDebugEnabled())
logger.debug("The ssl(wss) handler has been enabled. sslCtx={}", sslCtx.toString());
}
// The pipeline manages the channel's handlers; add handlers to the channel's pipeline to process the business logic.
if (conf.getLoggingEnable()) {
p.addLast(new LoggingHandler(LogLevel.valueOf(conf.getLoggingLevel())));
if (logger.isInfoEnabled())
logger.info("Netty internal log has been used. (WS)level={}", conf.getLoggingLevel());
}
IdleStateHandler idleHandler = new IdleStateHandler(conf.getReadIdleSeconds(), conf.getWriteIdleSeconds(), conf.getAllIdleSeconds());
p.addLast(idleHandler);
// Decode request and response messages into HTTP messages.
p.addLast("http-codec", new HttpServerCodec());
// Combine the multiple parts of an HTTP message into one complete HTTP message.
p.addLast("aggregator", new HttpObjectAggregator(65536));
// Send HTML5 files to the client via chunked writes.
p.addLast("http-chunked", new ChunkedWriteHandler());
p.addLast("ws-protocol", new WebSocketServerProtocolHandler(conf.getWebsocketPath(), null, true));
p.addLast("text-handler", SpringContextHolder.getBean("textWSFrameHandler"));
}
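The final handler is resolved from the Spring context under the bean name "textWSFrameHandler" and its implementation is not shown here. A minimal sketch of what a shareable text-frame handler might look like (a hypothetical echo handler, not the project's actual bean):
import io.netty.channel.ChannelHandler.Sharable;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.websocketx.TextWebSocketFrame;

// @Sharable is required because a single Spring-managed instance is added to every channel's pipeline.
@Sharable
public class TextWSFrameHandler extends SimpleChannelInboundHandler<TextWebSocketFrame> {
    @Override
    protected void channelRead0(ChannelHandlerContext ctx, TextWebSocketFrame frame) {
        // Echo the received text frame back to the client.
        ctx.writeAndFlush(new TextWebSocketFrame("echo: " + frame.text()));
    }
}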