Use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelFutureListener in project grpc-java by grpc.
From the class NettyClientStream, method start:
@Override
public void start(ClientStreamListener listener) {
    super.start(listener);
    // Convert the headers into Netty HTTP/2 headers.
    AsciiString defaultPath = (AsciiString) methodDescriptorAccessor.geRawMethodName(method);
    if (defaultPath == null) {
        defaultPath = new AsciiString("/" + method.getFullMethodName());
        methodDescriptorAccessor.setRawMethodName(method, defaultPath);
    }
    headers.discardAll(GrpcUtil.USER_AGENT_KEY);
    Http2Headers http2Headers =
        Utils.convertClientHeaders(headers, scheme, defaultPath, authority, userAgent);
    headers = null;
    ChannelFutureListener failureListener = new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (!future.isSuccess()) {
                // Stream creation failed. Close the stream if not already closed.
                Status s = transportState().statusFromFailedFuture(future);
                transportState().transportReportStatus(s, true, new Metadata());
            }
        }
    };
    // Write the command requesting the creation of the stream.
    writeQueue.enqueue(new CreateStreamCommand(http2Headers, transportState()),
        !method.getType().clientSendsOneMessage()).addListener(failureListener);
}
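The reusable idea here is the failure-only listener: the write is dispatched asynchronously and the listener acts only when the future fails, leaving the success path to normal stream processing. A minimal sketch of that pattern, assuming plain io.netty package names (the flink-shaded path in the heading is a relocated copy of the same API); writeWithFailureHandling and reportError are illustrative names, not part of grpc-java:

import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;

public final class FailureListenerSketch {

    // Write a message and react only to failures; success needs no action here.
    static void writeWithFailureHandling(Channel channel, Object msg) {
        channel.writeAndFlush(msg).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) {
                if (!future.isSuccess()) {
                    // Hypothetical error hook; the real code maps the cause to a gRPC Status.
                    reportError(future.cause());
                }
            }
        });
    }

    static void reportError(Throwable cause) {
        System.err.println("write failed: " + cause);
    }
}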
Use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelFutureListener in project grpc-java by grpc.
From the class NettyClientHandler, method sendPingFrame:
/**
 * Sends a PING frame. If a ping operation is already outstanding, the callback in the message is
 * registered to be called when the existing operation completes, and no new frame is sent.
 */
private void sendPingFrame(ChannelHandlerContext ctx, SendPingCommand msg, ChannelPromise promise) {
    // Don't check lifecycleManager.getShutdownStatus() since we want to allow pings after shutdown
    // but before termination. After termination, messages will no longer arrive because the
    // pipeline clears all handlers on channel close.
    PingCallback callback = msg.callback();
    Executor executor = msg.executor();
    // Only one ping is allowed at a time; piggyback the callback on any outstanding operation.
    if (ping != null) {
        promise.setSuccess();
        ping.addCallback(callback, executor);
        return;
    }
    // Use a new promise to prevent calling the callback twice on write failure: here and in
    // NettyClientTransport.ping(). It may appear strange, but it will behave the same as if
    // ping != null above.
    promise.setSuccess();
    promise = ctx().newPromise();
    // Set the outstanding operation.
    long data = USER_PING_PAYLOAD;
    ByteBuf buffer = ctx.alloc().buffer(8);
    buffer.writeLong(data);
    Stopwatch stopwatch = Stopwatch.createStarted(ticker);
    ping = new Http2Ping(data, stopwatch);
    ping.addCallback(callback, executor);
    // ... and then write the ping.
    encoder().writePing(ctx, false, buffer, promise);
    ctx.flush();
    final Http2Ping finalPing = ping;
    promise.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (!future.isSuccess()) {
                Throwable cause = future.cause();
                if (cause instanceof ClosedChannelException) {
                    cause = lifecycleManager.getShutdownThrowable();
                    if (cause == null) {
                        cause = Status.UNKNOWN.withDescription("Ping failed but for unknown reason.")
                            .withCause(future.cause()).asException();
                    }
                }
                finalPing.failed(cause);
                if (ping == finalPing) {
                    ping = null;
                }
            }
        }
    });
}
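Stripped of the HTTP/2 details, sendPingFrame implements ping coalescing: at most one PING frame is outstanding, and later callers piggyback their callbacks on it. A standalone sketch of that pattern under the same single-threaded (event loop) assumption; PingCoalescer and PendingPing are illustrative stand-ins, not grpc-java types:

import java.util.ArrayList;
import java.util.List;
import java.util.function.Consumer;

// Illustrative coalescing pattern: concurrent callers share one in-flight ping
// instead of each sending their own frame. PendingPing stands in for Http2Ping.
final class PingCoalescer {

    static final class PendingPing {
        private final List<Consumer<Throwable>> callbacks = new ArrayList<>();
        void addCallback(Consumer<Throwable> cb) { callbacks.add(cb); }
        void complete(Throwable failureOrNull) {
            for (Consumer<Throwable> cb : callbacks) { cb.accept(failureOrNull); }
        }
    }

    private PendingPing ping; // the single outstanding operation, if any

    // Must be called from a single event-loop thread, as in NettyClientHandler.
    void sendPing(Consumer<Throwable> callback, Runnable writeFrame) {
        if (ping != null) {
            ping.addCallback(callback); // piggyback; no new frame is sent
            return;
        }
        ping = new PendingPing();
        ping.addCallback(callback);
        writeFrame.run(); // actually emit the PING frame, once
    }

    void onPingAck() {
        PendingPing p = ping;
        ping = null;
        if (p != null) { p.complete(null); }
    }
}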
Use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelFutureListener in project rest.li by linkedin.
From the class RAPResponseDecoder, method channelRead0:
@Override
protected void channelRead0(final ChannelHandlerContext ctx, HttpObject msg) throws Exception {
    if (msg instanceof HttpResponse) {
        HttpResponse m = (HttpResponse) msg;
        _shouldCloseConnection = !HttpUtil.isKeepAlive(m);
        if (HttpUtil.is100ContinueExpected(m)) {
            ctx.writeAndFlush(CONTINUE).addListener(new ChannelFutureListener() {
                @Override
                public void operationComplete(ChannelFuture future) throws Exception {
                    if (!future.isSuccess()) {
                        ctx.fireExceptionCaught(future.cause());
                    }
                }
            });
        }
        if (!m.decoderResult().isSuccess()) {
            ctx.fireExceptionCaught(m.decoderResult().cause());
            return;
        }
        // Remove chunked encoding.
        if (HttpUtil.isTransferEncodingChunked(m)) {
            HttpUtil.setTransferEncodingChunked(m, false);
        }
        Timeout<None> timeout = ctx.channel().attr(TIMEOUT_ATTR_KEY).getAndRemove();
        if (timeout == null) {
            LOG.debug("dropped a response after channel inactive or exception had happened.");
            return;
        }
        final TimeoutBufferedWriter writer = new TimeoutBufferedWriter(ctx, _maxContentLength,
            BUFFER_HIGH_WATER_MARK, BUFFER_LOW_WATER_MARK, timeout);
        EntityStream entityStream = EntityStreams.newEntityStream(writer);
        _chunkedMessageWriter = writer;
        StreamResponseBuilder builder = new StreamResponseBuilder();
        builder.setStatus(m.status().code());
        for (Map.Entry<String, String> e : m.headers()) {
            String key = e.getKey();
            String value = e.getValue();
            if (key.equalsIgnoreCase(HttpConstants.RESPONSE_COOKIE_HEADER_NAME)) {
                builder.addCookie(value);
            } else {
                builder.unsafeAddHeaderValue(key, value);
            }
        }
        ctx.fireChannelRead(builder.build(entityStream));
    } else if (msg instanceof HttpContent) {
        HttpContent chunk = (HttpContent) msg;
        TimeoutBufferedWriter currentWriter = _chunkedMessageWriter;
        // Sanity check
        if (currentWriter == null) {
            throw new IllegalStateException("received " + HttpContent.class.getSimpleName()
                + " without " + HttpResponse.class.getSimpleName());
        }
        if (!chunk.decoderResult().isSuccess()) {
            this.exceptionCaught(ctx, chunk.decoderResult().cause());
        }
        currentWriter.processHttpChunk(chunk);
        if (chunk instanceof LastHttpContent) {
            _chunkedMessageWriter = null;
        }
    } else {
        // Something must be wrong, but let's proceed so that
        // the handler after us has a chance to process it.
        ctx.fireChannelRead(msg);
    }
}
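A small aside on the 100-continue branch: Netty 4 ships a predefined listener constant that covers almost the same ground as the anonymous listener above, the difference being that the constant fires the exception from the head of the pipeline rather than from this handler's position. A sketch of the substitution, reusing the ctx and CONTINUE names from the snippet:

// Near-equivalent to the anonymous listener above: on write failure the cause
// is propagated via fireExceptionCaught(), but from the pipeline head rather
// than from this handler's position in the pipeline.
ctx.writeAndFlush(CONTINUE).addListener(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE);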
Use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelFutureListener in project rest.li by linkedin.
From the class ChannelPoolLifecycle, method create:
@Override
public void create(final Callback<Channel> channelCallback) {
    final long start = System.currentTimeMillis();
    _bootstrap.connect(_remoteAddress).addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture channelFuture) throws Exception {
            if (channelFuture.isSuccess()) {
                synchronized (_createTimeTracker) {
                    _createTimeTracker.addValue(System.currentTimeMillis() - start);
                }
                Channel c = channelFuture.channel();
                if (_tcpNoDelay) {
                    c.config().setOption(ChannelOption.TCP_NODELAY, true);
                }
                _channelGroup.add(c);
                channelCallback.onSuccess(c);
            } else {
                Throwable cause = channelFuture.cause();
                if (cause instanceof ConnectException) {
                    channelCallback.onError(new RetriableRequestException(cause));
                } else {
                    channelCallback.onError(HttpNettyStreamClient.toException(cause));
                }
            }
        }
    });
}
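This method is essentially an adapter from Netty's ChannelFuture to rest.li's Callback. The same bridge can be sketched against JDK types; using CompletableFuture here is an assumption for illustration, not the rest.li API:

import java.net.InetSocketAddress;
import java.util.concurrent.CompletableFuture;

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;

final class ConnectBridge {

    // Adapt Bootstrap.connect() to a CompletableFuture<Channel>.
    static CompletableFuture<Channel> connect(Bootstrap bootstrap, InetSocketAddress address) {
        CompletableFuture<Channel> result = new CompletableFuture<>();
        bootstrap.connect(address).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) {
                if (future.isSuccess()) {
                    result.complete(future.channel());
                } else {
                    result.completeExceptionally(future.cause());
                }
            }
        });
        return result;
    }
}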
Use of org.apache.flink.shaded.netty4.io.netty.channel.ChannelFutureListener in project camel by apache.
From the class NettyProducer, method processWithConnectedChannel:
public void processWithConnectedChannel(final Exchange exchange, final BodyReleaseCallback callback,
        final ChannelFuture channelFuture, final Object body) {
    // remember the channel so we can reuse it
    final Channel channel = channelFuture.channel();
    if (getConfiguration().isReuseChannel() && exchange.getProperty(NettyConstants.NETTY_CHANNEL) == null) {
        exchange.setProperty(NettyConstants.NETTY_CHANNEL, channel);
        // and defer closing the channel until we are done routing the exchange
        exchange.addOnCompletion(new SynchronizationAdapter() {
            @Override
            public void onComplete(Exchange exchange) {
                // should the channel be closed after complete?
                Boolean close;
                if (ExchangeHelper.isOutCapable(exchange)) {
                    close = exchange.getOut().getHeader(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
                } else {
                    close = exchange.getIn().getHeader(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
                }
                // should we disconnect? the header can override the configuration
                boolean disconnect = getConfiguration().isDisconnect();
                if (close != null) {
                    disconnect = close;
                }
                if (disconnect) {
                    LOG.trace("Closing channel {} as routing the Exchange is done", channel);
                    NettyHelper.close(channel);
                }
                releaseChannel(channelFuture);
            }
        });
    }
    if (exchange.getIn().getHeader(NettyConstants.NETTY_REQUEST_TIMEOUT) != null) {
        long timeoutInMs = exchange.getIn().getHeader(NettyConstants.NETTY_REQUEST_TIMEOUT, Long.class);
        ChannelHandler oldHandler = channel.pipeline().get("timeout");
        ReadTimeoutHandler newHandler = new ReadTimeoutHandler(timeoutInMs, TimeUnit.MILLISECONDS);
        if (oldHandler == null) {
            channel.pipeline().addBefore("handler", "timeout", newHandler);
        } else {
            channel.pipeline().replace(oldHandler, "timeout", newHandler);
        }
    }
    // This will refer to the original callback, since Netty will release the body by itself
    final AsyncCallback producerCallback;
    if (configuration.isReuseChannel()) {
        // Use the callback as-is because we should not put the channel back in the pool as
        // NettyProducerCallback would do; when channel reuse is enabled, the channel is returned
        // to the pool when the exchange is done, via the on-completion hook above.
        producerCallback = callback.getOriginalCallback();
    } else {
        producerCallback = new NettyProducerCallback(channelFuture, callback.getOriginalCallback());
    }
    // set up state as an attachment on the channel, so we can access it later when needed
    putState(channel, new NettyCamelState(producerCallback, exchange));
    // set up the remote address information
    InetSocketAddress remoteAddress = null;
    if (!isTcp()) {
        remoteAddress = new InetSocketAddress(configuration.getHost(), configuration.getPort());
    }
    // write the body
    NettyHelper.writeBodyAsync(LOG, channel, remoteAddress, body, exchange, new ChannelFutureListener() {
        public void operationComplete(ChannelFuture channelFuture) throws Exception {
            LOG.trace("Operation complete {}", channelFuture);
            if (!channelFuture.isSuccess()) {
                // no success, so exit (any exception has been handled by ClientChannelHandler#exceptionCaught)
                return;
            }
            // if we do not expect any reply then signal the callback to continue routing
            if (!configuration.isSync()) {
                try {
                    // should the channel be closed after complete?
                    Boolean close;
                    if (ExchangeHelper.isOutCapable(exchange)) {
                        close = exchange.getOut().getHeader(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
                    } else {
                        close = exchange.getIn().getHeader(NettyConstants.NETTY_CLOSE_CHANNEL_WHEN_COMPLETE, Boolean.class);
                    }
                    // should we disconnect? the header can override the configuration
                    boolean disconnect = getConfiguration().isDisconnect();
                    if (close != null) {
                        disconnect = close;
                    }
                    // we should not close if we are reusing the channel
                    if (!configuration.isReuseChannel() && disconnect) {
                        if (LOG.isTraceEnabled()) {
                            LOG.trace("Closing channel when complete at address: {}", getEndpoint().getConfiguration().getAddress());
                        }
                        NettyHelper.close(channel);
                    }
                } finally {
                    // signal the callback to continue routing
                    producerCallback.done(false);
                }
            }
        }
    });
}
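The listener at the end follows a write-then-signal shape: ignore failures (they are handled elsewhere in the pipeline), optionally close the channel, and always release the caller in a finally block. A condensed sketch of that shape, assuming plain io.netty types; Done and writeAndSignal are hypothetical names, not Camel API:

import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;

final class WriteThenSignalSketch {

    // Hypothetical completion hook standing in for Camel's AsyncCallback.
    interface Done {
        void done();
    }

    // Write a message; on success, optionally close the channel and then
    // signal the caller, mirroring the structure of the Camel listener above.
    static void writeAndSignal(Channel channel, Object body, boolean disconnect, Done callback) {
        channel.writeAndFlush(body).addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) {
                if (!future.isSuccess()) {
                    return; // failures are assumed to be handled elsewhere in the pipeline
                }
                try {
                    if (disconnect) {
                        channel.close();
                    }
                } finally {
                    callback.done(); // always let routing continue, even if close throws
                }
            }
        });
    }
}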