Use of io.grpc.internal.KeepAliveManager in project grpc-java by grpc: the class NettyClientTransport, method start.
@SuppressWarnings("unchecked")
@Override
public Runnable start(Listener transportListener) {
  lifecycleManager = new ClientTransportLifecycleManager(
      Preconditions.checkNotNull(transportListener, "listener"));
  handler = newHandler();
  HandlerSettings.setAutoWindow(handler);
  negotiationHandler = negotiator.newHandler(handler);

  Bootstrap b = new Bootstrap();
  b.group(group);
  b.channel(channelType);
  if (NioSocketChannel.class.isAssignableFrom(channelType)) {
    b.option(SO_KEEPALIVE, true);
  }
  for (Map.Entry<ChannelOption<?>, ?> entry : channelOptions.entrySet()) {
    // Every entry in the map is obtained from
    // NettyChannelBuilder#withOption(ChannelOption<T> option, T value)
    // so it is safe to pass the key-value pair to b.option().
    b.option((ChannelOption<Object>) entry.getKey(), entry.getValue());
  }

  /*
   * We don't use a ChannelInitializer in the client bootstrap because its "initChannel" method
   * is executed in the event loop and we need this handler to be in the pipeline immediately so
   * that it may begin buffering writes.
   */
  b.handler(negotiationHandler);
  channel = b.register().channel();
  // Start the connection operation to the server.
  channel.connect(address).addListener(new ChannelFutureListener() {
    @Override
    public void operationComplete(ChannelFuture future) throws Exception {
      if (!future.isSuccess()) {
        ChannelHandlerContext ctx = future.channel().pipeline().context(handler);
        if (ctx != null) {
          // NettyClientHandler doesn't propagate exceptions, but the negotiator will need the
          // exception to fail any writes. Note that this fires after handler, because it is as
          // if handler was propagating the notification.
          ctx.fireExceptionCaught(future.cause());
        }
        future.channel().pipeline().fireExceptionCaught(future.cause());
      }
    }
  });
  // Start the write queue as soon as the channel is constructed.
  handler.startWriteQueue(channel);
  // This write will have no effect, yet it will only complete once the negotiationHandler
  // flushes any pending writes.
  channel.write(NettyClientHandler.NOOP_MESSAGE).addListener(new ChannelFutureListener() {
    @Override
    public void operationComplete(ChannelFuture future) throws Exception {
      if (!future.isSuccess()) {
        // Need to notify of this failure, because NettyClientHandler may not have been added
        // to the pipeline before the error occurred.
        lifecycleManager.notifyTerminated(Utils.statusFromThrowable(future.cause()));
      }
    }
  });
  // Handle transport shutdown when the channel is closed.
  channel.closeFuture().addListener(new ChannelFutureListener() {
    @Override
    public void operationComplete(ChannelFuture future) throws Exception {
      // Typically we should have noticed shutdown before this point.
      lifecycleManager.notifyTerminated(
          Status.INTERNAL.withDescription("Connection closed with unknown cause"));
    }
  });
  if (enableKeepAlive) {
    keepAliveManager = new KeepAliveManager(
        this, channel.eventLoop(), keepAliveDelayNanos, keepAliveTimeoutNanos, false);
    keepAliveManager.onTransportStarted();
  }
  return null;
}
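For context, the enableKeepAlive, keepAliveDelayNanos, and keepAliveTimeoutNanos fields read above are typically populated from the channel builder. Below is a minimal sketch of enabling client keep-alive, assuming a grpc-java release that exposes the keepAliveTime/keepAliveTimeout builder methods (older releases used a NettyChannelBuilder#enableKeepAlive overload instead); the host and port are placeholders:

import io.grpc.ManagedChannel;
import io.grpc.netty.NettyChannelBuilder;
import java.util.concurrent.TimeUnit;

public class KeepAliveChannelExample {
  public static void main(String[] args) {
    // Placeholder endpoint; point this at any reachable gRPC server.
    ManagedChannel channel = NettyChannelBuilder.forAddress("localhost", 8443)
        // Send a keep-alive ping after 30s without transport activity
        // (this is the source of keepAliveDelayNanos above).
        .keepAliveTime(30, TimeUnit.SECONDS)
        // Close the transport if the ping is not acknowledged within 10s
        // (this is the source of keepAliveTimeoutNanos above).
        .keepAliveTimeout(10, TimeUnit.SECONDS)
        .build();

    channel.shutdownNow();
  }
}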
Use of io.grpc.internal.KeepAliveManager in project grpc-java by grpc: the class OkHttpClientTransport, method start.
@Override
public Runnable start(Listener listener) {
  this.listener = Preconditions.checkNotNull(listener, "listener");

  if (enableKeepAlive) {
    scheduler = SharedResourceHolder.get(TIMER_SERVICE);
    keepAliveManager = new KeepAliveManager(
        this, scheduler, keepAliveDelayNanos, keepAliveTimeoutNanos, false);
    keepAliveManager.onTransportStarted();
  }

  frameWriter = new AsyncFrameWriter(this, serializingExecutor);
  outboundFlow = new OutboundFlowController(this, frameWriter);
  // Connecting in the serializingExecutor, so that some stream operations like synStream
  // will be executed after connected.
  serializingExecutor.execute(new Runnable() {
    @Override
    public void run() {
      if (isForTest()) {
        if (connectingCallback != null) {
          connectingCallback.run();
        }
        clientFrameHandler = new ClientFrameHandler(testFrameReader);
        executor.execute(clientFrameHandler);
        synchronized (lock) {
          maxConcurrentStreams = Integer.MAX_VALUE;
          startPendingStreams();
        }
        frameWriter.becomeConnected(testFrameWriter, socket);
        connectedFuture.set(null);
        return;
      }
      // Use closed source on failure so that the reader immediately shuts down.
      BufferedSource source = Okio.buffer(new Source() {
        @Override
        public long read(Buffer sink, long byteCount) {
          return -1;
        }

        @Override
        public Timeout timeout() {
          return Timeout.NONE;
        }

        @Override
        public void close() {}
      });
      Variant variant = new Http2();
      BufferedSink sink;
      Socket sock;
      try {
        if (proxyAddress == null) {
          sock = new Socket(address.getAddress(), address.getPort());
        } else {
          sock = createHttpProxySocket(address, proxyAddress, proxyUsername, proxyPassword);
        }
        if (sslSocketFactory != null) {
          sock = OkHttpTlsUpgrader.upgrade(
              sslSocketFactory, sock, getOverridenHost(), getOverridenPort(), connectionSpec);
        }
        sock.setTcpNoDelay(true);
        source = Okio.buffer(Okio.source(sock));
        sink = Okio.buffer(Okio.sink(sock));
      } catch (StatusException e) {
        startGoAway(0, ErrorCode.INTERNAL_ERROR, e.getStatus());
        return;
      } catch (Exception e) {
        onException(e);
        return;
      } finally {
        clientFrameHandler = new ClientFrameHandler(variant.newReader(source, true));
        executor.execute(clientFrameHandler);
      }

      FrameWriter rawFrameWriter;
      synchronized (lock) {
        socket = sock;
        maxConcurrentStreams = Integer.MAX_VALUE;
        startPendingStreams();
      }
      rawFrameWriter = variant.newWriter(sink, true);
      frameWriter.becomeConnected(rawFrameWriter, socket);

      try {
        // Do these with the raw FrameWriter, so that they will be done in this thread,
        // and before any possible pending stream operations.
        rawFrameWriter.connectionPreface();
        Settings settings = new Settings();
        rawFrameWriter.settings(settings);
      } catch (Exception e) {
        onException(e);
        return;
      }
    }
  });
  return null;
}
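In both transports the pattern is the same: hand KeepAliveManager a scheduler (the Netty channel's event loop, or the shared TIMER_SERVICE for OkHttp) and signal lifecycle events such as onTransportStarted(). The sketch below is not the grpc-java class; it is a hypothetical miniature of the same ping/timeout pattern, with all names invented for illustration, using only java.util.concurrent:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

// Hypothetical miniature of the keep-alive pattern: after delayNanos of
// silence send a ping, then give the peer timeoutNanos to answer before
// declaring the connection dead.
final class MiniKeepAlive {
  private final ScheduledExecutorService scheduler =
      Executors.newSingleThreadScheduledExecutor();
  private final Runnable sendPing;        // e.g. write an HTTP/2 PING frame
  private final Runnable closeTransport;  // e.g. tear the connection down
  private final long delayNanos;
  private final long timeoutNanos;
  private ScheduledFuture<?> pending;

  MiniKeepAlive(Runnable sendPing, Runnable closeTransport,
      long delayNanos, long timeoutNanos) {
    this.sendPing = sendPing;
    this.closeTransport = closeTransport;
    this.delayNanos = delayNanos;
    this.timeoutNanos = timeoutNanos;
  }

  // Mirrors the onTransportStarted() call seen in both snippets above.
  synchronized void onTransportStarted() {
    schedulePing();
  }

  // Any inbound data proves the peer is alive: cancel whatever is pending
  // (ping or timeout) and start the delay over.
  synchronized void onDataReceived() {
    if (pending != null) {
      pending.cancel(false);
    }
    schedulePing();
  }

  private void schedulePing() {
    pending = scheduler.schedule(() -> {
      sendPing.run();
      scheduleTimeout();
    }, delayNanos, TimeUnit.NANOSECONDS);
  }

  private synchronized void scheduleTimeout() {
    pending = scheduler.schedule(closeTransport, timeoutNanos, TimeUnit.NANOSECONDS);
  }
}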