Use of io.netty.util.concurrent.Promise in project hbase by apache.
The class NettyRpcConnection, method saslNegotiate:
private void saslNegotiate(final Channel ch) {
  UserGroupInformation ticket = getUGI();
  if (ticket == null) {
    failInit(ch, new FatalConnectionException("ticket/user is null"));
    return;
  }
  Promise<Boolean> saslPromise = ch.eventLoop().newPromise();
  final NettyHBaseSaslRpcClientHandler saslHandler;
  try {
    saslHandler = new NettyHBaseSaslRpcClientHandler(saslPromise, ticket, authMethod, token,
      serverPrincipal, rpcClient.fallbackAllowed, this.rpcClient.conf);
  } catch (IOException e) {
    failInit(ch, e);
    return;
  }
  ch.pipeline().addFirst(new SaslChallengeDecoder(), saslHandler);
  saslPromise.addListener(new FutureListener<Boolean>() {

    @Override
    public void operationComplete(Future<Boolean> future) throws Exception {
      if (future.isSuccess()) {
        ChannelPipeline p = ch.pipeline();
        p.remove(SaslChallengeDecoder.class);
        p.remove(NettyHBaseSaslRpcClientHandler.class);
        // check whether negotiating the connection header with the server is necessary
        if (saslHandler.isNeedProcessConnectionHeader()) {
          Promise<Boolean> connectionHeaderPromise = ch.eventLoop().newPromise();
          // create the handler to handle the connection header
          ChannelHandler chHandler = new NettyHBaseRpcConnectionHeaderHandler(
            connectionHeaderPromise, conf, connectionHeaderWithLength);
          // add a ReadTimeoutHandler in case the server does not respond to the connection
          // header because of differing configuration on the client and server side
          p.addFirst(new ReadTimeoutHandler(RpcClient.DEFAULT_SOCKET_TIMEOUT_READ,
            TimeUnit.MILLISECONDS));
          p.addLast(chHandler);
          connectionHeaderPromise.addListener(new FutureListener<Boolean>() {

            @Override
            public void operationComplete(Future<Boolean> future) throws Exception {
              if (future.isSuccess()) {
                ChannelPipeline p = ch.pipeline();
                p.remove(ReadTimeoutHandler.class);
                p.remove(NettyHBaseRpcConnectionHeaderHandler.class);
                // don't send the connection header here; NettyHBaseRpcConnectionHeaderHandler
                // has already sent it
                established(ch);
              } else {
                final Throwable error = future.cause();
                scheduleRelogin(error);
                failInit(ch, toIOE(error));
              }
            }
          });
        } else {
          // send the connection header to the server
          ch.write(connectionHeaderWithLength.retainedDuplicate());
          established(ch);
        }
      } else {
        final Throwable error = future.cause();
        scheduleRelogin(error);
        failInit(ch, toIOE(error));
      }
    }
  });
}
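The pattern above is generic: a temporary handler is handed a Promise created on the channel's event loop, the handler completes the promise when its negotiation step finishes, and a FutureListener removes the handler and continues setting up the connection. Below is a minimal, self-contained sketch of that pattern; the HandshakeHandler class and the use of EmbeddedChannel are hypothetical illustrations, not HBase code.

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.FutureListener;
import io.netty.util.concurrent.Promise;

public final class HandshakePromiseSketch {

  // Hypothetical handler: succeeds the promise on the first inbound message.
  static final class HandshakeHandler extends ChannelInboundHandlerAdapter {
    private final Promise<Boolean> done;

    HandshakeHandler(Promise<Boolean> done) {
      this.done = done;
    }

    @Override
    public void channelRead(ChannelHandlerContext ctx, Object msg) {
      done.trySuccess(Boolean.TRUE); // handshake step complete
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
      done.tryFailure(cause); // propagate failure to the waiting listener
    }
  }

  public static void main(String[] args) {
    final EmbeddedChannel ch = new EmbeddedChannel();
    Promise<Boolean> promise = ch.eventLoop().newPromise();
    ch.pipeline().addFirst(new HandshakeHandler(promise));

    promise.addListener(new FutureListener<Boolean>() {
      @Override
      public void operationComplete(Future<Boolean> future) {
        if (future.isSuccess()) {
          // negotiation done: drop the temporary handler, keep using the channel
          ch.pipeline().remove(HandshakeHandler.class);
          System.out.println("handshake complete");
        } else {
          System.out.println("handshake failed: " + future.cause());
        }
      }
    });

    ch.writeInbound("server-challenge"); // simulate the server's reply
  }
}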
Use of io.netty.util.concurrent.Promise in project hive by apache.
The class Rpc, method createClient:
/**
 * Creates an RPC client for a server running on the given remote host and port.
 *
 * @param config RPC configuration data.
 * @param eloop Event loop for managing the connection.
 * @param host Host name or IP address to connect to.
 * @param port Port where server is listening.
 * @param clientId The client ID that identifies the connection.
 * @param secret Secret for authenticating the client with the server.
 * @param dispatcher Dispatcher used to handle RPC calls.
 * @return A future that can be used to monitor the creation of the RPC object.
 */
public static Promise<Rpc> createClient(Map<String, String> config, final NioEventLoopGroup eloop,
    String host, int port, final String clientId, final String secret,
    final RpcDispatcher dispatcher) throws Exception {
  final RpcConfiguration rpcConf = new RpcConfiguration(config);
  int connectTimeoutMs = (int) rpcConf.getConnectTimeoutMs();
  final ChannelFuture cf = new Bootstrap()
      .group(eloop)
      .handler(new ChannelInboundHandlerAdapter() { })
      .channel(NioSocketChannel.class)
      .option(ChannelOption.SO_KEEPALIVE, true)
      .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, connectTimeoutMs)
      .connect(host, port);
  final Promise<Rpc> promise = eloop.next().newPromise();
  final AtomicReference<Rpc> rpc = new AtomicReference<Rpc>();
  // Set up a timeout to undo everything.
  final Runnable timeoutTask = new Runnable() {

    @Override
    public void run() {
      promise.setFailure(new TimeoutException("Timed out waiting for RPC server connection."));
    }
  };
  final ScheduledFuture<?> timeoutFuture = eloop.schedule(timeoutTask, connectTimeoutMs,
      TimeUnit.MILLISECONDS);
  // The channel listener instantiates the Rpc instance when the connection is established,
  // and initiates the SASL handshake.
  cf.addListener(new ChannelFutureListener() {

    @Override
    public void operationComplete(ChannelFuture cf) throws Exception {
      if (cf.isSuccess()) {
        SaslClientHandler saslHandler = new SaslClientHandler(rpcConf, clientId, promise,
            timeoutFuture, secret, dispatcher);
        Rpc rpc = createRpc(rpcConf, saslHandler, (SocketChannel) cf.channel(), eloop);
        saslHandler.rpc = rpc;
        saslHandler.sendHello(cf.channel());
      } else {
        promise.setFailure(cf.cause());
      }
    }
  });
  // Handle cancellation of the promise.
  promise.addListener(new GenericFutureListener<Promise<Rpc>>() {

    @Override
    public void operationComplete(Promise<Rpc> p) {
      if (p.isCancelled()) {
        cf.cancel(true);
      }
    }
  });
  return promise;
}
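For context, a hedged sketch of how a caller might consume the Promise<Rpc> returned by createClient. This is a fragment, not Hive code: the host, port, client id and secret values are placeholders, and config, eloop and dispatcher are assumed to already exist in the caller's scope.

Promise<Rpc> clientPromise = Rpc.createClient(config, eloop, "rpc-host.example", 12345,
    "client-1", "client-secret", dispatcher);
clientPromise.addListener(new FutureListener<Rpc>() {

  @Override
  public void operationComplete(Future<Rpc> f) throws Exception {
    if (f.isSuccess()) {
      Rpc rpc = f.getNow();
      // the connection and SASL handshake succeeded; use rpc to issue calls
    } else {
      // connect failure, SASL failure, or the timeout task fired first
      f.cause().printStackTrace();
    }
  }
});
// Alternatively, block the calling thread with a bounded wait:
// Rpc rpc = clientPromise.get(30, TimeUnit.SECONDS);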
Use of io.netty.util.concurrent.Promise in project hive by apache.
The class RpcServer, method registerClient:
@VisibleForTesting
Future<Rpc> registerClient(final String clientId, String secret, RpcDispatcher serverDispatcher,
    long clientTimeoutMs) {
  final Promise<Rpc> promise = group.next().newPromise();
  Runnable timeout = new Runnable() {

    @Override
    public void run() {
      promise.setFailure(new TimeoutException("Timed out waiting for client connection."));
    }
  };
  ScheduledFuture<?> timeoutFuture = group.schedule(timeout, clientTimeoutMs,
      TimeUnit.MILLISECONDS);
  final ClientInfo client = new ClientInfo(clientId, promise, secret, serverDispatcher,
      timeoutFuture);
  if (pendingClients.putIfAbsent(clientId, client) != null) {
    throw new IllegalStateException(String.format("Client '%s' already registered.", clientId));
  }
  promise.addListener(new GenericFutureListener<Promise<Rpc>>() {

    @Override
    public void operationComplete(Promise<Rpc> p) {
      if (!p.isSuccess()) {
        pendingClients.remove(clientId);
      }
    }
  });
  return promise;
}
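The same shape (register a pending promise keyed by an id, fail it on a timer, and drop it from the pending map whenever it does not succeed) can be sketched generically. The class and method names below (PendingRegistry, register, complete) are hypothetical and not part of Hive.

import io.netty.util.concurrent.DefaultEventExecutorGroup;
import io.netty.util.concurrent.EventExecutorGroup;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.GenericFutureListener;
import io.netty.util.concurrent.Promise;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

final class PendingRegistry<V> {

  private final EventExecutorGroup group = new DefaultEventExecutorGroup(1);
  private final ConcurrentMap<String, Promise<V>> pending =
      new ConcurrentHashMap<String, Promise<V>>();

  /** Registers a pending promise that fails after timeoutMs and cleans itself up on failure. */
  Promise<V> register(final String id, long timeoutMs) {
    final Promise<V> promise = group.next().newPromise();
    if (pending.putIfAbsent(id, promise) != null) {
      throw new IllegalStateException(String.format("'%s' already registered.", id));
    }
    // Fail the promise if nothing completes it in time; tryFailure is a no-op once it is done.
    group.schedule(new Runnable() {
      @Override
      public void run() {
        promise.tryFailure(new TimeoutException("Timed out waiting for " + id));
      }
    }, timeoutMs, TimeUnit.MILLISECONDS);
    // Drop the pending entry on any failure (timeout, cancellation, explicit setFailure).
    promise.addListener(new GenericFutureListener<Future<V>>() {
      @Override
      public void operationComplete(Future<V> f) {
        if (!f.isSuccess()) {
          pending.remove(id);
        }
      }
    });
    return promise;
  }

  /** Completes a pending promise, e.g. when the matching client finally connects. */
  boolean complete(String id, V value) {
    Promise<V> p = pending.remove(id);
    return p != null && p.trySuccess(value);
  }
}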
Use of io.netty.util.concurrent.Promise in project netty by netty.
The class SocksServerConnectHandler, method channelRead0:
@Override
public void channelRead0(final ChannelHandlerContext ctx, final SocksMessage message) throws Exception {
    if (message instanceof Socks4CommandRequest) {
        final Socks4CommandRequest request = (Socks4CommandRequest) message;
        Promise<Channel> promise = ctx.executor().newPromise();
        promise.addListener(new FutureListener<Channel>() {

            @Override
            public void operationComplete(final Future<Channel> future) throws Exception {
                final Channel outboundChannel = future.getNow();
                if (future.isSuccess()) {
                    ChannelFuture responseFuture = ctx.channel().writeAndFlush(
                        new DefaultSocks4CommandResponse(Socks4CommandStatus.SUCCESS));
                    responseFuture.addListener(new ChannelFutureListener() {

                        @Override
                        public void operationComplete(ChannelFuture channelFuture) {
                            ctx.pipeline().remove(SocksServerConnectHandler.this);
                            outboundChannel.pipeline().addLast(new RelayHandler(ctx.channel()));
                            ctx.pipeline().addLast(new RelayHandler(outboundChannel));
                        }
                    });
                } else {
                    ctx.channel().writeAndFlush(
                        new DefaultSocks4CommandResponse(Socks4CommandStatus.REJECTED_OR_FAILED));
                    SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });
        final Channel inboundChannel = ctx.channel();
        // 'b' is the handler's Bootstrap field (not shown in this snippet).
        b.group(inboundChannel.eventLoop())
            .channel(NioSocketChannel.class)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000)
            .option(ChannelOption.SO_KEEPALIVE, true)
            .handler(new DirectClientHandler(promise));
        b.connect(request.dstAddr(), request.dstPort()).addListener(new ChannelFutureListener() {

            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    // Connection established; DirectClientHandler will complete the promise.
                } else {
                    // Close the connection if the connection attempt has failed.
                    ctx.channel().writeAndFlush(
                        new DefaultSocks4CommandResponse(Socks4CommandStatus.REJECTED_OR_FAILED));
                    SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });
    } else if (message instanceof Socks5CommandRequest) {
        final Socks5CommandRequest request = (Socks5CommandRequest) message;
        Promise<Channel> promise = ctx.executor().newPromise();
        promise.addListener(new FutureListener<Channel>() {

            @Override
            public void operationComplete(final Future<Channel> future) throws Exception {
                final Channel outboundChannel = future.getNow();
                if (future.isSuccess()) {
                    ChannelFuture responseFuture = ctx.channel().writeAndFlush(
                        new DefaultSocks5CommandResponse(Socks5CommandStatus.SUCCESS,
                            request.dstAddrType(), request.dstAddr(), request.dstPort()));
                    responseFuture.addListener(new ChannelFutureListener() {

                        @Override
                        public void operationComplete(ChannelFuture channelFuture) {
                            ctx.pipeline().remove(SocksServerConnectHandler.this);
                            outboundChannel.pipeline().addLast(new RelayHandler(ctx.channel()));
                            ctx.pipeline().addLast(new RelayHandler(outboundChannel));
                        }
                    });
                } else {
                    ctx.channel().writeAndFlush(
                        new DefaultSocks5CommandResponse(Socks5CommandStatus.FAILURE, request.dstAddrType()));
                    SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });
        final Channel inboundChannel = ctx.channel();
        b.group(inboundChannel.eventLoop())
            .channel(NioSocketChannel.class)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 10000)
            .option(ChannelOption.SO_KEEPALIVE, true)
            .handler(new DirectClientHandler(promise));
        b.connect(request.dstAddr(), request.dstPort()).addListener(new ChannelFutureListener() {

            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    // Connection established; DirectClientHandler will complete the promise.
                } else {
                    // Close the connection if the connection attempt has failed.
                    ctx.channel().writeAndFlush(
                        new DefaultSocks5CommandResponse(Socks5CommandStatus.FAILURE, request.dstAddrType()));
                    SocksServerUtils.closeOnFlush(ctx.channel());
                }
            }
        });
    } else {
        ctx.close();
    }
}
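The snippet above relies on DirectClientHandler to fulfill the Promise<Channel> once the outbound connection becomes active. A minimal sketch of such a handler is shown below; it follows the shape of Netty's socksproxy example but is written here as an illustration, not as the verbatim example source.

import io.netty.channel.Channel;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.util.concurrent.Promise;

public final class DirectClientHandler extends ChannelInboundHandlerAdapter {

    private final Promise<Channel> promise;

    public DirectClientHandler(Promise<Channel> promise) {
        this.promise = promise;
    }

    @Override
    public void channelActive(ChannelHandlerContext ctx) {
        // The outbound connection is up: hand the channel back to SocksServerConnectHandler
        // through the promise, and remove this handler so the RelayHandler can take over.
        ctx.pipeline().remove(this);
        promise.setSuccess(ctx.channel());
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
        // Any failure fails the promise, which drives the REJECTED_OR_FAILED / FAILURE
        // response path in the listener registered by channelRead0.
        promise.setFailure(cause);
    }
}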