Use of io.netty.util.concurrent.FutureListener in project redisson by redisson.
From the class ReplicatedConnectionManager, method scheduleMasterChangeCheck:
private void scheduleMasterChangeCheck(final ReplicatedServersConfig cfg) {
    monitorFuture = GlobalEventExecutor.INSTANCE.schedule(new Runnable() {
        @Override
        public void run() {
            final URL master = currentMaster.get();
            log.debug("Current master: {}", master);

            final AtomicInteger count = new AtomicInteger(cfg.getNodeAddresses().size());
            for (final URL addr : cfg.getNodeAddresses()) {
                if (isShuttingDown()) {
                    return;
                }

                RFuture<RedisConnection> connectionFuture = connect(cfg, addr);
                connectionFuture.addListener(new FutureListener<RedisConnection>() {
                    @Override
                    public void operationComplete(Future<RedisConnection> future) throws Exception {
                        if (!future.isSuccess()) {
                            log.error(future.cause().getMessage(), future.cause());
                            if (count.decrementAndGet() == 0) {
                                scheduleMasterChangeCheck(cfg);
                            }
                            return;
                        }

                        if (isShuttingDown()) {
                            return;
                        }

                        RedisConnection connection = future.getNow();
                        RFuture<Map<String, String>> result = connection.async(RedisCommands.INFO_REPLICATION);
                        result.addListener(new FutureListener<Map<String, String>>() {
                            @Override
                            public void operationComplete(Future<Map<String, String>> future) throws Exception {
                                if (!future.isSuccess()) {
                                    log.error(future.cause().getMessage(), future.cause());
                                    if (count.decrementAndGet() == 0) {
                                        scheduleMasterChangeCheck(cfg);
                                    }
                                    return;
                                }

                                Role role = Role.valueOf(future.getNow().get(ROLE_KEY));
                                if (Role.master.equals(role)) {
                                    if (master.equals(addr)) {
                                        log.debug("Current master {} unchanged", master);
                                    } else if (currentMaster.compareAndSet(master, addr)) {
                                        log.info("Master has changed from {} to {}", master, addr);
                                        changeMaster(singleSlotRange.getStartSlot(), addr.getHost(), addr.getPort());
                                    }
                                }

                                if (count.decrementAndGet() == 0) {
                                    scheduleMasterChangeCheck(cfg);
                                }
                            }
                        });
                    }
                });
            }
        }
    }, cfg.getScanInterval(), TimeUnit.MILLISECONDS);
}
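The method fans out one async connect per configured node and uses a shared AtomicInteger so the next scan is scheduled only after the last listener has fired, whether it succeeded or failed. Below is a minimal, self-contained sketch of that countdown-and-reschedule idiom using only Netty's concurrency classes; checkNodes, fakeConnect, the pass limit, and the fixed 1-second interval are hypothetical stand-ins for Redisson's connect logic and cfg.getScanInterval().

import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.FutureListener;
import io.netty.util.concurrent.GlobalEventExecutor;
import io.netty.util.concurrent.Promise;

import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class MasterCheckSketch {

    // Limit the demo to two scan passes so the JVM can exit.
    private static final AtomicInteger remainingPasses = new AtomicInteger(2);

    static void checkNodes(List<String> nodes) {
        AtomicInteger count = new AtomicInteger(nodes.size());
        for (String node : nodes) {
            Future<String> connectionFuture = fakeConnect(node);
            connectionFuture.addListener((FutureListener<String>) future -> {
                if (future.isSuccess()) {
                    System.out.println("Checked node: " + future.getNow());
                }
                // Reschedule the scan only after the last outstanding future completes.
                if (count.decrementAndGet() == 0 && remainingPasses.decrementAndGet() > 0) {
                    GlobalEventExecutor.INSTANCE.schedule(
                            () -> checkNodes(nodes), 1, TimeUnit.SECONDS);
                }
            });
        }
    }

    // Stand-in for an async connect; succeeds immediately with the address.
    static Future<String> fakeConnect(String addr) {
        Promise<String> promise = GlobalEventExecutor.INSTANCE.newPromise();
        promise.setSuccess(addr);
        return promise;
    }

    public static void main(String[] args) throws InterruptedException {
        checkNodes(List.of("redis://127.0.0.1:6379", "redis://127.0.0.1:6380"));
        Thread.sleep(2000); // give the second pass time to run
    }
}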
Use of io.netty.util.concurrent.FutureListener in project redisson by redisson.
From the class RedisClient, method resolveAddr:
public CompletableFuture<InetSocketAddress> resolveAddr() {
    if (resolvedAddrFuture.get() != null) {
        return resolvedAddrFuture.get();
    }

    CompletableFuture<InetSocketAddress> promise = new CompletableFuture<>();
    if (!resolvedAddrFuture.compareAndSet(null, promise)) {
        return resolvedAddrFuture.get();
    }

    byte[] addr = NetUtil.createByteArrayFromIpAddressString(uri.getHost());
    if (addr != null) {
        try {
            resolvedAddr = new InetSocketAddress(InetAddress.getByAddress(uri.getHost(), addr), uri.getPort());
        } catch (UnknownHostException e) {
            // skip
        }
        promise.complete(resolvedAddr);
        return promise;
    }

    AddressResolver<InetSocketAddress> resolver = (AddressResolver<InetSocketAddress>)
            bootstrap.config().resolver().getResolver(bootstrap.config().group().next());
    Future<InetSocketAddress> resolveFuture =
            resolver.resolve(InetSocketAddress.createUnresolved(uri.getHost(), uri.getPort()));
    resolveFuture.addListener((FutureListener<InetSocketAddress>) future -> {
        if (!future.isSuccess()) {
            promise.completeExceptionally(future.cause());
            return;
        }

        InetSocketAddress resolved = future.getNow();
        byte[] addr1 = resolved.getAddress().getAddress();
        resolvedAddr = new InetSocketAddress(InetAddress.getByAddress(uri.getHost(), addr1), resolved.getPort());
        promise.complete(resolvedAddr);
    });
    return promise;
}
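The core of resolveAddr is a bridge from a Netty Future to a JDK CompletableFuture: the FutureListener either forwards the cause or completes the promise with the resolved address. A minimal sketch of that bridge, assuming Netty's stock DefaultAddressResolverGroup in place of Redisson's bootstrap-configured resolver and a hypothetical localhost target:

import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.resolver.AddressResolver;
import io.netty.resolver.DefaultAddressResolverGroup;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.FutureListener;

import java.net.InetSocketAddress;
import java.util.concurrent.CompletableFuture;

public class ResolveSketch {
    public static void main(String[] args) {
        NioEventLoopGroup group = new NioEventLoopGroup(1);
        AddressResolver<InetSocketAddress> resolver =
                DefaultAddressResolverGroup.INSTANCE.getResolver(group.next());

        CompletableFuture<InetSocketAddress> promise = new CompletableFuture<>();
        Future<InetSocketAddress> resolveFuture =
                resolver.resolve(InetSocketAddress.createUnresolved("localhost", 6379));
        resolveFuture.addListener((FutureListener<InetSocketAddress>) future -> {
            // Forward either outcome of the Netty future into the CompletableFuture.
            if (!future.isSuccess()) {
                promise.completeExceptionally(future.cause());
                return;
            }
            promise.complete(future.getNow());
        });

        System.out.println(promise.join());
        group.shutdownGracefully();
    }
}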
Use of io.netty.util.concurrent.FutureListener in project redisson by redisson.
From the class RedisExecutor, method handleBlockingOperations:
private void handleBlockingOperations(CompletableFuture<R> attemptPromise, RedisConnection connection, Long popTimeout) {
    FutureListener<Void> listener = f -> {
        mainPromise.completeExceptionally(new RedissonShutdownException("Redisson is shutdown"));
    };

    Timeout scheduledFuture;
    if (popTimeout != 0) {
        // handling cases when connection has been lost
        scheduledFuture = connectionManager.newTimeout(timeout -> {
            if (attemptPromise.complete(null)) {
                connection.forceFastReconnectAsync();
            }
        }, popTimeout + 1, TimeUnit.SECONDS);
    } else {
        scheduledFuture = null;
    }

    mainPromise.whenComplete((res, e) -> {
        if (scheduledFuture != null) {
            scheduledFuture.cancel();
        }

        synchronized (listener) {
            connectionManager.getShutdownPromise().removeListener(listener);
        }

        // handling cancel operation for blocking commands
        if ((mainPromise.isCancelled() || e instanceof InterruptedException) && !attemptPromise.isDone()) {
            log.debug("Canceled blocking operation {} used {}", command, connection);
            connection.forceFastReconnectAsync().whenComplete((r, ex) -> {
                attemptPromise.cancel(true);
            });
            return;
        }

        if (e instanceof RedissonShutdownException) {
            attemptPromise.completeExceptionally(e);
        }
    });

    synchronized (listener) {
        if (!mainPromise.isDone()) {
            connectionManager.getShutdownPromise().addListener(listener);
        }
    }
}
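The synchronized blocks guard a real race: without them, the whenComplete callback could run removeListener before the registration path finished its isDone check and addListener call, leaving a stale listener registered forever. A minimal, self-contained sketch of that register-then-unregister idiom, where shutdownPromise and IllegalStateException are stand-ins for connectionManager.getShutdownPromise() and RedissonShutdownException:

import io.netty.util.concurrent.FutureListener;
import io.netty.util.concurrent.GlobalEventExecutor;
import io.netty.util.concurrent.Promise;

import java.util.concurrent.CompletableFuture;

public class ShutdownListenerSketch {
    public static void main(String[] args) {
        // Stand-in for connectionManager.getShutdownPromise().
        Promise<Void> shutdownPromise = GlobalEventExecutor.INSTANCE.newPromise();
        CompletableFuture<String> mainPromise = new CompletableFuture<>();

        // Fail the in-flight operation fast once shutdown begins.
        FutureListener<Void> listener = f ->
                mainPromise.completeExceptionally(new IllegalStateException("client is shutting down"));

        // On completion (any outcome), unregister under the same lock used for registration.
        mainPromise.whenComplete((res, e) -> {
            synchronized (listener) {
                shutdownPromise.removeListener(listener);
            }
        });

        // Register only while the operation is still pending; the lock prevents
        // the removal above from racing ahead of this registration.
        synchronized (listener) {
            if (!mainPromise.isDone()) {
                shutdownPromise.addListener(listener);
            }
        }

        shutdownPromise.setSuccess(null); // simulate shutdown
        try {
            mainPromise.join();
        } catch (Exception e) {
            System.out.println("operation failed fast: " + e.getCause());
        }
    }
}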
Use of io.netty.util.concurrent.FutureListener in project redisson by redisson.
From the class SentinelConnectionManager, method performSentinelDNSCheck:
private void performSentinelDNSCheck(FutureListener<List<InetSocketAddress>> commonListener) {
    for (RedisURI host : sentinelHosts) {
        Future<List<InetSocketAddress>> allNodes =
                sentinelResolver.resolveAll(InetSocketAddress.createUnresolved(host.getHost(), host.getPort()));
        allNodes.addListener((FutureListener<List<InetSocketAddress>>) future -> {
            if (!future.isSuccess()) {
                log.error("Unable to resolve " + host.getHost(), future.cause());
                return;
            }

            future.getNow().stream()
                    .map(addr -> toURI(addr))
                    .filter(uri -> !sentinels.containsKey(uri))
                    .forEach(uri -> registerSentinel(uri, getConfig(), host.getHost()));
        });

        if (commonListener != null) {
            allNodes.addListener(commonListener);
        }
    }
}
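Here resolveAll returns every A/AAAA record for a sentinel hostname, and the listener registers any address not yet known. A minimal sketch of that resolve-all-and-react pattern, assuming Netty's DefaultAddressResolverGroup instead of Redisson's sentinelResolver and a hypothetical localhost sentinel:

import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.resolver.AddressResolver;
import io.netty.resolver.DefaultAddressResolverGroup;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.FutureListener;

import java.net.InetSocketAddress;
import java.util.List;

public class DnsCheckSketch {
    public static void main(String[] args) throws InterruptedException {
        NioEventLoopGroup group = new NioEventLoopGroup(1);
        AddressResolver<InetSocketAddress> resolver =
                DefaultAddressResolverGroup.INSTANCE.getResolver(group.next());

        // Resolve every address record behind one hostname.
        Future<List<InetSocketAddress>> allNodes =
                resolver.resolveAll(InetSocketAddress.createUnresolved("localhost", 26379));
        allNodes.addListener((FutureListener<List<InetSocketAddress>>) future -> {
            if (!future.isSuccess()) {
                System.err.println("Unable to resolve: " + future.cause());
                return;
            }
            future.getNow().forEach(addr -> System.out.println("sentinel: " + addr));
        });

        allNodes.await();
        // Graceful shutdown drains the pending listener notification before exit.
        group.shutdownGracefully().syncUninterruptibly();
    }
}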
Use of io.netty.util.concurrent.FutureListener in project netty by netty.
From the class SslHandler, method safeClose:
private void safeClose(final ChannelHandlerContext ctx, final ChannelFuture flushFuture, final ChannelPromise promise) {
    if (!ctx.channel().isActive()) {
        ctx.close(promise);
        return;
    }

    final Future<?> timeoutFuture;
    if (!flushFuture.isDone()) {
        long closeNotifyTimeout = closeNotifyFlushTimeoutMillis;
        if (closeNotifyTimeout > 0) {
            // Force-close the connection if close_notify is not fully sent in time.
            timeoutFuture = ctx.executor().schedule(new Runnable() {
                @Override
                public void run() {
                    // May be done in the meantime as cancel(...) is only best effort.
                    if (!flushFuture.isDone()) {
                        logger.warn("{} Last write attempt timed out; force-closing the connection.", ctx.channel());
                        addCloseListener(ctx.close(ctx.newPromise()), promise);
                    }
                }
            }, closeNotifyTimeout, TimeUnit.MILLISECONDS);
        } else {
            timeoutFuture = null;
        }
    } else {
        timeoutFuture = null;
    }

    // Close the connection if close_notify is sent in time.
    flushFuture.addListener(new ChannelFutureListener() {
        @Override
        public void operationComplete(ChannelFuture f) {
            if (timeoutFuture != null) {
                timeoutFuture.cancel(false);
            }

            final long closeNotifyReadTimeout = closeNotifyReadTimeoutMillis;
            if (closeNotifyReadTimeout <= 0) {
                // Trigger the close in all cases to make sure the promise is notified.
                // See https://github.com/netty/netty/issues/2358
                addCloseListener(ctx.close(ctx.newPromise()), promise);
            } else {
                final Future<?> closeNotifyReadTimeoutFuture;
                if (!sslClosePromise.isDone()) {
                    closeNotifyReadTimeoutFuture = ctx.executor().schedule(new Runnable() {
                        @Override
                        public void run() {
                            if (!sslClosePromise.isDone()) {
                                logger.debug("{} did not receive close_notify in {}ms; force-closing the connection.",
                                        ctx.channel(), closeNotifyReadTimeout);
                                // Do the close now...
                                addCloseListener(ctx.close(ctx.newPromise()), promise);
                            }
                        }
                    }, closeNotifyReadTimeout, TimeUnit.MILLISECONDS);
                } else {
                    closeNotifyReadTimeoutFuture = null;
                }

                // Do the close once we have received the close_notify.
                sslClosePromise.addListener(new FutureListener<Channel>() {
                    @Override
                    public void operationComplete(Future<Channel> future) throws Exception {
                        if (closeNotifyReadTimeoutFuture != null) {
                            closeNotifyReadTimeoutFuture.cancel(false);
                        }
                        addCloseListener(ctx.close(ctx.newPromise()), promise);
                    }
                });
            }
        }
    });
}
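The repeated shape in safeClose is a race between a scheduled force-close task and a promise: whichever side finishes first cancels the other. A minimal, transport-free sketch of that idiom, where donePromise and the println calls are hypothetical stand-ins for sslClosePromise and the actual channel close:

import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.FutureListener;
import io.netty.util.concurrent.GlobalEventExecutor;
import io.netty.util.concurrent.Promise;

import java.util.concurrent.TimeUnit;

public class CloseTimeoutSketch {
    public static void main(String[] args) throws InterruptedException {
        Promise<Void> donePromise = GlobalEventExecutor.INSTANCE.newPromise();

        // Timeout path: force the close if the peer never answers in time.
        Future<?> timeoutFuture = GlobalEventExecutor.INSTANCE.schedule(() -> {
            if (!donePromise.isDone()) {
                System.out.println("timed out; force-closing");
            }
        }, 3, TimeUnit.SECONDS);

        // Normal path: cancel the timeout once the close_notify equivalent arrives.
        donePromise.addListener((FutureListener<Void>) future -> {
            timeoutFuture.cancel(false);
            System.out.println("closed cleanly");
        });

        donePromise.setSuccess(null); // simulate the peer's close_notify
        Thread.sleep(100);            // let the listener run before exiting
    }
}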