Use of io.netty.channel.ChannelFuture in project flink by apache.
In class HttpTestClient, method sendRequest.
/**
 * Sends a request to the server.
 *
 * <pre>
 * HttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/overview");
 * request.headers().set(HttpHeaders.Names.HOST, host);
 * request.headers().set(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.CLOSE);
 *
 * sendRequest(request, timeout);
 * </pre>
 *
 * @param request The {@link HttpRequest} to send to the server
 * @param timeout The maximum time to wait for the connection to be established
 */
public void sendRequest(HttpRequest request, FiniteDuration timeout) throws InterruptedException, TimeoutException {
    LOG.debug("Writing {}.", request);

    // Make the connection attempt.
    ChannelFuture connect = bootstrap.connect(host, port);
    Channel channel;
    if (connect.await(timeout.toMillis(), TimeUnit.MILLISECONDS)) {
        channel = connect.channel();
    } else {
        throw new TimeoutException("Connection failed");
    }

    channel.writeAndFlush(request);
}
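A minimal usage sketch of this method, assuming an HttpTestClient instance named client and a host string in scope (both names are assumptions for illustration, not part of the snippet):

// Build the request as shown in the Javadoc example above.
HttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/overview");
request.headers().set(HttpHeaders.Names.HOST, host);
request.headers().set(HttpHeaders.Names.CONNECTION, HttpHeaders.Values.CLOSE);

// Throws TimeoutException if the connection cannot be established within 10 seconds.
client.sendRequest(request, new FiniteDuration(10, TimeUnit.SECONDS));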
Use of io.netty.channel.ChannelFuture in project flink by apache.
In class ClientTransportErrorHandlingTest, method testExceptionOnWrite.
/**
 * Verifies that failed client requests via {@link PartitionRequestClient} are correctly
 * attributed to the respective {@link RemoteInputChannel}.
 */
@Test
public void testExceptionOnWrite() throws Exception {
    NettyProtocol protocol = new NettyProtocol() {

        @Override
        public ChannelHandler[] getServerChannelHandlers() {
            return new ChannelHandler[0];
        }

        @Override
        public ChannelHandler[] getClientChannelHandlers() {
            return new PartitionRequestProtocol(
                    mock(ResultPartitionProvider.class),
                    mock(TaskEventDispatcher.class),
                    mock(NetworkBufferPool.class)).getClientChannelHandlers();
        }
    };

    // We need a real server and client in this test, because Netty's EmbeddedChannel
    // does not fail the ChannelPromise of failed writes.
    NettyServerAndClient serverAndClient = initServerAndClient(protocol, createConfig());

    Channel ch = connect(serverAndClient);

    PartitionRequestClientHandler handler = getClientHandler(ch);

    // The last outbound handler throws an exception after the first write. It is added
    // via addFirst(), because outbound events travel from the pipeline's tail to its head.
    ch.pipeline().addFirst(new ChannelOutboundHandlerAdapter() {
        int writeNum = 0;

        @Override
        public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception {
            if (writeNum >= 1) {
                throw new RuntimeException("Expected test exception.");
            }
            writeNum++;
            ctx.write(msg, promise);
        }
    });

    PartitionRequestClient requestClient = new PartitionRequestClient(
            ch, handler, mock(ConnectionID.class), mock(PartitionRequestClientFactory.class));

    // Create input channels
    RemoteInputChannel[] rich = new RemoteInputChannel[] {
            createRemoteInputChannel(), createRemoteInputChannel() };

    final CountDownLatch sync = new CountDownLatch(1);

    // Do this with explicit synchronization. Otherwise this is not robust against slow timings
    // of the callback (e.g. we cannot just verify that it was called once, because there is
    // a chance that we do this too early).
    doAnswer(new Answer<Void>() {
        @Override
        public Void answer(InvocationOnMock invocation) throws Throwable {
            sync.countDown();
            return null;
        }
    }).when(rich[1]).onError(isA(LocalTransportException.class));

    // First request is successful
    ChannelFuture f = requestClient.requestSubpartition(new ResultPartitionID(), 0, rich[0], 0);
    assertTrue(f.await().isSuccess());

    // Second request is *not* successful
    f = requestClient.requestSubpartition(new ResultPartitionID(), 0, rich[1], 0);
    assertFalse(f.await().isSuccess());

    // Only the second channel should be notified about the error
    verify(rich[0], times(0)).onError(any(LocalTransportException.class));

    // Wait for the notification
    if (!sync.await(TestingUtils.TESTING_DURATION().toMillis(), TimeUnit.MILLISECONDS)) {
        fail("Timed out after waiting for " + TestingUtils.TESTING_DURATION().toMillis()
                + " ms to be notified about the channel error.");
    }

    shutdown(serverAndClient);
}
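The attribution this test verifies rests on a general Netty idiom: a failed write completes its ChannelFuture with the failure cause, and a listener routes that cause to the interested party. A generic sketch of the idiom (channel, msg and handleError are placeholder names, not identifiers from the test):

ChannelFuture writeFuture = channel.writeAndFlush(msg);
writeFuture.addListener(new ChannelFutureListener() {
    @Override
    public void operationComplete(ChannelFuture future) {
        if (!future.isSuccess()) {
            // Route the transport error to whoever issued the write,
            // e.g. the matching RemoteInputChannel in the test above.
            handleError(future.cause()); // hypothetical error handler
        }
    }
});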
Use of io.netty.channel.ChannelFuture in project redisson by redisson.
In class CommandBatchService, method execute.
private void execute(final Entry entry, final NodeSource source, final RPromise<Void> mainPromise,
        final AtomicInteger slots, final int attempt, final boolean noResult) {
    if (mainPromise.isCancelled()) {
        return;
    }

    if (!connectionManager.getShutdownLatch().acquire()) {
        mainPromise.tryFailure(new IllegalStateException("Redisson is shutdown"));
        return;
    }

    final RPromise<Void> attemptPromise = connectionManager.newPromise();
    final AsyncDetails details = new AsyncDetails();

    final RFuture<RedisConnection> connectionFuture;
    if (entry.isReadOnlyMode()) {
        connectionFuture = connectionManager.connectionReadOp(source, null);
    } else {
        connectionFuture = connectionManager.connectionWriteOp(source, null);
    }

    // Scheduled before the attempt starts; fires only if the attempt has not
    // completed within the retry interval.
    final TimerTask retryTimerTask = new TimerTask() {
        @Override
        public void run(Timeout timeout) throws Exception {
            if (attemptPromise.isDone()) {
                return;
            }

            if (connectionFuture.cancel(false)) {
                connectionManager.getShutdownLatch().release();
            } else {
                if (connectionFuture.isSuccess()) {
                    ChannelFuture writeFuture = details.getWriteFuture();
                    // The write already completed successfully and cannot be
                    // cancelled: there is nothing to retry.
                    if (writeFuture != null && !writeFuture.cancel(false) && writeFuture.isSuccess()) {
                        return;
                    }
                }
            }

            if (mainPromise.isCancelled()) {
                attemptPromise.cancel(false);
                return;
            }

            if (attempt == connectionManager.getConfig().getRetryAttempts()) {
                if (details.getException() == null) {
                    details.setException(new RedisTimeoutException("Batch command execution timeout"));
                }
                attemptPromise.tryFailure(details.getException());
                return;
            }

            if (!attemptPromise.cancel(false)) {
                return;
            }

            int count = attempt + 1;
            execute(entry, source, mainPromise, slots, count, noResult);
        }
    };

    Timeout timeout = connectionManager.newTimeout(retryTimerTask,
            connectionManager.getConfig().getRetryInterval(), TimeUnit.MILLISECONDS);
    details.setTimeout(timeout);

    connectionFuture.addListener(new FutureListener<RedisConnection>() {
        @Override
        public void operationComplete(Future<RedisConnection> connFuture) throws Exception {
            checkConnectionFuture(entry, source, mainPromise, attemptPromise, details,
                    connectionFuture, noResult);
        }
    });

    attemptPromise.addListener(new FutureListener<Void>() {
        @Override
        public void operationComplete(Future<Void> future) throws Exception {
            details.getTimeout().cancel();
            if (future.isCancelled()) {
                return;
            }

            // Cluster redirects and transient server states re-run the batch
            // with an adjusted node source instead of failing it.
            if (future.cause() instanceof RedisMovedException) {
                RedisMovedException ex = (RedisMovedException) future.cause();
                entry.clearErrors();
                execute(entry, new NodeSource(ex.getSlot(), ex.getAddr(), Redirect.MOVED),
                        mainPromise, slots, attempt, noResult);
                return;
            }

            if (future.cause() instanceof RedisAskException) {
                RedisAskException ex = (RedisAskException) future.cause();
                entry.clearErrors();
                execute(entry, new NodeSource(ex.getSlot(), ex.getAddr(), Redirect.ASK),
                        mainPromise, slots, attempt, noResult);
                return;
            }

            if (future.cause() instanceof RedisLoadingException) {
                entry.clearErrors();
                execute(entry, source, mainPromise, slots, attempt, noResult);
                return;
            }

            if (future.cause() instanceof RedisTryAgainException) {
                entry.clearErrors();
                connectionManager.newTimeout(new TimerTask() {
                    @Override
                    public void run(Timeout timeout) throws Exception {
                        execute(entry, source, mainPromise, slots, attempt, noResult);
                    }
                }, 1, TimeUnit.SECONDS);
                return;
            }

            if (future.isSuccess()) {
                // The main promise completes once the batches for all slots have succeeded.
                if (slots.decrementAndGet() == 0) {
                    mainPromise.trySuccess(future.getNow());
                }
            } else {
                mainPromise.tryFailure(future.cause());
            }
        }
    });
}
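The method pairs each attempt with a retry TimerTask: the timer re-invokes execute unless the attempt promise completes first, in which case the attempt listener cancels the timeout. A stripped-down, self-contained sketch of this race between completion and retry, using Netty's HashedWheelTimer (the RetryPattern class, startAttempt, and the interval values are illustrative assumptions, not Redisson code):

import io.netty.util.HashedWheelTimer;
import io.netty.util.Timeout;
import io.netty.util.TimerTask;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;

public class RetryPattern {
    private static final HashedWheelTimer TIMER = new HashedWheelTimer();
    private static final int MAX_ATTEMPTS = 3;

    static void execute(CompletableFuture<Void> mainPromise, int attempt) {
        CompletableFuture<Void> attemptPromise = new CompletableFuture<>();

        // Schedule the retry before starting the attempt, mirroring the code above.
        Timeout retry = TIMER.newTimeout(new TimerTask() {
            @Override
            public void run(Timeout t) {
                if (attemptPromise.isDone()) {
                    return; // the attempt won the race
                }
                if (attempt == MAX_ATTEMPTS) {
                    mainPromise.completeExceptionally(new RuntimeException("retries exhausted"));
                    return;
                }
                attemptPromise.cancel(false);
                execute(mainPromise, attempt + 1);
            }
        }, 100, TimeUnit.MILLISECONDS);

        // When the attempt completes first, the timer must not fire a retry.
        attemptPromise.whenComplete((v, e) -> retry.cancel());

        startAttempt(attemptPromise); // hypothetical: performs the actual I/O
    }

    private static void startAttempt(CompletableFuture<Void> p) {
        // Placeholder for the real connection/write logic.
    }
}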
Use of io.netty.channel.ChannelFuture in project redisson by redisson.
In class CommandAsyncService, method checkWriteFuture.
private <V, R> void checkWriteFuture(final AsyncDetails<V, R> details, final RedisConnection connection) {
    ChannelFuture future = details.getWriteFuture();
    if (details.getAttemptPromise().isDone() || future.isCancelled()) {
        return;
    }

    if (!future.isSuccess()) {
        details.setException(new WriteRedisConnectionException(
                "Can't write command: " + details.getCommand()
                + ", params: " + LogHelper.toString(details.getParams())
                + " to channel: " + future.channel(), future.cause()));
        return;
    }

    details.getTimeout().cancel();

    long timeoutTime = connectionManager.getConfig().getTimeout();
    if (RedisCommands.BLOCKING_COMMANDS.contains(details.getCommand().getName())) {
        Long popTimeout = Long.valueOf(details.getParams()[details.getParams().length - 1].toString());
        handleBlockingOperations(details, connection, popTimeout);
        if (popTimeout == 0) {
            return;
        }
        timeoutTime += popTimeout * 1000;
        // add 1 second due to issue https://github.com/antirez/redis/issues/874
        timeoutTime += 1000;
    }

    final long timeoutAmount = timeoutTime;
    TimerTask timeoutTask = new TimerTask() {
        @Override
        public void run(Timeout timeout) throws Exception {
            details.getAttemptPromise().tryFailure(new RedisTimeoutException(
                    "Redis server response timeout (" + timeoutAmount + " ms) occurred for command: "
                    + details.getCommand() + " with params: " + LogHelper.toString(details.getParams())
                    + " channel: " + connection.getChannel()));
        }
    };

    Timeout timeout = connectionManager.newTimeout(timeoutTask, timeoutTime, TimeUnit.MILLISECONDS);
    details.setTimeout(timeout);
}
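A worked example of the blocking-command timeout arithmetic above, with hypothetical values:

long configuredTimeout = 3000;  // connectionManager.getConfig().getTimeout(), in ms (hypothetical value)
long popTimeout = 5;            // last BLPOP parameter, in seconds (hypothetical value)

long timeoutTime = configuredTimeout
        + popTimeout * 1000     // the server is allowed to block this long by design
        + 1000;                 // safety margin for https://github.com/antirez/redis/issues/874
// timeoutTime == 9000 ms. A popTimeout of 0 means "block indefinitely",
// so the method returns early and schedules no response timeout at all.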
Use of io.netty.channel.ChannelFuture in project redisson by redisson.
In class MasterSlaveEntry, method slaveDown.
private boolean slaveDown(ClientConnectionsEntry entry, boolean temporaryDown) {
    // add master as slave if no more slaves available
    if (config.getReadMode() == ReadMode.SLAVE && slaveBalancer.getAvailableClients() == 0) {
        InetSocketAddress addr = masterEntry.getClient().getAddr();
        if (slaveUp(addr.getHostName(), addr.getPort(), FreezeReason.SYSTEM)) {
            log.info("master {}:{} used as slave", addr.getHostName(), addr.getPort());
        }
    }

    // close all connections
    while (true) {
        final RedisConnection connection = entry.pollConnection();
        if (connection == null) {
            break;
        }

        connection.closeAsync().addListener(new ChannelFutureListener() {
            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                reattachBlockingQueue(connection);
            }
        });
    }

    // close all pub/sub connections
    while (true) {
        RedisPubSubConnection connection = entry.pollSubscribeConnection();
        if (connection == null) {
            break;
        }
        connection.closeAsync();
    }

    for (RedisPubSubConnection connection : entry.getAllSubscribeConnections()) {
        reattachPubSub(connection, temporaryDown);
    }
    entry.getAllSubscribeConnections().clear();

    return true;
}
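The close-then-reattach step relies on the same listener idiom seen throughout this page: closeAsync() returns a future, so the reattachment runs only after the channel has actually closed. A generic sketch, assuming a final RedisConnection named connection is in scope (reattach is a hypothetical stand-in for reattachBlockingQueue):

connection.closeAsync().addListener(new ChannelFutureListener() {
    @Override
    public void operationComplete(ChannelFuture future) throws Exception {
        // Runs after the close completes, typically on the channel's event loop.
        reattach(connection); // hypothetical stand-in
    }
});

In Java 8+, the same listener can be written as a lambda, with a cast to pick the listener type: connection.closeAsync().addListener((ChannelFutureListener) f -> reattach(connection));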