use of io.netty.channel.Channel in project async-http-client by AsyncHttpClient.
the class ChannelManager method getClientStats.
public ClientStats getClientStats() {
    Map<String, Long> totalConnectionsPerHost = openChannels.stream()
            .map(Channel::remoteAddress)
            .filter(a -> a.getClass() == InetSocketAddress.class)
            .map(a -> (InetSocketAddress) a)
            .map(InetSocketAddress::getHostName)
            .collect(Collectors.groupingBy(Function.identity(), Collectors.counting()));
    Map<String, Long> idleConnectionsPerHost = channelPool.getIdleChannelCountPerHost();
    Map<String, HostStats> statsPerHost = totalConnectionsPerHost.entrySet().stream()
            .collect(Collectors.toMap(Entry::getKey, entry -> {
                final long totalConnectionCount = entry.getValue();
                final long idleConnectionCount = idleConnectionsPerHost.getOrDefault(entry.getKey(), 0L);
                final long activeConnectionCount = totalConnectionCount - idleConnectionCount;
                return new HostStats(activeConnectionCount, idleConnectionCount);
            }));
    return new ClientStats(statsPerHost);
}
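These per-host aggregates are reachable through the public client API. A minimal usage sketch follows; the getter names on ClientStats are from the AHC 2.x API as I recall them, so verify them against your version.
import org.asynchttpclient.AsyncHttpClient;
import org.asynchttpclient.ClientStats;
import org.asynchttpclient.Dsl;

public class StatsExample {
    public static void main(String[] args) throws Exception {
        try (AsyncHttpClient client = Dsl.asyncHttpClient()) {
            // Issue one request so at least one connection exists.
            client.executeRequest(Dsl.get("https://example.org").build()).get();
            ClientStats stats = client.getClientStats();
            // Totals are derived from the per-host map built in getClientStats().
            System.out.println("total: " + stats.getTotalConnectionCount()
                    + ", active: " + stats.getTotalActiveConnectionCount()
                    + ", idle: " + stats.getTotalIdleConnectionCount());
        }
    }
}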
use of io.netty.channel.Channel in project async-http-client by AsyncHttpClient.
the class NettyRequestSender method sendRequestThroughSslProxy.
/**
 * Using CONNECT depends on whether we can fetch a valid channel or not.
 * Loop until we get a valid channel from the pool and it is still valid
 * once the request is built.
 */
@SuppressWarnings("unused")
private <T> ListenableFuture<T> sendRequestThroughSslProxy(Request request,
                                                           AsyncHandler<T> asyncHandler,
                                                           NettyResponseFuture<T> future,
                                                           boolean performingNextRequest,
                                                           ProxyServer proxyServer) {
    NettyResponseFuture<T> newFuture = null;
    for (int i = 0; i < 3; i++) {
        Channel channel = getOpenChannel(future, request, proxyServer, asyncHandler);
        if (Channels.isChannelValid(channel))
            if (newFuture == null)
                newFuture = newNettyRequestAndResponseFuture(request, asyncHandler, future, proxyServer, false);
        if (Channels.isChannelValid(channel))
            // the channel is still valid now that the request has been built
            return sendRequestWithOpenChannel(request, proxyServer, newFuture, asyncHandler, channel);
        else
            // pool is empty
            break;
    }
    newFuture = newNettyRequestAndResponseFuture(request, asyncHandler, future, proxyServer, true);
    return sendRequestWithNewChannel(request, proxyServer, newFuture, asyncHandler, performingNextRequest);
}
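The loop above is a bounded "retry until a pooled resource is still valid" pattern: a channel polled from the pool may have been closed by the time the request is built, so validity is rechecked before use, and after three failed attempts the method pays for a fresh channel instead. A standalone sketch of that pattern, with hypothetical Pool and Resource types standing in for the AHC channel pool:
import java.util.function.Supplier;

final class PooledRetry {
    interface Resource { boolean isValid(); }
    interface Pool { Resource poll(); }

    // Try the pool a bounded number of times before creating a new resource.
    static Resource acquire(Pool pool, Supplier<Resource> create, int attempts) {
        for (int i = 0; i < attempts; i++) {
            Resource r = pool.poll();
            if (r == null) {
                break;        // pool is empty: stop retrying, fall through
            }
            if (r.isValid()) {
                return r;     // still valid at the moment we are ready to use it
            }
            // invalid (e.g. closed while idle): loop and poll again
        }
        return create.get();  // fall back to a fresh resource
    }
}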
use of io.netty.channel.Channel in project async-http-client by AsyncHttpClient.
the class NettyChannelConnector method connect0.
private void connect0(Bootstrap bootstrap, final NettyConnectListener<?> connectListener, InetSocketAddress remoteAddress) {
    bootstrap.connect(remoteAddress, localAddress).addListener(new SimpleChannelFutureListener() {

        @Override
        public void onSuccess(Channel channel) {
            if (asyncHandlerExtensions != null) {
                asyncHandlerExtensions.onTcpConnectSuccess(remoteAddress, channel);
            }
            connectListener.onSuccess(channel, remoteAddress);
        }

        @Override
        public void onFailure(Channel channel, Throwable t) {
            if (asyncHandlerExtensions != null) {
                asyncHandlerExtensions.onTcpConnectFailure(remoteAddress, t);
            }
            boolean retry = pickNextRemoteAddress();
            if (retry) {
                NettyChannelConnector.this.connect(bootstrap, connectListener);
            } else {
                connectListener.onFailure(channel, t);
            }
        }
    });
}
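SimpleChannelFutureListener is an AHC-internal helper; Netty itself only offers the single-callback ChannelFutureListener. A minimal equivalent, splitting operationComplete into success and failure hooks the way the snippet above relies on, might look like this:
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;

// Splits Netty's single operationComplete callback into success/failure hooks.
abstract class SuccessFailureChannelListener implements ChannelFutureListener {

    @Override
    public final void operationComplete(ChannelFuture future) {
        Channel channel = future.channel();
        if (future.isSuccess()) {
            onSuccess(channel);
        } else {
            onFailure(channel, future.cause());
        }
    }

    public abstract void onSuccess(Channel channel);

    public abstract void onFailure(Channel channel, Throwable cause);
}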
use of io.netty.channel.Channel in project alluxio by Alluxio.
the class NettyRemoteBlockWriter method write.
@Override
@Override
public void write(byte[] bytes, int offset, int length) throws IOException {
    Channel channel = null;
    ClientHandler clientHandler = null;
    Metrics.NETTY_BLOCK_WRITE_OPS.inc();
    try {
        channel = mContext.acquireNettyChannel(mAddress);
        if (!(channel.pipeline().last() instanceof ClientHandler)) {
            channel.pipeline().addLast(new ClientHandler());
        }
        clientHandler = (ClientHandler) channel.pipeline().last();
        SingleResponseListener listener = new SingleResponseListener();
        clientHandler.addListener(listener);
        ChannelFuture channelFuture = channel.writeAndFlush(new RPCBlockWriteRequest(mSessionId, mBlockId,
            mWrittenBytes, length, new DataByteArrayChannel(bytes, offset, length))).sync();
        if (channelFuture.isDone() && !channelFuture.isSuccess()) {
            // SLF4J uses {} placeholders, not printf-style %s/%d.
            LOG.error("Failed to write to {} for block {} with error {}.", mAddress, mBlockId,
                channelFuture.cause());
            throw new IOException(channelFuture.cause());
        }
        RPCResponse response = listener.get(NettyClient.TIMEOUT_MS, TimeUnit.MILLISECONDS);
        switch (response.getType()) {
            case RPC_BLOCK_WRITE_RESPONSE:
                RPCBlockWriteResponse resp = (RPCBlockWriteResponse) response;
                RPCResponse.Status status = resp.getStatus();
                LOG.debug("status: {} from remote machine {} received", status, mAddress);
                if (status != RPCResponse.Status.SUCCESS) {
                    throw new IOException(ExceptionMessage.BLOCK_WRITE_ERROR.getMessage(mBlockId, mSessionId,
                        mAddress, status.getMessage()));
                }
                mWrittenBytes += length;
                break;
            case RPC_ERROR_RESPONSE:
                RPCErrorResponse error = (RPCErrorResponse) response;
                throw new IOException(error.getStatus().getMessage());
            default:
                throw new IOException(ExceptionMessage.UNEXPECTED_RPC_RESPONSE.getMessage(response.getType(),
                    RPCMessage.Type.RPC_BLOCK_WRITE_RESPONSE));
        }
    } catch (Exception e) {
        Metrics.NETTY_BLOCK_WRITE_FAILURES.inc();
        try {
            // TODO(peis): We should not close the channel unless it is an exception caused by network.
            if (channel != null) {
                channel.close().sync();
            }
        } catch (InterruptedException ee) {
            Throwables.propagate(ee);
        }
        throw new IOException(e);
    } finally {
        if (clientHandler != null) {
            clientHandler.removeListeners();
        }
        if (channel != null) {
            mContext.releaseNettyChannel(mAddress, channel);
        }
    }
}
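Both Alluxio snippets bridge the asynchronous channel read back to a blocking call through SingleResponseListener, an internal class not shown here. A hypothetical reconstruction of such a one-shot listener, built on a CountDownLatch (the real Alluxio class may differ):
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

// Captures exactly one response and hands it to a blocking caller.
final class OneShotResponseListener<T> {
    private final CountDownLatch mLatch = new CountDownLatch(1);
    private volatile T mResponse;

    // Called by the channel handler when the response message arrives.
    void onResponseReceived(T response) {
        mResponse = response;
        mLatch.countDown();
    }

    // Blocks the caller until the response arrives or the timeout elapses.
    T get(long timeout, TimeUnit unit) throws InterruptedException, TimeoutException {
        if (!mLatch.await(timeout, unit)) {
            throw new TimeoutException("No RPC response within " + timeout + " " + unit);
        }
        return mResponse;
    }
}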
use of io.netty.channel.Channel in project alluxio by Alluxio.
the class NettyUnderFileSystemBlockReader method read.
@Override
public ByteBuffer read(InetSocketAddress address, long blockId, long offset, long length,
    long sessionId, boolean noCache) throws IOException {
    Channel channel = null;
    ClientHandler clientHandler = null;
    Metrics.NETTY_UFS_BLOCK_READ_OPS.inc();
    try {
        channel = mContext.acquireNettyChannel(address);
        if (!(channel.pipeline().last() instanceof ClientHandler)) {
            channel.pipeline().addLast(new ClientHandler());
        }
        clientHandler = (ClientHandler) channel.pipeline().last();
        SingleResponseListener listener = new SingleResponseListener();
        clientHandler.addListener(listener);
        ChannelFuture channelFuture = channel.writeAndFlush(
            new RPCUnderFileSystemBlockReadRequest(blockId, offset, length, sessionId, noCache)).sync();
        if (channelFuture.isDone() && !channelFuture.isSuccess()) {
            // SLF4J uses {} placeholders, not printf-style %s/%d.
            LOG.error("Failed to read from {} for block {} with error {}.", address, blockId,
                channelFuture.cause());
            throw new IOException(channelFuture.cause());
        }
        RPCResponse response = listener.get(NettyClient.TIMEOUT_MS, TimeUnit.MILLISECONDS);
        switch (response.getType()) {
            case RPC_BLOCK_READ_RESPONSE:
                RPCBlockReadResponse blockResponse = (RPCBlockReadResponse) response;
                LOG.debug("Data {} from machine {} received", blockId, address);
                RPCResponse.Status status = blockResponse.getStatus();
                if (status == RPCResponse.Status.SUCCESS) {
                    // Always clear the previous response before reading another one.
                    close();
                    mReadResponse = blockResponse;
                    return blockResponse.getPayloadDataBuffer().getReadOnlyByteBuffer();
                }
                throw new IOException(status.getMessage() + " response: " + blockResponse);
            case RPC_ERROR_RESPONSE:
                RPCErrorResponse error = (RPCErrorResponse) response;
                throw new IOException(error.getStatus().getMessage());
            default:
                throw new IOException(ExceptionMessage.UNEXPECTED_RPC_RESPONSE.getMessage(response.getType(),
                    RPCMessage.Type.RPC_BLOCK_READ_RESPONSE));
        }
    } catch (Exception e) {
        Metrics.NETTY_UFS_BLOCK_READ_FAILURES.inc();
        try {
            if (channel != null) {
                channel.close().sync();
            }
        } catch (InterruptedException ee) {
            throw new RuntimeException(ee);
        }
        throw new IOException(e);
    } finally {
        if (clientHandler != null) {
            clientHandler.removeListeners();
        }
        if (channel != null) {
            mContext.releaseNettyChannel(address, channel);
        }
    }
}
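Both Alluxio methods follow the same pooled-channel lifecycle: acquire a channel, make sure a handler sits at the end of the pipeline, write and wait, close the channel only on error, and always release it back to the pool in finally. A condensed sketch of that skeleton; the acquire/release hooks are assumptions mirroring mContext above, not an actual Alluxio API:
import io.netty.channel.Channel;
import java.io.IOException;
import java.net.InetSocketAddress;

abstract class PooledChannelCall<T> {
    protected abstract Channel acquire(InetSocketAddress address) throws IOException;
    protected abstract void release(InetSocketAddress address, Channel channel);
    protected abstract T doCall(Channel channel) throws Exception;

    final T call(InetSocketAddress address) throws IOException {
        Channel channel = null;
        try {
            channel = acquire(address);
            return doCall(channel);
        } catch (Exception e) {
            // On failure, close rather than recycle a possibly broken channel.
            if (channel != null) {
                channel.close();
            }
            throw new IOException(e);
        } finally {
            // Release unconditionally, matching the finally blocks above.
            if (channel != null) {
                release(address, channel);
            }
        }
    }
}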