
Example 81 with ChannelFuture

use of io.netty.channel.ChannelFuture in project hbase by apache.

the class FanOutOneBlockAsyncDFSOutputHelper method connectToDataNodes.

private static List<Future<Channel>> connectToDataNodes(Configuration conf, DFSClient client, String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS, BlockConstructionStage stage, DataChecksum summer, EventLoop eventLoop) {
    Enum<?>[] storageTypes = locatedBlock.getStorageTypes();
    DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
    boolean connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
    int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
    ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
    blockCopy.setNumBytes(locatedBlock.getBlockSize());
    ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
        .setBaseHeader(BaseHeaderProto.newBuilder()
            .setBlock(PB_HELPER.convert(blockCopy))
            .setToken(PB_HELPER.convert(locatedBlock.getBlockToken())))
        .setClientName(clientName)
        .build();
    ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
    OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder()
        .setHeader(header)
        .setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name()))
        .setPipelineSize(1)
        .setMinBytesRcvd(locatedBlock.getBlock().getNumBytes())
        .setMaxBytesRcvd(maxBytesRcvd)
        .setLatestGenerationStamp(latestGS)
        .setRequestedChecksum(checksumProto)
        .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
    List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
    for (int i = 0; i < datanodeInfos.length; i++) {
        DatanodeInfo dnInfo = datanodeInfos[i];
        Enum<?> storageType = storageTypes[i];
        Promise<Channel> promise = eventLoop.newPromise();
        futureList.add(promise);
        String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
        new Bootstrap().group(eventLoop).channel(NioSocketChannel.class).option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer<Channel>() {

            @Override
            protected void initChannel(Channel ch) throws Exception {
            // we need to get the remote address of the channel so we can only move on after
            // channel connected. Leave an empty implementation here because netty does not allow
            // a null handler.
            }
        }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {

            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder, timeoutMs, client, locatedBlock.getBlockToken(), promise);
                } else {
                    promise.tryFailure(future.cause());
                }
            }
        });
    }
    return futureList;
}
Also used : ChannelFuture(io.netty.channel.ChannelFuture) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) NioSocketChannel(io.netty.channel.socket.nio.NioSocketChannel) Channel(io.netty.channel.Channel) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ArrayList(java.util.ArrayList) ChecksumProto(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto) OpWriteBlockProto(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) ChannelFutureListener(io.netty.channel.ChannelFutureListener) UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException) LeaseExpiredException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException) InvalidBlockTokenException(org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException) InvocationTargetException(java.lang.reflect.InvocationTargetException) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException) Future(io.netty.util.concurrent.Future) Bootstrap(io.netty.bootstrap.Bootstrap) ClientOperationHeaderProto(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto)
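The core of this example is the connect-then-promise handshake: each Bootstrap.connect() returns a ChannelFuture, and its listener either fulfils or fails the per-datanode Promise<Channel>. Below is a minimal, self-contained sketch of that pattern in isolation; the ConnectAllExample class, the connectAll method, and the plain address list are hypothetical stand-ins, not part of the HBase class above.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoop;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.concurrent.Promise;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;

public final class ConnectAllExample {

    // Connects to every address and returns one promise per connection attempt.
    // Each promise completes with the connected Channel or fails with the connect error.
    static List<Promise<Channel>> connectAll(EventLoop eventLoop,
            List<InetSocketAddress> addresses, int timeoutMs) {
        List<Promise<Channel>> promises = new ArrayList<>(addresses.size());
        for (InetSocketAddress address : addresses) {
            Promise<Channel> promise = eventLoop.newPromise();
            promises.add(promise);
            new Bootstrap()
                .group(eventLoop)
                .channel(NioSocketChannel.class)
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, timeoutMs)
                // Netty requires a handler even when only the connect matters,
                // hence the empty initializer, mirroring the HBase example above.
                .handler(new ChannelInitializer<Channel>() {
                    @Override
                    protected void initChannel(Channel ch) {
                    }
                })
                .connect(address)
                .addListener((ChannelFutureListener) future -> {
                    if (future.isSuccess()) {
                        promise.trySuccess(future.channel());
                    } else {
                        promise.tryFailure(future.cause());
                    }
                });
        }
        return promises;
    }
}

Returning promises rather than the raw connect futures lets a later pipeline-setup step (initialize() in the HBase code) decide when each channel counts as ready.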

Example 82 with ChannelFuture

use of io.netty.channel.ChannelFuture in project mongo-java-driver by mongodb.

the class NettyStream method writeAsync.

@Override
public void writeAsync(final List<ByteBuf> buffers, final AsyncCompletionHandler<Void> handler) {
    CompositeByteBuf composite = PooledByteBufAllocator.DEFAULT.compositeBuffer();
    for (ByteBuf cur : buffers) {
        composite.addComponent(true, ((NettyByteBuf) cur).asByteBuf());
    }
    channel.writeAndFlush(composite).addListener(new ChannelFutureListener() {

        @Override
        public void operationComplete(final ChannelFuture future) throws Exception {
            if (!future.isSuccess()) {
                handler.failed(future.cause());
            } else {
                handler.completed(null);
            }
        }
    });
}
Also used : CompositeByteBuf(io.netty.buffer.CompositeByteBuf) ChannelFuture(io.netty.channel.ChannelFuture) ByteBuf(org.bson.ByteBuf) ChannelFutureListener(io.netty.channel.ChannelFutureListener) MongoInternalException(com.mongodb.MongoInternalException) MongoSocketOpenException(com.mongodb.MongoSocketOpenException) ReadTimeoutException(io.netty.handler.timeout.ReadTimeoutException) MongoInterruptedException(com.mongodb.MongoInterruptedException) MongoException(com.mongodb.MongoException) IOException(java.io.IOException) MongoSocketReadTimeoutException(com.mongodb.MongoSocketReadTimeoutException)
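The same write path can be expressed with a lambda listener and a JDK CompletableFuture instead of the driver's AsyncCompletionHandler. The sketch below works directly on Netty ByteBufs and uses the channel's own allocator for the composite buffer; WriteExample and writeAll are hypothetical names, not part of the driver.

import io.netty.buffer.ByteBuf;
import io.netty.buffer.CompositeByteBuf;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFutureListener;
import java.util.List;
import java.util.concurrent.CompletableFuture;

final class WriteExample {

    // Aggregates the buffers into one composite message, writes it, and completes
    // the returned future when the flush succeeds or fails.
    static CompletableFuture<Void> writeAll(Channel channel, List<ByteBuf> buffers) {
        CompletableFuture<Void> result = new CompletableFuture<>();
        CompositeByteBuf composite = channel.alloc().compositeBuffer();
        for (ByteBuf buf : buffers) {
            composite.addComponent(true, buf);
        }
        channel.writeAndFlush(composite).addListener((ChannelFutureListener) future -> {
            if (future.isSuccess()) {
                result.complete(null);
            } else {
                result.completeExceptionally(future.cause());
            }
        });
        return result;
    }
}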

Example 83 with ChannelFuture

use of io.netty.channel.ChannelFuture in project pinot by linkedin.

the class NettyTCPClientConnection method connect.

/**
   * Open a connection
   */
@Override
public boolean connect() {
    try {
        checkTransition(State.CONNECTED);
        //Connect synchronously. At the end of this line, _channel should have been set
        TimerContext t = MetricsHelper.startTimer();
        ChannelFuture f = _bootstrap.connect(_server.getHostname(), _server.getPort()).sync();
        /**
       * Waiting for future alone does not guarantee that _channel is set. _channel is set
       * only when the channelActive() async callback runs. So we should also wait for it.
       */
        f.get();
        _channelSet.await();
        t.stop();
        _connState = State.CONNECTED;
        _clientMetric.addConnectStats(t.getLatencyMs());
        return true;
    } catch (Exception ie) {
        if (ie instanceof ConnectException && ie.getMessage() != null && ie.getMessage().startsWith("Connection refused")) {
            // Most common case when a server is down. Don't print the entire stack and fill the logs.
            LOGGER.error("Could not connect to server {}:{} connId:{}", _server, ie.getMessage(), getConnId());
        } else {
            LOGGER.error("Got exception when connecting to server {} connId {}", _server, ie, getConnId());
        }
    }
    return false;
}
Also used : ChannelFuture(io.netty.channel.ChannelFuture) TimerContext(com.linkedin.pinot.common.metrics.MetricsHelper.TimerContext) ConnectException(java.net.ConnectException)
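The Pinot code blocks twice: once on the connect future and once on a latch counted down in channelActive(), because its channel field is only assigned in that callback. For comparison, here is a minimal sketch of just the blocking-connect half, using an explicit await timeout instead of sync(); BlockingConnectExample and connectBlocking are hypothetical names, and the Bootstrap is assumed to be fully configured by the caller.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import java.util.concurrent.TimeUnit;

final class BlockingConnectExample {

    // Connects synchronously and returns the channel, or throws if the connect
    // fails or does not complete within timeoutMs.
    static Channel connectBlocking(Bootstrap bootstrap, String host, int port, long timeoutMs)
            throws InterruptedException {
        ChannelFuture future = bootstrap.connect(host, port);
        if (!future.await(timeoutMs, TimeUnit.MILLISECONDS)) {
            future.cancel(true);
            throw new IllegalStateException("Connect to " + host + ":" + port + " timed out");
        }
        if (!future.isSuccess()) {
            throw new IllegalStateException("Connect to " + host + ":" + port + " failed", future.cause());
        }
        return future.channel();
    }
}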

Example 84 with ChannelFuture

use of io.netty.channel.ChannelFuture in project flink by apache.

the class PartitionRequestClient method requestSubpartition.

/**
	 * Requests a remote intermediate result partition queue.
	 * <p>
	 * The request goes to the remote producer, for which this partition
	 * request client instance has been created.
	 */
public ChannelFuture requestSubpartition(final ResultPartitionID partitionId, final int subpartitionIndex, final RemoteInputChannel inputChannel, int delayMs) throws IOException {
    checkNotClosed();
    LOG.debug("Requesting subpartition {} of partition {} with {} ms delay.", subpartitionIndex, partitionId, delayMs);
    partitionRequestHandler.addInputChannel(inputChannel);
    final PartitionRequest request = new PartitionRequest(partitionId, subpartitionIndex, inputChannel.getInputChannelId());
    final ChannelFutureListener listener = new ChannelFutureListener() {

        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (!future.isSuccess()) {
                partitionRequestHandler.removeInputChannel(inputChannel);
                inputChannel.onError(new LocalTransportException("Sending the partition request failed.", future.channel().localAddress(), future.cause()));
            }
        }
    };
    if (delayMs == 0) {
        ChannelFuture f = tcpChannel.writeAndFlush(request);
        f.addListener(listener);
        return f;
    } else {
        final ChannelFuture[] f = new ChannelFuture[1];
        tcpChannel.eventLoop().schedule(new Runnable() {

            @Override
            public void run() {
                f[0] = tcpChannel.writeAndFlush(request);
                f[0].addListener(listener);
            }
        }, delayMs, TimeUnit.MILLISECONDS);
        return f[0];
    }
}
Also used : ChannelFuture(io.netty.channel.ChannelFuture) PartitionRequest(org.apache.flink.runtime.io.network.netty.NettyMessage.PartitionRequest) LocalTransportException(org.apache.flink.runtime.io.network.netty.exception.LocalTransportException) ChannelFutureListener(io.netty.channel.ChannelFutureListener)
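One detail worth noting: in the delayed branch the scheduled task has not yet run when the method returns, so the slot f[0] is still null at that point. A hedged alternative sketch (not Flink's code) pre-creates a ChannelPromise and hands it to writeAndFlush inside the scheduled task, so the caller always receives a usable future; DelayedWriteExample and writeLater are hypothetical names.

import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelPromise;
import java.util.concurrent.TimeUnit;

final class DelayedWriteExample {

    // Schedules a write after delayMs and returns a future that tracks it.
    // The promise is created up front, so the caller always gets a non-null future
    // even though the write itself happens later on the event loop.
    static ChannelFuture writeLater(Channel channel, Object message, long delayMs) {
        ChannelPromise promise = channel.newPromise();
        channel.eventLoop().schedule(
            () -> channel.writeAndFlush(message, promise),
            delayMs, TimeUnit.MILLISECONDS);
        return promise;
    }
}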

Example 85 with ChannelFuture

use of io.netty.channel.ChannelFuture in project flink by apache.

the class PartitionRequestClient method sendTaskEvent.

/**
	 * Sends a task event backwards to an intermediate result partition producer.
	 * <p>
	 * Backwards task events flow between readers and writers and therefore
	 * will only work when both are running at the same time, which is only
	 * guaranteed to be the case when both the respective producer and
	 * consumer task run pipelined.
	 */
public void sendTaskEvent(ResultPartitionID partitionId, TaskEvent event, final RemoteInputChannel inputChannel) throws IOException {
    checkNotClosed();
    tcpChannel.writeAndFlush(new TaskEventRequest(event, partitionId, inputChannel.getInputChannelId())).addListener(new ChannelFutureListener() {

        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            if (!future.isSuccess()) {
                inputChannel.onError(new LocalTransportException("Sending the task event failed.", future.channel().localAddress(), future.cause()));
            }
        }
    });
}
Also used : ChannelFuture(io.netty.channel.ChannelFuture) TaskEventRequest(org.apache.flink.runtime.io.network.netty.NettyMessage.TaskEventRequest) LocalTransportException(org.apache.flink.runtime.io.network.netty.exception.LocalTransportException) ChannelFutureListener(io.netty.channel.ChannelFutureListener) IOException(java.io.IOException)
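When the failure handling is generic rather than channel-specific, Netty's pre-built listener constants can replace the anonymous class entirely. The sketch below only illustrates those constants and does not reproduce Flink's routing of the error into the RemoteInputChannel; FireAndForgetExample and its methods are hypothetical names.

import io.netty.channel.Channel;
import io.netty.channel.ChannelFutureListener;

final class FireAndForgetExample {

    // Writes a message; a failed write is re-fired through the pipeline as an
    // exceptionCaught event instead of being handled in a custom listener.
    static void writeOrFireException(Channel channel, Object message) {
        channel.writeAndFlush(message)
               .addListener(ChannelFutureListener.FIRE_EXCEPTION_ON_FAILURE);
    }

    // Writes a message; a failed write closes the channel.
    static void writeOrClose(Channel channel, Object message) {
        channel.writeAndFlush(message)
               .addListener(ChannelFutureListener.CLOSE_ON_FAILURE);
    }
}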

Aggregations

ChannelFuture (io.netty.channel.ChannelFuture): 383
Test (org.junit.Test): 131
Channel (io.netty.channel.Channel): 120
ChannelHandlerContext (io.netty.channel.ChannelHandlerContext): 105
Bootstrap (io.netty.bootstrap.Bootstrap): 98
NioEventLoopGroup (io.netty.channel.nio.NioEventLoopGroup): 94
ChannelFutureListener (io.netty.channel.ChannelFutureListener): 88
ByteBuf (io.netty.buffer.ByteBuf): 81
InetSocketAddress (java.net.InetSocketAddress): 79
EventLoopGroup (io.netty.channel.EventLoopGroup): 78
NioSocketChannel (io.netty.channel.socket.nio.NioSocketChannel): 76
IOException (java.io.IOException): 69
ChannelPipeline (io.netty.channel.ChannelPipeline): 67
ServerBootstrap (io.netty.bootstrap.ServerBootstrap): 64
ClosedChannelException (java.nio.channels.ClosedChannelException): 56
ChannelInitializer (io.netty.channel.ChannelInitializer): 47
AtomicReference (java.util.concurrent.atomic.AtomicReference): 47
ArrayList (java.util.ArrayList): 46
SslHandler (io.netty.handler.ssl.SslHandler): 45
Http2Headers (io.netty.handler.codec.http2.Http2Headers): 44