Search in sources:

Example 1 with Channel

Use of org.apache.hbase.thirdparty.io.netty.channel.Channel in project hbase by apache.

From class NettyRpcConnection, method connect.

private void connect() throws UnknownHostException {
    assert eventLoop.inEventLoop();
    LOG.trace("Connecting to {}", remoteId.getAddress());
    InetSocketAddress remoteAddr = getRemoteInetAddress(rpcClient.metrics);
    this.channel = new Bootstrap().group(eventLoop).channel(rpcClient.channelClass)
        .option(ChannelOption.TCP_NODELAY, rpcClient.isTcpNoDelay())
        .option(ChannelOption.SO_KEEPALIVE, rpcClient.tcpKeepAlive)
        .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, rpcClient.connectTO)
        .handler(new BufferCallBeforeInitHandler())
        .localAddress(rpcClient.localAddr).remoteAddress(remoteAddr)
        .connect().addListener(new ChannelFutureListener() {

        @Override
        public void operationComplete(ChannelFuture future) throws Exception {
            Channel ch = future.channel();
            if (!future.isSuccess()) {
                failInit(ch, toIOE(future.cause()));
                rpcClient.failedServers.addToFailedServers(remoteId.getAddress(), future.cause());
                return;
            }
            ch.writeAndFlush(connectionHeaderPreamble.retainedDuplicate());
            if (useSasl) {
                saslNegotiate(ch);
            } else {
                // send the connection header to the server
                ch.write(connectionHeaderWithLength.retainedDuplicate());
                established(ch);
            }
        }
    }).channel();
}
Also used: ChannelFuture(org.apache.hbase.thirdparty.io.netty.channel.ChannelFuture) InetSocketAddress(java.net.InetSocketAddress) Channel(org.apache.hbase.thirdparty.io.netty.channel.Channel) Bootstrap(org.apache.hbase.thirdparty.io.netty.bootstrap.Bootstrap) ChannelFutureListener(org.apache.hbase.thirdparty.io.netty.channel.ChannelFutureListener) IOException(java.io.IOException) UnknownHostException(java.net.UnknownHostException)
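A note on the pattern: connect() itself never blocks. Bootstrap.connect() returns a ChannelFuture, and all the follow-up I/O (the preamble write, SASL negotiation, or the connection header) happens inside the listener, on the event loop. Below is a minimal, self-contained sketch of that connect-then-listen shape. It assumes plain io.netty classes rather than hbase's shaded netty, and the host, port, and handler body are placeholders, not hbase's actual RPC setup.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;

public class AsyncConnectSketch {

    public static void main(String[] args) {
        NioEventLoopGroup group = new NioEventLoopGroup(1);
        ChannelFuture f = new Bootstrap().group(group)
            .channel(NioSocketChannel.class)
            .option(ChannelOption.TCP_NODELAY, true)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 3000)
            .handler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) {
                    // real handlers (codec, RPC handler, ...) would be added here
                }
            })
            .connect("localhost", 16020); // placeholder address
        f.addListener((ChannelFutureListener) future -> {
            if (!future.isSuccess()) {
                // the failure path: report the cause, mark the server as failed, etc.
                System.err.println("connect failed: " + future.cause());
                return;
            }
            // the success path: this is where the preamble/header writes would go
            future.channel().close();
        });
        // on connect failure the channel is closed as well, so this always completes
        f.channel().closeFuture().syncUninterruptibly();
        group.shutdownGracefully();
    }
}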

Example 2 with Channel

Use of org.apache.hbase.thirdparty.io.netty.channel.Channel in project hbase by apache.

From class HttpProxyExample, method start.

public void start() throws InterruptedException, ExecutionException {
    NettyRpcClientConfigHelper.setEventLoopConfig(conf, workerGroup, NioSocketChannel.class);
    conn = ConnectionFactory.createAsyncConnection(conf).get();
    channelGroup = new DefaultChannelGroup(GlobalEventExecutor.INSTANCE);
    serverChannel = new ServerBootstrap().group(bossGroup, workerGroup)
        .channel(NioServerSocketChannel.class)
        .childOption(ChannelOption.TCP_NODELAY, true)
        .childOption(ChannelOption.SO_REUSEADDR, true)
        .childHandler(new ChannelInitializer<Channel>() {

        @Override
        protected void initChannel(Channel ch) throws Exception {
            ch.pipeline().addFirst(new HttpServerCodec(), new HttpObjectAggregator(4 * 1024 * 1024), new RequestHandler(conn, channelGroup));
        }
    }).bind(port).syncUninterruptibly().channel();
}
Also used: DefaultChannelGroup(org.apache.hbase.thirdparty.io.netty.channel.group.DefaultChannelGroup) HttpObjectAggregator(org.apache.hbase.thirdparty.io.netty.handler.codec.http.HttpObjectAggregator) NioServerSocketChannel(org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioServerSocketChannel) Channel(org.apache.hbase.thirdparty.io.netty.channel.Channel) NioSocketChannel(org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel) HttpServerCodec(org.apache.hbase.thirdparty.io.netty.handler.codec.http.HttpServerCodec) ServerBootstrap(org.apache.hbase.thirdparty.io.netty.bootstrap.ServerBootstrap) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException)
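In this pipeline, HttpServerCodec translates bytes to HTTP message objects and HttpObjectAggregator stitches the chunks back together, so RequestHandler only ever sees complete requests of up to 4 MiB. As a hedged illustration of what a handler behind such a pipeline can rely on, here is a hypothetical stand-in (plain io.netty; not the example's actual RequestHandler) that echoes the request URI:

import java.nio.charset.StandardCharsets;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.SimpleChannelInboundHandler;
import io.netty.handler.codec.http.DefaultFullHttpResponse;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpResponseStatus;
import io.netty.handler.codec.http.HttpVersion;

public class EchoRequestHandler extends SimpleChannelInboundHandler<FullHttpRequest> {

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, FullHttpRequest req) {
        // the aggregator guarantees req.content() holds the complete body (up to the 4 MiB cap)
        byte[] body = ("you requested " + req.uri()).getBytes(StandardCharsets.UTF_8);
        FullHttpResponse resp = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1,
            HttpResponseStatus.OK, ctx.alloc().buffer().writeBytes(body));
        resp.headers().set(HttpHeaderNames.CONTENT_LENGTH, body.length);
        ctx.writeAndFlush(resp);
    }
}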

Example 3 with Channel

Use of org.apache.hbase.thirdparty.io.netty.channel.Channel in project hbase by apache.

From class FanOutOneBlockAsyncDFSOutputHelper, method connectToDataNodes.

private static List<Future<Channel>> connectToDataNodes(Configuration conf, DFSClient client,
        String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS,
        BlockConstructionStage stage, DataChecksum summer, EventLoopGroup eventLoopGroup,
        Class<? extends Channel> channelClass) {
    StorageType[] storageTypes = locatedBlock.getStorageTypes();
    DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
    boolean connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
    int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
    ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
    blockCopy.setNumBytes(locatedBlock.getBlockSize());
    ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder()
        .setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PBHelperClient.convert(blockCopy))
            .setToken(PBHelperClient.convert(locatedBlock.getBlockToken())))
        .setClientName(clientName).build();
    ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
    OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder()
        .setHeader(header)
        .setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name()))
        .setPipelineSize(1)
        .setMinBytesRcvd(locatedBlock.getBlock().getNumBytes())
        .setMaxBytesRcvd(maxBytesRcvd)
        .setLatestGenerationStamp(latestGS)
        .setRequestedChecksum(checksumProto)
        .setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
    List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
    for (int i = 0; i < datanodeInfos.length; i++) {
        DatanodeInfo dnInfo = datanodeInfos[i];
        StorageType storageType = storageTypes[i];
        Promise<Channel> promise = eventLoopGroup.next().newPromise();
        futureList.add(promise);
        String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
        new Bootstrap().group(eventLoopGroup).channel(channelClass)
            .option(CONNECT_TIMEOUT_MILLIS, timeoutMs)
            .handler(new ChannelInitializer<Channel>() {

            @Override
            protected void initChannel(Channel ch) throws Exception {
            // We need the remote address of the channel, so we can only move on after the
            // channel has connected. The implementation is left empty because Netty does not
            // allow a null handler.
            }
        }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {

            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder, timeoutMs, client, locatedBlock.getBlockToken(), promise);
                } else {
                    promise.tryFailure(future.cause());
                }
            }
        });
    }
    return futureList;
}
Also used: ChannelFuture(org.apache.hbase.thirdparty.io.netty.channel.ChannelFuture) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) StorageType(org.apache.hadoop.fs.StorageType) Channel(org.apache.hbase.thirdparty.io.netty.channel.Channel) ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) ArrayList(java.util.ArrayList) ChecksumProto(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto) OpWriteBlockProto(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto) ChannelFutureListener(org.apache.hbase.thirdparty.io.netty.channel.ChannelFutureListener) UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException) LeaseExpiredException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException) InvalidBlockTokenException(org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException) InvocationTargetException(java.lang.reflect.InvocationTargetException) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException) Future(org.apache.hbase.thirdparty.io.netty.util.concurrent.Future) Bootstrap(org.apache.hbase.thirdparty.io.netty.bootstrap.Bootstrap) ClientOperationHeaderProto(org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto)
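The shape worth noticing here: instead of blocking on each datanode in turn, the method creates one Promise<Channel> per datanode up front, returns the list of futures immediately, and lets each connect listener complete its promise with either the channel or the failure cause. A minimal sketch of that fan-out pattern, assuming plain io.netty and the NIO transport (names are illustrative):

import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.Promise;

public final class FanOutConnectSketch {

    /** Start a connect to every address; each future completes with the Channel or the cause. */
    static List<Future<Channel>> connectAll(EventLoopGroup group, List<InetSocketAddress> addrs) {
        List<Future<Channel>> futures = new ArrayList<>(addrs.size());
        for (InetSocketAddress addr : addrs) {
            Promise<Channel> promise = group.next().newPromise();
            futures.add(promise);
            new Bootstrap().group(group).channel(NioSocketChannel.class)
                .handler(new ChannelInitializer<Channel>() {
                    @Override
                    protected void initChannel(Channel ch) {
                        // intentionally empty, mirroring the example: Netty requires a
                        // handler, but per-channel setup only starts after connect succeeds
                    }
                })
                .connect(addr)
                .addListener((ChannelFutureListener) f -> {
                    if (f.isSuccess()) {
                        promise.trySuccess(f.channel());
                    } else {
                        promise.tryFailure(f.cause());
                    }
                });
        }
        return futures;
    }
}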

Example 4 with Channel

Use of org.apache.hbase.thirdparty.io.netty.channel.Channel in project hbase by apache.

From class FanOutOneBlockAsyncDFSOutputHelper, method createOutput.

private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src,
        boolean overwrite, boolean createParent, short replication, long blockSize,
        EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass,
        StreamSlowMonitor monitor) throws IOException {
    Configuration conf = dfs.getConf();
    DFSClient client = dfs.getClient();
    String clientName = client.getClientName();
    ClientProtocol namenode = client.getNamenode();
    int createMaxRetries = conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES, DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES);
    ExcludeDatanodeManager excludeDatanodeManager = monitor.getExcludeDatanodeManager();
    Set<DatanodeInfo> toExcludeNodes = new HashSet<>(excludeDatanodeManager.getExcludeDNs().keySet());
    for (int retry = 0; ; retry++) {
        LOG.debug("When creating output stream for {}, exclude list is {}, retry={}", src, toExcludeNodes, retry);
        HdfsFileStatus stat;
        try {
            stat = FILE_CREATOR.create(namenode, src,
                FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)), clientName,
                getCreateFlags(overwrite), createParent, replication, blockSize,
                CryptoProtocolVersion.supported());
        } catch (Exception e) {
            if (e instanceof RemoteException) {
                throw (RemoteException) e;
            } else {
                throw new NameNodeException(e);
            }
        }
        beginFileLease(client, stat.getFileId());
        boolean succ = false;
        LocatedBlock locatedBlock = null;
        List<Future<Channel>> futureList = null;
        try {
            DataChecksum summer = createChecksum(client);
            locatedBlock = namenode.addBlock(src, client.getClientName(), null,
                toExcludeNodes.toArray(new DatanodeInfo[0]), stat.getFileId(), null, null);
            Map<Channel, DatanodeInfo> datanodes = new IdentityHashMap<>();
            futureList = connectToDataNodes(conf, client, clientName, locatedBlock, 0L, 0L,
                PIPELINE_SETUP_CREATE, summer, eventLoopGroup, channelClass);
            for (int i = 0, n = futureList.size(); i < n; i++) {
                DatanodeInfo datanodeInfo = locatedBlock.getLocations()[i];
                try {
                    datanodes.put(futureList.get(i).syncUninterruptibly().getNow(), datanodeInfo);
                } catch (Exception e) {
                    // exclude the broken DN next time
                    toExcludeNodes.add(datanodeInfo);
                    excludeDatanodeManager.tryAddExcludeDN(datanodeInfo, "connect error");
                    throw e;
                }
            }
            Encryptor encryptor = createEncryptor(conf, stat, client);
            FanOutOneBlockAsyncDFSOutput output = new FanOutOneBlockAsyncDFSOutput(conf, dfs,
                client, namenode, clientName, src, stat.getFileId(), locatedBlock, encryptor,
                datanodes, summer, ALLOC, monitor);
            succ = true;
            return output;
        } catch (RemoteException e) {
            LOG.warn("create fan-out dfs output {} failed, retry = {}", src, retry, e);
            if (shouldRetryCreate(e)) {
                if (retry >= createMaxRetries) {
                    throw e.unwrapRemoteException();
                }
            } else {
                throw e.unwrapRemoteException();
            }
        } catch (IOException e) {
            LOG.warn("create fan-out dfs output {} failed, retry = {}", src, retry, e);
            if (retry >= createMaxRetries) {
                throw e;
            }
            // overwrite the old broken file.
            overwrite = true;
            try {
                Thread.sleep(ConnectionUtils.getPauseTime(100, retry));
            } catch (InterruptedException ie) {
                throw new InterruptedIOException();
            }
        } finally {
            if (!succ) {
                if (futureList != null) {
                    for (Future<Channel> f : futureList) {
                        f.addListener(new FutureListener<Channel>() {

                            @Override
                            public void operationComplete(Future<Channel> future) throws Exception {
                                if (future.isSuccess()) {
                                    future.getNow().close();
                                }
                            }
                        });
                    }
                }
                endFileLease(client, stat.getFileId());
            }
        }
    }
}
Also used: InterruptedIOException(java.io.InterruptedIOException) Configuration(org.apache.hadoop.conf.Configuration) IdentityHashMap(java.util.IdentityHashMap) FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor(org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor) Encryptor(org.apache.hadoop.crypto.Encryptor) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) HashSet(java.util.HashSet) DFSClient(org.apache.hadoop.hdfs.DFSClient) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Channel(org.apache.hbase.thirdparty.io.netty.channel.Channel) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) IOException(java.io.IOException) UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException) LeaseExpiredException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException) InvalidBlockTokenException(org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException) InvocationTargetException(java.lang.reflect.InvocationTargetException) RemoteException(org.apache.hadoop.ipc.RemoteException) DataChecksum(org.apache.hadoop.util.DataChecksum) ChannelFuture(org.apache.hbase.thirdparty.io.netty.channel.ChannelFuture) Future(org.apache.hbase.thirdparty.io.netty.util.concurrent.Future) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol) ExcludeDatanodeManager(org.apache.hadoop.hbase.io.asyncfs.monitor.ExcludeDatanodeManager)
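Stripped of the HDFS details, createOutput is a retry loop with three moving parts: an exclude set that grows as datanodes misbehave, a bounded retry count, and an escalating pause between attempts. Below is a hypothetical skeleton of that control flow; the names and the backoff formula are stand-ins, not the real ConnectionUtils.getPauseTime:

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.HashSet;
import java.util.Set;

public final class RetryWithExcludesSketch {

    /** One attempt; implementations should skip the nodes in {@code excluded}. */
    interface Attempt<T> {
        T run(Set<String> excluded) throws IOException;
    }

    static <T> T create(Attempt<T> attempt, int maxRetries) throws IOException {
        Set<String> excluded = new HashSet<>();
        for (int retry = 0; ; retry++) {
            try {
                return attempt.run(excluded);
            } catch (IOException e) {
                if (retry >= maxRetries) {
                    throw e;
                }
                // the real code adds the broken datanode to the exclude set here,
                // so the next block allocation avoids it
                long pause = Math.min(100L << Math.min(retry, 10), 10_000L);
                try {
                    Thread.sleep(pause); // capped exponential backoff, a stand-in formula
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    throw new InterruptedIOException();
                }
            }
        }
    }
}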

Example 5 with Channel

Use of org.apache.hbase.thirdparty.io.netty.channel.Channel in project hbase by apache.

From class TestFanOutOneBlockAsyncDFSOutputHang, method testFlushHangWhenOneDataNodeFailedBeforeOtherDataNodeAck.

/**
 * <pre>
 * This test is for HBASE-26679. Consider there are two datanodes, dn1 and dn2, where dn2 is a
 * slow DN. The sequence of events before HBASE-26679 was:
 * 1. We write some data to {@link FanOutOneBlockAsyncDFSOutput} and then flush it, so there is
 *    one {@link FanOutOneBlockAsyncDFSOutput.Callback} in
 *    {@link FanOutOneBlockAsyncDFSOutput#waitingAckQueue}.
 * 2. The ack from dn1 arrives first and triggers Netty to invoke
 *    {@link FanOutOneBlockAsyncDFSOutput#completed} with dn1's channel; there, dn1's channel
 *    is removed from {@link FanOutOneBlockAsyncDFSOutput.Callback#unfinishedReplicas}.
 * 3. But dn2 responds slowly. Before dn2 sends its ack, dn1 is shut down or hits an exception,
 *    so {@link FanOutOneBlockAsyncDFSOutput#failed} is triggered by Netty with dn1's channel.
 *    Because {@link FanOutOneBlockAsyncDFSOutput.Callback#unfinishedReplicas} no longer
 *    contains dn1's channel, the {@link FanOutOneBlockAsyncDFSOutput.Callback} is skipped in
 *    {@link FanOutOneBlockAsyncDFSOutput#failed}, {@link FanOutOneBlockAsyncDFSOutput#state}
 *    is set to {@link FanOutOneBlockAsyncDFSOutput.State#BROKEN}, and both dn1 and dn2 are
 *    closed at the end of {@link FanOutOneBlockAsyncDFSOutput#failed}.
 * 4. {@link FanOutOneBlockAsyncDFSOutput#failed} is triggered again by dn2 because it has been
 *    closed, but since {@link FanOutOneBlockAsyncDFSOutput#state} is already
 *    {@link FanOutOneBlockAsyncDFSOutput.State#BROKEN}, the whole
 *    {@link FanOutOneBlockAsyncDFSOutput#failed} method is skipped, and waiting on the future
 *    returned by {@link FanOutOneBlockAsyncDFSOutput#flush} would be stuck forever.
 * After HBASE-26679, in step 4 above, even if {@link FanOutOneBlockAsyncDFSOutput#state} is
 * already {@link FanOutOneBlockAsyncDFSOutput.State#BROKEN}, we still try to complete
 * {@link FanOutOneBlockAsyncDFSOutput.Callback#future}.
 * </pre>
 */
@Test
public void testFlushHangWhenOneDataNodeFailedBeforeOtherDataNodeAck() throws Exception {
    DataNodeProperties firstDataNodeProperties = null;
    try {
        final CyclicBarrier dn1AckReceivedCyclicBarrier = new CyclicBarrier(2);
        Map<Channel, DatanodeInfo> datanodeInfoMap = OUT.getDatanodeInfoMap();
        Iterator<Map.Entry<Channel, DatanodeInfo>> iterator = datanodeInfoMap.entrySet().iterator();
        assertTrue(iterator.hasNext());
        Map.Entry<Channel, DatanodeInfo> dn1Entry = iterator.next();
        Channel dn1Channel = dn1Entry.getKey();
        DatanodeInfo dn1DatanodeInfo = dn1Entry.getValue();
        final List<String> protobufDecoderNames = new ArrayList<String>();
        dn1Channel.pipeline().forEach((entry) -> {
            if (ProtobufDecoder.class.isInstance(entry.getValue())) {
                protobufDecoderNames.add(entry.getKey());
            }
        });
        assertTrue(protobufDecoderNames.size() == 1);
        dn1Channel.pipeline().addAfter(protobufDecoderNames.get(0), "dn1AckReceivedHandler", new ChannelInboundHandlerAdapter() {

            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                super.channelRead(ctx, msg);
                dn1AckReceivedCyclicBarrier.await();
            }
        });
        assertTrue(iterator.hasNext());
        Map.Entry<Channel, DatanodeInfo> dn2Entry = iterator.next();
        Channel dn2Channel = dn2Entry.getKey();
        /**
         * Here we add a {@link ChannelInboundHandlerAdapter} that swallows all responses,
         * simulating a slow dn2.
         */
        dn2Channel.pipeline().addFirst(new ChannelInboundHandlerAdapter() {

            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
                if (!(msg instanceof ByteBuf)) {
                    ctx.fireChannelRead(msg);
                }
            }
        });
        byte[] b = new byte[10];
        ThreadLocalRandom.current().nextBytes(b);
        OUT.write(b, 0, b.length);
        CompletableFuture<Long> future = OUT.flush(false);
        /**
         * Wait for ack from dn1.
         */
        dn1AckReceivedCyclicBarrier.await();
        /**
         * The first ack has arrived from dn1, so we can stop dn1 now.
         */
        firstDataNodeProperties = findAndKillFirstDataNode(dn1DatanodeInfo);
        assertTrue(firstDataNodeProperties != null);
        try {
            /**
             * Before HBASE-26679 we would get stuck here; after HBASE-26679 we fail quickly
             * with an {@link ExecutionException}.
             */
            future.get();
            fail();
        } catch (ExecutionException e) {
            assertTrue(e != null);
            LOG.info("expected exception caught when get future", e);
        }
        /**
         * Make sure all the datanode channels are closed.
         */
        datanodeInfoMap.keySet().forEach(ch -> {
            try {
                ch.closeFuture().get();
            } catch (InterruptedException | ExecutionException e) {
                throw new RuntimeException(e);
            }
        });
    } finally {
        if (firstDataNodeProperties != null) {
            CLUSTER.restartDataNode(firstDataNodeProperties);
        }
    }
}
Also used: DataNodeProperties(org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties) ArrayList(java.util.ArrayList) ChannelHandlerContext(org.apache.hbase.thirdparty.io.netty.channel.ChannelHandlerContext) ByteBuf(org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf) ExecutionException(java.util.concurrent.ExecutionException) DatanodeInfo(org.apache.hadoop.hdfs.protocol.DatanodeInfo) Channel(org.apache.hbase.thirdparty.io.netty.channel.Channel) NioSocketChannel(org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel) IOException(java.io.IOException) CyclicBarrier(java.util.concurrent.CyclicBarrier) Map(java.util.Map) ChannelInboundHandlerAdapter(org.apache.hbase.thirdparty.io.netty.channel.ChannelInboundHandlerAdapter) Test(org.junit.Test)
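The instrumentation technique this test uses, scanning the pipeline entries to find an existing decoder by type and then splicing a probe handler in after it by name, works on any Netty pipeline. A self-contained sketch using EmbeddedChannel, with plain io.netty and StringDecoder standing in for the shaded ProtobufDecoder:

import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import io.netty.buffer.Unpooled;
import io.netty.channel.ChannelHandler;
import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.embedded.EmbeddedChannel;
import io.netty.handler.codec.string.StringDecoder;
import io.netty.util.CharsetUtil;

public class PipelineProbeSketch {

    public static void main(String[] args) {
        AtomicInteger acksSeen = new AtomicInteger();
        EmbeddedChannel ch = new EmbeddedChannel(new StringDecoder());
        // locate the decoder by type, as the test does with ProtobufDecoder
        String decoderName = null;
        for (Map.Entry<String, ChannelHandler> entry : ch.pipeline()) {
            if (entry.getValue() instanceof StringDecoder) {
                decoderName = entry.getKey();
            }
        }
        // splice a probe in right after it; it sees every decoded message
        ch.pipeline().addAfter(decoderName, "ackProbe", new ChannelInboundHandlerAdapter() {
            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) {
                acksSeen.incrementAndGet();
                ctx.fireChannelRead(msg);
            }
        });
        ch.writeInbound(Unpooled.copiedBuffer("ack", CharsetUtil.UTF_8));
        System.out.println("decoded messages observed: " + acksSeen.get()); // prints 1
    }
}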

Aggregations

Channel (org.apache.hbase.thirdparty.io.netty.channel.Channel): 7
IOException (java.io.IOException): 5
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 3
ChannelFuture (org.apache.hbase.thirdparty.io.netty.channel.ChannelFuture): 3
NioSocketChannel (org.apache.hbase.thirdparty.io.netty.channel.socket.nio.NioSocketChannel): 3
InterruptedIOException (java.io.InterruptedIOException): 2
InvocationTargetException (java.lang.reflect.InvocationTargetException): 2
ArrayList (java.util.ArrayList): 2
ExecutionException (java.util.concurrent.ExecutionException): 2
UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException): 2
InvalidBlockTokenException (org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException): 2
LeaseExpiredException (org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException): 2
RemoteException (org.apache.hadoop.ipc.RemoteException): 2
Bootstrap (org.apache.hbase.thirdparty.io.netty.bootstrap.Bootstrap): 2
ChannelFutureListener (org.apache.hbase.thirdparty.io.netty.channel.ChannelFutureListener): 2
Future (org.apache.hbase.thirdparty.io.netty.util.concurrent.Future): 2
InetSocketAddress (java.net.InetSocketAddress): 1
UnknownHostException (java.net.UnknownHostException): 1
HashSet (java.util.HashSet): 1
IdentityHashMap (java.util.IdentityHashMap): 1