Search in sources:

Example 1 with Future

Use of org.apache.hbase.thirdparty.io.netty.util.concurrent.Future in project hbase by apache.

From the class NettyRpcConnection, method saslNegotiate: a Promise<Boolean> drives the SASL negotiation, and a chained FutureListener either finishes connection setup (optionally negotiating the connection header first) or fails the connection.

private void saslNegotiate(final Channel ch) {
    assert eventLoop.inEventLoop();
    UserGroupInformation ticket = provider.getRealUser(remoteId.getTicket());
    if (ticket == null) {
        failInit(ch, new FatalConnectionException("ticket/user is null"));
        return;
    }
    Promise<Boolean> saslPromise = ch.eventLoop().newPromise();
    final NettyHBaseSaslRpcClientHandler saslHandler;
    try {
        saslHandler = new NettyHBaseSaslRpcClientHandler(saslPromise, ticket, provider, token, ((InetSocketAddress) ch.remoteAddress()).getAddress(), securityInfo, rpcClient.fallbackAllowed, this.rpcClient.conf);
    } catch (IOException e) {
        failInit(ch, e);
        return;
    }
    ch.pipeline().addFirst(new SaslChallengeDecoder(), saslHandler);
    saslPromise.addListener(new FutureListener<Boolean>() {

        @Override
        public void operationComplete(Future<Boolean> future) throws Exception {
            if (future.isSuccess()) {
                ChannelPipeline p = ch.pipeline();
                p.remove(SaslChallengeDecoder.class);
                p.remove(NettyHBaseSaslRpcClientHandler.class);
                // check whether a connection-header negotiation with the server is necessary
                if (saslHandler.isNeedProcessConnectionHeader()) {
                    Promise<Boolean> connectionHeaderPromise = ch.eventLoop().newPromise();
                    // create the handler to handle the connection header
                    ChannelHandler chHandler = new NettyHBaseRpcConnectionHeaderHandler(connectionHeaderPromise, conf, connectionHeaderWithLength);
                    // add a ReadTimeoutHandler in case the server never responds with the
                    // connection header due to mismatched client-side and server-side configuration
                    p.addFirst(new ReadTimeoutHandler(RpcClient.DEFAULT_SOCKET_TIMEOUT_READ, TimeUnit.MILLISECONDS));
                    p.addLast(chHandler);
                    connectionHeaderPromise.addListener(new FutureListener<Boolean>() {

                        @Override
                        public void operationComplete(Future<Boolean> future) throws Exception {
                            if (future.isSuccess()) {
                                ChannelPipeline p = ch.pipeline();
                                p.remove(ReadTimeoutHandler.class);
                                p.remove(NettyHBaseRpcConnectionHeaderHandler.class);
                                // no need to send the connection header here;
                                // NettyHBaseRpcConnectionHeaderHandler has already sent it
                                established(ch);
                            } else {
                                final Throwable error = future.cause();
                                scheduleRelogin(error);
                                failInit(ch, toIOE(error));
                            }
                        }
                    });
                } else {
                    // send the connection header to server
                    ch.write(connectionHeaderWithLength.retainedDuplicate());
                    established(ch);
                }
            } else {
                final Throwable error = future.cause();
                scheduleRelogin(error);
                failInit(ch, toIOE(error));
            }
        }
    });
}
Also used:
ChannelFutureListener (org.apache.hbase.thirdparty.io.netty.channel.ChannelFutureListener)
FutureListener (org.apache.hbase.thirdparty.io.netty.util.concurrent.FutureListener)
InetSocketAddress (java.net.InetSocketAddress)
IOException (java.io.IOException)
ChannelHandler (org.apache.hbase.thirdparty.io.netty.channel.ChannelHandler)
UnknownHostException (java.net.UnknownHostException)
ChannelPipeline (org.apache.hbase.thirdparty.io.netty.channel.ChannelPipeline)
Promise (org.apache.hbase.thirdparty.io.netty.util.concurrent.Promise)
NettyHBaseRpcConnectionHeaderHandler (org.apache.hadoop.hbase.security.NettyHBaseRpcConnectionHeaderHandler)
SaslChallengeDecoder (org.apache.hadoop.hbase.security.SaslChallengeDecoder)
NettyHBaseSaslRpcClientHandler (org.apache.hadoop.hbase.security.NettyHBaseSaslRpcClientHandler)
ReadTimeoutHandler (org.apache.hbase.thirdparty.io.netty.handler.timeout.ReadTimeoutHandler)
Future (org.apache.hbase.thirdparty.io.netty.util.concurrent.Future)
ChannelFuture (org.apache.hbase.thirdparty.io.netty.channel.ChannelFuture)
UserGroupInformation (org.apache.hadoop.security.UserGroupInformation)
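
The chained-promise pattern above is easier to see in isolation. Below is a minimal, standalone sketch of the same two-stage chaining (negotiation, then connection header), written against plain io.netty rather than HBase's relocated org.apache.hbase.thirdparty.io.netty package; the class and stage names are illustrative, not HBase code.

import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.util.concurrent.FutureListener;
import io.netty.util.concurrent.Promise;

public class PromiseChainSketch {

    public static void main(String[] args) throws Exception {
        NioEventLoopGroup group = new NioEventLoopGroup(1);
        try {
            // Stage 1: stands in for SASL negotiation; a pipeline handler would
            // normally complete this promise.
            Promise<Boolean> saslPromise = group.next().newPromise();
            saslPromise.addListener((FutureListener<Boolean>) future -> {
                if (future.isSuccess()) {
                    // Stage 2: stands in for the connection-header exchange,
                    // chained off the first stage exactly as in saslNegotiate.
                    Promise<Boolean> headerPromise = group.next().newPromise();
                    headerPromise.addListener((FutureListener<Boolean>) f -> {
                        if (f.isSuccess()) {
                            System.out.println("connection established");
                        } else {
                            System.out.println("header exchange failed: " + f.cause());
                        }
                    });
                    // simulate the header handler completing its promise
                    headerPromise.trySuccess(Boolean.TRUE);
                } else {
                    System.out.println("sasl failed: " + future.cause());
                }
            });
            // simulate the SASL handler completing its promise
            saslPromise.trySuccess(Boolean.TRUE);
            // give the event loop a moment to run the listeners
            Thread.sleep(200);
        } finally {
            group.shutdownGracefully();
        }
    }
}

Because saslNegotiate creates its promises from the channel's event loop, the listeners also run on that event loop, which is why the code can mutate the pipeline inside a listener without extra synchronization.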

Example 2 with Future

Use of org.apache.hbase.thirdparty.io.netty.util.concurrent.Future in project hbase by apache.

From the class FanOutOneBlockAsyncDFSOutputHelper, method connectToDataNodes: one Promise<Channel> is created per datanode, each connect attempt completes its promise through a ChannelFutureListener, and the list of futures is returned to the caller.

private static List<Future<Channel>> connectToDataNodes(Configuration conf, DFSClient client, String clientName, LocatedBlock locatedBlock, long maxBytesRcvd, long latestGS, BlockConstructionStage stage, DataChecksum summer, EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass) {
    StorageType[] storageTypes = locatedBlock.getStorageTypes();
    DatanodeInfo[] datanodeInfos = locatedBlock.getLocations();
    boolean connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME, DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
    int timeoutMs = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, READ_TIMEOUT);
    ExtendedBlock blockCopy = new ExtendedBlock(locatedBlock.getBlock());
    blockCopy.setNumBytes(locatedBlock.getBlockSize());
    ClientOperationHeaderProto header = ClientOperationHeaderProto.newBuilder().setBaseHeader(BaseHeaderProto.newBuilder().setBlock(PBHelperClient.convert(blockCopy)).setToken(PBHelperClient.convert(locatedBlock.getBlockToken()))).setClientName(clientName).build();
    ChecksumProto checksumProto = DataTransferProtoUtil.toProto(summer);
    OpWriteBlockProto.Builder writeBlockProtoBuilder = OpWriteBlockProto.newBuilder().setHeader(header).setStage(OpWriteBlockProto.BlockConstructionStage.valueOf(stage.name())).setPipelineSize(1).setMinBytesRcvd(locatedBlock.getBlock().getNumBytes()).setMaxBytesRcvd(maxBytesRcvd).setLatestGenerationStamp(latestGS).setRequestedChecksum(checksumProto).setCachingStrategy(CachingStrategyProto.newBuilder().setDropBehind(true).build());
    List<Future<Channel>> futureList = new ArrayList<>(datanodeInfos.length);
    for (int i = 0; i < datanodeInfos.length; i++) {
        DatanodeInfo dnInfo = datanodeInfos[i];
        StorageType storageType = storageTypes[i];
        Promise<Channel> promise = eventLoopGroup.next().newPromise();
        futureList.add(promise);
        String dnAddr = dnInfo.getXferAddr(connectToDnViaHostname);
        new Bootstrap().group(eventLoopGroup).channel(channelClass).option(CONNECT_TIMEOUT_MILLIS, timeoutMs).handler(new ChannelInitializer<Channel>() {

            @Override
            protected void initChannel(Channel ch) throws Exception {
            // We need the remote address of the channel, so we can only proceed after the
            // channel has connected. The implementation is left empty because Netty does
            // not allow a null handler.
            }
        }).connect(NetUtils.createSocketAddr(dnAddr)).addListener(new ChannelFutureListener() {

            @Override
            public void operationComplete(ChannelFuture future) throws Exception {
                if (future.isSuccess()) {
                    initialize(conf, future.channel(), dnInfo, storageType, writeBlockProtoBuilder, timeoutMs, client, locatedBlock.getBlockToken(), promise);
                } else {
                    promise.tryFailure(future.cause());
                }
            }
        });
    }
    return futureList;
}
Also used:
ChannelFuture (org.apache.hbase.thirdparty.io.netty.channel.ChannelFuture)
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)
StorageType (org.apache.hadoop.fs.StorageType)
Channel (org.apache.hbase.thirdparty.io.netty.channel.Channel)
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock)
ArrayList (java.util.ArrayList)
ChecksumProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto)
OpWriteBlockProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpWriteBlockProto)
ChannelFutureListener (org.apache.hbase.thirdparty.io.netty.channel.ChannelFutureListener)
UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException)
LeaseExpiredException (org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException)
InvalidBlockTokenException (org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException)
InvocationTargetException (java.lang.reflect.InvocationTargetException)
InterruptedIOException (java.io.InterruptedIOException)
IOException (java.io.IOException)
RemoteException (org.apache.hadoop.ipc.RemoteException)
Future (org.apache.hbase.thirdparty.io.netty.util.concurrent.Future)
Bootstrap (org.apache.hbase.thirdparty.io.netty.bootstrap.Bootstrap)
ClientOperationHeaderProto (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto)
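
Stripped of the HDFS-specific setup, the fan-out connect above reduces to bridging each ChannelFuture into a Promise<Channel>. The sketch below reconstructs that idiom against plain io.netty; connectAll, the timeout value, and the address list are hypothetical, not HBase API.

import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFutureListener;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.Promise;

public class FanOutConnectSketch {

    static List<Future<Channel>> connectAll(EventLoopGroup group, List<InetSocketAddress> addrs) {
        List<Future<Channel>> futures = new ArrayList<>(addrs.size());
        for (InetSocketAddress addr : addrs) {
            // one promise per target; the connect listener completes it either way
            Promise<Channel> promise = group.next().newPromise();
            futures.add(promise);
            new Bootstrap().group(group).channel(NioSocketChannel.class)
                .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 5000)
                .handler(new ChannelInitializer<Channel>() {
                    @Override
                    protected void initChannel(Channel ch) {
                        // empty on purpose: Netty requires a non-null handler, and the
                        // real setup happens only after the connect succeeds
                    }
                })
                .connect(addr)
                .addListener((ChannelFutureListener) f -> {
                    if (f.isSuccess()) {
                        promise.trySuccess(f.channel());
                    } else {
                        promise.tryFailure(f.cause());
                    }
                });
        }
        return futures;
    }
}

Handing back Promise<Channel> instead of the raw ChannelFuture is what lets connectToDataNodes run its own post-connect initialize(...) step before the caller ever observes a completed future.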

Example 3 with Future

Use of org.apache.hbase.thirdparty.io.netty.util.concurrent.Future in project hbase by apache.

From the class FanOutOneBlockAsyncDFSOutputHelper, method createOutput: the caller blocks on each Future<Channel> returned by connectToDataNodes with syncUninterruptibly(), and on failure registers listeners that close any channels that still manage to connect.

private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src, boolean overwrite, boolean createParent, short replication, long blockSize, EventLoopGroup eventLoopGroup, Class<? extends Channel> channelClass, StreamSlowMonitor monitor) throws IOException {
    Configuration conf = dfs.getConf();
    DFSClient client = dfs.getClient();
    String clientName = client.getClientName();
    ClientProtocol namenode = client.getNamenode();
    int createMaxRetries = conf.getInt(ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES, DEFAULT_ASYNC_DFS_OUTPUT_CREATE_MAX_RETRIES);
    ExcludeDatanodeManager excludeDatanodeManager = monitor.getExcludeDatanodeManager();
    Set<DatanodeInfo> toExcludeNodes = new HashSet<>(excludeDatanodeManager.getExcludeDNs().keySet());
    for (int retry = 0; ; retry++) {
        LOG.debug("When create output stream for {}, exclude list is {}, retry={}", src, toExcludeNodes, retry);
        HdfsFileStatus stat;
        try {
            stat = FILE_CREATOR.create(namenode, src, FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)), clientName, getCreateFlags(overwrite), createParent, replication, blockSize, CryptoProtocolVersion.supported());
        } catch (Exception e) {
            if (e instanceof RemoteException) {
                throw (RemoteException) e;
            } else {
                throw new NameNodeException(e);
            }
        }
        beginFileLease(client, stat.getFileId());
        boolean succ = false;
        LocatedBlock locatedBlock = null;
        List<Future<Channel>> futureList = null;
        try {
            DataChecksum summer = createChecksum(client);
            locatedBlock = namenode.addBlock(src, client.getClientName(), null, toExcludeNodes.toArray(new DatanodeInfo[0]), stat.getFileId(), null, null);
            Map<Channel, DatanodeInfo> datanodes = new IdentityHashMap<>();
            futureList = connectToDataNodes(conf, client, clientName, locatedBlock, 0L, 0L, PIPELINE_SETUP_CREATE, summer, eventLoopGroup, channelClass);
            for (int i = 0, n = futureList.size(); i < n; i++) {
                DatanodeInfo datanodeInfo = locatedBlock.getLocations()[i];
                try {
                    datanodes.put(futureList.get(i).syncUninterruptibly().getNow(), datanodeInfo);
                } catch (Exception e) {
                    // exclude the broken DN next time
                    toExcludeNodes.add(datanodeInfo);
                    excludeDatanodeManager.tryAddExcludeDN(datanodeInfo, "connect error");
                    throw e;
                }
            }
            Encryptor encryptor = createEncryptor(conf, stat, client);
            FanOutOneBlockAsyncDFSOutput output = new FanOutOneBlockAsyncDFSOutput(conf, dfs, client, namenode, clientName, src, stat.getFileId(), locatedBlock, encryptor, datanodes, summer, ALLOC, monitor);
            succ = true;
            return output;
        } catch (RemoteException e) {
            LOG.warn("create fan-out dfs output {} failed, retry = {}", src, retry, e);
            if (shouldRetryCreate(e)) {
                if (retry >= createMaxRetries) {
                    throw e.unwrapRemoteException();
                }
            } else {
                throw e.unwrapRemoteException();
            }
        } catch (IOException e) {
            LOG.warn("create fan-out dfs output {} failed, retry = {}", src, retry, e);
            if (retry >= createMaxRetries) {
                throw e;
            }
            // overwrite the old broken file.
            overwrite = true;
            try {
                Thread.sleep(ConnectionUtils.getPauseTime(100, retry));
            } catch (InterruptedException ie) {
                throw new InterruptedIOException();
            }
        } finally {
            if (!succ) {
                if (futureList != null) {
                    for (Future<Channel> f : futureList) {
                        f.addListener(new FutureListener<Channel>() {

                            @Override
                            public void operationComplete(Future<Channel> future) throws Exception {
                                if (future.isSuccess()) {
                                    future.getNow().close();
                                }
                            }
                        });
                    }
                }
                endFileLease(client, stat.getFileId());
            }
        }
    }
}
Also used:
InterruptedIOException (java.io.InterruptedIOException)
Configuration (org.apache.hadoop.conf.Configuration)
IdentityHashMap (java.util.IdentityHashMap)
FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor (org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor)
Encryptor (org.apache.hadoop.crypto.Encryptor)
HdfsFileStatus (org.apache.hadoop.hdfs.protocol.HdfsFileStatus)
HashSet (java.util.HashSet)
DFSClient (org.apache.hadoop.hdfs.DFSClient)
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo)
Channel (org.apache.hbase.thirdparty.io.netty.channel.Channel)
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)
IOException (java.io.IOException)
UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException)
LeaseExpiredException (org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException)
InvalidBlockTokenException (org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException)
InvocationTargetException (java.lang.reflect.InvocationTargetException)
RemoteException (org.apache.hadoop.ipc.RemoteException)
DataChecksum (org.apache.hadoop.util.DataChecksum)
ChannelFuture (org.apache.hbase.thirdparty.io.netty.channel.ChannelFuture)
Future (org.apache.hbase.thirdparty.io.netty.util.concurrent.Future)
ClientProtocol (org.apache.hadoop.hdfs.protocol.ClientProtocol)
ExcludeDatanodeManager (org.apache.hadoop.hbase.io.asyncfs.monitor.ExcludeDatanodeManager)
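
The finally block above combines blocking waits with listener-based cleanup. Below is a minimal sketch of that idiom, assuming a futures list produced by a fan-out connect like the one in Example 2; useOrCleanUp and its parameter are hypothetical names.

import java.util.List;

import io.netty.channel.Channel;
import io.netty.util.concurrent.Future;
import io.netty.util.concurrent.FutureListener;

public class WaitAndCleanupSketch {

    static void useOrCleanUp(List<Future<Channel>> futures) {
        boolean succ = false;
        try {
            for (Future<Channel> f : futures) {
                // syncUninterruptibly() waits for completion and rethrows the cause
                // on failure; getNow() is then safe because the future is complete
                Channel ch = f.syncUninterruptibly().getNow();
                System.out.println("connected: " + ch.remoteAddress());
            }
            succ = true;
        } finally {
            if (!succ) {
                for (Future<Channel> f : futures) {
                    // close late-arriving channels too: the listener fires immediately
                    // for futures that are already done and later for the rest
                    f.addListener((FutureListener<Channel>) future -> {
                        if (future.isSuccess()) {
                            future.getNow().close();
                        }
                    });
                }
            }
        }
    }
}

Cleanup goes through addListener rather than a direct close() loop because some connects may still be in flight when the failure occurs; the listener covers both already-completed and late-completing futures, so no channel is leaked either way.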

Aggregations (number of examples above that use each type):

IOException (java.io.IOException): 3
ChannelFuture (org.apache.hbase.thirdparty.io.netty.channel.ChannelFuture): 3
Future (org.apache.hbase.thirdparty.io.netty.util.concurrent.Future): 3
InterruptedIOException (java.io.InterruptedIOException): 2
InvocationTargetException (java.lang.reflect.InvocationTargetException): 2
UnresolvedLinkException (org.apache.hadoop.fs.UnresolvedLinkException): 2
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2
InvalidBlockTokenException (org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException): 2
LeaseExpiredException (org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException): 2
RemoteException (org.apache.hadoop.ipc.RemoteException): 2
Channel (org.apache.hbase.thirdparty.io.netty.channel.Channel): 2
ChannelFutureListener (org.apache.hbase.thirdparty.io.netty.channel.ChannelFutureListener): 2
InetSocketAddress (java.net.InetSocketAddress): 1
UnknownHostException (java.net.UnknownHostException): 1
ArrayList (java.util.ArrayList): 1
HashSet (java.util.HashSet): 1
IdentityHashMap (java.util.IdentityHashMap): 1
Configuration (org.apache.hadoop.conf.Configuration): 1
Encryptor (org.apache.hadoop.crypto.Encryptor): 1
StorageType (org.apache.hadoop.fs.StorageType): 1