Example 6 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hadoop by apache.

In class NameNodeProxiesClient, method createNonHAProxyWithClientProtocol:

public static ClientProtocol createNonHAProxyWithClientProtocol(InetSocketAddress address,
        Configuration conf, UserGroupInformation ugi, boolean withRetries,
        AtomicBoolean fallbackToSimpleAuth) throws IOException {
    RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class);
    final RetryPolicy defaultPolicy = RetryUtils.getDefaultRetryPolicy(conf,
        HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY,
        HdfsClientConfigKeys.Retry.POLICY_ENABLED_DEFAULT,
        HdfsClientConfigKeys.Retry.POLICY_SPEC_KEY,
        HdfsClientConfigKeys.Retry.POLICY_SPEC_DEFAULT,
        SafeModeException.class.getName());
    final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
    ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(ClientNamenodeProtocolPB.class,
        version, address, ugi, conf, NetUtils.getDefaultSocketFactory(conf),
        org.apache.hadoop.ipc.Client.getTimeout(conf), defaultPolicy,
        fallbackToSimpleAuth).getProxy();
    if (withRetries) {
        // create the proxy with retries
        Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<>();
        ClientProtocol translatorProxy = new ClientNamenodeProtocolTranslatorPB(proxy);
        return (ClientProtocol) RetryProxy.create(ClientProtocol.class,
            new DefaultFailoverProxyProvider<>(ClientProtocol.class, translatorProxy),
            methodNameToPolicyMap, defaultPolicy);
    } else {
        return new ClientNamenodeProtocolTranslatorPB(proxy);
    }
}
Also used : ClientNamenodeProtocolPB(org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB) HashMap(java.util.HashMap) ClientNamenodeProtocolTranslatorPB(org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB) SafeModeException(org.apache.hadoop.hdfs.server.namenode.SafeModeException) DefaultFailoverProxyProvider(org.apache.hadoop.io.retry.DefaultFailoverProxyProvider) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol) RetryPolicy(org.apache.hadoop.io.retry.RetryPolicy)
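
For context, a minimal sketch of how this factory could be invoked, assuming the surrounding method declares throws IOException; the NameNode host, port, and retry flag below are illustrative assumptions, and the imports match the "Also used" list above plus java.net.InetSocketAddress, java.util.concurrent.atomic.AtomicBoolean, and org.apache.hadoop.security.UserGroupInformation.

// Hypothetical caller; host, port, and retry settings are illustrative.
Configuration conf = new HdfsConfiguration();
InetSocketAddress nnAddr = new InetSocketAddress("nn.example.com", 8020);
UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
ClientProtocol namenode = NameNodeProxiesClient.createNonHAProxyWithClientProtocol(
    nnAddr, conf, ugi, true, new AtomicBoolean(false));
// The returned proxy speaks the protobuf ClientNamenodeProtocolPB wire protocol
// and retries failed calls according to the default HDFS client retry policy.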

Example 7 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hbase by apache.

In class TestBlockReorder, method getNamenode:

private static ClientProtocol getNamenode(DFSClient dfsc) throws Exception {
    Field nf = DFSClient.class.getDeclaredField("namenode");
    nf.setAccessible(true);
    return (ClientProtocol) nf.get(dfsc);
}
Also used : Field(java.lang.reflect.Field) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol)
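
A hedged usage sketch: in a test, the helper above can expose the (normally private) NameNode RPC stub of a live client, for example to query block locations directly. The dfs variable and path below are illustrative assumptions; getBlockLocations(String, long, long) is the standard ClientProtocol call.

// Hypothetical test usage; dfs is assumed to come from a MiniDFSCluster.
DFSClient dfsClient = dfs.getClient();
ClientProtocol nn = getNamenode(dfsClient);
LocatedBlocks blocks = nn.getBlockLocations("/test/file", 0, Long.MAX_VALUE);
// Note: this relies on the private field name "namenode" staying stable
// across Hadoop versions, which reflection cannot guarantee.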

Example 8 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hbase by apache.

In class FanOutOneBlockAsyncDFSOutputHelper, method createBlockAdder:

private static BlockAdder createBlockAdder() throws NoSuchMethodException {
    for (Method method : ClientProtocol.class.getMethods()) {
        if (method.getName().equals("addBlock")) {
            Method addBlockMethod = method;
            Class<?>[] paramTypes = addBlockMethod.getParameterTypes();
            // Older ClientProtocol signature: favoredNodes (String[]) is the last parameter.
            if (paramTypes[paramTypes.length - 1] == String[].class) {
                return new BlockAdder() {

                    @Override
                    public LocatedBlock addBlock(ClientProtocol namenode, String src, String clientName, ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId, String[] favoredNodes) throws IOException {
                        try {
                            return (LocatedBlock) addBlockMethod.invoke(namenode, src, clientName, previous, excludeNodes, fileId, favoredNodes);
                        } catch (IllegalAccessException e) {
                            throw new RuntimeException(e);
                        } catch (InvocationTargetException e) {
                            Throwables.propagateIfPossible(e.getTargetException(), IOException.class);
                            throw new RuntimeException(e);
                        }
                    }
                };
            } else {
                // Newer signature: a trailing flags parameter follows favoredNodes,
                // so pass null for it when invoking.
                return new BlockAdder() {

                    @Override
                    public LocatedBlock addBlock(ClientProtocol namenode, String src, String clientName, ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId, String[] favoredNodes) throws IOException {
                        try {
                            return (LocatedBlock) addBlockMethod.invoke(namenode, src, clientName, previous, excludeNodes, fileId, favoredNodes, null);
                        } catch (IllegalAccessException e) {
                            throw new RuntimeException(e);
                        } catch (InvocationTargetException e) {
                            Throwables.propagateIfPossible(e.getTargetException(), IOException.class);
                            throw new RuntimeException(e);
                        }
                    }
                };
            }
        }
    }
    throw new NoSuchMethodException("Can not find addBlock method in ClientProtocol");
}
Also used : ExtendedBlock(org.apache.hadoop.hdfs.protocol.ExtendedBlock) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) Method(java.lang.reflect.Method) IOException(java.io.IOException) InvocationTargetException(java.lang.reflect.InvocationTargetException) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol)
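
This reflection dance is a compatibility shim: it binds, once, to whichever addBlock overload the Hadoop version on the classpath provides, instead of failing at runtime. Usage mirrors the call in createOutput below (Example 9); the variables here are assumed to be in scope there.

// The shim is resolved once and reused for every block allocation.
// Hypothetical call site; namenode, src, clientName, and fileId come
// from the surrounding method, as in createOutput in Example 9.
BlockAdder blockAdder = createBlockAdder();
LocatedBlock block = blockAdder.addBlock(namenode, src, clientName,
    null, null, fileId, null);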

Example 9 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hbase by apache.

In class FanOutOneBlockAsyncDFSOutputHelper, method createOutput:

private static FanOutOneBlockAsyncDFSOutput createOutput(DistributedFileSystem dfs, String src,
        boolean overwrite, boolean createParent, short replication, long blockSize,
        EventLoop eventLoop) throws IOException {
    Configuration conf = dfs.getConf();
    FSUtils fsUtils = FSUtils.getInstance(dfs, conf);
    DFSClient client = dfs.getClient();
    String clientName = client.getClientName();
    ClientProtocol namenode = client.getNamenode();
    HdfsFileStatus stat;
    try {
        stat = namenode.create(src,
            FsPermission.getFileDefault().applyUMask(FsPermission.getUMask(conf)), clientName,
            new EnumSetWritable<>(overwrite ? EnumSet.of(CREATE, OVERWRITE) : EnumSet.of(CREATE)),
            createParent, replication, blockSize, CryptoProtocolVersion.supported());
    } catch (Exception e) {
        // Let NameNode-side RemoteExceptions propagate unchanged; wrap anything else
        // so callers can distinguish a NameNode failure from a local one.
        if (e instanceof RemoteException) {
            throw (RemoteException) e;
        } else {
            throw new NameNodeException(e);
        }
    }
    beginFileLease(client, stat.getFileId());
    boolean succ = false;
    LocatedBlock locatedBlock = null;
    List<Future<Channel>> futureList = null;
    try {
        DataChecksum summer = createChecksum(client);
        locatedBlock = BLOCK_ADDER.addBlock(namenode, src, client.getClientName(), null, null, stat.getFileId(), null);
        List<Channel> datanodeList = new ArrayList<>();
        futureList = connectToDataNodes(conf, client, clientName, locatedBlock, 0L, 0L, PIPELINE_SETUP_CREATE, summer, eventLoop);
        for (Future<Channel> future : futureList) {
            // fail the creation if there are connection failures since we are fail-fast. The upper
            // layer should retry itself if needed.
            datanodeList.add(future.syncUninterruptibly().getNow());
        }
        Encryptor encryptor = createEncryptor(conf, stat, client);
        FanOutOneBlockAsyncDFSOutput output = new FanOutOneBlockAsyncDFSOutput(conf, fsUtils,
            dfs, client, namenode, clientName, src, stat.getFileId(), locatedBlock, encryptor,
            eventLoop, datanodeList, summer, ALLOC);
        succ = true;
        return output;
    } finally {
        if (!succ) {
            if (futureList != null) {
                for (Future<Channel> f : futureList) {
                    f.addListener(new FutureListener<Channel>() {

                        @Override
                        public void operationComplete(Future<Channel> future) throws Exception {
                            if (future.isSuccess()) {
                                future.getNow().close();
                            }
                        }
                    });
                }
            }
            endFileLease(client, stat.getFileId());
            fsUtils.recoverFileLease(dfs, new Path(src), conf, new CancelOnClose(client));
        }
    }
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) ArrayList(java.util.ArrayList) FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor(org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createEncryptor) Encryptor(org.apache.hadoop.crypto.Encryptor) HdfsFileStatus(org.apache.hadoop.hdfs.protocol.HdfsFileStatus) FSUtils(org.apache.hadoop.hbase.util.FSUtils) DFSClient(org.apache.hadoop.hdfs.DFSClient) EnumSetWritable(org.apache.hadoop.io.EnumSetWritable) Path(org.apache.hadoop.fs.Path) NioSocketChannel(io.netty.channel.socket.nio.NioSocketChannel) Channel(io.netty.channel.Channel) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) UnresolvedLinkException(org.apache.hadoop.fs.UnresolvedLinkException) LeaseExpiredException(org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException) InvalidBlockTokenException(org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException) InvocationTargetException(java.lang.reflect.InvocationTargetException) IOException(java.io.IOException) RemoteException(org.apache.hadoop.ipc.RemoteException) DataChecksum(org.apache.hadoop.util.DataChecksum) ChannelFuture(io.netty.channel.ChannelFuture) Future(io.netty.util.concurrent.Future) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol)
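
A hedged sketch of calling createOutput; the path, replication, and block size are illustrative, and the event loop is assumed to come from a Netty NioEventLoopGroup.

// Hypothetical caller; all parameter values are illustrative.
EventLoop eventLoop = new NioEventLoopGroup(1).next();
FanOutOneBlockAsyncDFSOutput out = createOutput(dfs, "/hbase/WALs/example",
    true, false, (short) 3, 128 * 1024 * 1024, eventLoop);
// On any connection failure the method fails fast: it closes the channels it
// managed to open, ends the file lease, and recovers the lease on the path.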

Example 10 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in project hbase by apache.

In class HFileSystem, method addLocationsOrderInterceptor:

/**
   * Add an interceptor on the calls to the namenode#getBlockLocations from the DFSClient
   * linked to this FileSystem. See HBASE-6435 for the background.
   * <p/>
   * There should be no reason, except testing, to create a specific ReorderBlocks.
   *
   * @return true if the interceptor was added, false otherwise.
   */
static boolean addLocationsOrderInterceptor(Configuration conf, final ReorderBlocks lrb) {
    if (!conf.getBoolean("hbase.filesystem.reorder.blocks", true)) {
        // activated by default
        LOG.debug("addLocationsOrderInterceptor configured to false");
        return false;
    }
    FileSystem fs;
    try {
        fs = FileSystem.get(conf);
    } catch (IOException e) {
        LOG.warn("Can't get the file system from the conf.", e);
        return false;
    }
    if (!(fs instanceof DistributedFileSystem)) {
        LOG.debug("The file system is not a DistributedFileSystem. " + "Skipping on block location reordering");
        return false;
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    DFSClient dfsc = dfs.getClient();
    if (dfsc == null) {
        LOG.warn("The DistributedFileSystem does not contain a DFSClient. Can't add the location " + "block reordering interceptor. Continuing, but this is unexpected.");
        return false;
    }
    try {
        Field nf = DFSClient.class.getDeclaredField("namenode");
        nf.setAccessible(true);
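        // Strip the final modifier so the private "namenode" field can be reassigned
        // below. Note: Field.class stopped exposing its own "modifiers" field to
        // reflection in JDK 12, so this trick assumes an older runtime.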
        Field modifiersField = Field.class.getDeclaredField("modifiers");
        modifiersField.setAccessible(true);
        modifiersField.setInt(nf, nf.getModifiers() & ~Modifier.FINAL);
        ClientProtocol namenode = (ClientProtocol) nf.get(dfsc);
        if (namenode == null) {
            LOG.warn("The DFSClient is not linked to a namenode. Can't add the location block" + " reordering interceptor. Continuing, but this is unexpected.");
            return false;
        }
        ClientProtocol cp1 = createReorderingProxy(namenode, lrb, conf);
        nf.set(dfsc, cp1);
        LOG.info("Added intercepting call to namenode#getBlockLocations so can do block reordering" + " using class " + lrb.getClass().getName());
    } catch (NoSuchFieldException | IllegalAccessException e) {
        LOG.warn("Can't modify the DFSClient#namenode field to add the location reorder.", e);
        return false;
    }
    return true;
}
Also used : DFSClient(org.apache.hadoop.hdfs.DFSClient) Field(java.lang.reflect.Field) FileSystem(org.apache.hadoop.fs.FileSystem) FilterFileSystem(org.apache.hadoop.fs.FilterFileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) LocalFileSystem(org.apache.hadoop.fs.LocalFileSystem) IOException(java.io.IOException) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol)
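
createReorderingProxy itself is not shown in this listing. As a rough illustration of the pattern, a JDK dynamic proxy (java.lang.reflect.Proxy) can wrap the ClientProtocol stub and hand each getBlockLocations result to the ReorderBlocks callback. This is a minimal sketch under stated assumptions, not the HBase implementation; it assumes ReorderBlocks exposes reorderBlocks(Configuration, LocatedBlocks, String).

// Minimal sketch of a block-reordering interceptor (assumed interface shape).
private static ClientProtocol reorderingProxySketch(final ClientProtocol cp,
        final ReorderBlocks lrb, final Configuration conf) {
    return (ClientProtocol) Proxy.newProxyInstance(cp.getClass().getClassLoader(),
        new Class<?>[] { ClientProtocol.class },
        (proxy, method, args) -> {
            Object res;
            try {
                res = method.invoke(cp, args);
            } catch (InvocationTargetException e) {
                // Unwrap so callers see the original IOException, not the wrapper.
                throw e.getTargetException();
            }
            if ("getBlockLocations".equals(method.getName()) && res instanceof LocatedBlocks) {
                // Let the callback reorder replicas before the DFSClient sees them.
                lrb.reorderBlocks(conf, (LocatedBlocks) res, (String) args[0]);
            }
            return res;
        });
}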

Aggregations

ClientProtocol (org.apache.hadoop.hdfs.protocol.ClientProtocol): 21 uses
Configuration (org.apache.hadoop.conf.Configuration): 14 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 10 uses
URI (java.net.URI): 8 uses
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 8 uses
ProxyAndInfo (org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo): 7 uses
IOException (java.io.IOException): 5 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 5 uses
Path (org.apache.hadoop.fs.Path): 4 uses
Test (org.junit.Test): 4 uses
DFSClient (org.apache.hadoop.hdfs.DFSClient): 3 uses
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 3 uses
Field (java.lang.reflect.Field): 2 uses
InvocationTargetException (java.lang.reflect.InvocationTargetException): 2 uses
InetSocketAddress (java.net.InetSocketAddress): 2 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 2 uses
StorageType (org.apache.hadoop.fs.StorageType): 2 uses
FsPermission (org.apache.hadoop.fs.permission.FsPermission): 2 uses
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2 uses
ClientNamenodeProtocolPB (org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB): 2 uses