Search in sources:

Example 11 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in the Apache Hadoop project.

Source: class NameNodeProxiesClient, method createProxyWithClientProtocol.

/**
 * Creates a namenode proxy that speaks {@link ClientProtocol}. Depending on
 * whether the supplied URI names a configured logical (HA) nameservice or a
 * single NameNode, the returned proxy is either HA-enabled (failover-aware)
 * or a plain non-HA proxy.
 *
 * @param conf configuration holding the required IPC properties, client
 *        failover settings, etc.
 * @param nameNodeUri URI pointing either to a specific NameNode or to a
 *        logical nameservice
 * @param fallbackToSimpleAuth set to true or false during calls to indicate
 *        whether a secure client falls back to simple auth
 * @return an object holding both the proxy and the delegation token service
 *         it corresponds to
 * @throws IOException if there is an error creating the proxy
 * @see NameNodeProxies#createProxy(Configuration, URI, Class)
 */
public static ProxyAndInfo<ClientProtocol> createProxyWithClientProtocol(Configuration conf, URI nameNodeUri, AtomicBoolean fallbackToSimpleAuth) throws IOException {
    final AbstractNNFailoverProxyProvider<ClientProtocol> provider =
        createFailoverProxyProvider(conf, nameNodeUri, ClientProtocol.class, true, fallbackToSimpleAuth);
    if (provider != null) {
        // A provider exists => the URI is a logical (HA) nameservice.
        return createHAProxy(conf, nameNodeUri, ClientProtocol.class, provider);
    }
    // Non-HA path: resolve the concrete NameNode address and build a
    // direct proxy, plus the token service string callers use for
    // delegation-token selection.
    final InetSocketAddress address = DFSUtilClient.getNNAddress(nameNodeUri);
    final Text tokenService = SecurityUtil.buildTokenService(address);
    final ClientProtocol rpcProxy = createNonHAProxyWithClientProtocol(
        address, conf, UserGroupInformation.getCurrentUser(), true, fallbackToSimpleAuth);
    return new ProxyAndInfo<>(rpcProxy, tokenService, address);
}
Also used : InetSocketAddress(java.net.InetSocketAddress) Text(org.apache.hadoop.io.Text) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol)

Example 12 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in the Apache Hadoop project.

Source: class DfsServlet, method createNameNodeProxy.

/**
 * Obtains a {@link ClientProtocol} handle to the NameNode associated with
 * the current {@link ServletContext}.
 *
 * @return a direct reference to the in-process NameNode RPC server when the
 *         servlet runs inside the NameNode, otherwise an RPC proxy
 * @throws IOException if the proxy cannot be created
 */
protected ClientProtocol createNameNodeProxy() throws IOException {
    final ServletContext ctx = getServletContext();
    // If this servlet is hosted inside the NameNode process, skip RPC and
    // talk to the RPC server object directly.
    final NameNode embedded = NameNodeHttpServer.getNameNodeFromContext(ctx);
    if (embedded != null) {
        return embedded.getRpcServer();
    }
    // Otherwise build a real RPC proxy against the configured NN address.
    final InetSocketAddress address = NameNodeHttpServer.getNameNodeAddressFromContext(ctx);
    final Configuration conf = new HdfsConfiguration(NameNodeHttpServer.getConfFromContext(ctx));
    return NameNodeProxies.createProxy(conf, DFSUtilClient.getNNUri(address), ClientProtocol.class).getProxy();
}
Also used : HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) InetSocketAddress(java.net.InetSocketAddress) ServletContext(javax.servlet.ServletContext) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol)

Example 13 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in the Apache Hadoop project.

Source: class DFSAdmin, method setSafeMode.

/**
 * Safe mode maintenance command.
 * Usage: hdfs dfsadmin -safemode [enter | leave | get | wait | forceExit]
 *
 * @param argv list of command line parameters
 * @param idx the index of the command that is being processed
 * @throws IOException if the filesystem does not exist
 */
public void setSafeMode(String[] argv, int idx) throws IOException {
    // The -safemode sub-command takes exactly one argument.
    if (idx != argv.length - 1) {
        printUsage("-safemode");
        return;
    }
    HdfsConstants.SafeModeAction action;
    // Use the primitive type rather than boxed Boolean: no nullability is
    // needed, and boxing invites accidental identity comparisons.
    boolean waitExitSafe = false;
    if ("leave".equalsIgnoreCase(argv[idx])) {
        action = HdfsConstants.SafeModeAction.SAFEMODE_LEAVE;
    } else if ("enter".equalsIgnoreCase(argv[idx])) {
        action = HdfsConstants.SafeModeAction.SAFEMODE_ENTER;
    } else if ("get".equalsIgnoreCase(argv[idx])) {
        action = HdfsConstants.SafeModeAction.SAFEMODE_GET;
    } else if ("wait".equalsIgnoreCase(argv[idx])) {
        // "wait" polls with GET until safe mode is exited.
        action = HdfsConstants.SafeModeAction.SAFEMODE_GET;
        waitExitSafe = true;
    } else if ("forceExit".equalsIgnoreCase(argv[idx])) {
        action = HdfsConstants.SafeModeAction.SAFEMODE_FORCE_EXIT;
    } else {
        printUsage("-safemode");
        return;
    }
    DistributedFileSystem dfs = getDFS();
    Configuration dfsConf = dfs.getConf();
    URI dfsUri = dfs.getUri();
    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
    if (isHaEnabled) {
        // HA: apply the action to every NameNode in the nameservice and
        // report each one's resulting state individually.
        String nsId = dfsUri.getHost();
        List<ProxyAndInfo<ClientProtocol>> proxies = HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class);
        for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
            ClientProtocol haNn = proxy.getProxy();
            boolean inSafeMode = haNn.setSafeMode(action, false);
            if (waitExitSafe) {
                inSafeMode = waitExitSafeMode(haNn, inSafeMode);
            }
            System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF") + " in " + proxy.getAddress());
        }
    } else {
        boolean inSafeMode = dfs.setSafeMode(action);
        if (waitExitSafe) {
            inSafeMode = waitExitSafeMode(dfs, inSafeMode);
        }
        System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF"));
    }
}
Also used : HdfsConstants(org.apache.hadoop.hdfs.protocol.HdfsConstants) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ProxyAndInfo(org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo) SafeModeAction(org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol) URI(java.net.URI)

Example 14 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in the Apache Hadoop project.

Source: class DFSAdmin, method setBalancerBandwidth.

/**
 * Command to ask the namenode to set the balancer bandwidth for all of the
 * datanodes.
 * Usage: hdfs dfsadmin -setBalancerBandwidth bandwidth
 *
 * @param argv list of command line parameters
 * @param idx the index of the command that is being processed
 * @return 0 on success, -1 on any validation or filesystem-type error
 * @throws IOException on an RPC failure talking to a namenode
 */
public int setBalancerBandwidth(String[] argv, int idx) throws IOException {
    final int failure = -1;
    final long bandwidth;
    try {
        bandwidth = Long.parseLong(argv[idx]);
    } catch (NumberFormatException nfe) {
        System.err.println("NumberFormatException: " + nfe.getMessage());
        System.err.println("Usage: hdfs dfsadmin" + " [-setBalancerBandwidth <bandwidth in bytes per second>]");
        return failure;
    }
    if (bandwidth < 0) {
        System.err.println("Bandwidth should be a non-negative integer");
        return failure;
    }
    final FileSystem fs = getFS();
    // The command only makes sense against HDFS.
    if (!(fs instanceof DistributedFileSystem)) {
        System.err.println("FileSystem is " + fs.getUri());
        return failure;
    }
    final DistributedFileSystem dfs = (DistributedFileSystem) fs;
    final Configuration dfsConf = dfs.getConf();
    final URI dfsUri = dfs.getUri();
    if (HAUtilClient.isLogicalUri(dfsConf, dfsUri)) {
        // HA: push the new bandwidth to every NameNode in the nameservice.
        final String nsId = dfsUri.getHost();
        final List<ProxyAndInfo<ClientProtocol>> proxies =
            HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class);
        for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
            proxy.getProxy().setBalancerBandwidth(bandwidth);
            System.out.println("Balancer bandwidth is set to " + bandwidth + " for " + proxy.getAddress());
        }
    } else {
        dfs.setBalancerBandwidth(bandwidth);
        System.out.println("Balancer bandwidth is set to " + bandwidth);
    }
    return 0;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) URI(java.net.URI) ProxyAndInfo(org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol)

Example 15 with ClientProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientProtocol in the Apache Hadoop project.

Source: class DFSAdmin, method metaSave.

/**
 * Dumps DFS data structures into specified file.
 * Usage: hdfs dfsadmin -metasave filename
 *
 * @param argv list of command line parameters
 * @param idx the index of the command that is being processed
 * @return 0 (always; errors surface as exceptions)
 * @throws IOException if an error occurred while accessing the file or path
 */
public int metaSave(String[] argv, int idx) throws IOException {
    final String pathname = argv[idx];
    final DistributedFileSystem dfs = getDFS();
    final Configuration dfsConf = dfs.getConf();
    final URI dfsUri = dfs.getUri();
    if (HAUtilClient.isLogicalUri(dfsConf, dfsUri)) {
        // HA: ask every NameNode in the nameservice to write its own dump.
        final String nsId = dfsUri.getHost();
        final List<ProxyAndInfo<ClientProtocol>> proxies =
            HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class);
        for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
            proxy.getProxy().metaSave(pathname);
            System.out.println("Created metasave file " + pathname + " in the log " + "directory of namenode " + proxy.getAddress());
        }
    } else {
        dfs.metaSave(pathname);
        System.out.println("Created metasave file " + pathname + " in the log " + "directory of namenode " + dfs.getUri());
    }
    return 0;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ProxyAndInfo(org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol) URI(java.net.URI)

Aggregations

ClientProtocol (org.apache.hadoop.hdfs.protocol.ClientProtocol)21 Configuration (org.apache.hadoop.conf.Configuration)14 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)10 URI (java.net.URI)8 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)8 ProxyAndInfo (org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo)7 IOException (java.io.IOException)5 LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock)5 Path (org.apache.hadoop.fs.Path)4 Test (org.junit.Test)4 DFSClient (org.apache.hadoop.hdfs.DFSClient)3 LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks)3 Field (java.lang.reflect.Field)2 InvocationTargetException (java.lang.reflect.InvocationTargetException)2 InetSocketAddress (java.net.InetSocketAddress)2 FileSystem (org.apache.hadoop.fs.FileSystem)2 StorageType (org.apache.hadoop.fs.StorageType)2 FsPermission (org.apache.hadoop.fs.permission.FsPermission)2 MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster)2 ClientNamenodeProtocolPB (org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB)2