Search in sources :

Example 6 with ProxyAndInfo

use of org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo in project hadoop by apache.

The following is the method getProxiesForAllNameNodesInNameservice from the class HAUtil.

/**
 * Builds one RPC proxy per NameNode of an HA nameservice. Intended for RPC
 * calls that must reach every NN of the nameservice rather than only the
 * active one.
 *
 * @param conf configuration
 * @param nsId the nameservice whose NameNodes should be proxied.
 * @param xface the protocol class.
 * @return a list containing an RPC proxy for each NN in the nameservice.
 * @throws IOException in the event of error.
 */
public static <T> List<ProxyAndInfo<T>> getProxiesForAllNameNodesInNameservice(Configuration conf, String nsId, Class<T> xface) throws IOException {
    // Resolve every NN RPC address configured for this nameservice.
    Map<String, InetSocketAddress> addressMap = DFSUtil.getRpcAddressesForNameserviceId(conf, nsId, null);
    List<ProxyAndInfo<T>> result = new ArrayList<>(addressMap.size());
    for (InetSocketAddress address : addressMap.values()) {
        // Non-HA proxy: each NN is addressed individually, bypassing failover.
        result.add(NameNodeProxies.createNonHAProxy(conf, address, xface, UserGroupInformation.getCurrentUser(), false));
    }
    return result;
}
Also used : ProxyAndInfo(org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo) InetSocketAddress(java.net.InetSocketAddress) ArrayList(java.util.ArrayList)

Example 7 with ProxyAndInfo

use of org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo in project hadoop by apache.

The following is the method setSafeMode from the class DFSAdmin.

/**
 * Safe mode maintenance command.
 * Usage: hdfs dfsadmin -safemode [enter | leave | get | wait | forceExit]
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if the filesystem does not exist.
 */
public void setSafeMode(String[] argv, int idx) throws IOException {
    // -safemode takes exactly one trailing argument: the action keyword.
    if (idx != argv.length - 1) {
        printUsage("-safemode");
        return;
    }
    HdfsConstants.SafeModeAction action;
    // Primitive boolean — the boxed Boolean wrapper was needless autoboxing.
    boolean waitExitSafe = false;
    if ("leave".equalsIgnoreCase(argv[idx])) {
        action = HdfsConstants.SafeModeAction.SAFEMODE_LEAVE;
    } else if ("enter".equalsIgnoreCase(argv[idx])) {
        action = HdfsConstants.SafeModeAction.SAFEMODE_ENTER;
    } else if ("get".equalsIgnoreCase(argv[idx])) {
        action = HdfsConstants.SafeModeAction.SAFEMODE_GET;
    } else if ("wait".equalsIgnoreCase(argv[idx])) {
        // "wait" reads the current state, then polls until safe mode exits.
        action = HdfsConstants.SafeModeAction.SAFEMODE_GET;
        waitExitSafe = true;
    } else if ("forceExit".equalsIgnoreCase(argv[idx])) {
        action = HdfsConstants.SafeModeAction.SAFEMODE_FORCE_EXIT;
    } else {
        printUsage("-safemode");
        return;
    }
    DistributedFileSystem dfs = getDFS();
    Configuration dfsConf = dfs.getConf();
    URI dfsUri = dfs.getUri();
    boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
    if (isHaEnabled) {
        // HA: apply the safe-mode action on every NN of the nameservice,
        // not just the active one.
        String nsId = dfsUri.getHost();
        List<ProxyAndInfo<ClientProtocol>> proxies = HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class);
        for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
            ClientProtocol haNn = proxy.getProxy();
            boolean inSafeMode = haNn.setSafeMode(action, false);
            if (waitExitSafe) {
                inSafeMode = waitExitSafeMode(haNn, inSafeMode);
            }
            System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF") + " in " + proxy.getAddress());
        }
    } else {
        boolean inSafeMode = dfs.setSafeMode(action);
        if (waitExitSafe) {
            inSafeMode = waitExitSafeMode(dfs, inSafeMode);
        }
        System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF"));
    }
}
Also used : HdfsConstants(org.apache.hadoop.hdfs.protocol.HdfsConstants) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ProxyAndInfo(org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo) SafeModeAction(org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol) URI(java.net.URI)

Example 8 with ProxyAndInfo

use of org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo in project hadoop by apache.

The following is the method setBalancerBandwidth from the class DFSAdmin.

/**
 * Asks the namenode(s) to update the balancer bandwidth of all datanodes.
 * Usage: hdfs dfsadmin -setBalancerBandwidth bandwidth
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @return 0 on success, -1 on invalid input or a non-DFS filesystem.
 * @exception IOException
 */
public int setBalancerBandwidth(String[] argv, int idx) throws IOException {
    final long bandwidth;
    try {
        bandwidth = Long.parseLong(argv[idx]);
    } catch (NumberFormatException nfe) {
        System.err.println("NumberFormatException: " + nfe.getMessage());
        System.err.println("Usage: hdfs dfsadmin [-setBalancerBandwidth <bandwidth in bytes per second>]");
        return -1;
    }
    if (bandwidth < 0) {
        System.err.println("Bandwidth should be a non-negative integer");
        return -1;
    }
    // Only DFS understands balancer bandwidth; bail out for other filesystems.
    FileSystem fs = getFS();
    if (!(fs instanceof DistributedFileSystem)) {
        System.err.println("FileSystem is " + fs.getUri());
        return -1;
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    Configuration conf = dfs.getConf();
    URI uri = dfs.getUri();
    if (HAUtilClient.isLogicalUri(conf, uri)) {
        // HA: push the new bandwidth setting to every NN of the nameservice.
        String nameservice = uri.getHost();
        for (ProxyAndInfo<ClientProtocol> proxy : HAUtil.getProxiesForAllNameNodesInNameservice(conf, nameservice, ClientProtocol.class)) {
            proxy.getProxy().setBalancerBandwidth(bandwidth);
            System.out.println("Balancer bandwidth is set to " + bandwidth + " for " + proxy.getAddress());
        }
    } else {
        dfs.setBalancerBandwidth(bandwidth);
        System.out.println("Balancer bandwidth is set to " + bandwidth);
    }
    return 0;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) URI(java.net.URI) ProxyAndInfo(org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo) FileSystem(org.apache.hadoop.fs.FileSystem) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol)

Example 9 with ProxyAndInfo

use of org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo in project hadoop by apache.

The following is the method metaSave from the class DFSAdmin.

/**
 * Dumps DFS data structures into the specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @return 0 always.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
    String pathname = argv[idx];
    DistributedFileSystem dfs = getDFS();
    Configuration conf = dfs.getConf();
    URI uri = dfs.getUri();
    if (HAUtilClient.isLogicalUri(conf, uri)) {
        // HA: ask every NN of the nameservice to dump its state, so each
        // namenode's log directory gets its own metasave file.
        String nameservice = uri.getHost();
        for (ProxyAndInfo<ClientProtocol> proxy : HAUtil.getProxiesForAllNameNodesInNameservice(conf, nameservice, ClientProtocol.class)) {
            proxy.getProxy().metaSave(pathname);
            System.out.println("Created metasave file " + pathname + " in the log directory of namenode " + proxy.getAddress());
        }
    } else {
        dfs.metaSave(pathname);
        System.out.println("Created metasave file " + pathname + " in the log directory of namenode " + dfs.getUri());
    }
    return 0;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ProxyAndInfo(org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol) URI(java.net.URI)

Example 10 with ProxyAndInfo

use of org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo in project hadoop by apache.

The following is the method finalizeUpgrade from the class DFSAdmin.

/**
 * Command to ask the namenode to finalize previously performed upgrade.
 * Usage: hdfs dfsadmin -finalizeUpgrade
 * @return 0 on success.
 * @exception IOException if no NameNode is active or the RPC fails.
 */
public int finalizeUpgrade() throws IOException {
    DistributedFileSystem dfs = getDFS();
    Configuration dfsConf = dfs.getConf();
    URI dfsUri = dfs.getUri();
    boolean isHaAndLogicalUri = HAUtilClient.isLogicalUri(dfsConf, dfsUri);
    if (isHaAndLogicalUri) {
        // In the case of HA and logical URI, run finalizeUpgrade for all
        // NNs in this nameservice.
        String nsId = dfsUri.getHost();
        // Require at least one active NN before issuing the finalize call
        // ("Cannot finalize with no NameNode active" otherwise).
        List<ClientProtocol> namenodes = HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId);
        if (!HAUtil.isAtLeastOneActive(namenodes)) {
            throw new IOException("Cannot finalize with no NameNode active");
        }
        // NOTE(review): this builds a second, independent set of proxies to
        // the same NameNodes via the 3-arg overload; reusing the proxies from
        // the list above would avoid the duplicate proxy setup — confirm the
        // two overloads are interchangeable before restructuring.
        List<ProxyAndInfo<ClientProtocol>> proxies = HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId, ClientProtocol.class);
        for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
            proxy.getProxy().finalizeUpgrade();
            System.out.println("Finalize upgrade successful for " + proxy.getAddress());
        }
    } else {
        // Non-HA: finalize directly through the filesystem client.
        dfs.finalizeUpgrade();
        System.out.println("Finalize upgrade successful");
    }
    return 0;
}
Also used : Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) ProxyAndInfo(org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo) IOException(java.io.IOException) DistributedFileSystem(org.apache.hadoop.hdfs.DistributedFileSystem) ClientProtocol(org.apache.hadoop.hdfs.protocol.ClientProtocol) URI(java.net.URI)

Aggregations

ProxyAndInfo (org.apache.hadoop.hdfs.NameNodeProxiesClient.ProxyAndInfo)13 URI (java.net.URI)11 Configuration (org.apache.hadoop.conf.Configuration)11 DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem)11 HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration)11 ClientProtocol (org.apache.hadoop.hdfs.protocol.ClientProtocol)7 RefreshUserMappingsProtocol (org.apache.hadoop.security.RefreshUserMappingsProtocol)2 RefreshAuthorizationPolicyProtocol (org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol)2 IOException (java.io.IOException)1 InetSocketAddress (java.net.InetSocketAddress)1 ArrayList (java.util.ArrayList)1 FileSystem (org.apache.hadoop.fs.FileSystem)1 HdfsConstants (org.apache.hadoop.hdfs.protocol.HdfsConstants)1 SafeModeAction (org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction)1 JournalProtocol (org.apache.hadoop.hdfs.server.protocol.JournalProtocol)1 Text (org.apache.hadoop.io.Text)1 RefreshCallQueueProtocol (org.apache.hadoop.ipc.RefreshCallQueueProtocol)1 GetUserMappingsProtocol (org.apache.hadoop.tools.GetUserMappingsProtocol)1