
Example 16 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

The class QueryCommand, method execute.

/**
   * Executes the Client Calls.
   *
   * @param cmd - CommandLine
   */
@Override
public void execute(CommandLine cmd) throws Exception {
    LOG.info("Executing \"query plan\" command.");
    Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.QUERY));
    verifyCommandOptions(DiskBalancerCLI.QUERY, cmd);
    String nodeName = cmd.getOptionValue(DiskBalancerCLI.QUERY);
    Preconditions.checkNotNull(nodeName);
    nodeName = nodeName.trim();
    String nodeAddress = nodeName;
    // if the string is not name:port format use the default port.
    if (!nodeName.matches("[^\\:]+:[0-9]{2,5}")) {
        int defaultIPC = NetUtils.createSocketAddr(getConf().getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
        nodeAddress = nodeName + ":" + defaultIPC;
        LOG.debug("Using default datanode port: {}", nodeAddress);
    }
    ClientDatanodeProtocol dataNode = getDataNodeProxy(nodeAddress);
    try {
        DiskBalancerWorkStatus workStatus = dataNode.queryDiskBalancerPlan();
        System.out.printf("Plan File: %s%nPlan ID: %s%nResult: %s%n", workStatus.getPlanFile(), workStatus.getPlanID(), workStatus.getResult().toString());
        if (cmd.hasOption(DiskBalancerCLI.VERBOSE)) {
            System.out.printf("%s", workStatus.currentStateString());
        }
    } catch (DiskBalancerException ex) {
        // Pass the exception as the final argument (no placeholder) so slf4j logs the stack trace.
        LOG.error("Query plan failed.", ex);
        throw ex;
    }
}
Also used: DiskBalancerWorkStatus (org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol), DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException)
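
queryDiskBalancerPlan() returns the current DiskBalancerWorkStatus snapshot rather than blocking until the plan finishes, so a caller that wants to wait has to poll. Below is a minimal, hypothetical sketch of such a loop; it reuses the proxy-creation pattern from Example 19 further down, and the datanode address is a placeholder. The same query is exposed on the command line as hdfs diskbalancer -query <datanode>, with -v for the verbose state string.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;
import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Result;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;

public class QueryPlanPoller {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Placeholder datanode IPC address; substitute a real host:port.
        ClientDatanodeProtocol dn = DFSUtilClient.createClientDatanodeProtocolProxy(
            NetUtils.createSocketAddr("dn1.example.com:9867"),
            UserGroupInformation.getCurrentUser(), conf,
            NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
        try {
            // Re-query every five seconds while the plan is still running.
            DiskBalancerWorkStatus status = dn.queryDiskBalancerPlan();
            while (status.getResult() == Result.PLAN_UNDER_PROGRESS) {
                Thread.sleep(5000L);
                status = dn.queryDiskBalancerPlan();
            }
            System.out.println("Final result: " + status.getResult());
        } finally {
            // Release the RPC connection; the CLI skips this because it exits immediately.
            RPC.stopProxy(dn);
        }
    }
}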

Example 17 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

The class DFSAdmin, method refreshNamenodes.

private int refreshNamenodes(String[] argv, int i) throws IOException {
    String datanode = argv[i];
    ClientDatanodeProtocol refreshProtocol = getDataNodeProxy(datanode);
    refreshProtocol.refreshNamenodes();
    return 0;
}
Also used: ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol)
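
This is the handler behind hdfs dfsadmin -refreshNamenodes <datanode-host:ipc-port>: the datanode re-reads its configuration files, stops serving block pools for namenodes that were removed, and starts serving any that were added, so new federated namespaces can be picked up without restarting the datanode.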

Example 18 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

The class DFSAdmin, method getDatanodeInfo.

private int getDatanodeInfo(String[] argv, int i) throws IOException {
    ClientDatanodeProtocol dnProxy = getDataNodeProxy(argv[i]);
    try {
        DatanodeLocalInfo dnInfo = dnProxy.getDatanodeInfo();
        System.out.println(dnInfo.getDatanodeLocalReport());
    } catch (IOException ioe) {
        throw new IOException("Datanode unreachable. " + ioe, ioe);
    }
    return 0;
}
Also used: IOException (java.io.IOException), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol), DatanodeLocalInfo (org.apache.hadoop.hdfs.protocol.DatanodeLocalInfo)
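
On the command line this handler backs hdfs dfsadmin -getDatanodeInfo <datanode-host:ipc-port>. The preformatted report from getDatanodeLocalReport() is convenient for printing, but DatanodeLocalInfo also exposes its fields individually. A small hypothetical fragment, assuming a dnProxy obtained the same way as above:

DatanodeLocalInfo info = dnProxy.getDatanodeInfo();
System.out.println("software version: " + info.getSoftwareVersion());
System.out.println("config version:   " + info.getConfigVersion());
System.out.println("uptime (seconds): " + info.getUptime());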

Example 19 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

The class DFSAdmin, method getDataNodeProxy.

private ClientDatanodeProtocol getDataNodeProxy(String datanode) throws IOException {
    InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
    // Get the current configuration
    Configuration conf = getConf();
    // For datanode proxy the server principal should be DN's one.
    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));
    // Create the client
    ClientDatanodeProtocol dnProtocol = DFSUtilClient.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf, NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
    return dnProtocol;
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), InetSocketAddress (java.net.InetSocketAddress), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol)
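
Two details of this factory are easy to miss: it mutates the shared Configuration so that secure RPC expects the datanode's Kerberos principal rather than the namenode's, and none of the call sites on this page ever release the returned proxy, which is harmless in a CLI process that exits immediately but leaks connections in long-running code. A hedged sketch of the cleanup pattern, reusing the same getDataNodeProxy helper with a placeholder address:

// Release the underlying RPC connection once the call completes.
ClientDatanodeProtocol proxy = getDataNodeProxy("dn1.example.com:9867");
try {
    proxy.refreshNamenodes();
} finally {
    RPC.stopProxy(proxy);  // org.apache.hadoop.ipc.RPC
}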

Example 20 with ClientDatanodeProtocol

Use of org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol in project hadoop by apache.

The class DFSAdmin, method triggerBlockReport.

public int triggerBlockReport(String[] argv) throws IOException {
    List<String> args = new LinkedList<String>();
    for (int j = 1; j < argv.length; j++) {
        args.add(argv[j]);
    }
    boolean incremental = StringUtils.popOption("-incremental", args);
    String hostPort = StringUtils.popFirstNonOption(args);
    if (hostPort == null) {
        System.err.println("You must specify a host:port pair.");
        return 1;
    }
    if (!args.isEmpty()) {
        System.err.print("Can't understand arguments: " + Joiner.on(" ").join(args) + "\n");
        return 1;
    }
    ClientDatanodeProtocol dnProxy = getDataNodeProxy(hostPort);
    try {
        dnProxy.triggerBlockReport(new BlockReportOptions.Factory().setIncremental(incremental).build());
    } catch (IOException e) {
        System.err.println("triggerBlockReport error: " + e);
        return 1;
    }
    System.out.println("Triggering " + (incremental ? "an incremental " : "a full ") + "block report on " + hostPort + ".");
    return 0;
}
Also used: BlockReportOptions (org.apache.hadoop.hdfs.client.BlockReportOptions), IOException (java.io.IOException), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol), LinkedList (java.util.LinkedList)
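
This handler backs hdfs dfsadmin -triggerBlockReport [-incremental] <datanode-host:ipc-port>; without -incremental the datanode sends a full block report to the namenode. The sketch below is a hypothetical extension, not Hadoop source: it triggers an incremental report on every live datanode by combining the proxy pattern from Example 19 with DistributedFileSystem.getDataNodeStats().

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.client.BlockReportOptions;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;

public class TriggerAllBlockReports {
    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Assumes fs.defaultFS points at an HDFS cluster.
        DistributedFileSystem fs = (DistributedFileSystem) FileSystem.get(conf);
        BlockReportOptions options =
            new BlockReportOptions.Factory().setIncremental(true).build();
        for (DatanodeInfo dn : fs.getDataNodeStats()) {
            // Build each datanode's IPC address from its reported IP and IPC port.
            InetSocketAddress addr =
                new InetSocketAddress(dn.getIpAddr(), dn.getIpcPort());
            ClientDatanodeProtocol proxy = DFSUtilClient.createClientDatanodeProtocolProxy(
                addr, UserGroupInformation.getCurrentUser(), conf,
                NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
            try {
                proxy.triggerBlockReport(options);
            } finally {
                RPC.stopProxy(proxy);
            }
        }
        fs.close();
    }
}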

Aggregations

ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol): 22
IOException (java.io.IOException): 7
InetSocketAddress (java.net.InetSocketAddress): 5
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 5
Configuration (org.apache.hadoop.conf.Configuration): 4
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 4
DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException): 4
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 3
Server (org.apache.hadoop.ipc.Server): 3
Test (org.junit.Test): 3
LinkedList (java.util.LinkedList): 2
Path (org.apache.hadoop.fs.Path): 2
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 2
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2
Block (org.apache.hadoop.hdfs.protocol.Block): 2
BlockLocalPathInfo (org.apache.hadoop.hdfs.protocol.BlockLocalPathInfo): 2
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 2
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2
BlockTokenIdentifier (org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier): 2
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 2