
Example 6 with DiskBalancerException

use of org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException in project hadoop by apache.

the class CancelCommand method cancelPlan.

/**
   * Cancels a running plan.
   *
   * @param planData - Plan data.
   * @throws IOException
   */
private void cancelPlan(String planData) throws IOException {
    Preconditions.checkNotNull(planData);
    NodePlan plan = NodePlan.parseJson(planData);
    String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
    Preconditions.checkNotNull(dataNodeAddress);
    ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
    String planHash = DigestUtils.shaHex(planData);
    try {
        dataNode.cancelDiskBalancePlan(planHash);
    } catch (DiskBalancerException ex) {
        LOG.error("Cancelling plan on  {} failed. Result: {}, Message: {}", plan.getNodeName(), ex.getResult().toString(), ex.getMessage());
        throw ex;
    }
}
Also used: NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol), DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException)
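
The planHash sent over the wire above is simply the SHA-1 hex digest of the plan's JSON, so a caller holding only the plan file can recompute it. A minimal sketch, assuming just the commons-codec dependency already used above; the class and method names here are hypothetical, not part of Hadoop:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import org.apache.commons.codec.digest.DigestUtils;

public final class PlanHashUtil {

    private PlanHashUtil() {
    }

    /**
     * Reads a plan JSON file and returns the same SHA-1 hex digest
     * that cancelPlan() computes with DigestUtils.shaHex(planData).
     */
    public static String hashOfPlanFile(String planFile) throws IOException {
        String planData = new String(
                Files.readAllBytes(Paths.get(planFile)), StandardCharsets.UTF_8);
        return DigestUtils.shaHex(planData);
    }
}

Recomputing the digest locally lets a caller check that the plan file on hand matches the plan ID the datanode reports before issuing a cancel.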

Example 7 with DiskBalancerException

use of org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException in project hadoop by apache.

the class CancelCommand method cancelPlanUsingHash.

/**
   * Cancels a running plan.
   * @param nodeAddress - Address of the data node.
   * @param hash - SHA-1 hex hash of the plan (the digest that
   *             DigestUtils.shaHex produces), which can be read from the
   *             datanode using the query status command.
   * @throws IOException
   */
private void cancelPlanUsingHash(String nodeAddress, String hash) throws IOException {
    Preconditions.checkNotNull(nodeAddress);
    Preconditions.checkNotNull(hash);
    ClientDatanodeProtocol dataNode = getDataNodeProxy(nodeAddress);
    try {
        dataNode.cancelDiskBalancePlan(hash);
    } catch (DiskBalancerException ex) {
        LOG.error("Cancelling plan on  {} failed. Result: {}, Message: {}", nodeAddress, ex.getResult().toString(), ex.getMessage());
        throw ex;
    }
}
Also used: ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol), DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException)
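
When the hash is not known in advance, it can be read from the datanode itself: queryDiskBalancerPlan() returns a DiskBalancerWorkStatus whose plan ID (see Example 8 below) is the value this method accepts. A hedged sketch, assuming a ClientDatanodeProtocol proxy has already been obtained; the class and method names here are hypothetical:

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;

public final class CancelCurrentPlan {

    private CancelCurrentPlan() {
    }

    /**
     * Asks the datanode for the ID (hash) of the plan it is currently
     * executing, then cancels that plan by hash.
     */
    public static void cancelRunningPlan(ClientDatanodeProtocol dataNode)
            throws IOException {
        DiskBalancerWorkStatus status = dataNode.queryDiskBalancerPlan();
        String hash = status.getPlanID();
        dataNode.cancelDiskBalancePlan(hash);
    }
}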

Example 8 with DiskBalancerException

use of org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException in project hadoop by apache.

the class QueryCommand method execute.

/**
   * Executes the Client Calls.
   *
   * @param cmd - CommandLine
   */
@Override
public void execute(CommandLine cmd) throws Exception {
    LOG.info("Executing \"query plan\" command.");
    Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.QUERY));
    verifyCommandOptions(DiskBalancerCLI.QUERY, cmd);
    String nodeName = cmd.getOptionValue(DiskBalancerCLI.QUERY);
    Preconditions.checkNotNull(nodeName);
    nodeName = nodeName.trim();
    String nodeAddress = nodeName;
    // If the string is not in host:port format, use the default IPC port.
    if (!nodeName.matches("[^\\:]+:[0-9]{2,5}")) {
        int defaultIPC = NetUtils.createSocketAddr(getConf().getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY, DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
        nodeAddress = nodeName + ":" + defaultIPC;
        LOG.debug("Using default data node port :  {}", nodeAddress);
    }
    ClientDatanodeProtocol dataNode = getDataNodeProxy(nodeAddress);
    try {
        DiskBalancerWorkStatus workStatus = dataNode.queryDiskBalancerPlan();
        System.out.printf("Plan File: %s%nPlan ID: %s%nResult: %s%n", workStatus.getPlanFile(), workStatus.getPlanID(), workStatus.getResult().toString());
        if (cmd.hasOption(DiskBalancerCLI.VERBOSE)) {
            System.out.printf("%s", workStatus.currentStateString());
        }
    } catch (DiskBalancerException ex) {
        LOG.error("Query plan failed. ex: {}", ex);
        throw ex;
    }
}
Also used: DiskBalancerWorkStatus (org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus), ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol), DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException)
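
The interesting step in execute() is the address normalization: a bare hostname is widened to host:port using the configured datanode IPC address. That step can be isolated into a small helper, sketched here under the assumption that a Configuration object is available; the class and method names are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.net.NetUtils;

public final class NodeAddressUtil {

    private NodeAddressUtil() {
    }

    /**
     * Returns nodeName unchanged when it is already in host:port form;
     * otherwise appends the port from dfs.datanode.ipc.address (or its
     * default), mirroring the normalization in QueryCommand.execute().
     */
    public static String withDefaultIpcPort(String nodeName, Configuration conf) {
        if (nodeName.matches("[^\\:]+:[0-9]{2,5}")) {
            return nodeName;
        }
        int defaultIpc = NetUtils.createSocketAddr(
                conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
                        DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
        return nodeName + ":" + defaultIpc;
    }
}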

Example 9 with DiskBalancerException

use of org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException in project hadoop by apache.

the class ReportCommand method handleNodeReport.

private void handleNodeReport(final CommandLine cmd, StrBuilder result, final String nodeFormat, final String volumeFormat) throws Exception {
    String outputLine = "";
    /*
     * Get the value that identifies the DataNode(s) from the command
     * line; it can be a UUID, an IP address, or a host name.
     */
    final String nodeVal = cmd.getOptionValue(DiskBalancerCLI.NODE);
    if (StringUtils.isBlank(nodeVal)) {
        outputLine = "The value for '-node' is neither specified or empty.";
        recordOutput(result, outputLine);
    } else {
        /*
         * Report volume information for the specified DataNode(s).
         */
        outputLine = String.format("Reporting volume information for DataNode(s) '%s'.", nodeVal);
        recordOutput(result, outputLine);
        List<DiskBalancerDataNode> dbdns = Lists.newArrayList();
        try {
            dbdns = getNodes(nodeVal);
        } catch (DiskBalancerException e) {
            // If nodeVal contains any invalid nodes, getNodes() throws
            // this exception; record the message and stop.
            recordOutput(result, e.getMessage());
            return;
        }
        if (!dbdns.isEmpty()) {
            for (DiskBalancerDataNode node : dbdns) {
                recordNodeReport(result, node, nodeFormat, volumeFormat);
                result.append(System.lineSeparator());
            }
        }
    }
}
Also used: DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode)
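
The contract handleNodeReport() relies on, that getNodes() either returns every requested node or throws DiskBalancerException, can be sketched as a standalone lookup. The map-based resolution below is an illustration, not the actual implementation, and the INVALID_NODE result code is an assumption about the Result enum:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;

public final class NodeLookup {

    private NodeLookup() {
    }

    /**
     * Resolves a comma-separated list of node identifiers (UUID, IP, or
     * host name) against a map of known nodes. Throws when any entry is
     * unknown, mirroring how handleNodeReport() expects getNodes() to fail.
     */
    public static List<DiskBalancerDataNode> resolve(
            String nodeVal, Map<String, DiskBalancerDataNode> knownNodes)
            throws DiskBalancerException {
        List<DiskBalancerDataNode> matches = new ArrayList<>();
        List<String> invalid = new ArrayList<>();
        for (String key : nodeVal.split(",")) {
            DiskBalancerDataNode node = knownNodes.get(key.trim());
            if (node == null) {
                invalid.add(key.trim());
            } else {
                matches.add(node);
            }
        }
        if (!invalid.isEmpty()) {
            // INVALID_NODE is assumed to be a member of
            // DiskBalancerException.Result.
            throw new DiskBalancerException("Nodes not found: " + invalid,
                    DiskBalancerException.Result.INVALID_NODE);
        }
        return matches;
    }
}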

Aggregations

DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException): 9 uses
ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol): 4 uses
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 3 uses
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 2 uses
IOException (java.io.IOException): 1 use
HashMap (java.util.HashMap): 1 use
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 1 use
DiskBalancerWorkStatus (org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus): 1 use
FsDatasetSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi): 1 use
FsVolumeReferences (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi.FsVolumeReferences): 1 use
FsVolumeSpi (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi): 1 use
Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step): 1 use
AutoCloseableLock (org.apache.hadoop.util.AutoCloseableLock): 1 use