
Example 1 with DiskBalancerDataNode

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.

From class Command, method getNodes:

/**
   * Returns a list of DiskBalancer nodes from the cluster. The list is empty
   * when listArg is null or empty; a DiskBalancerException is thrown when any
   * named node cannot be found.
   *
   * @param listArg String file URL or a comma separated list of node names.
   * @return List of DiskBalancerDataNode
   * @throws IOException if the node list cannot be resolved.
   */
protected List<DiskBalancerDataNode> getNodes(String listArg) throws IOException {
    Set<String> nodeNames = null;
    List<DiskBalancerDataNode> nodeList = Lists.newArrayList();
    List<String> invalidNodeList = Lists.newArrayList();
    if ((listArg == null) || listArg.isEmpty()) {
        return nodeList;
    }
    nodeNames = getNodeList(listArg);
    DiskBalancerDataNode node = null;
    if (!nodeNames.isEmpty()) {
        for (String name : nodeNames) {
            node = getNode(name);
            if (node != null) {
                nodeList.add(node);
            } else {
                invalidNodeList.add(name);
            }
        }
    }
    if (!invalidNodeList.isEmpty()) {
        String invalidNodes = StringUtils.join(invalidNodeList.toArray(), ",");
        String warnMsg = String.format("The node(s) '%s' not found. " + "Please make sure that '%s' exists in the cluster.", invalidNodes, invalidNodes);
        throw new DiskBalancerException(warnMsg, DiskBalancerException.Result.INVALID_NODE);
    }
    return nodeList;
}
Also used: DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode)
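
For context, here is a minimal sketch of calling getNodes, modeled on the test in Example 4 below (which can call the protected method because it shares the package); the node names and the surrounding setup are assumptions, not code from the Hadoop source.

// Sketch only: conf and diskBalancerCluster are assumed to be set up as in
// the Example 4 test; the node names are placeholders.
ReportCommand command = new ReportCommand(conf, null);
command.setCluster(diskBalancerCluster);
// listArg may also be a file URL whose contents name the nodes.
List<DiskBalancerDataNode> nodes =
    command.getNodes("datanode-1.example.com,datanode-2.example.com");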

Example 2 with DiskBalancerDataNode

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.

From class DBNameNodeConnector, method getBalancerNodeFromDataNode:

/**
   * Maps the required fields from a DatanodeInfo to a DiskBalancerDataNode.
   *
   * @param nodeInfo the DatanodeInfo reported by the NameNode.
   * @return DiskBalancerDataNode
   */
private DiskBalancerDataNode getBalancerNodeFromDataNode(DatanodeInfo nodeInfo) {
    Preconditions.checkNotNull(nodeInfo);
    DiskBalancerDataNode dbDataNode = new DiskBalancerDataNode(nodeInfo.getDatanodeUuid());
    dbDataNode.setDataNodeIP(nodeInfo.getIpAddr());
    dbDataNode.setDataNodeName(nodeInfo.getHostName());
    dbDataNode.setDataNodePort(nodeInfo.getIpcPort());
    return dbDataNode;
}
Also used: DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode)
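
This mapping runs privately inside the connector for every DataNode the NameNode reports. A hedged sketch of reaching it indirectly through ConnectorFactory (the hdfs:// address is a placeholder; compare the JSON connector used in Example 4):

// Sketch only: ConnectorFactory is expected to return a DBNameNodeConnector
// for hdfs:// URIs, which applies getBalancerNodeFromDataNode to each node.
ClusterConnector connector =
    ConnectorFactory.getCluster(new URI("hdfs://namenode.example.com:8020"), conf);
DiskBalancerCluster cluster = new DiskBalancerCluster(connector);
cluster.readClusterInfo();
for (DiskBalancerDataNode n : cluster.getNodes()) {
    // Fields populated by the mapping above.
    System.out.println(n.getDataNodeName() + ":" + n.getDataNodePort());
}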

Example 3 with DiskBalancerDataNode

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.

From class PlanCommand, method execute:

/**
   * Runs the plan command. The target node can be given as an IP address,
   * a host name, or a DataNode UUID:
   * <p>
   * -plan IP, -plan hostName, -plan DatanodeUUID
   *
   * @param cmd - CommandLine
   * @throws Exception
   */
@Override
public void execute(CommandLine cmd) throws Exception {
    StrBuilder result = new StrBuilder();
    String outputLine = "";
    LOG.debug("Processing Plan Command.");
    Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.PLAN));
    verifyCommandOptions(DiskBalancerCLI.PLAN, cmd);
    if (cmd.getOptionValue(DiskBalancerCLI.PLAN) == null) {
        throw new IllegalArgumentException("A node name is required to create a" + " plan.");
    }
    if (cmd.hasOption(DiskBalancerCLI.BANDWIDTH)) {
        this.bandwidth = Integer.parseInt(cmd.getOptionValue(DiskBalancerCLI.BANDWIDTH));
    }
    if (cmd.hasOption(DiskBalancerCLI.MAXERROR)) {
        this.maxError = Integer.parseInt(cmd.getOptionValue(DiskBalancerCLI.MAXERROR));
    }
    readClusterInfo(cmd);
    String output = null;
    if (cmd.hasOption(DiskBalancerCLI.OUTFILE)) {
        output = cmd.getOptionValue(DiskBalancerCLI.OUTFILE);
    }
    setOutputPath(output);
    // -plan nodename is the command line argument.
    DiskBalancerDataNode node = getNode(cmd.getOptionValue(DiskBalancerCLI.PLAN));
    if (node == null) {
        throw new IllegalArgumentException("Unable to find the specified node. " + cmd.getOptionValue(DiskBalancerCLI.PLAN));
    }
    this.thresholdPercentage = getThresholdPercentage(cmd);
    LOG.debug("threshold Percentage is {}", this.thresholdPercentage);
    setNodesToProcess(node);
    populatePathNames(node);
    NodePlan plan = null;
    List<NodePlan> plans = getCluster().computePlan(this.thresholdPercentage);
    setPlanParams(plans);
    if (plans.size() > 0) {
        plan = plans.get(0);
    }
    try (FSDataOutputStream beforeStream = create(String.format(DiskBalancerCLI.BEFORE_TEMPLATE, cmd.getOptionValue(DiskBalancerCLI.PLAN)))) {
        beforeStream.write(getCluster().toJson().getBytes(StandardCharsets.UTF_8));
    }
    try {
        if (plan != null && plan.getVolumeSetPlans().size() > 0) {
            outputLine = String.format("Writing plan to:");
            recordOutput(result, outputLine);
            final String planFileName = String.format(DiskBalancerCLI.PLAN_TEMPLATE, cmd.getOptionValue(DiskBalancerCLI.PLAN));
            final String planFileFullName = new Path(getOutputPath(), planFileName).toString();
            recordOutput(result, planFileFullName);
            try (FSDataOutputStream planStream = create(planFileName)) {
                planStream.write(plan.toJson().getBytes(StandardCharsets.UTF_8));
            }
        } else {
            outputLine = String.format("No plan generated. DiskBalancing not needed for node: %s" + " threshold used: %s", cmd.getOptionValue(DiskBalancerCLI.PLAN), this.thresholdPercentage);
            recordOutput(result, outputLine);
        }
        if (cmd.hasOption(DiskBalancerCLI.VERBOSE) && plans.size() > 0) {
            printToScreen(plans);
        }
    } catch (Exception e) {
        final String errMsg = "Errors while recording the output of plan command.";
        LOG.error(errMsg, e);
        result.appendln(errMsg);
        result.appendln(Throwables.getStackTraceAsString(e));
    }
    getPrintStream().print(result.toString());
}
Also used: Path (org.apache.hadoop.fs.Path), NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), StrBuilder (org.apache.commons.lang.text.StrBuilder), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode)
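
execute consumes a parsed commons-cli CommandLine. Below is a hedged sketch of driving it programmatically; the parser wiring and the single-argument PlanCommand constructor are assumptions for illustration, not code from DiskBalancerCLI.

// Sketch only: hand-builds a CommandLine for execute(). On a live cluster
// the equivalent entry point is the `hdfs diskbalancer -plan <node>` command.
Configuration conf = new HdfsConfiguration();
Options opts = new Options();
opts.addOption(DiskBalancerCLI.PLAN, true, "creates a plan for the datanode");
opts.addOption(DiskBalancerCLI.BANDWIDTH, true, "max bandwidth in MB/s");
CommandLine cmd = new GnuParser().parse(opts,
    new String[] {"-plan", "datanode-1.example.com", "-bandwidth", "10"});
new PlanCommand(conf).execute(cmd);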

Example 4 with DiskBalancerDataNode

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.

From class TestDiskBalancerCommand, method testGetNodeList:

@Test(timeout = 60000)
public void testGetNodeList() throws Exception {
    ClusterConnector jsonConnector = ConnectorFactory.getCluster(clusterJson, conf);
    DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster(jsonConnector);
    diskBalancerCluster.readClusterInfo();
    int nodeNum = 5;
    StringBuilder listArg = new StringBuilder();
    for (int i = 0; i < nodeNum; i++) {
        listArg.append(diskBalancerCluster.getNodes().get(i).getDataNodeUUID()).append(",");
    }
    ReportCommand command = new ReportCommand(conf, null);
    command.setCluster(diskBalancerCluster);
    List<DiskBalancerDataNode> nodeList = command.getNodes(listArg.toString());
    assertEquals(nodeNum, nodeList.size());
}
Also used: ClusterConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector), DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster), DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)

Example 5 with DiskBalancerDataNode

Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.

From class TestDataModels, method testCreateRandomDataNode:

@Test
public void testCreateRandomDataNode() throws Exception {
    DiskBalancerTestUtil util = new DiskBalancerTestUtil();
    DiskBalancerDataNode node = util.createRandomDataNode(new StorageType[] { StorageType.DISK, StorageType.RAM_DISK }, 10);
    Assert.assertNotNull(node.getNodeDataDensity());
}
Also used: DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode), Test (org.junit.Test)

Aggregations

DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 27
Test (org.junit.Test): 19
DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume): 16
DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster): 13
NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 12
GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner): 11
NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector): 10
DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet): 7
Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step): 5
ClusterConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector): 3
LinkedList (java.util.LinkedList): 2
DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException): 2
URI (java.net.URI): 1
StrBuilder (org.apache.commons.lang.text.StrBuilder): 1
Configuration (org.apache.hadoop.conf.Configuration): 1
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 1
Path (org.apache.hadoop.fs.Path): 1
StorageType (org.apache.hadoop.fs.StorageType): 1
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 1
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 1