Example 1 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

From the class ExecuteCommand, method submitPlan:

/**
   * Submits plan to a given data node.
   *
   * @param planFile - Plan file name
   * @param planData - Plan data in json format
   * @throws IOException
   */
private void submitPlan(final String planFile, final String planData) throws IOException {
    Preconditions.checkNotNull(planData);
    NodePlan plan = NodePlan.parseJson(planData);
    String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
    Preconditions.checkNotNull(dataNodeAddress);
    ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
    String planHash = DigestUtils.shaHex(planData);
    try {
        // TODO : Support skipping date check.
        dataNode.submitDiskBalancerPlan(planHash, DiskBalancerCLI.PLAN_VERSION, planFile, planData, false);
    } catch (DiskBalancerException ex) {
        LOG.error("Submitting plan on  {} failed. Result: {}, Message: {}", plan.getNodeName(), ex.getResult().toString(), ex.getMessage());
        throw ex;
    }
}
Also used: NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) ClientDatanodeProtocol(org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol) DiskBalancerException(org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException)
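For context, the planData string submitted above is nothing more than the serialized NodePlan. Below is a minimal sketch of that round trip, using only the calls visible in this example (toJson, parseJson, getNodeName, getPort); the two-argument constructor and the sample hostname/port are assumptions, not taken from this page.

import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;

public class NodePlanRoundTrip {
    public static void main(String[] args) throws java.io.IOException {
        // Assumed constructor (node name, RPC port); verify against your Hadoop version.
        NodePlan original = new NodePlan("datanode-1.example.com", 9867);
        // toJson() produces the same planData string that submitPlan() receives.
        String json = original.toJson();
        // parseJson() is the same call submitPlan() uses to recover the plan.
        NodePlan parsed = NodePlan.parseJson(json);
        // The datanode address is rebuilt exactly as submitPlan() does.
        String address = parsed.getNodeName() + ":" + parsed.getPort();
        System.out.println("Plan targets " + address);
    }
}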

Example 2 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

From the class PlanCommand, method execute:

/**
   * Runs the plan command. This command can be run with various options like
   * <p>
   * -plan -node IP, -plan -node hostName, or -plan -node DatanodeUUID
   *
   * @param cmd - CommandLine
   * @throws Exception
   */
@Override
public void execute(CommandLine cmd) throws Exception {
    StrBuilder result = new StrBuilder();
    String outputLine = "";
    LOG.debug("Processing Plan Command.");
    Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.PLAN));
    verifyCommandOptions(DiskBalancerCLI.PLAN, cmd);
    if (cmd.getOptionValue(DiskBalancerCLI.PLAN) == null) {
        throw new IllegalArgumentException("A node name is required to create a" + " plan.");
    }
    if (cmd.hasOption(DiskBalancerCLI.BANDWIDTH)) {
        this.bandwidth = Integer.parseInt(cmd.getOptionValue(DiskBalancerCLI.BANDWIDTH));
    }
    if (cmd.hasOption(DiskBalancerCLI.MAXERROR)) {
        this.maxError = Integer.parseInt(cmd.getOptionValue(DiskBalancerCLI.MAXERROR));
    }
    readClusterInfo(cmd);
    String output = null;
    if (cmd.hasOption(DiskBalancerCLI.OUTFILE)) {
        output = cmd.getOptionValue(DiskBalancerCLI.OUTFILE);
    }
    setOutputPath(output);
    // -plan nodename is the command line argument.
    DiskBalancerDataNode node = getNode(cmd.getOptionValue(DiskBalancerCLI.PLAN));
    if (node == null) {
        throw new IllegalArgumentException("Unable to find the specified node. " + cmd.getOptionValue(DiskBalancerCLI.PLAN));
    }
    this.thresholdPercentage = getThresholdPercentage(cmd);
    LOG.debug("threshold Percentage is {}", this.thresholdPercentage);
    setNodesToProcess(node);
    populatePathNames(node);
    NodePlan plan = null;
    List<NodePlan> plans = getCluster().computePlan(this.thresholdPercentage);
    setPlanParams(plans);
    if (plans.size() > 0) {
        plan = plans.get(0);
    }
    try (FSDataOutputStream beforeStream = create(String.format(DiskBalancerCLI.BEFORE_TEMPLATE, cmd.getOptionValue(DiskBalancerCLI.PLAN)))) {
        beforeStream.write(getCluster().toJson().getBytes(StandardCharsets.UTF_8));
    }
    try {
        if (plan != null && plan.getVolumeSetPlans().size() > 0) {
            outputLine = String.format("Writing plan to:");
            recordOutput(result, outputLine);
            final String planFileName = String.format(DiskBalancerCLI.PLAN_TEMPLATE, cmd.getOptionValue(DiskBalancerCLI.PLAN));
            final String planFileFullName = new Path(getOutputPath(), planFileName).toString();
            recordOutput(result, planFileFullName);
            try (FSDataOutputStream planStream = create(planFileName)) {
                planStream.write(plan.toJson().getBytes(StandardCharsets.UTF_8));
            }
        } else {
            outputLine = String.format("No plan generated. DiskBalancing not needed for node: %s" + " threshold used: %s", cmd.getOptionValue(DiskBalancerCLI.PLAN), this.thresholdPercentage);
            recordOutput(result, outputLine);
        }
        if (cmd.hasOption(DiskBalancerCLI.VERBOSE) && plans.size() > 0) {
            printToScreen(plans);
        }
    } catch (Exception e) {
        final String errMsg = "Errors while recording the output of plan command.";
        LOG.error(errMsg, e);
        result.appendln(errMsg);
        result.appendln(Throwables.getStackTraceAsString(e));
    }
    getPrintStream().print(result.toString());
}
Also used: Path(org.apache.hadoop.fs.Path) NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) StrBuilder(org.apache.commons.lang.text.StrBuilder) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode)
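The planning work that feeds execute() happens in computePlan(). Below is a minimal sketch of that step in isolation, modeled on the in-memory NullConnector pattern listed under Aggregations; the 10.0 percent threshold is an illustrative assumption, and with no nodes registered the plan list simply comes back empty.

import java.util.List;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector;
import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;

public class PlanSketch {
    public static void main(String[] args) throws Exception {
        // NullConnector keeps hand-built cluster state in memory; a real run
        // reads live cluster info instead (see readClusterInfo(cmd) above).
        DiskBalancerCluster cluster = new DiskBalancerCluster(new NullConnector());
        cluster.readClusterInfo();
        // 10.0 is an assumed threshold percentage, mirroring thresholdPercentage above.
        List<NodePlan> plans = cluster.computePlan(10.0);
        // As in execute(): an empty list means no node needs balancing.
        NodePlan plan = plans.isEmpty() ? null : plans.get(0);
        System.out.println(plan == null ? "No plan needed." : plan.toJson());
    }
}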

Example 3 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

From the class PlanCommand, method printToScreen:

/**
   * Prints a quick summary of the plan to screen.
   *
   * @param plans - List of NodePlans.
   */
private static void printToScreen(List<NodePlan> plans) {
    System.out.println("\nPlan :\n");
    System.out.println(StringUtils.repeat("=", 80));
    System.out.println(StringUtils.center("Source Disk", 30) + StringUtils.center("Dest.Disk", 30) + StringUtils.center("Size", 10) + StringUtils.center("Type", 10));
    for (NodePlan plan : plans) {
        for (Step step : plan.getVolumeSetPlans()) {
            System.out.println(String.format("%s %s %s %s", StringUtils.center(step.getSourceVolume().getPath(), 30), StringUtils.center(step.getDestinationVolume().getPath(), 30), StringUtils.center(step.getSizeString(step.getBytesToMove()), 10), StringUtils.center(step.getDestinationVolume().getStorageType(), 10)));
        }
    }
    System.out.println(StringUtils.repeat("=", 80));
}
Also used: NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) Step(org.apache.hadoop.hdfs.server.diskbalancer.planner.Step)
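printToScreen() walks the same two-level structure, plans then steps, that every NodePlan consumer traverses. Below is a sketch of a compact variant that totals the bytes scheduled to move instead of printing a table; it uses only getters already shown in this example.

import java.util.List;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;

// Sums the bytes across all steps of all plans, the same traversal
// that printToScreen() performs.
private static long totalBytesToMove(List<NodePlan> plans) {
    long total = 0;
    for (NodePlan plan : plans) {
        for (Step step : plan.getVolumeSetPlans()) {
            total += step.getBytesToMove();
        }
    }
    return total;
}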

Example 4 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

From the class TestDiskBalancer, method testBalanceDataBetweenMultiplePairsOfVolumes:

@Test
public void testBalanceDataBetweenMultiplePairsOfVolumes() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
    final int blockCount = 1000;
    final int blockSize = 1024;
    // Create 3 disks: data will move from disk0->disk1 and disk0->disk2,
    // giving one plan with two steps.
    final int diskCount = 3;
    final int dataNodeCount = 1;
    final int dataNodeIndex = 0;
    final int sourceDiskIndex = 0;
    final long cap = blockSize * 2L * blockCount;
    MiniDFSCluster cluster = new ClusterBuilder().setBlockCount(blockCount).setBlockSize(blockSize).setDiskCount(diskCount).setNumDatanodes(dataNodeCount).setConf(conf).setCapacities(new long[] { cap, cap, cap }).build();
    try {
        DataMover dataMover = new DataMover(cluster, dataNodeIndex, sourceDiskIndex, conf, blockSize, blockCount);
        dataMover.moveDataToSourceDisk();
        NodePlan plan = dataMover.generatePlan();
        // With 3 disks, the plan should move data to both destination disks,
        // so we must have 2 plan steps.
        assertEquals(2, plan.getVolumeSetPlans().size());
        dataMover.executePlan(plan);
        dataMover.verifyPlanExectionDone();
        dataMover.verifyAllVolumesHaveData();
        dataMover.verifyTolerance(plan, 0, sourceDiskIndex, 10);
    } finally {
        cluster.shutdown();
    }
}
Also used: NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) Configuration(org.apache.hadoop.conf.Configuration) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Test(org.junit.Test)
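The assertEquals call above is the test's key invariant: one overloaded source disk and two destinations yield exactly two steps. Below is a hypothetical JUnit helper sketching that check; the helper name is invented here, while the step and volume accessors come from Example 3.

import static org.junit.Assert.assertEquals;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;

// Hypothetical helper: asserts the plan has the expected number of steps
// and that every step drains the same source volume.
private static void assertMovesFromSource(NodePlan plan, String sourcePath, int expectedSteps) {
    assertEquals(expectedSteps, plan.getVolumeSetPlans().size());
    for (Step step : plan.getVolumeSetPlans()) {
        assertEquals(sourcePath, step.getSourceVolume().getPath());
    }
}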

Example 5 with NodePlan

Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.

From the class TestDiskBalancerRPC, method testgetDiskBalancerBandwidth:

@Test
public void testgetDiskBalancerBandwidth() throws Exception {
    RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
    DataNode dataNode = rpcTestHelper.getDataNode();
    String planHash = rpcTestHelper.getPlanHash();
    int planVersion = rpcTestHelper.getPlanVersion();
    NodePlan plan = rpcTestHelper.getPlan();
    dataNode.submitDiskBalancerPlan(planHash, planVersion, PLAN_FILE, plan.toJson(), false);
    String bandwidthString = dataNode.getDiskBalancerSetting(DiskBalancerConstants.DISKBALANCER_BANDWIDTH);
    long value = Long.decode(bandwidthString);
    Assert.assertEquals(10L, value);
}
Also used: NodePlan(org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan) DataNode(org.apache.hadoop.hdfs.server.datanode.DataNode) DiskBalancerDataNode(org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode) Test(org.junit.Test)
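submitDiskBalancerPlan() only enqueues work; the datanode executes the plan asynchronously. Below is a hedged sketch of polling for completion with queryDiskBalancerPlan(), whose DiskBalancerWorkStatus type appears under Aggregations; the one-second interval and the PLAN_DONE comparison should be verified against your Hadoop version.

import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;

// Hedged sketch: poll the datanode until the submitted plan finishes.
private static void waitForPlanDone(DataNode dataNode) throws Exception {
    DiskBalancerWorkStatus status = dataNode.queryDiskBalancerPlan();
    while (status.getResult() != DiskBalancerWorkStatus.Result.PLAN_DONE) {
        // Assumed polling interval; production code would also bound retries.
        Thread.sleep(1000L);
        status = dataNode.queryDiskBalancerPlan();
    }
}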

Aggregations

NodePlan (org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan): 41
Test (org.junit.Test): 33
DiskBalancerDataNode (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode): 21
DiskBalancerCluster (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster): 13
NullConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.NullConnector): 11
GreedyPlanner (org.apache.hadoop.hdfs.server.diskbalancer.planner.GreedyPlanner): 11
DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume): 10
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 9
DiskBalancer (org.apache.hadoop.hdfs.server.datanode.DiskBalancer): 8
Step (org.apache.hadoop.hdfs.server.diskbalancer.planner.Step): 8
Configuration (org.apache.hadoop.conf.Configuration): 3
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 3
DiskBalancerWorkStatus (org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus): 3
DiskBalancerException (org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException): 3
IOException (java.io.IOException): 2
URI (java.net.URI): 2
ClientDatanodeProtocol (org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol): 2
ClusterConnector (org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector): 2
DiskBalancerVolumeSet (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet): 2