Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.
The class ExecuteCommand, method submitPlan.
/**
* Submits plan to a given data node.
*
* @param planFile - Plan file name
* @param planData - Plan data in json format
* @throws IOException
*/
private void submitPlan(final String planFile, final String planData)
    throws IOException {
  Preconditions.checkNotNull(planData);
  NodePlan plan = NodePlan.parseJson(planData);
  String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
  Preconditions.checkNotNull(dataNodeAddress);
  ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
  String planHash = DigestUtils.shaHex(planData);
  try {
    // TODO : Support skipping date check.
    dataNode.submitDiskBalancerPlan(planHash, DiskBalancerCLI.PLAN_VERSION,
        planFile, planData, false);
  } catch (DiskBalancerException ex) {
    LOG.error("Submitting plan on {} failed. Result: {}, Message: {}",
        plan.getNodeName(), ex.getResult().toString(), ex.getMessage());
    throw ex;
  }
}
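For orientation, here is a minimal, hypothetical sketch of how the plan JSON could be loaded and handed to submitPlan. The helper name and the local-file assumption are mine; the real command reads the plan file through a Hadoop FileSystem before calling this method.

// Hypothetical helper, not part of ExecuteCommand: read a plan file from
// local disk and submit it via the submitPlan method shown above.
private void submitPlanFromLocalFile(String planFileName) throws IOException {
  // Assumption: the plan JSON is small enough to buffer in memory.
  String planData = new String(
      java.nio.file.Files.readAllBytes(java.nio.file.Paths.get(planFileName)),
      StandardCharsets.UTF_8);
  submitPlan(planFileName, planData);
}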
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.
The class PlanCommand, method execute.
/**
* Runs the plan command. This command can be run with various options like
* <p>
* -plan -node IP -plan -node hostName -plan -node DatanodeUUID
*
* @param cmd - CommandLine
* @throws Exception
*/
@Override
public void execute(CommandLine cmd) throws Exception {
  StrBuilder result = new StrBuilder();
  String outputLine = "";
  LOG.debug("Processing Plan Command.");
  Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.PLAN));
  verifyCommandOptions(DiskBalancerCLI.PLAN, cmd);
  if (cmd.getOptionValue(DiskBalancerCLI.PLAN) == null) {
    throw new IllegalArgumentException("A node name is required to create a"
        + " plan.");
  }
  if (cmd.hasOption(DiskBalancerCLI.BANDWIDTH)) {
    this.bandwidth = Integer.parseInt(
        cmd.getOptionValue(DiskBalancerCLI.BANDWIDTH));
  }
  if (cmd.hasOption(DiskBalancerCLI.MAXERROR)) {
    this.maxError = Integer.parseInt(
        cmd.getOptionValue(DiskBalancerCLI.MAXERROR));
  }
  readClusterInfo(cmd);
  String output = null;
  if (cmd.hasOption(DiskBalancerCLI.OUTFILE)) {
    output = cmd.getOptionValue(DiskBalancerCLI.OUTFILE);
  }
  setOutputPath(output);
  // -plan nodename is the command line argument.
  DiskBalancerDataNode node = getNode(cmd.getOptionValue(DiskBalancerCLI.PLAN));
  if (node == null) {
    throw new IllegalArgumentException("Unable to find the specified node. "
        + cmd.getOptionValue(DiskBalancerCLI.PLAN));
  }
  this.thresholdPercentage = getThresholdPercentage(cmd);
  LOG.debug("threshold Percentage is {}", this.thresholdPercentage);
  setNodesToProcess(node);
  populatePathNames(node);
  NodePlan plan = null;
  List<NodePlan> plans = getCluster().computePlan(this.thresholdPercentage);
  setPlanParams(plans);
  if (plans.size() > 0) {
    plan = plans.get(0);
  }
  try (FSDataOutputStream beforeStream = create(String.format(
      DiskBalancerCLI.BEFORE_TEMPLATE,
      cmd.getOptionValue(DiskBalancerCLI.PLAN)))) {
    beforeStream.write(getCluster().toJson().getBytes(StandardCharsets.UTF_8));
  }
  try {
    if (plan != null && plan.getVolumeSetPlans().size() > 0) {
      outputLine = String.format("Writing plan to:");
      recordOutput(result, outputLine);
      final String planFileName = String.format(
          DiskBalancerCLI.PLAN_TEMPLATE,
          cmd.getOptionValue(DiskBalancerCLI.PLAN));
      final String planFileFullName =
          new Path(getOutputPath(), planFileName).toString();
      recordOutput(result, planFileFullName);
      try (FSDataOutputStream planStream = create(planFileName)) {
        planStream.write(plan.toJson().getBytes(StandardCharsets.UTF_8));
      }
    } else {
      outputLine = String.format(
          "No plan generated. DiskBalancing not needed for node: %s"
              + " threshold used: %s",
          cmd.getOptionValue(DiskBalancerCLI.PLAN), this.thresholdPercentage);
      recordOutput(result, outputLine);
    }
    if (cmd.hasOption(DiskBalancerCLI.VERBOSE) && plans.size() > 0) {
      printToScreen(plans);
    }
  } catch (Exception e) {
    final String errMsg = "Errors while recording the output of plan command.";
    LOG.error(errMsg, e);
    result.appendln(errMsg);
    result.appendln(Throwables.getStackTraceAsString(e));
  }
  getPrintStream().print(result.toString());
}
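As a quick orientation, the sketch below drives execute() directly with a hand-built commons-cli CommandLine. This is an assumption-laden illustration, not the real entry point: in practice hdfs diskbalancer -plan <node> goes through DiskBalancerCLI, which registers the options and builds the CommandLine itself. It also assumes commons-cli 1.3+ (Option.builder / DefaultParser) and a PlanCommand constructor that takes a Configuration.

// Illustrative only; see the assumptions above.
public static void runPlanForNode(String nodeName) throws Exception {
  Options options = new Options();
  options.addOption(Option.builder()
      .longOpt(DiskBalancerCLI.PLAN)
      .hasArg()
      .desc("creates a plan for the given datanode")
      .build());
  CommandLine cmd = new DefaultParser()
      .parse(options, new String[] {"-" + DiskBalancerCLI.PLAN, nodeName});
  new PlanCommand(new HdfsConfiguration()).execute(cmd);
}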
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.
The class PlanCommand, method printToScreen.
/**
* Prints a quick summary of the plan to screen.
*
* @param plans - List of NodePlans.
*/
private static void printToScreen(List<NodePlan> plans) {
  System.out.println("\nPlan :\n");
  System.out.println(StringUtils.repeat("=", 80));
  System.out.println(
      StringUtils.center("Source Disk", 30)
          + StringUtils.center("Dest.Disk", 30)
          + StringUtils.center("Size", 10)
          + StringUtils.center("Type", 10));
  for (NodePlan plan : plans) {
    for (Step step : plan.getVolumeSetPlans()) {
      System.out.println(String.format("%s %s %s %s",
          StringUtils.center(step.getSourceVolume().getPath(), 30),
          StringUtils.center(step.getDestinationVolume().getPath(), 30),
          StringUtils.center(step.getSizeString(step.getBytesToMove()), 10),
          StringUtils.center(step.getDestinationVolume().getStorageType(), 10)));
    }
  }
  System.out.println(StringUtils.repeat("=", 80));
}
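A small companion sketch: summing the total bytes a set of plans would move, using only the Step accessors that printToScreen already relies on (getVolumeSetPlans and getBytesToMove). The helper itself is hypothetical and not part of PlanCommand.

// Hypothetical helper: total bytes moved across all steps of all plans.
private static long totalBytesToMove(List<NodePlan> plans) {
  long total = 0;
  for (NodePlan plan : plans) {
    for (Step step : plan.getVolumeSetPlans()) {
      total += step.getBytesToMove();
    }
  }
  return total;
}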
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.
The class TestDiskBalancer, method testBalanceDataBetweenMultiplePairsOfVolumes.
@Test
public void testBalanceDataBetweenMultiplePairsOfVolumes() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setBoolean(DFSConfigKeys.DFS_DISK_BALANCER_ENABLED, true);
  final int blockCount = 1000;
  final int blockSize = 1024;
  // Create 3 disks, which means we expect 2 plan steps:
  // move data from disk0->disk1 and disk0->disk2.
  final int diskCount = 3;
  final int dataNodeCount = 1;
  final int dataNodeIndex = 0;
  final int sourceDiskIndex = 0;
  final long cap = blockSize * 2L * blockCount;
  MiniDFSCluster cluster = new ClusterBuilder()
      .setBlockCount(blockCount)
      .setBlockSize(blockSize)
      .setDiskCount(diskCount)
      .setNumDatanodes(dataNodeCount)
      .setConf(conf)
      .setCapacities(new long[] { cap, cap, cap })
      .build();
  try {
    DataMover dataMover = new DataMover(cluster, dataNodeIndex,
        sourceDiskIndex, conf, blockSize, blockCount);
    dataMover.moveDataToSourceDisk();
    NodePlan plan = dataMover.generatePlan();
    // With 3 disks the plan should move data to both destination disks,
    // so we must have 2 plan steps.
    assertEquals(plan.getVolumeSetPlans().size(), 2);
    dataMover.executePlan(plan);
    dataMover.verifyPlanExectionDone();
    dataMover.verifyAllVolumesHaveData();
    dataMover.verifyTolerance(plan, 0, sourceDiskIndex, 10);
  } finally {
    cluster.shutdown();
  }
}
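Since the CLI ships plans to the datanode as JSON (see ExecuteCommand.submitPlan above), a check along the following lines could confirm that a generated plan survives the toJson/parseJson round trip. This is a hypothetical sketch, not part of the Hadoop test suite, and it assumes toJson/parseJson may throw IOException.

// Hypothetical assertion helper: a plan should survive the JSON round trip.
private static void assertPlanSurvivesJsonRoundTrip(NodePlan plan)
    throws IOException {
  NodePlan parsed = NodePlan.parseJson(plan.toJson());
  assertEquals(plan.getVolumeSetPlans().size(),
      parsed.getVolumeSetPlans().size());
}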
Use of org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan in project hadoop by apache.
The class TestDiskBalancerRPC, method testgetDiskBalancerBandwidth.
@Test
public void testgetDiskBalancerBandwidth() throws Exception {
  RpcTestHelper rpcTestHelper = new RpcTestHelper().invoke();
  DataNode dataNode = rpcTestHelper.getDataNode();
  String planHash = rpcTestHelper.getPlanHash();
  int planVersion = rpcTestHelper.getPlanVersion();
  NodePlan plan = rpcTestHelper.getPlan();
  dataNode.submitDiskBalancerPlan(planHash, planVersion, PLAN_FILE,
      plan.toJson(), false);
  String bandwidthString = dataNode.getDiskBalancerSetting(
      DiskBalancerConstants.DISKBALANCER_BANDWIDTH);
  long value = Long.decode(bandwidthString);
  Assert.assertEquals(10L, value);
}
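The test above reads the bandwidth setting directly from the DataNode object. As a hedged sketch, the same value could be queried over RPC from a Command subclass through the ClientDatanodeProtocol proxy that ExecuteCommand.submitPlan uses, assuming getDiskBalancerSetting is exposed on that interface; the helper name is hypothetical.

// Hypothetical helper inside a Command subclass; illustrative only.
private String queryDiskBalancerBandwidth(String dataNodeAddress)
    throws IOException {
  ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
  return dataNode.getDiskBalancerSetting(
      DiskBalancerConstants.DISKBALANCER_BANDWIDTH);
}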