Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.
The class Command, method getNodes.
/**
 * Returns a list of DiskBalancerDataNodes from the cluster. Returns an
 * empty list if listArg is null or empty; throws DiskBalancerException
 * if any named node cannot be found.
 *
 * @param listArg String File URL or a comma separated list of node names.
 * @return List of DiskBalancerDataNode
 * @throws IOException if the node list cannot be read
 */
protected List<DiskBalancerDataNode> getNodes(String listArg)
    throws IOException {
  Set<String> nodeNames = null;
  List<DiskBalancerDataNode> nodeList = Lists.newArrayList();
  List<String> invalidNodeList = Lists.newArrayList();
  if ((listArg == null) || listArg.isEmpty()) {
    return nodeList;
  }
  nodeNames = getNodeList(listArg);

  DiskBalancerDataNode node = null;
  if (!nodeNames.isEmpty()) {
    for (String name : nodeNames) {
      node = getNode(name);
      if (node != null) {
        nodeList.add(node);
      } else {
        invalidNodeList.add(name);
      }
    }
  }

  if (!invalidNodeList.isEmpty()) {
    String invalidNodes = StringUtils.join(invalidNodeList.toArray(), ",");
    String warnMsg = String.format(
        "The node(s) '%s' not found. Please make sure that '%s' exists in "
            + "the cluster.", invalidNodes, invalidNodes);
    throw new DiskBalancerException(warnMsg,
        DiskBalancerException.Result.INVALID_NODE);
  }

  return nodeList;
}
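For illustration, a minimal sketch of how a caller might handle the invalid-node failure mode described above. The command instance and node names are hypothetical, and it assumes DiskBalancerException exposes the Result passed at construction via a getResult() accessor.

// Hypothetical caller: resolve two node names, treating INVALID_NODE as a
// recoverable condition. "command" is any concrete Command subclass.
try {
  List<DiskBalancerDataNode> nodes =
      command.getNodes("dn1.example.com,dn2.example.com");
  LOG.info("Resolved {} node(s).", nodes.size());
} catch (DiskBalancerException ex) {
  // Assumption: getResult() returns the Result enum set in getNodes().
  if (ex.getResult() == DiskBalancerException.Result.INVALID_NODE) {
    LOG.warn("Skipping unknown node(s): {}", ex.getMessage());
  } else {
    throw ex;
  }
}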
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.
The class DBNameNodeConnector, method getBalancerNodeFromDataNode.
/**
 * This function maps the required fields from DatanodeInfo to
 * DiskBalancerDataNode.
 *
 * @param nodeInfo DatanodeInfo reported by the NameNode.
 * @return DiskBalancerDataNode
 */
private DiskBalancerDataNode getBalancerNodeFromDataNode(
    DatanodeInfo nodeInfo) {
  Preconditions.checkNotNull(nodeInfo);
  DiskBalancerDataNode dbDataNode =
      new DiskBalancerDataNode(nodeInfo.getDatanodeUuid());
  dbDataNode.setDataNodeIP(nodeInfo.getIpAddr());
  dbDataNode.setDataNodeName(nodeInfo.getHostName());
  dbDataNode.setDataNodePort(nodeInfo.getIpcPort());
  return dbDataNode;
}
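A minimal sketch of the mapping's round-trip properties, assuming the obvious getters exist (getDataNodeUUID is used by the test further down; the others are assumed to mirror the setters above):

// Illustration only: the fields copied by getBalancerNodeFromDataNode().
DiskBalancerDataNode dbNode = getBalancerNodeFromDataNode(nodeInfo);
assert dbNode.getDataNodeUUID().equals(nodeInfo.getDatanodeUuid());
assert dbNode.getDataNodeIP().equals(nodeInfo.getIpAddr());
assert dbNode.getDataNodeName().equals(nodeInfo.getHostName());
// Note: the port stored is the IPC port, not the data transfer port.
assert dbNode.getDataNodePort() == nodeInfo.getIpcPort();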
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.
The class PlanCommand, method execute.
/**
 * Runs the plan command. This command can be run with various options like
 * <p>
 * -plan -node IP
 * -plan -node hostName
 * -plan -node DatanodeUUID
 *
 * @param cmd - CommandLine
 * @throws Exception
 */
@Override
public void execute(CommandLine cmd) throws Exception {
  StrBuilder result = new StrBuilder();
  String outputLine = "";
  LOG.debug("Processing Plan Command.");
  Preconditions.checkState(cmd.hasOption(DiskBalancerCLI.PLAN));
  verifyCommandOptions(DiskBalancerCLI.PLAN, cmd);

  if (cmd.getOptionValue(DiskBalancerCLI.PLAN) == null) {
    throw new IllegalArgumentException("A node name is required to create a"
        + " plan.");
  }

  if (cmd.hasOption(DiskBalancerCLI.BANDWIDTH)) {
    this.bandwidth =
        Integer.parseInt(cmd.getOptionValue(DiskBalancerCLI.BANDWIDTH));
  }

  if (cmd.hasOption(DiskBalancerCLI.MAXERROR)) {
    this.maxError =
        Integer.parseInt(cmd.getOptionValue(DiskBalancerCLI.MAXERROR));
  }

  readClusterInfo(cmd);
  String output = null;
  if (cmd.hasOption(DiskBalancerCLI.OUTFILE)) {
    output = cmd.getOptionValue(DiskBalancerCLI.OUTFILE);
  }
  setOutputPath(output);

  // -plan nodename is the command line argument.
  DiskBalancerDataNode node =
      getNode(cmd.getOptionValue(DiskBalancerCLI.PLAN));
  if (node == null) {
    throw new IllegalArgumentException("Unable to find the specified node. "
        + cmd.getOptionValue(DiskBalancerCLI.PLAN));
  }

  this.thresholdPercentage = getThresholdPercentage(cmd);
  LOG.debug("threshold Percentage is {}", this.thresholdPercentage);
  setNodesToProcess(node);
  populatePathNames(node);

  NodePlan plan = null;
  List<NodePlan> plans = getCluster().computePlan(this.thresholdPercentage);
  setPlanParams(plans);
  if (plans.size() > 0) {
    plan = plans.get(0);
  }

  try (FSDataOutputStream beforeStream = create(String.format(
      DiskBalancerCLI.BEFORE_TEMPLATE,
      cmd.getOptionValue(DiskBalancerCLI.PLAN)))) {
    beforeStream.write(
        getCluster().toJson().getBytes(StandardCharsets.UTF_8));
  }

  try {
    if (plan != null && plan.getVolumeSetPlans().size() > 0) {
      outputLine = String.format("Writing plan to:");
      recordOutput(result, outputLine);

      final String planFileName = String.format(
          DiskBalancerCLI.PLAN_TEMPLATE,
          cmd.getOptionValue(DiskBalancerCLI.PLAN));
      final String planFileFullName =
          new Path(getOutputPath(), planFileName).toString();
      recordOutput(result, planFileFullName);

      try (FSDataOutputStream planStream = create(planFileName)) {
        planStream.write(plan.toJson().getBytes(StandardCharsets.UTF_8));
      }
    } else {
      outputLine = String.format(
          "No plan generated. DiskBalancing not needed for node: %s"
              + " threshold used: %s",
          cmd.getOptionValue(DiskBalancerCLI.PLAN),
          this.thresholdPercentage);
      recordOutput(result, outputLine);
    }

    if (cmd.hasOption(DiskBalancerCLI.VERBOSE) && plans.size() > 0) {
      printToScreen(plans);
    }
  } catch (Exception e) {
    final String errMsg =
        "Errors while recording the output of plan command.";
    LOG.error(errMsg, e);
    result.appendln(errMsg);
    result.appendln(Throwables.getStackTraceAsString(e));
  }
  getPrintStream().print(result.toString());
}
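From the shell this path is reached via hdfs diskbalancer -plan <node>, with -out optionally overriding the output directory. Below is a hedged sketch of reading the generated plan back; it assumes a FileSystem handle fs, an illustrative node name, and that NodePlan.parseJson is the inverse of the plan.toJson() call used above.

// Sketch only: locate and parse the plan written by execute().
// PLAN_TEMPLATE expands to the plan file name for the given node.
String planFileName =
    String.format(DiskBalancerCLI.PLAN_TEMPLATE, "dn1.example.com");
Path planPath = new Path(getOutputPath(), planFileName);
try (FSDataInputStream in = fs.open(planPath)) {
  byte[] data = new byte[(int) fs.getFileStatus(planPath).getLen()];
  in.readFully(data);
  // Assumption: NodePlan.parseJson deserializes what toJson() produced.
  NodePlan plan = NodePlan.parseJson(new String(data, StandardCharsets.UTF_8));
  // plan.getVolumeSetPlans() lists the individual volume-to-volume moves.
}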
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.
The class TestDiskBalancerCommand, method testGetNodeList.
@Test(timeout = 60000)
public void testGetNodeList() throws Exception {
  ClusterConnector jsonConnector =
      ConnectorFactory.getCluster(clusterJson, conf);
  DiskBalancerCluster diskBalancerCluster =
      new DiskBalancerCluster(jsonConnector);
  diskBalancerCluster.readClusterInfo();

  int nodeNum = 5;
  StringBuilder listArg = new StringBuilder();
  for (int i = 0; i < nodeNum; i++) {
    listArg.append(diskBalancerCluster.getNodes().get(i).getDataNodeUUID())
        .append(",");
  }

  ReportCommand command = new ReportCommand(conf, null);
  command.setCluster(diskBalancerCluster);
  List<DiskBalancerDataNode> nodeList = command.getNodes(listArg.toString());
  assertEquals(nodeNum, nodeList.size());
}
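Note that listArg always ends with a trailing comma; the assertion passing implies that getNodeList tolerates a trailing separator when splitting the comma-separated argument.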
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in project hadoop by apache.
The class TestDataModels, method testCreateRandomDataNode.
@Test
public void testCreateRandomDataNode() throws Exception {
  DiskBalancerTestUtil util = new DiskBalancerTestUtil();
  DiskBalancerDataNode node = util.createRandomDataNode(
      new StorageType[] {StorageType.DISK, StorageType.RAM_DISK}, 10);
  Assert.assertNotNull(node.getNodeDataDensity());
}
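A hedged extension of the same test; getVolumeCount() is assumed here, and the exact distribution of volumes across the two storage types is an implementation detail of the test utility, so only weak properties are checked.

// Hypothetical follow-on assertions on the randomly generated node.
Assert.assertNotNull(node.getDataNodeUUID());
// Assumption: getVolumeCount() reports the total volumes added to the node.
Assert.assertTrue(node.getVolumeCount() > 0);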