Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in the Apache Hadoop project.
The class Command, method getNode.
/**
 * Looks up a DiskBalancer node in the cluster.
 *
 * @param nodeName - the hostname, IP address or UUID of the node.
 * @return the matching DiskBalancerDataNode, or null if not found.
 */
DiskBalancerDataNode getNode(String nodeName) {
  // Bail out early when there is nothing to search for or nothing to search.
  if (nodeName == null || nodeName.isEmpty()) {
    return null;
  }
  if (cluster.getNodes().size() == 0) {
    return null;
  }
  // Try each identifier kind in turn: hostname, then IP, then UUID.
  DiskBalancerDataNode found = cluster.getNodeByName(nodeName);
  if (found == null) {
    found = cluster.getNodeByIPAddress(nodeName);
  }
  if (found == null) {
    found = cluster.getNodeByUUID(nodeName);
  }
  return found;
}
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in the Apache Hadoop project.
The class DBNameNodeConnector, method getNodes.
/**
 * Builds the list of DiskBalancerDataNodes from the live datanode
 * storage reports supplied by the NameNode connector.
 *
 * @return Array of DiskBalancerDataNodes
 * @throws Exception if the storage reports cannot be fetched
 */
@Override
public List<DiskBalancerDataNode> getNodes() throws Exception {
  Preconditions.checkNotNull(this.connector);
  final List<DiskBalancerDataNode> results = new LinkedList<>();
  final DatanodeStorageReport[] liveReports =
      this.connector.getLiveDatanodeStorageReport();
  for (DatanodeStorageReport storageReport : liveReports) {
    // Convert each live datanode into its balancer model and attach
    // the per-volume information from its storage report.
    DiskBalancerDataNode dbNode =
        getBalancerNodeFromDataNode(storageReport.getDatanodeInfo());
    getVolumeInfoFromStorageReports(dbNode, storageReport.getStorageReports());
    results.add(dbNode);
  }
  return results;
}
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in the Apache Hadoop project.
The class ReportCommand, method handleNodeReport.
/**
 * Reports volume information for the DataNode(s) named on the command line.
 * The '-node' value may be a UUID, an IP address or a host name; multiple
 * values are resolved via {@code getNodes(String)}.
 *
 * @param cmd          parsed command line carrying the '-node' option
 * @param result       accumulator for the report output
 * @param nodeFormat   format string for the per-node summary line
 * @param volumeFormat format string for the per-volume lines
 * @throws Exception if node resolution or report generation fails
 */
private void handleNodeReport(final CommandLine cmd, StrBuilder result, final String nodeFormat, final String volumeFormat) throws Exception {
  String outputLine = "";
  /*
   * get value that identifies DataNode(s) from command line, it could be
   * UUID, IP address or host name.
   */
  final String nodeVal = cmd.getOptionValue(DiskBalancerCLI.NODE);
  if (StringUtils.isBlank(nodeVal)) {
    // Fix: the previous message ("neither specified or empty") stated the
    // opposite of this condition, which fires when the value IS missing.
    outputLine = "The value for '-node' is not specified or is empty.";
    recordOutput(result, outputLine);
  } else {
    /*
     * Reporting volume information for specific DataNode(s)
     */
    outputLine = String.format("Reporting volume information for DataNode(s) '%s'.", nodeVal);
    recordOutput(result, outputLine);
    List<DiskBalancerDataNode> dbdns = Lists.newArrayList();
    try {
      dbdns = getNodes(nodeVal);
    } catch (DiskBalancerException e) {
      // If nodeVal contains any invalid nodes, an exception is thrown;
      // record its message and stop reporting.
      recordOutput(result, e.getMessage());
      return;
    }
    if (!dbdns.isEmpty()) {
      for (DiskBalancerDataNode node : dbdns) {
        recordNodeReport(result, node, nodeFormat, volumeFormat);
        result.append(System.lineSeparator());
      }
    }
  }
}
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in the Apache Hadoop project.
The class ReportCommand, method handleTopReport.
/**
 * Reports a summary for the top X DataNodes that would benefit most from
 * running the DiskBalancer, where X is taken from the command line.
 *
 * @param cmd        parsed command line carrying the 'top' option
 * @param result     accumulator for the report output
 * @param nodeFormat format string for the per-node summary line
 */
private void handleTopReport(final CommandLine cmd, final StrBuilder result, final String nodeFormat) {
  // Sort descending so the nodes with the highest data density come first.
  Collections.sort(getCluster().getNodes(), Collections.reverseOrder());
  /* extract value that identifies top X DataNode(s) */
  setTopNodes(parseTopNodes(cmd, result));
  recordOutput(result, String.format(
      "Reporting top %d DataNode(s) benefiting from running DiskBalancer.",
      getTopNodes()));
  int rank = 0;
  for (DiskBalancerDataNode dbdn : getCluster().getNodes()) {
    if (rank >= getTopNodes()) {
      break;
    }
    rank++;
    result.appendln(String.format(nodeFormat, rank, getTopNodes(),
        dbdn.getDataNodeName(), dbdn.getDataNodeIP(), dbdn.getDataNodePort(),
        dbdn.getDataNodeUUID(), dbdn.getVolumeCount(),
        dbdn.getNodeDataDensity()));
  }
}
Use of org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode in the Apache Hadoop project.
The class DiskBalancerTestUtil, method createRandomDataNode.
/**
 * Creates a DataNode with a random UUID and randomly generated volumes.
 *
 * @param diskTypes - Storage types needed in the Node
 * @param diskCount - Disk count - that many disks of each type is created
 * @return DataNode
 * @throws Exception if volume-set generation fails
 */
public DiskBalancerDataNode createRandomDataNode(StorageType[] diskTypes, int diskCount) throws Exception {
  Preconditions.checkState(diskTypes.length > 0);
  Preconditions.checkState(diskCount > 0);
  final DiskBalancerDataNode randomNode =
      new DiskBalancerDataNode(UUID.randomUUID().toString());
  // For every requested storage type, generate a random volume set of
  // diskCount disks and attach each volume to the node.
  for (StorageType storageType : diskTypes) {
    DiskBalancerVolumeSet volumeSet = createRandomVolumeSet(storageType, diskCount);
    for (DiskBalancerVolume volume : volumeSet.getVolumes()) {
      randomNode.addVolume(volume);
    }
  }
  return randomNode;
}
Aggregations