use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport in project hadoop by apache.
the class TestDatanodeReport method assertReports.
static void assertReports(int numDatanodes, DatanodeReportType type,
    DFSClient client, List<DataNode> datanodes, String bpid) throws IOException {
  final DatanodeInfo[] infos = client.datanodeReport(type);
  assertEquals(numDatanodes, infos.length);
  final DatanodeStorageReport[] reports = client.getDatanodeStorageReport(type);
  assertEquals(numDatanodes, reports.length);
  for (int i = 0; i < infos.length; i++) {
    assertEquals(infos[i], reports[i].getDatanodeInfo());
    final DataNode d = findDatanode(infos[i].getDatanodeUuid(), datanodes);
    if (bpid != null) {
      // check storage
      final StorageReport[] computed = reports[i].getStorageReports();
      Arrays.sort(computed, CMP);
      final StorageReport[] expected = d.getFSDataset().getStorageReports(bpid);
      Arrays.sort(expected, CMP);
      assertEquals(expected.length, computed.length);
      for (int j = 0; j < expected.length; j++) {
        assertEquals(expected[j].getStorage().getStorageID(),
            computed[j].getStorage().getStorageID());
      }
    }
  }
}
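The CMP comparator and findDatanode helper used above are defined elsewhere in TestDatanodeReport and are not part of this snippet. A minimal sketch of what they could look like, assuming CMP orders storage reports by storage ID so the sorted arrays can be compared element by element:

// Hypothetical sketch: order StorageReports by storage ID.
static final Comparator<StorageReport> CMP = new Comparator<StorageReport>() {
  @Override
  public int compare(StorageReport left, StorageReport right) {
    return left.getStorage().getStorageID()
        .compareTo(right.getStorage().getStorageID());
  }
};

// Hypothetical sketch: find the DataNode with the given UUID.
static DataNode findDatanode(String id, List<DataNode> datanodes) {
  for (DataNode d : datanodes) {
    if (d.getDatanodeUuid().equals(id)) {
      return d;
    }
  }
  throw new IllegalStateException("Datanode " + id + " not found.");
}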
use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport in project hadoop by apache.
the class TestBalancerWithMultipleNameNodes method compareTotalPoolUsage.
/**
 * Compare the total block pool usage on each datanode to ensure that nothing
 * was balanced.
 *
 * @param preReports storage reports taken before the balancer run
 * @param postReports storage reports taken after the balancer run
 */
private static void compareTotalPoolUsage(DatanodeStorageReport[] preReports,
    DatanodeStorageReport[] postReports) {
  Assert.assertNotNull(preReports);
  Assert.assertNotNull(postReports);
  Assert.assertEquals(preReports.length, postReports.length);
  for (DatanodeStorageReport preReport : preReports) {
    String dnUuid = preReport.getDatanodeInfo().getDatanodeUuid();
    for (DatanodeStorageReport postReport : postReports) {
      if (postReport.getDatanodeInfo().getDatanodeUuid().equals(dnUuid)) {
        Assert.assertEquals(getTotalPoolUsage(preReport),
            getTotalPoolUsage(postReport));
        LOG.info("Comparison of datanode pool usage pre/post balancer run. "
            + "PrePoolUsage: " + getTotalPoolUsage(preReport)
            + ", PostPoolUsage: " + getTotalPoolUsage(postReport));
        break;
      }
    }
  }
}
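The getTotalPoolUsage helper is not shown in this snippet. A plausible sketch, assuming it sums the block pool usage across all storages of a datanode:

// Hypothetical sketch: total block pool usage across one datanode's storages.
private static long getTotalPoolUsage(DatanodeStorageReport report) {
  long usage = 0L;
  for (StorageReport sr : report.getStorageReports()) {
    usage += sr.getBlockPoolUsed();
  }
  return usage;
}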
use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport in project hadoop by apache.
the class Balancer method init.
/**
 * Given a datanode storage set, build a network topology and classify the
 * storages as over-utilized, above-average utilized, below-average utilized,
 * or under-utilized. The input datanode storage set is shuffled in order to
 * randomize the storage matching later on.
 *
 * @return the number of bytes needed to move in order to balance the cluster.
 */
private long init(List<DatanodeStorageReport> reports) {
  // compute average utilization
  for (DatanodeStorageReport r : reports) {
    policy.accumulateSpaces(r);
  }
  policy.initAvgUtilization();
  // create network topology and classify utilization collections:
  //   over-utilized, above-average, below-average and under-utilized.
  long overLoadedBytes = 0L, underLoadedBytes = 0L;
  for (DatanodeStorageReport r : reports) {
    final DDatanode dn = dispatcher.newDatanode(r.getDatanodeInfo());
    final boolean isSource = Util.isIncluded(sourceNodes, dn.getDatanodeInfo());
    for (StorageType t : StorageType.getMovableTypes()) {
      final Double utilization = policy.getUtilization(r, t);
      if (utilization == null) {
        // datanode does not have such storage type
        continue;
      }
      final double average = policy.getAvgUtilization(t);
      if (utilization >= average && !isSource) {
        LOG.info(dn + "[" + t + "] has utilization=" + utilization
            + " >= average=" + average
            + " but it is not specified as a source; skipping it.");
        continue;
      }
      final double utilizationDiff = utilization - average;
      final long capacity = getCapacity(r, t);
      final double thresholdDiff = Math.abs(utilizationDiff) - threshold;
      final long maxSize2Move = computeMaxSize2Move(capacity,
          getRemaining(r, t), utilizationDiff, maxSizeToMove);
      final StorageGroup g;
      if (utilizationDiff > 0) {
        final Source s = dn.addSource(t, maxSize2Move, dispatcher);
        if (thresholdDiff <= 0) {
          // within threshold
          aboveAvgUtilized.add(s);
        } else {
          overLoadedBytes += percentage2bytes(thresholdDiff, capacity);
          overUtilized.add(s);
        }
        g = s;
      } else {
        g = dn.addTarget(t, maxSize2Move);
        if (thresholdDiff <= 0) {
          // within threshold
          belowAvgUtilized.add(g);
        } else {
          underLoadedBytes += percentage2bytes(thresholdDiff, capacity);
          underUtilized.add(g);
        }
      }
      dispatcher.getStorageGroupMap().put(g);
    }
  }
  logUtilizationCollections();
  Preconditions.checkState(dispatcher.getStorageGroupMap().size()
      == overUtilized.size() + underUtilized.size()
          + aboveAvgUtilized.size() + belowAvgUtilized.size(),
      "Mismatched number of storage groups");
  // return number of bytes to be moved in order to make the cluster balanced
  return Math.max(overLoadedBytes, underLoadedBytes);
}
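The percentage2bytes and computeMaxSize2Move helpers referenced above are not part of this snippet. A minimal sketch of the arithmetic they could implement, assuming utilization values are percentages of a storage's capacity:

// Hypothetical sketch: convert a utilization percentage into bytes.
private static long percentage2bytes(double percentage, long capacity) {
  Preconditions.checkArgument(percentage >= 0,
      "percentage = %s < 0", percentage);
  return (long) (percentage * capacity / 100.0);
}

// Hypothetical sketch: bytes to move for one storage, capped by the remaining
// space on a prospective target and by the configured per-iteration maximum.
private static long computeMaxSize2Move(final long capacity, final long remaining,
    final double utilizationDiff, final long max) {
  final double diff = Math.abs(utilizationDiff);
  long maxSizeToMove = percentage2bytes(diff, capacity);
  if (utilizationDiff < 0) {
    // a target cannot receive more than its remaining space
    maxSizeToMove = Math.min(remaining, maxSizeToMove);
  }
  return Math.min(max, maxSizeToMove);
}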
use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport in project hadoop by apache.
the class Dispatcher method init.
/** Get live datanode storage reports and then build the network topology. */
public List<DatanodeStorageReport> init() throws IOException {
  final DatanodeStorageReport[] reports = nnc.getLiveDatanodeStorageReport();
  final List<DatanodeStorageReport> trimmed = new ArrayList<DatanodeStorageReport>();
  // create network topology and classify utilization collections:
  //   over-utilized, above-average, below-average and under-utilized.
  for (DatanodeStorageReport r : DFSUtil.shuffle(reports)) {
    final DatanodeInfo datanode = r.getDatanodeInfo();
    if (shouldIgnore(datanode)) {
      continue;
    }
    trimmed.add(r);
    cluster.add(datanode);
  }
  return trimmed;
}
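The shouldIgnore check is defined elsewhere in Dispatcher. A plausible sketch, assuming it filters decommissioned and decommissioning nodes and applies include/exclude host lists (the excludedNodes and includedNodes fields are assumptions here):

// Hypothetical sketch: skip datanodes that should not take part in balancing.
private boolean shouldIgnore(DatanodeInfo dn) {
  // ignore nodes that are decommissioned or being decommissioned
  final boolean decommissioned = dn.isDecommissioned();
  final boolean decommissioning = dn.isDecommissionInProgress();
  // ignore nodes listed in the exclude list
  final boolean excluded = Util.isExcluded(excludedNodes, dn);
  // ignore nodes missing from a non-empty include list
  final boolean notIncluded = !Util.isIncluded(includedNodes, dn);
  return decommissioned || decommissioning || excluded || notIncluded;
}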
use of org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport in project SSM by Intel-bigdata.
the class Mover method init.
@VisibleForTesting
void init() throws IOException {
  final List<DatanodeStorageReport> reports = dispatcher.init();
  for (DatanodeStorageReport r : reports) {
    final Dispatcher.DDatanode dn = dispatcher.newDatanode(r.getDatanodeInfo());
    for (StorageType t : StorageType.getMovableTypes()) {
      final Dispatcher.Source source = dn.addSource(t, Long.MAX_VALUE, dispatcher);
      final long maxRemaining = getMaxRemaining(r, t);
      final Dispatcher.DDatanode.StorageGroup target =
          maxRemaining > 0L ? dn.addTarget(t, maxRemaining) : null;
      storages.add(source, target);
    }
  }
}
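The getMaxRemaining helper is referenced but not shown here. A minimal sketch, assuming it returns the largest remaining space among a datanode's storages of the given type:

// Hypothetical sketch: largest remaining space among the datanode's storages
// of the given storage type; 0 if the datanode has no such storage.
static long getMaxRemaining(DatanodeStorageReport report, StorageType t) {
  long max = 0L;
  for (StorageReport r : report.getStorageReports()) {
    if (r.getStorage().getStorageType() == t && r.getRemaining() > max) {
      max = r.getRemaining();
    }
  }
  return max;
}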
Aggregations