Use of org.apache.hadoop.hdfs.server.protocol.StorageReport in project hadoop by apache.
In class TestReadOnlySharedStorage, the method validateStorageState:
private void validateStorageState(StorageReport[] storageReports,
    DatanodeStorage.State state) {
  // Every storage in the report set is expected to be in the given state.
  for (StorageReport storageReport : storageReports) {
    DatanodeStorage storage = storageReport.getStorage();
    assertThat(storage.getState(), is(state));
  }
}
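A minimal sketch (not taken from the Hadoop tests) of how this helper could be invoked; the storage ID and byte counts are made-up values, and imports for DatanodeStorage, StorageReport, and org.apache.hadoop.fs.StorageType are assumed:

DatanodeStorage readOnlyStorage = new DatanodeStorage(
    "storage-1", DatanodeStorage.State.READ_ONLY_SHARED, StorageType.DISK);
StorageReport[] reports = {
    // failed=false, then capacity, dfsUsed, remaining, blockPoolUsed, nonDfsUsed
    new StorageReport(readOnlyStorage, false, 100L, 0L, 100L, 0L, 0L)
};
validateStorageState(reports, DatanodeStorage.State.READ_ONLY_SHARED);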
Use of org.apache.hadoop.hdfs.server.protocol.StorageReport in project hadoop by apache.
In class ExternalDatasetImpl, the method getStorageReports:
@Override
public StorageReport[] getStorageReports(String bpid) throws IOException {
  // Stub implementation: a single report for the external storage with
  // failed=false and all byte counters (capacity, dfsUsed, remaining,
  // blockPoolUsed, nonDfsUsed) set to zero.
  StorageReport[] result = new StorageReport[1];
  result[0] = new StorageReport(storage, false, 0, 0, 0, 0, 0);
  return result;
}
Use of org.apache.hadoop.hdfs.server.protocol.StorageReport in project hadoop by apache.
In class FsDatasetImpl, the method getStorageReports:
@Override // FsDatasetSpi
public StorageReport[] getStorageReports(String bpid) throws IOException {
  List<StorageReport> reports;
  synchronized (statsLock) {
    List<FsVolumeImpl> curVolumes = volumes.getVolumes();
    reports = new ArrayList<>(curVolumes.size());
    for (FsVolumeImpl volume : curVolumes) {
      // Hold a reference so the volume cannot be removed while its
      // counters are read.
      try (FsVolumeReference ref = volume.obtainReference()) {
        StorageReport sr = new StorageReport(volume.toDatanodeStorage(),
            false,
            volume.getCapacity(),
            volume.getDfsUsed(),
            volume.getAvailable(),
            volume.getBlockPoolUsed(bpid),
            volume.getNonDfsUsed());
        reports.add(sr);
      } catch (ClosedChannelException e) {
        // The volume was removed concurrently; skip it.
        continue;
      }
    }
  }
  return reports.toArray(new StorageReport[reports.size()]);
}
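As a hedged sketch (not code from Hadoop), a caller might fold the per-volume reports into datanode-wide totals; the helper name and the long[] return shape are invented for illustration:

// Hypothetical helper: sums non-failed volumes into datanode-wide totals
// (capacity, dfsUsed, remaining), in that order.
static long[] sumStorageReports(FsDatasetSpi<?> dataset, String bpid)
    throws IOException {
  long capacity = 0, dfsUsed = 0, remaining = 0;
  for (StorageReport r : dataset.getStorageReports(bpid)) {
    if (!r.isFailed()) {  // skip reports for failed volumes
      capacity += r.getCapacity();
      dfsUsed += r.getDfsUsed();
      remaining += r.getRemaining();
    }
  }
  return new long[] {capacity, dfsUsed, remaining};
}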
Use of org.apache.hadoop.hdfs.server.protocol.StorageReport in project hadoop by apache.
In class DBNameNodeConnector, the method getVolumeInfoFromStorageReports:
/**
 * Reads the relevant fields from each storage volume and populates the
 * DiskBalancer node.
 *
 * @param node    - Disk Balancer Node
 * @param reports - Array of StorageReport
 */
private void getVolumeInfoFromStorageReports(DiskBalancerDataNode node,
    StorageReport[] reports) throws Exception {
  Preconditions.checkNotNull(node);
  Preconditions.checkNotNull(reports);
  for (StorageReport report : reports) {
    DatanodeStorage storage = report.getStorage();
    DiskBalancerVolume volume = new DiskBalancerVolume();
    volume.setCapacity(report.getCapacity());
    volume.setFailed(report.isFailed());
    volume.setUsed(report.getDfsUsed());
    // TODO: Should we do block-pool-level balancing at all? Does it make
    // sense? The Balancer does. Right now we only deal with volumes, not
    // block pools.
    volume.setUuid(storage.getStorageID());
    // Skip this volume for the disk balancer if it is read-only (we would
    // not be able to delete from it) or if it has already failed.
    volume.setSkip((storage.getState() == DatanodeStorage.State.READ_ONLY_SHARED)
        || report.isFailed());
    volume.setStorageType(storage.getStorageType().name());
    volume.setIsTransient(storage.getStorageType().isTransient());
    node.addVolume(volume);
  }
}
Use of org.apache.hadoop.hdfs.server.protocol.StorageReport in project hadoop by apache.
In class BlockManagerTestUtil, the method getStorageReportsForDatanode:
public static StorageReport[] getStorageReportsForDatanode(
    DatanodeDescriptor dnd) {
  ArrayList<StorageReport> reports = new ArrayList<StorageReport>();
  for (DatanodeStorageInfo storage : dnd.getStorageInfos()) {
    DatanodeStorage dns = new DatanodeStorage(storage.getStorageID(),
        storage.getState(), storage.getStorageType());
    // Build one (non-failed) report per storage; nonDfsUsed is reported as 0.
    StorageReport report = new StorageReport(dns, false,
        storage.getCapacity(), storage.getDfsUsed(), storage.getRemaining(),
        storage.getBlockPoolUsed(), 0);
    reports.add(report);
  }
  return reports.toArray(StorageReport.EMPTY_ARRAY);
}
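A hedged usage sketch (the helper name and a statically imported JUnit assertEquals are assumptions, not Hadoop code): a test could verify that the utility above yields exactly one report per DatanodeStorageInfo attached to the descriptor.

// Hypothetical test helper built on the utility shown above.
static void assertOneReportPerStorage(DatanodeDescriptor dnd) {
  StorageReport[] reports =
      BlockManagerTestUtil.getStorageReportsForDatanode(dnd);
  assertEquals(dnd.getStorageInfos().length, reports.length);
}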