Example 1 with StorageReport

Use of org.apache.hadoop.hdfs.server.protocol.StorageReport in project hadoop by apache.

From the class TestReadOnlySharedStorage, method validateStorageState.

private void validateStorageState(StorageReport[] storageReports, DatanodeStorage.State state) {
    for (StorageReport storageReport : storageReports) {
        DatanodeStorage storage = storageReport.getStorage();
        assertThat(storage.getState(), is(state));
    }
}
Also used: DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport)
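
For context, a minimal standalone sketch of the same uniform-state check. It builds one read-only report by hand; the storage ID and zero counters are invented for illustration:

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

public class StorageStateCheckSketch {
    public static void main(String[] args) {
        // One storage in the READ_ONLY_SHARED state.
        DatanodeStorage storage = new DatanodeStorage("DS-sketch-1",
                DatanodeStorage.State.READ_ONLY_SHARED, StorageType.DEFAULT);
        StorageReport[] storageReports = {
                new StorageReport(storage, false, 0, 0, 0, 0, 0) };
        // The same invariant the test helper asserts, without Hamcrest.
        for (StorageReport storageReport : storageReports) {
            if (storageReport.getStorage().getState()
                    != DatanodeStorage.State.READ_ONLY_SHARED) {
                throw new AssertionError("unexpected storage state");
            }
        }
    }
}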

Example 2 with StorageReport

Use of org.apache.hadoop.hdfs.server.protocol.StorageReport in project hadoop by apache.

From the class ExternalDatasetImpl, method getStorageReports.

@Override
public StorageReport[] getStorageReports(String bpid) throws IOException {
    StorageReport[] result = new StorageReport[1];
    result[0] = new StorageReport(storage, false, 0, 0, 0, 0, 0);
    return result;
}
Also used: StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport)
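
This stub reports a single storage with every counter zeroed. Judging from the call in Example 3 below, the positional parameters of this constructor are (storage, failed, capacity, dfsUsed, remaining, blockPoolUsed, nonDfsUsed); a sketch with named values for readability (the numbers are invented):

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

public class StorageReportSketch {
    public static void main(String[] args) {
        long capacity = 100L << 30;                       // 100 GiB of raw capacity
        long dfsUsed = 40L << 30;                         // bytes held by HDFS blocks
        long remaining = 55L << 30;                       // bytes still writable
        long blockPoolUsed = dfsUsed;                     // usage within this block pool
        long nonDfsUsed = capacity - dfsUsed - remaining; // consumed by non-HDFS files
        StorageReport report = new StorageReport(
                new DatanodeStorage("DS-sketch-2", DatanodeStorage.State.NORMAL,
                        StorageType.DEFAULT),
                /* failed= */ false,
                capacity, dfsUsed, remaining, blockPoolUsed, nonDfsUsed);
        System.out.println(report.getDfsUsed() + " of " + report.getCapacity());
    }
}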

Example 3 with StorageReport

Use of org.apache.hadoop.hdfs.server.protocol.StorageReport in project hadoop by apache.

From the class FsDatasetImpl, method getStorageReports.

// FsDatasetSpi
@Override
public StorageReport[] getStorageReports(String bpid) throws IOException {
    List<StorageReport> reports;
    synchronized (statsLock) {
        List<FsVolumeImpl> curVolumes = volumes.getVolumes();
        reports = new ArrayList<>(curVolumes.size());
        for (FsVolumeImpl volume : curVolumes) {
            try (FsVolumeReference ref = volume.obtainReference()) {
                StorageReport sr = new StorageReport(volume.toDatanodeStorage(),
                        false, volume.getCapacity(), volume.getDfsUsed(),
                        volume.getAvailable(), volume.getBlockPoolUsed(bpid),
                        volume.getNonDfsUsed());
                reports.add(sr);
            } catch (ClosedChannelException e) {
                continue;
            }
        }
    }
    return reports.toArray(new StorageReport[reports.size()]);
}
Also used: ClosedChannelException (java.nio.channels.ClosedChannelException), FsVolumeReference (org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference), StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport)
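
Two details worth noting: obtainReference() pins the volume so a concurrent hot-swap removal cannot close it mid-read, and a ClosedChannelException means the volume was removed first and can simply be skipped. A sketch of that guard pattern in isolation, with a hypothetical VolumeRef standing in for FsVolumeReference:

import java.io.Closeable;
import java.io.IOException;
import java.nio.channels.ClosedChannelException;

// Hypothetical stand-in for FsVolumeReference: while a reference is
// held, the volume cannot be fully removed; close() releases it.
interface VolumeRef extends Closeable {
    long capacity() throws ClosedChannelException;
}

final class RefGuardSketch {
    // Mirrors the loop body of getStorageReports() above: pin the
    // volume, read its stats, and skip it if it was removed first.
    static long safeCapacity(VolumeRef ref) throws IOException {
        try (VolumeRef pinned = ref) {
            return pinned.capacity();
        } catch (ClosedChannelException e) {
            return -1; // volume was hot-removed; the caller skips it
        }
    }
}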

Example 4 with StorageReport

Use of org.apache.hadoop.hdfs.server.protocol.StorageReport in project hadoop by apache.

From the class DBNameNodeConnector, method getVolumeInfoFromStorageReports.

/**
   * Reads the relevant fields from each storage volume and populates the
   * DiskBalancer node.
   *
   * @param node    - Disk Balancer Node
   * @param reports - Array of StorageReport
   */
private void getVolumeInfoFromStorageReports(DiskBalancerDataNode node, StorageReport[] reports) throws Exception {
    Preconditions.checkNotNull(node);
    Preconditions.checkNotNull(reports);
    for (StorageReport report : reports) {
        DatanodeStorage storage = report.getStorage();
        DiskBalancerVolume volume = new DiskBalancerVolume();
        volume.setCapacity(report.getCapacity());
        volume.setFailed(report.isFailed());
        volume.setUsed(report.getDfsUsed());
        // TODO: Should we do block-pool-level balancing at all? Does it
        // make sense? The Balancer does. Right now we only deal with
        // volumes, not block pools.
        volume.setUuid(storage.getStorageID());
        // Skip this volume for disk balancing if it is read-only
        // (we will not be able to delete data from it) or if it has
        // already failed.
        volume.setSkip((storage.getState() == DatanodeStorage.State.READ_ONLY_SHARED) || report.isFailed());
        volume.setStorageType(storage.getStorageType().name());
        volume.setIsTransient(storage.getStorageType().isTransient());
        node.addVolume(volume);
    }
}
Also used: DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), DatanodeStorageReport (org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport), StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport), DiskBalancerVolume (org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume)
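
The disk balancer ultimately works from each volume's used-to-capacity ratio; a sketch of deriving that ratio, and the same skip rule, straight from a StorageReport (the helper names are ours, not Hadoop's):

import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

final class VolumeDensitySketch {
    // Fraction of raw capacity consumed by HDFS data: the quantity the
    // disk balancer tries to even out across a datanode's volumes.
    static double usedFraction(StorageReport report) {
        long capacity = report.getCapacity();
        return capacity == 0 ? 0.0 : (double) report.getDfsUsed() / capacity;
    }

    // The same skip rule as getVolumeInfoFromStorageReports() above.
    static boolean shouldSkip(StorageReport report) {
        return report.getStorage().getState()
                == DatanodeStorage.State.READ_ONLY_SHARED
                || report.isFailed();
    }
}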

Example 5 with StorageReport

Use of org.apache.hadoop.hdfs.server.protocol.StorageReport in project hadoop by apache.

From the class BlockManagerTestUtil, method getStorageReportsForDatanode.

public static StorageReport[] getStorageReportsForDatanode(DatanodeDescriptor dnd) {
    ArrayList<StorageReport> reports = new ArrayList<StorageReport>();
    for (DatanodeStorageInfo storage : dnd.getStorageInfos()) {
        DatanodeStorage dns = new DatanodeStorage(storage.getStorageID(),
                storage.getState(), storage.getStorageType());
        StorageReport report = new StorageReport(dns, false,
                storage.getCapacity(), storage.getDfsUsed(),
                storage.getRemaining(), storage.getBlockPoolUsed(), 0);
        reports.add(report);
    }
    return reports.toArray(StorageReport.EMPTY_ARRAY);
}
Also used: DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), ArrayList (java.util.ArrayList), StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport)
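
Note the toArray(StorageReport.EMPTY_ARRAY) idiom: when the list is empty, the shared constant is returned as-is and no new zero-length array is allocated (contrast with Example 3, which sizes a fresh array). A minimal sketch:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;

public class ToArraySketch {
    public static void main(String[] args) {
        List<StorageReport> reports = new ArrayList<>();
        // Empty list: toArray hands back EMPTY_ARRAY itself rather
        // than allocating a new StorageReport[0].
        StorageReport[] asArray = reports.toArray(StorageReport.EMPTY_ARRAY);
        System.out.println(asArray.length); // 0
    }
}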

Aggregations

StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport): 16 usages
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 5 usages
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 4 usages
VolumeFailureSummary (org.apache.hadoop.hdfs.server.protocol.VolumeFailureSummary): 4 usages
IOException (java.io.IOException): 3 usages
Configuration (org.apache.hadoop.conf.Configuration): 3 usages
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 3 usages
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 3 usages
HeartbeatResponse (org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse): 3 usages
SlowPeerReports (org.apache.hadoop.hdfs.server.protocol.SlowPeerReports): 3 usages
Test (org.junit.Test): 3 usages
ServiceException (com.google.protobuf.ServiceException): 2 usages
ArrayList (java.util.ArrayList): 2 usages
Path (org.apache.hadoop.fs.Path): 2 usages
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 2 usages
DatanodeCommand (org.apache.hadoop.hdfs.server.protocol.DatanodeCommand): 2 usages
DatanodeStorageReport (org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport): 2 usages
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 2 usages
File (java.io.File): 1 usage
ClosedChannelException (java.nio.channels.ClosedChannelException): 1 usage