Search in sources:

Example 1 with StorageReportProto

Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto in the Apache Ozone project.

From the class TestSCMNodeMetrics, method testNodeReportProcessingFailure.

/**
 * Verifies that the node-report processing failure counter is incremented
 * when a report from an unknown datanode is submitted.
 */
@Test
public void testNodeReportProcessingFailure() {
    // Snapshot the failure counter before submitting the report.
    long failuresBefore = getCounter("NumNodeReportProcessingFailed");
    // A freshly generated datanode — its report is expected to fail processing
    // (presumably because it was never registered; confirm against NodeManager).
    DatanodeDetails unknownDatanode = MockDatanodeDetails.randomDatanodeDetails();
    StorageReportProto report = HddsTestUtils.createStorageReport(unknownDatanode.getUuid(), "/tmp", 100, 10, 90, null);
    NodeReportProto nodeReport = NodeReportProto.newBuilder().addStorageReport(report).build();
    nodeManager.processNodeReport(unknownDatanode, nodeReport);
    // Exactly one additional failure must have been recorded.
    assertEquals("NumNodeReportProcessingFailed", failuresBefore + 1, getCounter("NumNodeReportProcessingFailed"));
}
Also used : MockDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) StorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto) NodeReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto) Test(org.junit.Test)

Example 2 with StorageReportProto

Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto in the Apache Ozone project.

From the class TestSCMNodeMetrics, method testNodeReportProcessing.

/**
 * Verifies that the node-report processed counter is incremented when a
 * report from a registered datanode is handled.
 *
 * @throws InterruptedException if the test thread is interrupted
 */
@Test
public void testNodeReportProcessing() throws InterruptedException {
    // Snapshot the processed counter before submitting the report.
    long processedBefore = getCounter("NumNodeReportProcessed");
    StorageReportProto report = HddsTestUtils.createStorageReport(registeredDatanode.getUuid(), "/tmp", 100, 10, 90, null);
    NodeReportProto nodeReport = NodeReportProto.newBuilder().addStorageReport(report).build();
    nodeManager.processNodeReport(registeredDatanode, nodeReport);
    // Exactly one additional report must have been counted as processed.
    Assert.assertEquals("NumNodeReportProcessed", processedBefore + 1, getCounter("NumNodeReportProcessed"));
}
Also used : StorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto) NodeReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto) Test(org.junit.Test)

Example 3 with StorageReportProto

Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto in the Apache Ozone project.

From the class SCMNodeManager, method getNodeStatInternal.

/**
 * Aggregates all storage reports of the given datanode into a single
 * capacity/used/remaining stat.
 *
 * @param datanodeDetails datanode whose stat is requested
 * @return the aggregated stat, or {@code null} if the datanode is unknown
 *         to the node state manager
 */
private SCMNodeStat getNodeStatInternal(DatanodeDetails datanodeDetails) {
    try {
        final DatanodeInfo nodeInfo = nodeStateManager.getNode(datanodeDetails);
        final List<StorageReportProto> reports = nodeInfo.getStorageReports();
        long totalCapacity = 0L;
        long totalUsed = 0L;
        long totalRemaining = 0L;
        // Sum the per-volume figures across every storage report.
        for (StorageReportProto report : reports) {
            totalCapacity += report.getCapacity();
            totalUsed += report.getScmUsed();
            totalRemaining += report.getRemaining();
        }
        return new SCMNodeStat(totalCapacity, totalUsed, totalRemaining);
    } catch (NodeNotFoundException e) {
        // Unknown datanode: warn and signal "no stat available" to the caller.
        LOG.warn("Cannot generate NodeStat, datanode {} not found.", datanodeDetails.getUuid());
        return null;
    }
}
Also used : NodeNotFoundException(org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException) StorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto) SCMNodeStat(org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat)

Example 4 with StorageReportProto

Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto in the Apache Ozone project.

From the class SCMCommonPlacementPolicy, method hasEnoughSpace.

/**
 * Returns true if this node has enough space to meet our requirement.
 *
 * <p>A requirement of zero or less disables the corresponding check. Each
 * check passes if at least one volume reports strictly more remaining
 * space than required.
 *
 * @param datanodeDetails DatanodeDetails; must be a {@link DatanodeInfo}
 * @param metadataSizeRequired required free metadata space in bytes
 * @param dataSizeRequired required free data space in bytes
 * @return true if we have enough space.
 */
public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails, long metadataSizeRequired, long dataSizeRequired) {
    Preconditions.checkArgument(datanodeDetails instanceof DatanodeInfo);
    final DatanodeInfo node = (DatanodeInfo) datanodeDetails;

    // Data check: some data volume must have more than the required space.
    if (dataSizeRequired > 0) {
        boolean dataVolumeFound = false;
        for (StorageReportProto report : node.getStorageReports()) {
            if (report.getRemaining() > dataSizeRequired) {
                dataVolumeFound = true;
                break;
            }
        }
        if (!dataVolumeFound) {
            return false;
        }
    }

    // Metadata check: some metadata volume must have more than the
    // required space; data has already been verified at this point.
    if (metadataSizeRequired > 0) {
        for (MetadataStorageReportProto report : node.getMetadataStorageReports()) {
            if (report.getRemaining() > metadataSizeRequired) {
                return true;
            }
        }
        return false;
    }
    return true;
}
Also used : DatanodeInfo(org.apache.hadoop.hdds.scm.node.DatanodeInfo) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto) StorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto)

Example 5 with StorageReportProto

Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto in the Apache Ozone project.

From the class TestEndpoints, method setUp.

/**
 * Registers two mock datanodes (with container, pipeline and storage
 * reports) against the Recon SCM, then writes a sample volume, bucket and
 * three keys into the Recon OM metadata store so the endpoint tests have
 * data to query.
 */
@Before
public void setUp() throws Exception {
    // The following setup runs only once
    if (!isSetupDone) {
        initializeInjector();
        isSetupDone = true;
    }
    String datanodeId = datanodeDetails.getUuid().toString();
    String datanodeId2 = datanodeDetails2.getUuid().toString();
    // Container report for datanode 1: a single OPEN replica originating on it.
    containerReportsProto = ContainerReportsProto.newBuilder().addReports(ContainerReplicaProto.newBuilder().setContainerID(containerId).setState(ContainerReplicaProto.State.OPEN).setOriginNodeId(datanodeId).build()).build();
    // Pipeline report for datanode 1, marked as the pipeline leader. The
    // pipeline UUID is carried both as a string and as a 128-bit pair.
    UUID pipelineUuid = UUID.fromString(pipelineId);
    HddsProtos.UUID uuid128 = HddsProtos.UUID.newBuilder().setMostSigBits(pipelineUuid.getMostSignificantBits()).setLeastSigBits(pipelineUuid.getLeastSignificantBits()).build();
    PipelineReport pipelineReport = PipelineReport.newBuilder().setPipelineID(PipelineID.newBuilder().setId(pipelineId).setUuid128(uuid128).build()).setIsLeader(true).build();
    PipelineReportsProto pipelineReportsProto = PipelineReportsProto.newBuilder().addPipelineReport(pipelineReport).build();
    // Datanode 1 identity plus version/build metadata, and its two DISK
    // volumes (capacity/used/remaining in arbitrary test units).
    DatanodeDetailsProto datanodeDetailsProto = DatanodeDetailsProto.newBuilder().setHostName(HOST1).setUuid(datanodeId).setIpAddress(IP1).build();
    extendedDatanodeDetailsProto = HddsProtos.ExtendedDatanodeDetailsProto.newBuilder().setDatanodeDetails(datanodeDetailsProto).setVersion("0.6.0").setSetupTime(1596347628802L).setBuildDate("2020-08-01T08:50Z").setRevision("3346f493fa1690358add7bb9f3e5b52545993f36").build();
    StorageReportProto storageReportProto1 = StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK).setStorageLocation("/disk1").setScmUsed(10000).setRemaining(5400).setCapacity(25000).setStorageUuid(UUID.randomUUID().toString()).setFailed(false).build();
    StorageReportProto storageReportProto2 = StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK).setStorageLocation("/disk2").setScmUsed(25000).setRemaining(10000).setCapacity(50000).setStorageUuid(UUID.randomUUID().toString()).setFailed(false).build();
    NodeReportProto nodeReportProto = NodeReportProto.newBuilder().addStorageReport(storageReportProto1).addStorageReport(storageReportProto2).build();
    // Datanode 2 identity and its two DISK volumes.
    DatanodeDetailsProto datanodeDetailsProto2 = DatanodeDetailsProto.newBuilder().setHostName(HOST2).setUuid(datanodeId2).setIpAddress(IP2).build();
    ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto2 = ExtendedDatanodeDetailsProto.newBuilder().setDatanodeDetails(datanodeDetailsProto2).setVersion("0.6.0").setSetupTime(1596347636802L).setBuildDate("2020-08-01T08:50Z").setRevision("3346f493fa1690358add7bb9f3e5b52545993f36").build();
    StorageReportProto storageReportProto3 = StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK).setStorageLocation("/disk1").setScmUsed(20000).setRemaining(7800).setCapacity(50000).setStorageUuid(UUID.randomUUID().toString()).setFailed(false).build();
    StorageReportProto storageReportProto4 = StorageReportProto.newBuilder().setStorageType(StorageTypeProto.DISK).setStorageLocation("/disk2").setScmUsed(60000).setRemaining(10000).setCapacity(80000).setStorageUuid(UUID.randomUUID().toString()).setFailed(false).build();
    NodeReportProto nodeReportProto2 = NodeReportProto.newBuilder().addStorageReport(storageReportProto3).addStorageReport(storageReportProto4).build();
    LayoutVersionProto layoutInfo = defaultLayoutVersionProto();
    try {
        // Register both datanodes with the Recon SCM; datanode 2 sends
        // empty container and pipeline reports.
        reconScm.getDatanodeProtocolServer().register(extendedDatanodeDetailsProto, nodeReportProto, containerReportsProto, pipelineReportsProto, layoutInfo);
        reconScm.getDatanodeProtocolServer().register(extendedDatanodeDetailsProto2, nodeReportProto2, ContainerReportsProto.newBuilder().build(), PipelineReportsProto.newBuilder().build(), defaultLayoutVersionProto());
        // Process all events in the event queue
        reconScm.getEventQueue().processAll(1000);
    } catch (Exception ex) {
        // Registration problems should fail the test immediately.
        Assert.fail(ex.getMessage());
    }
    // Write Data to OM
    // A sample volume (sampleVol) and a bucket (bucketOne) is already created
    // in AbstractOMMetadataManagerTest.
    // Create a new volume and bucket and then write keys to the bucket.
    String volumeKey = reconOMMetadataManager.getVolumeKey("sampleVol2");
    OmVolumeArgs args = OmVolumeArgs.newBuilder().setVolume("sampleVol2").setAdminName("TestUser").setOwnerName("TestUser").build();
    reconOMMetadataManager.getVolumeTable().put(volumeKey, args);
    OmBucketInfo bucketInfo = OmBucketInfo.newBuilder().setVolumeName("sampleVol2").setBucketName("bucketOne").build();
    String bucketKey = reconOMMetadataManager.getBucketKey(bucketInfo.getVolumeName(), bucketInfo.getBucketName());
    reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo);
    // key = key_one
    writeDataToOm(reconOMMetadataManager, "key_one");
    // key = key_two
    writeDataToOm(reconOMMetadataManager, "key_two");
    // key = key_three
    writeDataToOm(reconOMMetadataManager, "key_three");
    // Truncate global stats table before running each test
    dslContext.truncate(GLOBAL_STATS);
}
Also used : ExtendedDatanodeDetailsProto(org.apache.hadoop.hdds.protocol.proto.HddsProtos.ExtendedDatanodeDetailsProto) OmBucketInfo(org.apache.hadoop.ozone.om.helpers.OmBucketInfo) PipelineReport(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport) LayoutVersionProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto) UpgradeUtils.defaultLayoutVersionProto(org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.defaultLayoutVersionProto) OmVolumeArgs(org.apache.hadoop.ozone.om.helpers.OmVolumeArgs) StorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) DatanodeDetailsProto(org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto) ExtendedDatanodeDetailsProto(org.apache.hadoop.hdds.protocol.proto.HddsProtos.ExtendedDatanodeDetailsProto) IOException(java.io.IOException) HddsProtos(org.apache.hadoop.hdds.protocol.proto.HddsProtos) UUID(java.util.UUID) PipelineReportsProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto) NodeReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto) Before(org.junit.Before)

Aggregations

StorageReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto)28 DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails)18 MetadataStorageReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto)17 Test (org.junit.Test)17 MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails)16 ArrayList (java.util.ArrayList)10 OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)10 UUID (java.util.UUID)9 NodeReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto)9 LayoutVersionProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto)8 DatanodeInfo (org.apache.hadoop.hdds.scm.node.DatanodeInfo)8 HDDSLayoutVersionManager (org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager)7 LayoutVersionManager (org.apache.hadoop.ozone.upgrade.LayoutVersionManager)7 MockDatanodeDetails.createDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.createDatanodeDetails)6 MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails)6 NodeManager (org.apache.hadoop.hdds.scm.node.NodeManager)5 UpgradeUtils.toLayoutVersionProto (org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto)5 IOException (java.io.IOException)4 SCMNodeMetric (org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric)4 SCMNodeStat (org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat)3