Search in sources:

Example 1 with MetadataStorageReportProto

use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto in project ozone by apache.

From class SCMCommonPlacementPolicy, method hasEnoughSpace.

/**
 * Returns true if this node has enough space to meet our requirement.
 *
 * @param datanodeDetails      DatanodeDetails; must actually be a
 *                             DatanodeInfo so its storage reports can be
 *                             inspected (enforced by the precondition).
 * @param metadataSizeRequired metadata bytes required on a single metadata
 *                             volume; a non-positive value disables the check.
 * @param dataSizeRequired     data bytes required on a single data volume;
 *                             a non-positive value disables the check.
 * @return true if we have enough space.
 */
public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails, long metadataSizeRequired, long dataSizeRequired) {
    Preconditions.checkArgument(datanodeDetails instanceof DatanodeInfo);
    DatanodeInfo datanodeInfo = (DatanodeInfo) datanodeDetails;
    // Data check: at least one data volume must report strictly more
    // remaining space than required.
    if (dataSizeRequired > 0) {
        boolean enoughForData = false;
        for (StorageReportProto reportProto : datanodeInfo.getStorageReports()) {
            if (reportProto.getRemaining() > dataSizeRequired) {
                enoughForData = true;
                break;
            }
        }
        if (!enoughForData) {
            return false;
        }
    }
    // Metadata check: same rule against the metadata volume reports.
    // (The original kept a redundant enoughForData flag and returned
    // "enoughForData && enoughForMeta"; by this point the data requirement
    // is already known to be satisfied, so only the metadata result matters.)
    if (metadataSizeRequired > 0) {
        for (MetadataStorageReportProto reportProto : datanodeInfo.getMetadataStorageReports()) {
            if (reportProto.getRemaining() > metadataSizeRequired) {
                return true;
            }
        }
        return false;
    }
    return true;
}
Also used : DatanodeInfo(org.apache.hadoop.hdds.scm.node.DatanodeInfo) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto) StorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto)

Example 2 with MetadataStorageReportProto

use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto in project ozone by apache.

From class TestContainerPlacementFactory, method testRackAwarePolicy.

@Test
public void testRackAwarePolicy() throws IOException {
    conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementRackAware.class.getName());
    conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 0, StorageUnit.MB);
    NodeSchema[] schemas = new NodeSchema[] { ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA };
    NodeSchemaManager.getInstance().init(schemas, true);
    cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance());
    // Build 15 datanodes spread over 3 racks (5 per rack) and register each
    // one with the network topology.
    String rack = "/rack";
    String hostname = "node";
    for (int idx = 0; idx < 15; idx++) {
        DatanodeDetails details = MockDatanodeDetails.createDatanodeDetails(hostname + idx, rack + (idx / 5));
        DatanodeInfo info = new DatanodeInfo(details, NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto());
        StorageReportProto dataReport = HddsTestUtils.createStorageReport(info.getUuid(), "/data1-" + info.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
        MetadataStorageReportProto metaReport = HddsTestUtils.createMetadataStorageReport("/metadata1-" + info.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
        info.updateStorageReports(new ArrayList<>(Arrays.asList(dataReport)));
        info.updateMetaDataStorageReports(new ArrayList<>(Arrays.asList(metaReport)));
        datanodes.add(details);
        cluster.add(details);
        dnInfos.add(info);
    }
    // Override usage on nodes 2..4: used climbs to 90/80/70 and remaining
    // drops to 10/20/30 respectively.
    long[] usedOverride = { 90L, 80L, 70L };
    long[] remainingOverride = { 10L, 20L, 30L };
    for (int idx = 2; idx <= 4; idx++) {
        DatanodeInfo info = dnInfos.get(idx);
        StorageReportProto override = HddsTestUtils.createStorageReport(info.getUuid(), "/data1-" + info.getUuidString(), STORAGE_CAPACITY, usedOverride[idx - 2], remainingOverride[idx - 2], null);
        info.updateStorageReports(new ArrayList<>(Arrays.asList(override)));
    }
    // Mock a node manager serving the healthy node list and per-UUID lookups.
    nodeManager = Mockito.mock(NodeManager.class);
    when(nodeManager.getNodes(NodeStatus.inServiceHealthy())).thenReturn(new ArrayList<>(datanodes));
    for (DatanodeInfo info : dnInfos) {
        when(nodeManager.getNodeByUuid(info.getUuidString())).thenReturn(info);
    }
    PlacementPolicy policy = ContainerPlacementPolicyFactory.getPolicy(conf, nodeManager, cluster, true, SCMContainerPlacementMetrics.create());
    int nodeNum = 3;
    List<DatanodeDetails> chosen = policy.chooseDatanodes(null, null, nodeNum, 15, 15);
    // Rack-aware placement contract: the first two replicas share a rack,
    // the third lands on a different rack.
    Assert.assertEquals(nodeNum, chosen.size());
    Assert.assertTrue(cluster.isSameParent(chosen.get(0), chosen.get(1)));
    Assert.assertFalse(cluster.isSameParent(chosen.get(0), chosen.get(2)));
    Assert.assertFalse(cluster.isSameParent(chosen.get(1), chosen.get(2)));
}
Also used : DatanodeInfo(org.apache.hadoop.hdds.scm.node.DatanodeInfo) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto) StorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto) NetworkTopologyImpl(org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl) NodeManager(org.apache.hadoop.hdds.scm.node.NodeManager) MockDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) PlacementPolicy(org.apache.hadoop.hdds.scm.PlacementPolicy) NodeSchema(org.apache.hadoop.hdds.scm.net.NodeSchema) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto) Test(org.junit.Test)

Example 3 with MetadataStorageReportProto

use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto in project ozone by apache.

From class TestNodeReportHandler, method testNodeReport.

@Test
public void testNodeReport() throws IOException {
    DatanodeDetails datanode = MockDatanodeDetails.randomDatanodeDetails();
    StorageReportProto firstReport = HddsTestUtils.createStorageReport(datanode.getUuid(), storagePath, 100, 10, 90, null);
    MetadataStorageReportProto metaReport = HddsTestUtils.createMetadataStorageReport(metaStoragePath, 100, 10, 90, null);
    // Before registration the node manager has no stats for this node.
    SCMNodeMetric metric = nodeManager.getNodeStat(datanode);
    Assert.assertNull(metric);
    // Register with one storage volume: 100 capacity / 10 used / 90 remaining.
    nodeManager.register(datanode, getNodeReport(datanode, Arrays.asList(firstReport), Arrays.asList(metaReport)).getReport(), null);
    metric = nodeManager.getNodeStat(datanode);
    Assert.assertTrue(metric.get().getCapacity().get() == 100);
    Assert.assertTrue(metric.get().getRemaining().get() == 90);
    Assert.assertTrue(metric.get().getScmUsed().get() == 10);
    // A follow-up node report carrying two identical storage volumes should
    // double every aggregate stat.
    StorageReportProto secondReport = HddsTestUtils.createStorageReport(datanode.getUuid(), storagePath, 100, 10, 90, null);
    nodeReportHandler.onMessage(getNodeReport(datanode, Arrays.asList(firstReport, secondReport), Arrays.asList(metaReport)), this);
    metric = nodeManager.getNodeStat(datanode);
    Assert.assertTrue(metric.get().getCapacity().get() == 200);
    Assert.assertTrue(metric.get().getRemaining().get() == 180);
    Assert.assertTrue(metric.get().getScmUsed().get() == 20);
}
Also used : MockDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto) StorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto) SCMNodeMetric(org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto) Test(org.junit.Test)

Example 4 with MetadataStorageReportProto

use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto in project ozone by apache.

From class TestSCMContainerPlacementRackAware, method setup.

@Before
public void setup() {
    // Initialize configuration and the network topology under test.
    conf = new OzoneConfiguration();
    // Tiny storage units keep the free-space arithmetic in this test simple.
    conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 1, StorageUnit.BYTES);
    NodeSchema[] schemas = new NodeSchema[] { ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA };
    NodeSchemaManager.getInstance().init(schemas, true);
    cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance());
    // Build datanodeCount datanodes, NODE_PER_RACK per rack, each with one
    // fully-free data volume and one fully-free metadata volume.
    String rack = "/rack";
    String hostname = "node";
    for (int idx = 0; idx < datanodeCount; idx++) {
        DatanodeDetails details = MockDatanodeDetails.createDatanodeDetails(hostname + idx, rack + (idx / NODE_PER_RACK));
        DatanodeInfo info = new DatanodeInfo(details, NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto());
        StorageReportProto dataReport = HddsTestUtils.createStorageReport(info.getUuid(), "/data1-" + info.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
        MetadataStorageReportProto metaReport = HddsTestUtils.createMetadataStorageReport("/metadata1-" + info.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
        info.updateStorageReports(new ArrayList<>(Arrays.asList(dataReport)));
        info.updateMetaDataStorageReports(new ArrayList<>(Arrays.asList(metaReport)));
        datanodes.add(details);
        cluster.add(details);
        dnInfos.add(info);
    }
    // Override usage on a few nodes; which nodes, and with what values,
    // depends on the cluster size for this parameterized run.
    if (datanodeCount > 4) {
        // NOTE(review): this first override derives its path from
        // datanodes.get(2) while every other override uses dnInfos —
        // presumably the same UUID, but confirm DatanodeInfo preserves it.
        StorageReportProto overrideTwo = HddsTestUtils.createStorageReport(dnInfos.get(2).getUuid(), "/data1-" + datanodes.get(2).getUuidString(), STORAGE_CAPACITY, 90L, 10L, null);
        dnInfos.get(2).updateStorageReports(new ArrayList<>(Arrays.asList(overrideTwo)));
        StorageReportProto overrideThree = HddsTestUtils.createStorageReport(dnInfos.get(3).getUuid(), "/data1-" + dnInfos.get(3).getUuidString(), STORAGE_CAPACITY, 80L, 20L, null);
        dnInfos.get(3).updateStorageReports(new ArrayList<>(Arrays.asList(overrideThree)));
        StorageReportProto overrideFour = HddsTestUtils.createStorageReport(dnInfos.get(4).getUuid(), "/data1-" + dnInfos.get(4).getUuidString(), STORAGE_CAPACITY, 70L, 30L, null);
        dnInfos.get(4).updateStorageReports(new ArrayList<>(Arrays.asList(overrideFour)));
    } else if (datanodeCount > 3) {
        StorageReportProto overrideTwo = HddsTestUtils.createStorageReport(dnInfos.get(2).getUuid(), "/data1-" + dnInfos.get(2).getUuidString(), STORAGE_CAPACITY, 90L, 10L, null);
        dnInfos.get(2).updateStorageReports(new ArrayList<>(Arrays.asList(overrideTwo)));
        StorageReportProto overrideThree = HddsTestUtils.createStorageReport(dnInfos.get(3).getUuid(), "/data1-" + dnInfos.get(3).getUuidString(), STORAGE_CAPACITY, 80L, 20L, null);
        dnInfos.get(3).updateStorageReports(new ArrayList<>(Arrays.asList(overrideThree)));
    } else if (datanodeCount > 2) {
        StorageReportProto overrideTwo = HddsTestUtils.createStorageReport(dnInfos.get(2).getUuid(), "/data1-" + dnInfos.get(2).getUuidString(), STORAGE_CAPACITY, 84L, 16L, null);
        dnInfos.get(2).updateStorageReports(new ArrayList<>(Arrays.asList(overrideTwo)));
    }
    // Mock a node manager serving the healthy node list, per-UUID lookups,
    // and the cluster topology.
    nodeManager = Mockito.mock(NodeManager.class);
    when(nodeManager.getNodes(NodeStatus.inServiceHealthy())).thenReturn(new ArrayList<>(datanodes));
    for (DatanodeInfo info : dnInfos) {
        when(nodeManager.getNodeByUuid(info.getUuidString())).thenReturn(info);
    }
    when(nodeManager.getClusterNetworkTopologyMap()).thenReturn(cluster);
    // Placement policies under test: one with rack fallback, one without.
    metrics = SCMContainerPlacementMetrics.create();
    policy = new SCMContainerPlacementRackAware(nodeManager, conf, cluster, true, metrics);
    policyNoFallback = new SCMContainerPlacementRackAware(nodeManager, conf, cluster, false, metrics);
}
Also used : DatanodeInfo(org.apache.hadoop.hdds.scm.node.DatanodeInfo) ArrayList(java.util.ArrayList) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto) StorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto) NetworkTopologyImpl(org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl) NodeManager(org.apache.hadoop.hdds.scm.node.NodeManager) MockDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) NodeSchema(org.apache.hadoop.hdds.scm.net.NodeSchema) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto) Before(org.junit.Before)

Example 5 with MetadataStorageReportProto

use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto in project ozone by apache.

From class HddsTestUtils, method createMetadataStorageReport.

/**
 * Creates metadata storage report with the given information.
 *
 * @param path      storage dir; must not be null
 * @param capacity  storage size
 * @param used      space used
 * @param remaining space remaining
 * @param type      type of storage; when null, defaults to DISK
 * @param failed    whether the metadata volume is marked as failed
 *
 * @return MetadataStorageReportProto
 */
public static MetadataStorageReportProto createMetadataStorageReport(String path, long capacity, long used, long remaining, StorageTypeProto type, boolean failed) {
    Preconditions.checkNotNull(path);
    MetadataStorageReportProto.Builder srb = MetadataStorageReportProto.newBuilder();
    srb.setStorageLocation(path).setCapacity(capacity).setScmUsed(used).setFailed(failed).setRemaining(remaining);
    // Substitute DISK when no storage type was supplied.
    StorageTypeProto storageTypeProto = type == null ? StorageTypeProto.DISK : type;
    srb.setStorageType(storageTypeProto);
    return srb.build();
}
Also used : StorageTypeProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto)

Aggregations

MetadataStorageReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto)12 StorageReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto)11 DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails)9 MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails)9 DatanodeInfo (org.apache.hadoop.hdds.scm.node.DatanodeInfo)8 Test (org.junit.Test)7 ArrayList (java.util.ArrayList)6 NodeManager (org.apache.hadoop.hdds.scm.node.NodeManager)5 OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)4 NetworkTopologyImpl (org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl)3 SCMNodeMetric (org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric)2 NodeSchema (org.apache.hadoop.hdds.scm.net.NodeSchema)2 HashMap (java.util.HashMap)1 MockDatanodeDetails.createDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.createDatanodeDetails)1 MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails)1 StorageTypeProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto)1 PlacementPolicy (org.apache.hadoop.hdds.scm.PlacementPolicy)1 ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo)1 ContainerReplica (org.apache.hadoop.hdds.scm.container.ContainerReplica)1 NetworkTopology (org.apache.hadoop.hdds.scm.net.NetworkTopology)1