Search in sources :

Example 6 with DatanodeInfo

Use of org.apache.hadoop.hdds.scm.node.DatanodeInfo in the Apache Ozone project.

Class SimpleMockNodeManager, method register.

/**
 * Registers a datanode with this mock manager under the given status,
 * indexing it by UUID.
 *
 * @param dd     the datanode to register; its persisted operational state and
 *               expiry are overwritten from {@code status}
 * @param status the node status to record for the datanode
 */
public void register(DatanodeDetails dd, NodeStatus status) {
    // Persist the operational state/expiry onto the details object first, so
    // the DatanodeInfo constructed below wraps the updated details.
    dd.setPersistedOpState(status.getOperationalState());
    dd.setPersistedOpStateExpiryEpochSec(status.getOpStateExpiryEpochSeconds());
    // Third constructor argument (layout version at other call sites) is not
    // tracked by this mock, hence null.
    nodeMap.put(dd.getUuid(), new DatanodeInfo(dd, status, null));
}
Also used : DatanodeInfo(org.apache.hadoop.hdds.scm.node.DatanodeInfo)

Example 7 with DatanodeInfo

Use of org.apache.hadoop.hdds.scm.node.DatanodeInfo in the Apache Ozone project.

Class SimpleMockNodeManager, method setNodeOperationalState.

/**
 * Replaces the operational state of a registered datanode while preserving
 * its current health state.
 *
 * @param dn                   datanode whose operational state is updated
 * @param newState             the new operational state to apply
 * @param opStateExpiryEpocSec epoch-seconds at which the new state expires
 * @throws NodeNotFoundException if the datanode was never registered
 */
@Override
public void setNodeOperationalState(DatanodeDetails dn, NodeOperationalState newState, long opStateExpiryEpocSec) throws NodeNotFoundException {
    DatanodeInfo dni = nodeMap.get(dn.getUuid());
    if (dni == null) {
        throw new NodeNotFoundException();
    }
    // Keep the existing health, swap only the operational state and expiry.
    dni.setNodeStatus(new NodeStatus(newState, dni.getNodeStatus().getHealth(), opStateExpiryEpocSec));
}
Also used : DatanodeInfo(org.apache.hadoop.hdds.scm.node.DatanodeInfo) NodeNotFoundException(org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException) NodeStatus(org.apache.hadoop.hdds.scm.node.NodeStatus)

Example 8 with DatanodeInfo

Use of org.apache.hadoop.hdds.scm.node.DatanodeInfo in the Apache Ozone project.

Class TestContainerPlacementFactory, method testRackAwarePolicy.

/**
 * Verifies that {@code ContainerPlacementPolicyFactory} instantiates the
 * rack-aware policy from configuration and that the chosen replicas follow
 * the rack-aware rule: two replicas share a rack, the third is elsewhere.
 *
 * @throws IOException if the policy cannot choose enough datanodes
 */
@Test
public void testRackAwarePolicy() throws IOException {
    // Select the rack-aware implementation and zero out the free-space floor
    // so placement is not constrained by volume free space.
    conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementRackAware.class.getName());
    conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 0, StorageUnit.MB);
    // Three-level topology: root -> rack -> leaf (datanode).
    NodeSchema[] schemas = new NodeSchema[] { ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA };
    NodeSchemaManager.getInstance().init(schemas, true);
    cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance());
    // build datanodes, and network topology
    String rack = "/rack";
    String hostname = "node";
    for (int i = 0; i < 15; i++) {
        // Totally 3 racks, each has 5 datanodes
        DatanodeDetails datanodeDetails = MockDatanodeDetails.createDatanodeDetails(hostname + i, rack + (i / 5));
        DatanodeInfo datanodeInfo = new DatanodeInfo(datanodeDetails, NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto());
        // One data volume and one metadata volume per node.
        StorageReportProto storage1 = HddsTestUtils.createStorageReport(datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
        MetadataStorageReportProto metaStorage1 = HddsTestUtils.createMetadataStorageReport("/metadata1-" + datanodeInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
        datanodeInfo.updateStorageReports(new ArrayList<>(Arrays.asList(storage1)));
        datanodeInfo.updateMetaDataStorageReports(new ArrayList<>(Arrays.asList(metaStorage1)));
        datanodes.add(datanodeDetails);
        cluster.add(datanodeDetails);
        dnInfos.add(datanodeInfo);
    }
    // Overwrite reports for nodes 2-4 with varying usage figures
    // (presumably used/remaining — confirm createStorageReport's arg order).
    StorageReportProto storage2 = HddsTestUtils.createStorageReport(dnInfos.get(2).getUuid(), "/data1-" + dnInfos.get(2).getUuidString(), STORAGE_CAPACITY, 90L, 10L, null);
    dnInfos.get(2).updateStorageReports(new ArrayList<>(Arrays.asList(storage2)));
    StorageReportProto storage3 = HddsTestUtils.createStorageReport(dnInfos.get(3).getUuid(), "/data1-" + dnInfos.get(3).getUuidString(), STORAGE_CAPACITY, 80L, 20L, null);
    dnInfos.get(3).updateStorageReports(new ArrayList<>(Arrays.asList(storage3)));
    StorageReportProto storage4 = HddsTestUtils.createStorageReport(dnInfos.get(4).getUuid(), "/data1-" + dnInfos.get(4).getUuidString(), STORAGE_CAPACITY, 70L, 30L, null);
    dnInfos.get(4).updateStorageReports(new ArrayList<>(Arrays.asList(storage4)));
    // create mock node manager
    nodeManager = Mockito.mock(NodeManager.class);
    when(nodeManager.getNodes(NodeStatus.inServiceHealthy())).thenReturn(new ArrayList<>(datanodes));
    for (DatanodeInfo dn : dnInfos) {
        when(nodeManager.getNodeByUuid(dn.getUuidString())).thenReturn(dn);
    }
    // The factory must return the rack-aware policy configured above.
    PlacementPolicy policy = ContainerPlacementPolicyFactory.getPolicy(conf, nodeManager, cluster, true, SCMContainerPlacementMetrics.create());
    int nodeNum = 3;
    List<DatanodeDetails> datanodeDetails = policy.chooseDatanodes(null, null, nodeNum, 15, 15);
    Assertions.assertEquals(nodeNum, datanodeDetails.size());
    // Rack-aware invariant: replicas 0 and 1 share a rack, replica 2 does not.
    Assertions.assertTrue(cluster.isSameParent(datanodeDetails.get(0), datanodeDetails.get(1)));
    Assertions.assertFalse(cluster.isSameParent(datanodeDetails.get(0), datanodeDetails.get(2)));
    Assertions.assertFalse(cluster.isSameParent(datanodeDetails.get(1), datanodeDetails.get(2)));
}
Also used : DatanodeInfo(org.apache.hadoop.hdds.scm.node.DatanodeInfo) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto) StorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto) NetworkTopologyImpl(org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl) NodeManager(org.apache.hadoop.hdds.scm.node.NodeManager) MockDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) PlacementPolicy(org.apache.hadoop.hdds.scm.PlacementPolicy) NodeSchema(org.apache.hadoop.hdds.scm.net.NodeSchema) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto) Test(org.junit.jupiter.api.Test)

Example 9 with DatanodeInfo

Use of org.apache.hadoop.hdds.scm.node.DatanodeInfo in the Apache Ozone project.

Class TestSCMContainerPlacementRackAware, method testOutOfServiceNodesNotSelected.

/**
 * Verifies that datanodes not IN_SERVICE are never selected: with every node
 * decommissioned except one randomly chosen IN_SERVICE node, the policy must
 * pick exactly that node (or fail with SCMException when retries exhaust).
 *
 * @param datanodeCount number of datanodes to build in {@code setup}
 */
@ParameterizedTest
@MethodSource("numDatanodes")
public void testOutOfServiceNodesNotSelected(int datanodeCount) {
    setup(datanodeCount);
    // Set all the nodes to out of service
    for (DatanodeInfo dn : dnInfos) {
        dn.setNodeStatus(new NodeStatus(DECOMMISSIONED, HEALTHY));
    }
    // Reuse one Random for the whole test rather than constructing a new
    // instance on every iteration.
    Random random = new Random();
    for (int i = 0; i < 10; i++) {
        // Set a random DN to in_service and ensure it is always picked
        int index = random.nextInt(dnInfos.size());
        dnInfos.get(index).setNodeStatus(NodeStatus.inServiceHealthy());
        try {
            List<DatanodeDetails> datanodeDetails = policy.chooseDatanodes(null, null, 1, 0, 0);
            Assertions.assertEquals(dnInfos.get(index), datanodeDetails.get(0));
        } catch (SCMException e) {
        // If we get SCMException: No satisfied datanode to meet the ... this is
        // ok, as there is only 1 IN_SERVICE node and with the retry logic we
        // may never find it.
        }
        // Revert the node before the next iteration picks a new one.
        dnInfos.get(index).setNodeStatus(new NodeStatus(DECOMMISSIONED, HEALTHY));
    }
}
Also used : DatanodeInfo(org.apache.hadoop.hdds.scm.node.DatanodeInfo) Random(java.util.Random) MockDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) NodeStatus(org.apache.hadoop.hdds.scm.node.NodeStatus) SCMException(org.apache.hadoop.hdds.scm.exceptions.SCMException) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) MethodSource(org.junit.jupiter.params.provider.MethodSource)

Example 10 with DatanodeInfo

Use of org.apache.hadoop.hdds.scm.node.DatanodeInfo in the Apache Ozone project.

Class TestSCMContainerPlacementRackAware, method testDatanodeWithDefaultNetworkLocation.

/**
 * Verifies that datanodes created with a {@code null} network location fall
 * back to {@code NetConstants.DEFAULT_RACK}, and that placement over such a
 * single-rack topology puts all chosen replicas under the same parent.
 *
 * @param datanodeCount number of datanodes to build in {@code setup}
 * @throws SCMException if the policy cannot choose enough datanodes
 */
@ParameterizedTest
@MethodSource("numDatanodes")
public void testDatanodeWithDefaultNetworkLocation(int datanodeCount) throws SCMException {
    setup(datanodeCount);
    String hostname = "node";
    List<DatanodeInfo> dnInfoList = new ArrayList<>();
    List<DatanodeDetails> dataList = new ArrayList<>();
    // Fresh topology for this test, separate from the one built by setup().
    NetworkTopology clusterMap = new NetworkTopologyImpl(NodeSchemaManager.getInstance());
    for (int i = 0; i < 15; i++) {
        // 15 datanodes, each with a null location so all of them should be
        // assigned to the default rack.
        DatanodeDetails dn = MockDatanodeDetails.createDatanodeDetails(hostname + i, null);
        DatanodeInfo dnInfo = new DatanodeInfo(dn, NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto());
        // One data volume and one metadata volume per node.
        StorageReportProto storage1 = HddsTestUtils.createStorageReport(dnInfo.getUuid(), "/data1-" + dnInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
        MetadataStorageReportProto metaStorage1 = HddsTestUtils.createMetadataStorageReport("/metadata1-" + dnInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
        dnInfo.updateStorageReports(new ArrayList<>(Arrays.asList(storage1)));
        dnInfo.updateMetaDataStorageReports(new ArrayList<>(Arrays.asList(metaStorage1)));
        dataList.add(dn);
        clusterMap.add(dn);
        dnInfoList.add(dnInfo);
    }
    // Every node added must appear under the default rack in the topology.
    Assertions.assertEquals(dataList.size(), StringUtils.countMatches(clusterMap.toString(), NetConstants.DEFAULT_RACK));
    for (DatanodeInfo dn : dnInfoList) {
        when(nodeManager.getNodeByUuid(dn.getUuidString())).thenReturn(dn);
    }
    // choose nodes to host 3 replica
    int nodeNum = 3;
    SCMContainerPlacementRackAware newPolicy = new SCMContainerPlacementRackAware(nodeManager, conf, clusterMap, true, metrics);
    List<DatanodeDetails> datanodeDetails = newPolicy.chooseDatanodes(null, null, nodeNum, 0, 15);
    Assertions.assertEquals(nodeNum, datanodeDetails.size());
    // NOTE(review): these assertions call isSameParent on `cluster` (built in
    // setup()) rather than the local `clusterMap` the policy used — presumably
    // equivalent if parent links live on the node objects; confirm intent.
    Assertions.assertTrue(cluster.isSameParent(datanodeDetails.get(0), datanodeDetails.get(1)));
    Assertions.assertTrue(cluster.isSameParent(datanodeDetails.get(0), datanodeDetails.get(2)));
    Assertions.assertTrue(cluster.isSameParent(datanodeDetails.get(1), datanodeDetails.get(2)));
}
Also used : DatanodeInfo(org.apache.hadoop.hdds.scm.node.DatanodeInfo) ArrayList(java.util.ArrayList) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto) StorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto) NetworkTopologyImpl(org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl) MockDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) NetworkTopology(org.apache.hadoop.hdds.scm.net.NetworkTopology) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) MethodSource(org.junit.jupiter.params.provider.MethodSource)

Aggregations

DatanodeInfo (org.apache.hadoop.hdds.scm.node.DatanodeInfo)22 DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails)14 MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails)12 MetadataStorageReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto)12 StorageReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto)12 ArrayList (java.util.ArrayList)11 OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)7 NetworkTopologyImpl (org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl)7 NodeManager (org.apache.hadoop.hdds.scm.node.NodeManager)7 NodeStatus (org.apache.hadoop.hdds.scm.node.NodeStatus)5 NodeSchema (org.apache.hadoop.hdds.scm.net.NodeSchema)4 Test (org.junit.jupiter.api.Test)4 NetworkTopology (org.apache.hadoop.hdds.scm.net.NetworkTopology)3 Test (org.junit.Test)3 ParameterizedTest (org.junit.jupiter.params.ParameterizedTest)3 MethodSource (org.junit.jupiter.params.provider.MethodSource)3 Random (java.util.Random)2 SCMException (org.apache.hadoop.hdds.scm.exceptions.SCMException)2 NodeNotFoundException (org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException)2 IOException (java.io.IOException)1