Use of org.apache.hadoop.hdds.scm.node.DatanodeInfo in project ozone by Apache.
Class SimpleMockNodeManager, method register.
/**
 * Registers a datanode with the mock manager, persisting the supplied
 * operational state onto the DatanodeDetails before tracking it.
 *
 * @param dd     the datanode to register
 * @param status the node status to record for the datanode
 */
public void register(DatanodeDetails dd, NodeStatus status) {
// Mirror the operational state and its expiry onto the details object,
// then index the node by UUID for later lookups.
dd.setPersistedOpState(status.getOperationalState());
dd.setPersistedOpStateExpiryEpochSec(status.getOpStateExpiryEpochSeconds());
DatanodeInfo info = new DatanodeInfo(dd, status, null);
nodeMap.put(dd.getUuid(), info);
}
Use of org.apache.hadoop.hdds.scm.node.DatanodeInfo in project ozone by Apache.
Class SimpleMockNodeManager, method setNodeOperationalState.
/**
 * Updates the operational state of a tracked datanode while preserving its
 * current health state.
 *
 * @param dn                  the datanode whose state is being changed
 * @param newState            the new operational state to apply
 * @param opStateExpiryEpocSec epoch-seconds at which the state expires
 * @throws NodeNotFoundException if the datanode was never registered
 */
@Override
public void setNodeOperationalState(DatanodeDetails dn, NodeOperationalState newState, long opStateExpiryEpocSec) throws NodeNotFoundException {
DatanodeInfo dni = nodeMap.get(dn.getUuid());
if (dni == null) {
// Include the node id so callers can tell which datanode was missing.
throw new NodeNotFoundException(dn.getUuidString());
}
// Only the operational state and expiry change; health is carried over.
dni.setNodeStatus(new NodeStatus(newState, dni.getNodeStatus().getHealth(), opStateExpiryEpocSec));
}
Use of org.apache.hadoop.hdds.scm.node.DatanodeInfo in project ozone by Apache.
Class TestContainerPlacementFactory, method testRackAwarePolicy.
/**
 * Verifies that the factory-created rack-aware policy places the first two
 * replicas on the same rack and the third replica on a different rack.
 */
@Test
public void testRackAwarePolicy() throws IOException {
conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementRackAware.class.getName());
conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 0, StorageUnit.MB);
NodeSchemaManager.getInstance().init(new NodeSchema[] {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA}, true);
cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance());
// Build 15 datanodes spread over 3 racks (5 per rack) and register each
// one in the network topology with a fresh, fully-free storage report.
final String rackPrefix = "/rack";
final String hostPrefix = "node";
for (int idx = 0; idx < 15; idx++) {
DatanodeDetails details = MockDatanodeDetails.createDatanodeDetails(hostPrefix + idx, rackPrefix + (idx / 5));
DatanodeInfo info = new DatanodeInfo(details, NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto());
StorageReportProto dataReport = HddsTestUtils.createStorageReport(info.getUuid(), "/data1-" + info.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
MetadataStorageReportProto metaReport = HddsTestUtils.createMetadataStorageReport("/metadata1-" + info.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
info.updateStorageReports(new ArrayList<>(Arrays.asList(dataReport)));
info.updateMetaDataStorageReports(new ArrayList<>(Arrays.asList(metaReport)));
datanodes.add(details);
cluster.add(details);
dnInfos.add(info);
}
// Overwrite usage on nodes 2..4 so they report progressively more free
// space: used 90/80/70, remaining 10/20/30.
final long[] usedBytes = {90L, 80L, 70L};
final long[] remainingBytes = {10L, 20L, 30L};
for (int i = 0; i < usedBytes.length; i++) {
DatanodeInfo info = dnInfos.get(i + 2);
StorageReportProto report = HddsTestUtils.createStorageReport(info.getUuid(), "/data1-" + info.getUuidString(), STORAGE_CAPACITY, usedBytes[i], remainingBytes[i], null);
info.updateStorageReports(new ArrayList<>(Arrays.asList(report)));
}
// Mock a node manager that serves the healthy-node list and UUID lookups.
nodeManager = Mockito.mock(NodeManager.class);
when(nodeManager.getNodes(NodeStatus.inServiceHealthy())).thenReturn(new ArrayList<>(datanodes));
for (DatanodeInfo info : dnInfos) {
when(nodeManager.getNodeByUuid(info.getUuidString())).thenReturn(info);
}
PlacementPolicy policy = ContainerPlacementPolicyFactory.getPolicy(conf, nodeManager, cluster, true, SCMContainerPlacementMetrics.create());
// Choose nodes for 3 replicas and check the rack-aware pattern.
final int replicaCount = 3;
List<DatanodeDetails> chosen = policy.chooseDatanodes(null, null, replicaCount, 15, 15);
Assertions.assertEquals(replicaCount, chosen.size());
Assertions.assertTrue(cluster.isSameParent(chosen.get(0), chosen.get(1)));
Assertions.assertFalse(cluster.isSameParent(chosen.get(0), chosen.get(2)));
Assertions.assertFalse(cluster.isSameParent(chosen.get(1), chosen.get(2)));
}
Use of org.apache.hadoop.hdds.scm.node.DatanodeInfo in project ozone by Apache.
Class TestSCMContainerPlacementRackAware, method testOutOfServiceNodesNotSelected.
/**
 * Verifies that datanodes that are out of service are never selected: with
 * every node DECOMMISSIONED except one randomly chosen IN_SERVICE node, any
 * successful placement must return exactly that node.
 *
 * @param datanodeCount number of datanodes to create for this run
 */
@ParameterizedTest
@MethodSource("numDatanodes")
public void testOutOfServiceNodesNotSelected(int datanodeCount) {
setup(datanodeCount);
// Take every node out of service so nothing is eligible for placement.
for (DatanodeInfo dn : dnInfos) {
dn.setNodeStatus(new NodeStatus(DECOMMISSIONED, HEALTHY));
}
// Create the Random once instead of once per iteration: cheaper, and avoids
// same-millisecond seeded instances producing correlated draws.
Random random = new Random();
for (int i = 0; i < 10; i++) {
// Flip one random node to IN_SERVICE and ensure it is always picked.
int index = random.nextInt(dnInfos.size());
dnInfos.get(index).setNodeStatus(NodeStatus.inServiceHealthy());
try {
List<DatanodeDetails> datanodeDetails = policy.chooseDatanodes(null, null, 1, 0, 0);
Assertions.assertEquals(dnInfos.get(index), datanodeDetails.get(0));
} catch (SCMException ignored) {
// If we get SCMException: No satisfied datanode to meet the ... this is
// ok, as there is only 1 IN_SERVICE node and with the retry logic we
// may never find it.
}
// Restore the node to out-of-service before the next round.
dnInfos.get(index).setNodeStatus(new NodeStatus(DECOMMISSIONED, HEALTHY));
}
}
Use of org.apache.hadoop.hdds.scm.node.DatanodeInfo in project ozone by Apache.
Class TestSCMContainerPlacementRackAware, method testDatanodeWithDefaultNetworkLocation.
/**
 * Verifies placement when datanodes register with no network location: all
 * nodes fall back to the default rack, so every chosen replica pair must
 * share the same (default-rack) parent.
 *
 * @param datanodeCount number of datanodes created by setup() for this run
 * @throws SCMException if the policy cannot satisfy the placement request
 */
@ParameterizedTest
@MethodSource("numDatanodes")
public void testDatanodeWithDefaultNetworkLocation(int datanodeCount) throws SCMException {
setup(datanodeCount);
String hostname = "node";
List<DatanodeInfo> dnInfoList = new ArrayList<>();
List<DatanodeDetails> dataList = new ArrayList<>();
NetworkTopology clusterMap = new NetworkTopologyImpl(NodeSchemaManager.getInstance());
for (int i = 0; i < 15; i++) {
// Location is null, so each node lands under the default rack.
DatanodeDetails dn = MockDatanodeDetails.createDatanodeDetails(hostname + i, null);
DatanodeInfo dnInfo = new DatanodeInfo(dn, NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto());
StorageReportProto storage1 = HddsTestUtils.createStorageReport(dnInfo.getUuid(), "/data1-" + dnInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
MetadataStorageReportProto metaStorage1 = HddsTestUtils.createMetadataStorageReport("/metadata1-" + dnInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
dnInfo.updateStorageReports(new ArrayList<>(Arrays.asList(storage1)));
dnInfo.updateMetaDataStorageReports(new ArrayList<>(Arrays.asList(metaStorage1)));
dataList.add(dn);
clusterMap.add(dn);
dnInfoList.add(dnInfo);
}
// Every node should have been placed under NetConstants.DEFAULT_RACK.
Assertions.assertEquals(dataList.size(), StringUtils.countMatches(clusterMap.toString(), NetConstants.DEFAULT_RACK));
for (DatanodeInfo dn : dnInfoList) {
when(nodeManager.getNodeByUuid(dn.getUuidString())).thenReturn(dn);
}
// choose nodes to host 3 replica
int nodeNum = 3;
SCMContainerPlacementRackAware newPolicy = new SCMContainerPlacementRackAware(nodeManager, conf, clusterMap, true, metrics);
List<DatanodeDetails> datanodeDetails = newPolicy.chooseDatanodes(null, null, nodeNum, 0, 15);
Assertions.assertEquals(nodeNum, datanodeDetails.size());
// Fix: assert against clusterMap — the topology these nodes were actually
// added to — rather than the outer 'cluster' built by setup(), which never
// contains them. All default-rack nodes must share the same parent.
Assertions.assertTrue(clusterMap.isSameParent(datanodeDetails.get(0), datanodeDetails.get(1)));
Assertions.assertTrue(clusterMap.isSameParent(datanodeDetails.get(0), datanodeDetails.get(2)));
Assertions.assertTrue(clusterMap.isSameParent(datanodeDetails.get(1), datanodeDetails.get(2)));
}
Aggregations