Use of org.apache.hadoop.hdds.scm.net.NetworkTopology in the Apache Ozone project.
From the class TestCommandQueueReportHandler, method resetEventCollector:
@BeforeEach
public void resetEventCollector() throws IOException {
  // Fresh configuration plus a mocked storage config reporting a fixed cluster id.
  final OzoneConfiguration configuration = new OzoneConfiguration();
  final SCMStorageConfig scmStorageConfig = Mockito.mock(SCMStorageConfig.class);
  Mockito.when(scmStorageConfig.getClusterID()).thenReturn("cluster1");

  // The layout version manager always reports the latest metadata/software layout.
  this.versionManager = Mockito.mock(HDDSLayoutVersionManager.class);
  Mockito.when(versionManager.getMetadataLayoutVersion()).thenReturn(maxLayoutVersion());
  Mockito.when(versionManager.getSoftwareLayoutVersion()).thenReturn(maxLayoutVersion());

  // Build a real node manager on an empty topology and wire the handler under test.
  final NetworkTopology topology = new NetworkTopologyImpl(configuration);
  nodeManager = new SCMNodeManager(configuration, scmStorageConfig, new EventQueue(),
      topology, SCMContext.emptyContext(), versionManager);
  commandQueueReportHandler = new CommandQueueReportHandler(nodeManager);
}
Use of org.apache.hadoop.hdds.scm.net.NetworkTopology in the Apache Ozone project.
From the class TestIncrementalContainerReportHandler, method setup:
@BeforeEach
public void setup() throws IOException, InvalidStateTransitionException {
  final OzoneConfiguration conf = new OzoneConfiguration();
  // Point SCM metadata at a unique temporary directory for this test run.
  final String tempRoot = GenericTestUtils.getTempPath(UUID.randomUUID().toString());
  final Path scmMetaDir = Paths.get(tempRoot, "scm-meta");
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmMetaDir.toString());

  this.containerManager = Mockito.mock(ContainerManager.class);
  final NetworkTopology topology = new NetworkTopologyImpl(conf);
  final EventQueue queue = new EventQueue();
  final SCMStorageConfig storageConfig = new SCMStorageConfig(conf);

  // The layout version manager always reports the latest metadata/software layout.
  this.versionManager = Mockito.mock(HDDSLayoutVersionManager.class);
  Mockito.when(versionManager.getMetadataLayoutVersion()).thenReturn(maxLayoutVersion());
  Mockito.when(versionManager.getSoftwareLayoutVersion()).thenReturn(maxLayoutVersion());

  this.nodeManager = new SCMNodeManager(conf, storageConfig, queue, topology,
      scmContext, versionManager);
  scmhaManager = SCMHAManagerStub.getInstance(true);
  testDir = GenericTestUtils.getTestDir(
      TestContainerManagerImpl.class.getSimpleName() + UUID.randomUUID());
  dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition());
  pipelineManager = new MockPipelineManager(dbStore, scmhaManager, nodeManager);
  this.containerStateManager = ContainerStateManagerImpl.newBuilder()
      .setConfiguration(conf)
      .setPipelineManager(pipelineManager)
      .setRatisServer(scmhaManager.getRatisServer())
      .setContainerStore(SCMDBDefinition.CONTAINERS.getTable(dbStore))
      .setSCMDBTransactionBuffer(scmhaManager.getDBTransactionBuffer())
      .build();
  this.publisher = Mockito.mock(EventPublisher.class);

  // Delegate every container lookup and mutation on the mocked ContainerManager
  // to the real ContainerStateManager built above.
  Mockito.when(containerManager.getContainer(Mockito.any(ContainerID.class)))
      .thenAnswer(invocation -> containerStateManager
          .getContainer((ContainerID) invocation.getArguments()[0]));
  Mockito.when(containerManager.getContainerReplicas(Mockito.any(ContainerID.class)))
      .thenAnswer(invocation -> containerStateManager
          .getContainerReplicas((ContainerID) invocation.getArguments()[0]));
  Mockito.doAnswer(invocation -> {
    containerStateManager.removeContainerReplica(
        (ContainerID) invocation.getArguments()[0],
        (ContainerReplica) invocation.getArguments()[1]);
    return null;
  }).when(containerManager).removeContainerReplica(
      Mockito.any(ContainerID.class), Mockito.any(ContainerReplica.class));
  Mockito.doAnswer(invocation -> {
    containerStateManager.updateContainerState(
        ((ContainerID) invocation.getArguments()[0]).getProtobuf(),
        (HddsProtos.LifeCycleEvent) invocation.getArguments()[1]);
    return null;
  }).when(containerManager).updateContainerState(
      Mockito.any(ContainerID.class), Mockito.any(HddsProtos.LifeCycleEvent.class));
  Mockito.doAnswer(invocation -> {
    containerStateManager.updateContainerReplica(
        (ContainerID) invocation.getArguments()[0],
        (ContainerReplica) invocation.getArguments()[1]);
    return null;
  }).when(containerManager).updateContainerReplica(
      Mockito.any(ContainerID.class), Mockito.any(ContainerReplica.class));
}
Use of org.apache.hadoop.hdds.scm.net.NetworkTopology in the Apache Ozone project.
From the class TestSCMContainerPlacementRackAware, method testDatanodeWithDefaultNetworkLocation:
@ParameterizedTest
@MethodSource("numDatanodes")
public void testDatanodeWithDefaultNetworkLocation(int datanodeCount) throws SCMException {
  setup(datanodeCount);
  final String hostPrefix = "node";
  final List<DatanodeInfo> registeredInfos = new ArrayList<>();
  final List<DatanodeDetails> registeredDetails = new ArrayList<>();
  final NetworkTopology topology = new NetworkTopologyImpl(NodeSchemaManager.getInstance());

  // Register 15 datanodes with a null network location; every one of them
  // should be placed on the default rack by the topology.
  for (int i = 0; i < 15; i++) {
    final DatanodeDetails dn =
        MockDatanodeDetails.createDatanodeDetails(hostPrefix + i, null);
    final DatanodeInfo dnInfo = new DatanodeInfo(dn, NodeStatus.inServiceHealthy(),
        UpgradeUtils.defaultLayoutVersionProto());
    final StorageReportProto dataReport = HddsTestUtils.createStorageReport(
        dnInfo.getUuid(), "/data1-" + dnInfo.getUuidString(),
        STORAGE_CAPACITY, 0, 100L, null);
    final MetadataStorageReportProto metaReport =
        HddsTestUtils.createMetadataStorageReport(
            "/metadata1-" + dnInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
    dnInfo.updateStorageReports(new ArrayList<>(Arrays.asList(dataReport)));
    dnInfo.updateMetaDataStorageReports(new ArrayList<>(Arrays.asList(metaReport)));
    registeredDetails.add(dn);
    topology.add(dn);
    registeredInfos.add(dnInfo);
  }

  // Every registered node must appear under the default rack.
  Assertions.assertEquals(registeredDetails.size(),
      StringUtils.countMatches(topology.toString(), NetConstants.DEFAULT_RACK));
  for (DatanodeInfo info : registeredInfos) {
    when(nodeManager.getNodeByUuid(info.getUuidString())).thenReturn(info);
  }

  // Choose nodes to host 3 replicas; with a single (default) rack all chosen
  // datanodes must share the same parent.
  final int nodeNum = 3;
  final SCMContainerPlacementRackAware newPolicy =
      new SCMContainerPlacementRackAware(nodeManager, conf, topology, true, metrics);
  final List<DatanodeDetails> chosen =
      newPolicy.chooseDatanodes(null, null, nodeNum, 0, 15);
  Assertions.assertEquals(nodeNum, chosen.size());
  Assertions.assertTrue(cluster.isSameParent(chosen.get(0), chosen.get(1)));
  Assertions.assertTrue(cluster.isSameParent(chosen.get(0), chosen.get(2)));
  Assertions.assertTrue(cluster.isSameParent(chosen.get(1), chosen.get(2)));
}
Use of org.apache.hadoop.hdds.scm.net.NetworkTopology in the Apache Ozone project.
From the class SCMCommonPlacementPolicy, method validateContainerPlacement:
/**
 * This default implementation handles rack aware policies and non rack
 * aware policies. If a future placement policy needs to check more than racks
 * to validate the policy (eg node groups, HDFS like upgrade domain) this
 * method should be overridden in the sub class.
 * This method requires that subclasses which implement rack aware policies
 * override the default method getRequiredRackCount and getNetworkTopology.
 * @param dns List of datanodes holding a replica of the container
 * @param replicas The expected number of replicas
 * @return ContainerPlacementStatus indicating if the placement policy is
 * met or not. Note this only considers the rack count and not the
 * number of replicas.
 */
@Override
public ContainerPlacementStatus validateContainerPlacement(List<DatanodeDetails> dns, int replicas) {
  final NetworkTopology topology = nodeManager.getClusterNetworkTopologyMap();
  int requiredRacks = getRequiredRackCount(replicas);
  if (topology == null || replicas == 1 || requiredRacks == 1) {
    // With no topology, or when only one replica/rack is required, placement
    // is satisfied by the presence of at least one datanode.
    return dns.isEmpty() ? invalidPlacement : validPlacement;
  }
  // We have a network topology so calculate if it is satisfied or not.
  final int maxLevel = topology.getMaxLevel();
  // The leaf nodes are all at max level, so the number of nodes at
  // leafLevel - 1 is the rack count.
  final int numRacks = topology.getNumOfNodes(maxLevel - 1);
  // Distinct racks (the ancestor one level above the leaves) that currently
  // hold a replica of this container.
  final long currentRackCount =
      dns.stream().map(d -> topology.getAncestor(d, 1)).distinct().count();
  // Never demand more racks than there are replicas to spread.
  if (replicas < requiredRacks) {
    requiredRacks = replicas;
  }
  return new ContainerPlacementStatusDefault((int) currentRackCount, requiredRacks, numRacks);
}
Use of org.apache.hadoop.hdds.scm.net.NetworkTopology in the Apache Ozone project.
From the class AbstractReconContainerManagerTest, method setUp:
@Before
public void setUp() throws Exception {
  // Configuration backed by a fresh temporary metadata directory.
  conf = new OzoneConfiguration();
  conf.set(OZONE_METADATA_DIRS, temporaryFolder.newFolder().getAbsolutePath());
  conf.set(OZONE_SCM_NAMES, "localhost");

  store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition());
  scmhaManager = SCMHAManagerStub.getInstance(true, new SCMHADBTransactionBufferStub(store));
  sequenceIdGen = new SequenceIdGenerator(conf, scmhaManager,
      ReconSCMDBDefinition.SEQUENCE_ID.getTable(store));
  scmContext = SCMContext.emptyContext();
  scmStorageConfig = new ReconStorageConfig(conf, new ReconUtils());

  // Mocked layout version manager reporting the latest layouts.
  final NetworkTopology topology = new NetworkTopologyImpl(conf);
  final EventQueue queue = new EventQueue();
  layoutVersionManager = mock(HDDSLayoutVersionManager.class);
  when(layoutVersionManager.getSoftwareLayoutVersion()).thenReturn(maxLayoutVersion());
  when(layoutVersionManager.getMetadataLayoutVersion()).thenReturn(maxLayoutVersion());

  // Wire the Recon pipeline and container managers on top of a real node manager.
  final NodeManager scmNodeManager = new SCMNodeManager(conf, scmStorageConfig, queue,
      topology, scmContext, layoutVersionManager);
  pipelineManager = ReconPipelineManager.newReconPipelineManager(conf, scmNodeManager,
      ReconSCMDBDefinition.PIPELINES.getTable(store), queue, scmhaManager, scmContext);
  final ContainerReplicaPendingOps pendingOps =
      new ContainerReplicaPendingOps(conf, new MonotonicClock(ZoneId.systemDefault()));
  containerManager = new ReconContainerManager(conf, store,
      ReconSCMDBDefinition.CONTAINERS.getTable(store), pipelineManager,
      getScmServiceProvider(), mock(ContainerHealthSchemaManager.class),
      mock(ReconContainerMetadataManager.class), scmhaManager, sequenceIdGen, pendingOps);
}
Aggregations