Search in sources:

Example 6 with NetworkTopology

Use of org.apache.hadoop.hdds.scm.net.NetworkTopology in the Apache Ozone project.

From the class TestCommandQueueReportHandler, the method resetEventCollector:

@BeforeEach
public void resetEventCollector() throws IOException {
    final OzoneConfiguration configuration = new OzoneConfiguration();
    // The storage config mock only needs to answer the cluster-id lookup.
    final SCMStorageConfig scmStorage = Mockito.mock(SCMStorageConfig.class);
    Mockito.when(scmStorage.getClusterID()).thenReturn("cluster1");
    // Layout-version mock reports the newest layout on both dimensions.
    this.versionManager = Mockito.mock(HDDSLayoutVersionManager.class);
    Mockito.when(versionManager.getSoftwareLayoutVersion()).thenReturn(maxLayoutVersion());
    Mockito.when(versionManager.getMetadataLayoutVersion()).thenReturn(maxLayoutVersion());
    // Real node manager over an empty topology; the handler under test wraps it.
    final NetworkTopology topology = new NetworkTopologyImpl(configuration);
    nodeManager = new SCMNodeManager(configuration, scmStorage, new EventQueue(), topology, SCMContext.emptyContext(), versionManager);
    commandQueueReportHandler = new CommandQueueReportHandler(nodeManager);
}
Also used : NetworkTopologyImpl(org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl) SCMStorageConfig(org.apache.hadoop.hdds.scm.server.SCMStorageConfig) NetworkTopology(org.apache.hadoop.hdds.scm.net.NetworkTopology) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) HDDSLayoutVersionManager(org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager) EventQueue(org.apache.hadoop.hdds.server.events.EventQueue) BeforeEach(org.junit.jupiter.api.BeforeEach)

Example 7 with NetworkTopology

Use of org.apache.hadoop.hdds.scm.net.NetworkTopology in the Apache Ozone project.

From the class TestIncrementalContainerReportHandler, the method setup:

// Builds the test fixture: a real SCMNodeManager plus a mocked
// ContainerManager whose container/replica calls are delegated to a real
// ContainerStateManagerImpl backed by an on-disk DB store.
@BeforeEach
public void setup() throws IOException, InvalidStateTransitionException {
    final OzoneConfiguration conf = new OzoneConfiguration();
    // Unique temp directory so repeated/parallel runs do not collide.
    final String path = GenericTestUtils.getTempPath(UUID.randomUUID().toString());
    Path scmPath = Paths.get(path, "scm-meta");
    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
    this.containerManager = Mockito.mock(ContainerManager.class);
    NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
    EventQueue eventQueue = new EventQueue();
    SCMStorageConfig storageConfig = new SCMStorageConfig(conf);
    // Layout-version mock reports the latest metadata/software layout so the
    // node manager treats all datanodes as fully upgraded.
    this.versionManager = Mockito.mock(HDDSLayoutVersionManager.class);
    Mockito.when(versionManager.getMetadataLayoutVersion()).thenReturn(maxLayoutVersion());
    Mockito.when(versionManager.getSoftwareLayoutVersion()).thenReturn(maxLayoutVersion());
    this.nodeManager = new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap, scmContext, versionManager);
    // HA stub; the `true` argument presumably means "act as leader" — verify
    // against SCMHAManagerStub if the lease semantics matter to a test.
    scmhaManager = SCMHAManagerStub.getInstance(true);
    testDir = GenericTestUtils.getTestDir(TestContainerManagerImpl.class.getSimpleName() + UUID.randomUUID());
    dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition());
    pipelineManager = new MockPipelineManager(dbStore, scmhaManager, nodeManager);
    this.containerStateManager = ContainerStateManagerImpl.newBuilder().setConfiguration(conf).setPipelineManager(pipelineManager).setRatisServer(scmhaManager.getRatisServer()).setContainerStore(SCMDBDefinition.CONTAINERS.getTable(dbStore)).setSCMDBTransactionBuffer(scmhaManager.getDBTransactionBuffer()).build();
    this.publisher = Mockito.mock(EventPublisher.class);
    // Read paths: forward getContainer/getContainerReplicas on the mock
    // straight to the real containerStateManager.
    Mockito.when(containerManager.getContainer(Mockito.any(ContainerID.class))).thenAnswer(invocation -> containerStateManager.getContainer(((ContainerID) invocation.getArguments()[0])));
    Mockito.when(containerManager.getContainerReplicas(Mockito.any(ContainerID.class))).thenAnswer(invocation -> containerStateManager.getContainerReplicas(((ContainerID) invocation.getArguments()[0])));
    // Write paths: the mocked void methods mutate the real
    // containerStateManager as a side effect, then return null.
    Mockito.doAnswer(invocation -> {
        containerStateManager.removeContainerReplica(((ContainerID) invocation.getArguments()[0]), (ContainerReplica) invocation.getArguments()[1]);
        return null;
    }).when(containerManager).removeContainerReplica(Mockito.any(ContainerID.class), Mockito.any(ContainerReplica.class));
    Mockito.doAnswer(invocation -> {
        // State transitions go through the protobuf form of the container id.
        containerStateManager.updateContainerState(((ContainerID) invocation.getArguments()[0]).getProtobuf(), (HddsProtos.LifeCycleEvent) invocation.getArguments()[1]);
        return null;
    }).when(containerManager).updateContainerState(Mockito.any(ContainerID.class), Mockito.any(HddsProtos.LifeCycleEvent.class));
    Mockito.doAnswer(invocation -> {
        containerStateManager.updateContainerReplica(((ContainerID) invocation.getArguments()[0]), (ContainerReplica) invocation.getArguments()[1]);
        return null;
    }).when(containerManager).updateContainerReplica(Mockito.any(ContainerID.class), Mockito.any(ContainerReplica.class));
}
Also used : Path(java.nio.file.Path) EventPublisher(org.apache.hadoop.hdds.server.events.EventPublisher) SCMStorageConfig(org.apache.hadoop.hdds.scm.server.SCMStorageConfig) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) HDDSLayoutVersionManager(org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager) EventQueue(org.apache.hadoop.hdds.server.events.EventQueue) NetworkTopologyImpl(org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl) SCMNodeManager(org.apache.hadoop.hdds.scm.node.SCMNodeManager) MockPipelineManager(org.apache.hadoop.hdds.scm.pipeline.MockPipelineManager) HddsProtos(org.apache.hadoop.hdds.protocol.proto.HddsProtos) NetworkTopology(org.apache.hadoop.hdds.scm.net.NetworkTopology) SCMDBDefinition(org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition) BeforeEach(org.junit.jupiter.api.BeforeEach)

Example 8 with NetworkTopology

Use of org.apache.hadoop.hdds.scm.net.NetworkTopology in the Apache Ozone project.

From the class TestSCMContainerPlacementRackAware, the method testDatanodeWithDefaultNetworkLocation:

// Verifies that datanodes registered with no explicit network location all
// fall back to the default rack, and that rack-aware placement then treats
// them as co-located (same parent for every chosen node).
@ParameterizedTest
@MethodSource("numDatanodes")
public void testDatanodeWithDefaultNetworkLocation(int datanodeCount) throws SCMException {
    setup(datanodeCount);
    String hostname = "node";
    // All registered nodes end up on NetConstants.DEFAULT_RACK because no
    // network location is supplied; name the count once instead of repeating
    // the magic number 15.
    final int registeredDnCount = 15;
    List<DatanodeInfo> dnInfoList = new ArrayList<>(registeredDnCount);
    List<DatanodeDetails> dataList = new ArrayList<>(registeredDnCount);
    NetworkTopology clusterMap = new NetworkTopologyImpl(NodeSchemaManager.getInstance());
    for (int i = 0; i < registeredDnCount; i++) {
        // Null network location -> the topology places the node under the
        // default rack (checked by the countMatches assertion below). The
        // previous "3 racks of 5 datanodes" comment was a copy-paste error.
        DatanodeDetails dn = MockDatanodeDetails.createDatanodeDetails(hostname + i, null);
        DatanodeInfo dnInfo = new DatanodeInfo(dn, NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto());
        StorageReportProto storage1 = HddsTestUtils.createStorageReport(dnInfo.getUuid(), "/data1-" + dnInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
        MetadataStorageReportProto metaStorage1 = HddsTestUtils.createMetadataStorageReport("/metadata1-" + dnInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
        dnInfo.updateStorageReports(new ArrayList<>(Arrays.asList(storage1)));
        dnInfo.updateMetaDataStorageReports(new ArrayList<>(Arrays.asList(metaStorage1)));
        dataList.add(dn);
        clusterMap.add(dn);
        dnInfoList.add(dnInfo);
    }
    // Every registered node must have been mapped to the default rack.
    Assertions.assertEquals(dataList.size(), StringUtils.countMatches(clusterMap.toString(), NetConstants.DEFAULT_RACK));
    for (DatanodeInfo dn : dnInfoList) {
        when(nodeManager.getNodeByUuid(dn.getUuidString())).thenReturn(dn);
    }
    // Choose nodes to host 3 replicas; with a single (default) rack all
    // chosen nodes must share the same topology parent.
    int nodeNum = 3;
    SCMContainerPlacementRackAware newPolicy = new SCMContainerPlacementRackAware(nodeManager, conf, clusterMap, true, metrics);
    // NOTE(review): the trailing 0 and 15 arguments appear to be the
    // metadata/data size requirements, not node counts — confirm against the
    // chooseDatanodes signature before changing them.
    List<DatanodeDetails> datanodeDetails = newPolicy.chooseDatanodes(null, null, nodeNum, 0, 15);
    Assertions.assertEquals(nodeNum, datanodeDetails.size());
    Assertions.assertTrue(cluster.isSameParent(datanodeDetails.get(0), datanodeDetails.get(1)));
    Assertions.assertTrue(cluster.isSameParent(datanodeDetails.get(0), datanodeDetails.get(2)));
    Assertions.assertTrue(cluster.isSameParent(datanodeDetails.get(1), datanodeDetails.get(2)));
}
Also used : DatanodeInfo(org.apache.hadoop.hdds.scm.node.DatanodeInfo) ArrayList(java.util.ArrayList) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto) StorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto) NetworkTopologyImpl(org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl) MockDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) NetworkTopology(org.apache.hadoop.hdds.scm.net.NetworkTopology) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto) ParameterizedTest(org.junit.jupiter.params.ParameterizedTest) MethodSource(org.junit.jupiter.params.provider.MethodSource)

Example 9 with NetworkTopology

Use of org.apache.hadoop.hdds.scm.net.NetworkTopology in the Apache Ozone project.

From the class SCMCommonPlacementPolicy, the method validateContainerPlacement:

/**
 * This default implementation handles rack aware policies and non rack
 * aware policies. If a future placement policy needs to check more than racks
 * to validate the policy (eg node groups, HDFS like upgrade domain) this
 * method should be overridden in the sub class.
 * This method requires that subclasses which implement rack aware policies
 * override the default methods getRequiredRackCount and getNetworkTopology.
 * @param dns List of datanodes holding a replica of the container
 * @param replicas The expected number of replicas
 * @return ContainerPlacementStatus indicating if the placement policy is
 *         met or not. Note this only considers the rack count and not the
 *         number of replicas.
 */
@Override
public ContainerPlacementStatus validateContainerPlacement(List<DatanodeDetails> dns, int replicas) {
    NetworkTopology topology = nodeManager.getClusterNetworkTopologyMap();
    int requiredRacks = getRequiredRackCount(replicas);
    if (topology == null || replicas == 1 || requiredRacks == 1) {
        // Placement is trivially satisfied by any non-empty replica set when
        // there is no topology, or only one rack/replica is required.
        return dns.isEmpty() ? invalidPlacement : validPlacement;
    }
    // We have a network topology so calculate if it is satisfied or not.
    final int maxLevel = topology.getMaxLevel();
    // The leaf nodes are all at max level, so the number of nodes at
    // maxLevel - 1 is the rack count. (Assign directly — the previous
    // `int numRacks = 1;` initialization was dead code.)
    final int numRacks = topology.getNumOfNodes(maxLevel - 1);
    // Distinct level-1 ancestors of the replica holders = racks in use.
    final long currentRackCount = dns.stream().map(d -> topology.getAncestor(d, 1)).distinct().count();
    // Never require more racks than there are replicas to spread across.
    if (replicas < requiredRacks) {
        requiredRacks = replicas;
    }
    return new ContainerPlacementStatusDefault((int) currentRackCount, requiredRacks, numRacks);
}
Also used : NetworkTopology(org.apache.hadoop.hdds.scm.net.NetworkTopology) ContainerPlacementStatusDefault(org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementStatusDefault)

Example 10 with NetworkTopology

Use of org.apache.hadoop.hdds.scm.net.NetworkTopology in the Apache Ozone project.

From the class AbstractReconContainerManagerTest, the method setUp:

@Before
public void setUp() throws Exception {
    conf = new OzoneConfiguration();
    conf.set(OZONE_METADATA_DIRS, temporaryFolder.newFolder().getAbsolutePath());
    conf.set(OZONE_SCM_NAMES, "localhost");
    // Recon-flavoured SCM metadata store plus the HA stub that backs the
    // managers created below.
    store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition());
    scmhaManager = SCMHAManagerStub.getInstance(true, new SCMHADBTransactionBufferStub(store));
    sequenceIdGen = new SequenceIdGenerator(conf, scmhaManager, ReconSCMDBDefinition.SEQUENCE_ID.getTable(store));
    scmContext = SCMContext.emptyContext();
    scmStorageConfig = new ReconStorageConfig(conf, new ReconUtils());
    // Node-manager wiring: empty topology, fresh event queue, and a layout
    // version mock reporting the newest layout on both dimensions.
    final NetworkTopology topology = new NetworkTopologyImpl(conf);
    final EventQueue queue = new EventQueue();
    layoutVersionManager = mock(HDDSLayoutVersionManager.class);
    when(layoutVersionManager.getMetadataLayoutVersion()).thenReturn(maxLayoutVersion());
    when(layoutVersionManager.getSoftwareLayoutVersion()).thenReturn(maxLayoutVersion());
    final NodeManager scmNodeManager = new SCMNodeManager(conf, scmStorageConfig, queue, topology, scmContext, layoutVersionManager);
    pipelineManager = ReconPipelineManager.newReconPipelineManager(conf, scmNodeManager, ReconSCMDBDefinition.PIPELINES.getTable(store), queue, scmhaManager, scmContext);
    final ContainerReplicaPendingOps replicaPendingOps = new ContainerReplicaPendingOps(conf, new MonotonicClock(ZoneId.systemDefault()));
    containerManager = new ReconContainerManager(conf, store, ReconSCMDBDefinition.CONTAINERS.getTable(store), pipelineManager, getScmServiceProvider(), mock(ContainerHealthSchemaManager.class), mock(ReconContainerMetadataManager.class), scmhaManager, sequenceIdGen, replicaPendingOps);
}
Also used : ReconUtils(org.apache.hadoop.ozone.recon.ReconUtils) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) HDDSLayoutVersionManager(org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager) EventQueue(org.apache.hadoop.hdds.server.events.EventQueue) NetworkTopologyImpl(org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl) NodeManager(org.apache.hadoop.hdds.scm.node.NodeManager) SCMNodeManager(org.apache.hadoop.hdds.scm.node.SCMNodeManager) SCMNodeManager(org.apache.hadoop.hdds.scm.node.SCMNodeManager) SequenceIdGenerator(org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator) ContainerReplicaPendingOps(org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaPendingOps) NetworkTopology(org.apache.hadoop.hdds.scm.net.NetworkTopology) SCMHADBTransactionBufferStub(org.apache.hadoop.hdds.scm.ha.SCMHADBTransactionBufferStub) MonotonicClock(org.apache.hadoop.ozone.common.MonotonicClock) Before(org.junit.Before)

Aggregations

NetworkTopology (org.apache.hadoop.hdds.scm.net.NetworkTopology)24 NetworkTopologyImpl (org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl)16 DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails)12 EventQueue (org.apache.hadoop.hdds.server.events.EventQueue)10 Test (org.junit.Test)10 OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)9 MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails)8 HDDSLayoutVersionManager (org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager)7 MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails)6 NodeManager (org.apache.hadoop.hdds.scm.node.NodeManager)6 SCMNodeManager (org.apache.hadoop.hdds.scm.node.SCMNodeManager)5 ArrayList (java.util.ArrayList)4 SCMStorageConfig (org.apache.hadoop.hdds.scm.server.SCMStorageConfig)4 ReconUtils (org.apache.hadoop.ozone.recon.ReconUtils)4 UUID (java.util.UUID)3 MetadataStorageReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto)3 StorageReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto)3 SCMException (org.apache.hadoop.hdds.scm.exceptions.SCMException)3 DatanodeInfo (org.apache.hadoop.hdds.scm.node.DatanodeInfo)3 Path (java.nio.file.Path)2