Search in sources :

Example 16 with NodeManager

Use of org.apache.hadoop.hdds.scm.node.NodeManager in the Apache Ozone project.

From the class TestSCMDatanodeHeartbeatDispatcher, method testNodeReportDispatcher.

@Test
public void testNodeReportDispatcher() throws IOException {
    // Verifies that a heartbeat carrying a node report is dispatched as
    // exactly one NODE_REPORT event whose payload wraps the original report.
    AtomicInteger eventReceived = new AtomicInteger();
    NodeReportProto nodeReport = NodeReportProto.getDefaultInstance();
    NodeManager mockNodeManager = Mockito.mock(NodeManager.class);
    // The dispatcher ignores reports from unregistered nodes, so the mock
    // must treat every datanode as registered.
    Mockito.when(mockNodeManager.isNodeRegistered(Mockito.any())).thenReturn(true);
    SCMDatanodeHeartbeatDispatcher dispatcher = new SCMDatanodeHeartbeatDispatcher(mockNodeManager, new EventPublisher() {

        @Override
        public <PAYLOAD, EVENT extends Event<PAYLOAD>> void fireEvent(EVENT event, PAYLOAD payload) {
            // Fixed argument order: JUnit's assertEquals takes the expected
            // value first, the actual value second. The original had them
            // swapped, which produces a misleading failure message.
            Assert.assertEquals(NODE_REPORT, event);
            eventReceived.incrementAndGet();
            Assert.assertEquals(nodeReport, ((NodeReportFromDatanode) payload).getReport());
        }
    });
    DatanodeDetails datanodeDetails = randomDatanodeDetails();
    SCMHeartbeatRequestProto heartbeat = SCMHeartbeatRequestProto.newBuilder().setDatanodeDetails(datanodeDetails.getProtoBufMessage()).setNodeReport(nodeReport).build();
    dispatcher.dispatch(heartbeat);
    // Exactly one event must have been fired for the single heartbeat.
    Assert.assertEquals(1, eventReceived.get());
}
Also used : SCMHeartbeatRequestProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto) NodeManager(org.apache.hadoop.hdds.scm.node.NodeManager) EventPublisher(org.apache.hadoop.hdds.server.events.EventPublisher) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MockDatanodeDetails.randomDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) NodeReportFromDatanode(org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode) NodeReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto) Test(org.junit.Test)

Example 17 with NodeManager

Use of org.apache.hadoop.hdds.scm.node.NodeManager in the Apache Ozone project.

From the class TestPipelineDatanodesIntersection, method testPipelineDatanodesIntersection.

@Test
public void testPipelineDatanodesIntersection() throws IOException {
    // Creates RATIS/THREE pipelines until the provider refuses (SCMException)
    // or the node-heaviness budget is exhausted, counting how many pipelines
    // share their complete datanode set with a previously created pipeline.
    NodeManager nodeManager = new MockNodeManager(true, nodeCount);
    conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, nodeHeaviness);
    conf.setBoolean(OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, false);
    SCMHAManager scmhaManager = MockSCMHAManager.getInstance(true);
    PipelineStateManager stateManager = PipelineStateManagerImpl.newBuilder().setPipelineStore(SCMDBDefinition.PIPELINES.getTable(dbStore)).setRatisServer(scmhaManager.getRatisServer()).setNodeManager(nodeManager).setSCMDBTransactionBuffer(scmhaManager.getDBTransactionBuffer()).build();
    PipelineProvider provider = new MockRatisPipelineProvider(nodeManager, stateManager, conf);
    int healthyNodeCount = nodeManager.getNodeCount(NodeStatus.inServiceHealthy());
    int intersectionCount = 0;
    int createdPipelineCount = 0;
    while (!end && createdPipelineCount <= healthyNodeCount * nodeHeaviness) {
        try {
            Pipeline pipeline = provider.create(RatisReplicationConfig.getInstance(ReplicationFactor.THREE));
            HddsProtos.Pipeline pipelineProto = pipeline.getProtobufMessage(ClientVersions.CURRENT_VERSION);
            stateManager.addPipeline(pipelineProto);
            nodeManager.addPipeline(pipeline);
            List<Pipeline> overlapPipelines = RatisPipelineUtils.checkPipelineContainSameDatanodes(stateManager, pipeline);
            // BUG FIX: count and log overlaps only when overlaps exist. The
            // original tested isEmpty(), which counted pipelines with NO
            // overlap and made the logging loop below unreachable.
            if (!overlapPipelines.isEmpty()) {
                intersectionCount++;
                for (Pipeline overlapPipeline : overlapPipelines) {
                    LOG.info("This pipeline: " + pipeline.getId().toString() + " overlaps with previous pipeline: " + overlapPipeline.getId() + ". They share same set of datanodes as: " + pipeline.getNodesInOrder().get(0).getUuid() + "/" + pipeline.getNodesInOrder().get(1).getUuid() + "/" + pipeline.getNodesInOrder().get(2).getUuid() + " and " + overlapPipeline.getNodesInOrder().get(0).getUuid() + "/" + overlapPipeline.getNodesInOrder().get(1).getUuid() + "/" + overlapPipeline.getNodesInOrder().get(2).getUuid() + " is the same.");
                }
            }
            createdPipelineCount++;
        } catch (SCMException e) {
            // Expected terminal condition: no further pipeline can be placed.
            end = true;
        } catch (IOException e) {
            end = true;
            // Should not throw regular IOException.
            Assert.fail();
        }
    }
    end = false;
    LOG.info("Among total " + stateManager.getPipelines(RatisReplicationConfig.getInstance(ReplicationFactor.THREE)).size() + " created pipelines" + " with " + healthyNodeCount + " healthy datanodes and " + nodeHeaviness + " as node heaviness, " + intersectionCount + " pipelines has same set of datanodes.");
}
Also used : MockSCMHAManager(org.apache.hadoop.hdds.scm.ha.MockSCMHAManager) SCMHAManager(org.apache.hadoop.hdds.scm.ha.SCMHAManager) IOException(java.io.IOException) MockNodeManager(org.apache.hadoop.hdds.scm.container.MockNodeManager) NodeManager(org.apache.hadoop.hdds.scm.node.NodeManager) MockNodeManager(org.apache.hadoop.hdds.scm.container.MockNodeManager) HddsProtos(org.apache.hadoop.hdds.protocol.proto.HddsProtos) SCMException(org.apache.hadoop.hdds.scm.exceptions.SCMException) Test(org.junit.Test)

Example 18 with NodeManager

Use of org.apache.hadoop.hdds.scm.node.NodeManager in the Apache Ozone project.

From the class TestReconPipelineManager, method testInitialize.

@Test
public void testInitialize() throws IOException {
    // Verifies that ReconPipelineManager.initializePipelines() reconciles
    // Recon's local pipeline table against the authoritative list from SCM:
    // new SCM pipelines are added, a known pipeline's state is refreshed,
    // and pipelines unknown to SCM are removed.
    // Get 3 OPEN pipelines from SCM.
    List<Pipeline> pipelinesFromScm = getPipelines(3);
    // Recon has 2 pipelines in ALLOCATED state. (1 is valid and 1 is obsolete)
    // Valid pipeline in Allocated state.
    Pipeline validPipeline = Pipeline.newBuilder().setReplicationConfig(StandaloneReplicationConfig.getInstance(ReplicationFactor.ONE)).setId(pipelinesFromScm.get(0).getId()).setNodes(pipelinesFromScm.get(0).getNodes()).setState(Pipeline.PipelineState.ALLOCATED).build();
    // Invalid pipeline.
    Pipeline invalidPipeline = Pipeline.newBuilder().setReplicationConfig(StandaloneReplicationConfig.getInstance(ReplicationFactor.ONE)).setId(PipelineID.randomId()).setNodes(Collections.singletonList(randomDatanodeDetails())).setState(Pipeline.PipelineState.CLOSED).build();
    NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
    EventQueue eventQueue = new EventQueue();
    // Both layout versions are stubbed to the max so the real SCMNodeManager
    // below constructs without upgrade-related complaints.
    this.versionManager = Mockito.mock(HDDSLayoutVersionManager.class);
    Mockito.when(versionManager.getMetadataLayoutVersion()).thenReturn(maxLayoutVersion());
    Mockito.when(versionManager.getSoftwareLayoutVersion()).thenReturn(maxLayoutVersion());
    NodeManager nodeManager = new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap, SCMContext.emptyContext(), versionManager);
    // try-with-resources ensures the pipeline manager (and its DB handle)
    // is closed even if an assertion below fails.
    try (ReconPipelineManager reconPipelineManager = ReconPipelineManager.newReconPipelineManager(conf, nodeManager, ReconSCMDBDefinition.PIPELINES.getTable(store), eventQueue, scmhaManager, scmContext)) {
        // NOTE(review): scmContext (a field) is reassigned here AFTER being
        // passed to the factory above; setScmContext below is what makes the
        // manager see this leader/safe-mode context — confirm this ordering
        // is intentional.
        scmContext = new SCMContext.Builder().setIsInSafeMode(true).setLeader(true).setIsPreCheckComplete(true).setSCM(mock(StorageContainerManager.class)).build();
        reconPipelineManager.setScmContext(scmContext);
        reconPipelineManager.addPipeline(validPipeline);
        reconPipelineManager.addPipeline(invalidPipeline);
        reconPipelineManager.initializePipelines(pipelinesFromScm);
        List<Pipeline> newReconPipelines = reconPipelineManager.getPipelines();
        // Test if the number of pipelines in SCM is as expected.
        assertEquals(3, newReconPipelines.size());
        // Test if new pipelines from SCM are picked up.
        for (Pipeline pipeline : pipelinesFromScm) {
            assertTrue(reconPipelineManager.containsPipeline(pipeline.getId()));
        }
        // Test if existing pipeline state is updated.
        assertEquals(Pipeline.PipelineState.OPEN, reconPipelineManager.getPipeline(validPipeline.getId()).getPipelineState());
        // Test if obsolete pipelines in Recon are removed.
        assertFalse(reconPipelineManager.containsPipeline(invalidPipeline.getId()));
    }
}
Also used : NetworkTopologyImpl(org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl) NodeManager(org.apache.hadoop.hdds.scm.node.NodeManager) SCMNodeManager(org.apache.hadoop.hdds.scm.node.SCMNodeManager) SCMNodeManager(org.apache.hadoop.hdds.scm.node.SCMNodeManager) StorageContainerManager(org.apache.hadoop.hdds.scm.server.StorageContainerManager) NetworkTopology(org.apache.hadoop.hdds.scm.net.NetworkTopology) DBStoreBuilder(org.apache.hadoop.hdds.utils.db.DBStoreBuilder) HDDSLayoutVersionManager(org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager) EventQueue(org.apache.hadoop.hdds.server.events.EventQueue) OMMetadataManagerTestUtils.getRandomPipeline(org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) Test(org.junit.Test)

Example 19 with NodeManager

Use of org.apache.hadoop.hdds.scm.node.NodeManager in the Apache Ozone project.

From the class TestReconPipelineManager, method testStubbedReconPipelineFactory.

@Test
public void testStubbedReconPipelineFactory() throws IOException {
    // Verifies that a newly built ReconPipelineManager wires in a
    // ReconPipelineFactory (rather than SCM's default PipelineFactory)
    // and that the factory's provider map is empty.
    NodeManager nodeManagerMock = mock(NodeManager.class);
    ReconPipelineManager reconPipelineManager = ReconPipelineManager.newReconPipelineManager(conf, nodeManagerMock, ReconSCMDBDefinition.PIPELINES.getTable(store), new EventQueue(), scmhaManager, scmContext);
    PipelineFactory pipelineFactory = reconPipelineManager.getPipelineFactory();
    assertTrue(pipelineFactory instanceof ReconPipelineFactory);
    ReconPipelineFactory reconPipelineFactory = (ReconPipelineFactory) pipelineFactory;
    assertTrue(reconPipelineFactory.getProviders().isEmpty());
    // NOTE(review): the assertion above means the provider map is empty, so
    // the loop below can never execute — it is dead code. Either the
    // assertion or the loop is redundant; confirm the intended contract of
    // ReconPipelineFactory.getProviders() before removing one of them.
    for (ReplicationType type : reconPipelineFactory.getProviders().keySet()) {
        PipelineProvider pipelineProvider = reconPipelineFactory.getProviders().get(type);
        assertTrue(pipelineProvider instanceof ReconPipelineProvider);
    }
}
Also used : NodeManager(org.apache.hadoop.hdds.scm.node.NodeManager) SCMNodeManager(org.apache.hadoop.hdds.scm.node.SCMNodeManager) ReplicationType(org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType) PipelineFactory(org.apache.hadoop.hdds.scm.pipeline.PipelineFactory) ReconPipelineProvider(org.apache.hadoop.ozone.recon.scm.ReconPipelineFactory.ReconPipelineProvider) ReconPipelineProvider(org.apache.hadoop.ozone.recon.scm.ReconPipelineFactory.ReconPipelineProvider) PipelineProvider(org.apache.hadoop.hdds.scm.pipeline.PipelineProvider) EventQueue(org.apache.hadoop.hdds.server.events.EventQueue) Test(org.junit.Test)

Example 20 with NodeManager

Use of org.apache.hadoop.hdds.scm.node.NodeManager in the Apache Ozone project.

From the class TestSCMContainerPlacementCapacity, method chooseDatanodes.

@Test
public void chooseDatanodes() throws SCMException {
    // Statistical test: capacity-aware placement should exclude full nodes
    // and, over many rounds, favor nodes with more free space.
    // given
    OzoneConfiguration conf = new OzoneConfiguration();
    // We are using small units here
    conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 1, StorageUnit.BYTES);
    // Seven healthy datanodes, each initially reporting 100 capacity / 0 used.
    List<DatanodeInfo> datanodes = new ArrayList<>();
    for (int i = 0; i < 7; i++) {
        DatanodeInfo datanodeInfo = new DatanodeInfo(MockDatanodeDetails.randomDatanodeDetails(), NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto());
        StorageReportProto storage1 = HddsTestUtils.createStorageReport(datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(), 100L, 0, 100L, null);
        MetadataStorageReportProto metaStorage1 = HddsTestUtils.createMetadataStorageReport("/metadata1-" + datanodeInfo.getUuidString(), 100L, 0, 100L, null);
        datanodeInfo.updateStorageReports(new ArrayList<>(Arrays.asList(storage1)));
        datanodeInfo.updateMetaDataStorageReports(new ArrayList<>(Arrays.asList(metaStorage1)));
        datanodes.add(datanodeInfo);
    }
    // Override the reports of nodes 2, 3 and 4 with 90/80/70 used
    // (10/20/30 remaining) so the nodes differ in free space.
    StorageReportProto storage2 = HddsTestUtils.createStorageReport(datanodes.get(2).getUuid(), "/data1-" + datanodes.get(2).getUuidString(), 100L, 90L, 10L, null);
    datanodes.get(2).updateStorageReports(new ArrayList<>(Arrays.asList(storage2)));
    StorageReportProto storage3 = HddsTestUtils.createStorageReport(datanodes.get(3).getUuid(), "/data1-" + datanodes.get(3).getUuidString(), 100L, 80L, 20L, null);
    datanodes.get(3).updateStorageReports(new ArrayList<>(Arrays.asList(storage3)));
    StorageReportProto storage4 = HddsTestUtils.createStorageReport(datanodes.get(4).getUuid(), "/data1-" + datanodes.get(4).getUuidString(), 100L, 70L, 30L, null);
    datanodes.get(4).updateStorageReports(new ArrayList<>(Arrays.asList(storage4)));
    NodeManager mockNodeManager = Mockito.mock(NodeManager.class);
    when(mockNodeManager.getNodes(NodeStatus.inServiceHealthy())).thenReturn(new ArrayList<>(datanodes));
    // Stub order matters in Mockito: the generic anyObject() stub comes
    // first; the specific per-node stubs below override it for nodes 2-4.
    when(mockNodeManager.getNodeStat(anyObject())).thenReturn(new SCMNodeMetric(100L, 0L, 100L));
    when(mockNodeManager.getNodeStat(datanodes.get(2))).thenReturn(new SCMNodeMetric(100L, 90L, 10L));
    when(mockNodeManager.getNodeStat(datanodes.get(3))).thenReturn(new SCMNodeMetric(100L, 80L, 20L));
    when(mockNodeManager.getNodeStat(datanodes.get(4))).thenReturn(new SCMNodeMetric(100L, 70L, 30L));
    SCMContainerPlacementCapacity scmContainerPlacementRandom = new SCMContainerPlacementCapacity(mockNodeManager, conf, null, true, null);
    // Nodes 0 and 1 are passed as already-used nodes and must be excluded.
    List<DatanodeDetails> existingNodes = new ArrayList<>();
    existingNodes.add(datanodes.get(0));
    existingNodes.add(datanodes.get(1));
    Map<DatanodeDetails, Integer> selectedCount = new HashMap<>();
    for (DatanodeDetails datanode : datanodes) {
        selectedCount.put(datanode, 0);
    }
    // 1000 placement rounds; tally how often each node is chosen.
    for (int i = 0; i < 1000; i++) {
        // when
        // NOTE(review): presumably the trailing 15, 15 are the required
        // metadata and data sizes — node 2's 10 free bytes would then fail
        // the space check. TODO confirm against chooseDatanodes' signature.
        List<DatanodeDetails> datanodeDetails = scmContainerPlacementRandom.chooseDatanodes(existingNodes, null, 1, 15, 15);
        // then
        Assert.assertEquals(1, datanodeDetails.size());
        DatanodeDetails datanode0Details = datanodeDetails.get(0);
        Assert.assertNotEquals("Datanode 0 should not been selected: excluded by parameter", datanodes.get(0), datanode0Details);
        Assert.assertNotEquals("Datanode 1 should not been selected: excluded by parameter", datanodes.get(1), datanode0Details);
        Assert.assertNotEquals("Datanode 2 should not been selected: not enough space there", datanodes.get(2), datanode0Details);
        selectedCount.put(datanode0Details, selectedCount.get(datanode0Details) + 1);
    }
    // datanode 6 has more space than datanode 3 and datanode 4.
    Assert.assertTrue(selectedCount.get(datanodes.get(3)) < selectedCount.get(datanodes.get(6)));
    Assert.assertTrue(selectedCount.get(datanodes.get(4)) < selectedCount.get(datanodes.get(6)));
}
Also used : DatanodeInfo(org.apache.hadoop.hdds.scm.node.DatanodeInfo) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto) StorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto) SCMNodeMetric(org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric) NodeManager(org.apache.hadoop.hdds.scm.node.NodeManager) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) MockDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto) Test(org.junit.Test)

Aggregations

NodeManager (org.apache.hadoop.hdds.scm.node.NodeManager)28 Test (org.junit.Test)21 DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails)15 OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)10 ArrayList (java.util.ArrayList)6 MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails)6 NetworkTopologyImpl (org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl)6 MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails)5 MetadataStorageReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto)5 StorageReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto)5 ContainerID (org.apache.hadoop.hdds.scm.container.ContainerID)5 NetworkTopology (org.apache.hadoop.hdds.scm.net.NetworkTopology)5 DatanodeInfo (org.apache.hadoop.hdds.scm.node.DatanodeInfo)5 SCMNodeManager (org.apache.hadoop.hdds.scm.node.SCMNodeManager)5 Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline)5 EventPublisher (org.apache.hadoop.hdds.server.events.EventPublisher)5 EventQueue (org.apache.hadoop.hdds.server.events.EventQueue)5 SCMException (org.apache.hadoop.hdds.scm.exceptions.SCMException)4 HDDSLayoutVersionManager (org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager)4 IOException (java.io.IOException)3