
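Every example below either mocks EventPublisher with Mockito or implements it inline. For orientation, here is a minimal sketch of the interface as these snippets exercise it; it is inferred from the usages below, and the real declaration in org.apache.hadoop.hdds.server.events may contain more than this:

// Minimal sketch, inferred from the examples below; not the complete interface.
// The event constants passed to fireEvent below (NODE_REPORT, CONTAINER_REPORT,
// NEW_NODE, DATANODE_COMMAND, ...) are instances of Event<PAYLOAD>.
public interface EventPublisher {
    <PAYLOAD, EVENT extends Event<PAYLOAD>> void fireEvent(EVENT event, PAYLOAD payload);
}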
Example 1 with EventPublisher

Use of org.apache.hadoop.hdds.server.events.EventPublisher in the Apache Ozone project.

From the class TestReconPipelineReportHandler, method testProcessPipelineReport:

@Test
public void testProcessPipelineReport() throws IOException {
    // Check with a pipeline that does not yet exist in Recon.
    Pipeline pipeline = getRandomPipeline();
    PipelineID pipelineID = pipeline.getId();
    HddsProtos.PipelineID pipelineIDProto = pipelineID.getProtobuf();
    ReconPipelineManager reconPipelineManagerMock = mock(ReconPipelineManager.class);
    when(reconPipelineManagerMock.getPipeline(pipelineID)).thenReturn(pipeline);
    StorageContainerServiceProvider scmServiceProviderMock = mock(StorageContainerServiceProvider.class);
    when(scmServiceProviderMock.getPipeline(pipelineIDProto)).thenReturn(pipeline);
    OzoneConfiguration configuration = new OzoneConfiguration();
    ReconPipelineReportHandler handler = new ReconPipelineReportHandler(
        new ReconSafeModeManager(), reconPipelineManagerMock,
        SCMContext.emptyContext(), configuration, scmServiceProviderMock);
    EventPublisher eventPublisherMock = mock(EventPublisher.class);
    PipelineReport report = mock(PipelineReport.class);
    when(report.getPipelineID()).thenReturn(pipelineIDProto);
    handler.processPipelineReport(report, pipeline.getNodes().get(0), eventPublisherMock);
    // Verify that the new pipeline was added to the pipeline manager.
    verify(reconPipelineManagerMock, times(1)).addPipeline(pipeline);
    verify(reconPipelineManagerMock, times(1)).getPipeline(pipelineID);
    // Check with a pipeline that already exists in Recon.
    pipeline = getRandomPipeline();
    pipelineID = pipeline.getId();
    pipelineIDProto = pipelineID.getProtobuf();
    when(reconPipelineManagerMock.containsPipeline(pipelineID)).thenReturn(true);
    when(reconPipelineManagerMock.getPipeline(pipelineID)).thenReturn(pipeline);
    when(report.getPipelineID()).thenReturn(pipelineIDProto);
    handler.processPipelineReport(report, pipeline.getNodes().get(0), eventPublisherMock);
    // Verify that the existing pipeline was not added to the pipeline manager again.
    verify(reconPipelineManagerMock, times(0)).addPipeline(pipeline);
    verify(reconPipelineManagerMock, times(1)).getPipeline(pipelineID);
}
Also used: PipelineReport (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport), StorageContainerServiceProvider (org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider), EventPublisher (org.apache.hadoop.hdds.server.events.EventPublisher), HddsProtos (org.apache.hadoop.hdds.protocol.proto.HddsProtos), PipelineID (org.apache.hadoop.hdds.scm.pipeline.PipelineID), OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), OMMetadataManagerTestUtils.getRandomPipeline (org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), Test (org.junit.Test)
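In the first half of the test, containsPipeline is never stubbed, so Mockito's default return value of false is what sends the handler down the add-pipeline path. If you want to make that assumption explicit, a stub such as the following could be added before the first processPipelineReport call (a sketch only; it is not part of the original test):

    // Optional: spell out the Mockito default that the first half of the test relies on.
    when(reconPipelineManagerMock.containsPipeline(pipelineID)).thenReturn(false);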

Example 2 with EventPublisher

Use of org.apache.hadoop.hdds.server.events.EventPublisher in the Apache Ozone project.

From the class TestSCMDatanodeHeartbeatDispatcher, method testContainerReportDispatcher:

@Test
public void testContainerReportDispatcher() throws IOException {
    AtomicInteger eventReceived = new AtomicInteger();
    ContainerReportsProto containerReport = ContainerReportsProto.getDefaultInstance();
    CommandStatusReportsProto commandStatusReport = CommandStatusReportsProto.getDefaultInstance();
    NodeManager mockNodeManager = Mockito.mock(NodeManager.class);
    Mockito.when(mockNodeManager.isNodeRegistered(Mockito.any())).thenReturn(true);
    SCMDatanodeHeartbeatDispatcher dispatcher = new SCMDatanodeHeartbeatDispatcher(mockNodeManager, new EventPublisher() {

        @Override
        public <PAYLOAD, EVENT extends Event<PAYLOAD>> void fireEvent(EVENT event, PAYLOAD payload) {
            Assert.assertTrue(event.equals(CONTAINER_REPORT) || event.equals(CMD_STATUS_REPORT));
            if (payload instanceof ContainerReportFromDatanode) {
                Assert.assertEquals(containerReport, ((ContainerReportFromDatanode) payload).getReport());
            }
            if (payload instanceof CommandStatusReportFromDatanode) {
                Assert.assertEquals(commandStatusReport, ((CommandStatusReportFromDatanode) payload).getReport());
            }
            eventReceived.incrementAndGet();
        }
    });
    DatanodeDetails datanodeDetails = randomDatanodeDetails();
    SCMHeartbeatRequestProto heartbeat = SCMHeartbeatRequestProto.newBuilder()
        .setDatanodeDetails(datanodeDetails.getProtoBufMessage())
        .setContainerReport(containerReport)
        .addCommandStatusReports(commandStatusReport)
        .build();
    dispatcher.dispatch(heartbeat);
    Assert.assertEquals(2, eventReceived.get());
}
Also used: EventPublisher (org.apache.hadoop.hdds.server.events.EventPublisher), CommandStatusReportFromDatanode (org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.CommandStatusReportFromDatanode), ContainerReportFromDatanode (org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode), SCMHeartbeatRequestProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto), NodeManager (org.apache.hadoop.hdds.scm.node.NodeManager), ContainerReportsProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), CommandStatusReportsProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto), Test (org.junit.Test)
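The anonymous EventPublisher above both counts events and asserts on their payloads. When only the count matters, a Mockito mock with an Answer is a lighter-weight alternative; the following is a sketch under the same assumptions about the EventPublisher interface, not the way the Ozone test itself is written:

    // Sketch: count fired events without an anonymous EventPublisher implementation.
    AtomicInteger eventReceived = new AtomicInteger();
    EventPublisher countingPublisher = Mockito.mock(EventPublisher.class);
    Mockito.doAnswer(invocation -> {
        eventReceived.incrementAndGet();
        return null;
    }).when(countingPublisher).fireEvent(Mockito.any(), Mockito.any());
    SCMDatanodeHeartbeatDispatcher dispatcher =
        new SCMDatanodeHeartbeatDispatcher(mockNodeManager, countingPublisher);

The trade-off is that the payload assertions from the original test are lost; this variant only checks how many events were dispatched.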

Example 3 with EventPublisher

Use of org.apache.hadoop.hdds.server.events.EventPublisher in the Apache Ozone project.

From the class TestSCMNodeManager, method testProcessLayoutVersionLowerMlv:

@Test
public void testProcessLayoutVersionLowerMlv() throws IOException {
    OzoneConfiguration conf = new OzoneConfiguration();
    SCMStorageConfig scmStorageConfig = mock(SCMStorageConfig.class);
    when(scmStorageConfig.getClusterID()).thenReturn("xyz111");
    EventPublisher eventPublisher = mock(EventPublisher.class);
    HDDSLayoutVersionManager lvm = new HDDSLayoutVersionManager(scmStorageConfig.getLayoutVersion());
    SCMNodeManager nodeManager = new SCMNodeManager(conf, scmStorageConfig, eventPublisher, new NetworkTopologyImpl(conf), SCMContext.emptyContext(), lvm);
    DatanodeDetails node1 = HddsTestUtils.createRandomDatanodeAndRegister(nodeManager);
    verify(eventPublisher, times(1)).fireEvent(NEW_NODE, node1);
    int scmMlv = nodeManager.getLayoutVersionManager().getMetadataLayoutVersion();
    nodeManager.processLayoutVersionReport(node1,
        LayoutVersionProto.newBuilder()
            .setMetadataLayoutVersion(scmMlv - 1)
            .setSoftwareLayoutVersion(scmMlv)
            .build());
    ArgumentCaptor<CommandForDatanode> captor = ArgumentCaptor.forClass(CommandForDatanode.class);
    verify(eventPublisher, times(1)).fireEvent(Mockito.eq(DATANODE_COMMAND), captor.capture());
    assertTrue(captor.getValue().getDatanodeId().equals(node1.getUuid()));
    assertTrue(captor.getValue().getCommand().getType().equals(finalizeNewLayoutVersionCommand));
}
Also used: NetworkTopologyImpl (org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl), CommandForDatanode (org.apache.hadoop.ozone.protocol.commands.CommandForDatanode), EventPublisher (org.apache.hadoop.hdds.server.events.EventPublisher), SCMStorageConfig (org.apache.hadoop.hdds.scm.server.SCMStorageConfig), MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails), MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails), MockDatanodeDetails.createDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.createDatanodeDetails), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), HDDSLayoutVersionManager (org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager), Test (org.junit.Test)
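If you also wanted to assert that SCMNodeManager published nothing beyond the NEW_NODE and DATANODE_COMMAND events verified above, Mockito's verifyNoMoreInteractions could be appended at the end of the test. This is a sketch; whether it would actually pass depends on what other events SCMNodeManager fires during registration, which this snippet does not show:

    // Sketch: fail if the publisher saw any interaction beyond the two verified fireEvent calls.
    Mockito.verifyNoMoreInteractions(eventPublisher);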

Example 4 with EventPublisher

Use of org.apache.hadoop.hdds.server.events.EventPublisher in the Apache Ozone project.

From the class TestSCMNodeManager, method testScmNodeReportUpdate:

/**
 * Tests single-node stat updates based on node reports across the different
 * heartbeat states (healthy, stale and dead).
 * @throws IOException
 * @throws InterruptedException
 * @throws TimeoutException
 * @throws AuthenticationException
 */
@Test
public void testScmNodeReportUpdate() throws IOException, InterruptedException, TimeoutException, AuthenticationException {
    OzoneConfiguration conf = getConf();
    final int heartbeatCount = 5;
    final int nodeCount = 1;
    final int interval = 100;
    conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval, MILLISECONDS);
    conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS);
    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
    conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
    try (SCMNodeManager nodeManager = createNodeManager(conf)) {
        DatanodeDetails datanodeDetails = HddsTestUtils.createRandomDatanodeAndRegister(nodeManager);
        NodeReportHandler nodeReportHandler = new NodeReportHandler(nodeManager);
        EventPublisher publisher = mock(EventPublisher.class);
        final long capacity = 2000;
        final long usedPerHeartbeat = 100;
        UUID dnId = datanodeDetails.getUuid();
        for (int x = 0; x < heartbeatCount; x++) {
            long scmUsed = x * usedPerHeartbeat;
            long remaining = capacity - scmUsed;
            String storagePath = testDir.getAbsolutePath() + "/" + dnId;
            StorageReportProto report = HddsTestUtils.createStorageReport(dnId, storagePath, capacity, scmUsed, remaining, null);
            NodeReportProto nodeReportProto = HddsTestUtils.createNodeReport(Arrays.asList(report), Collections.emptyList());
            nodeReportHandler.onMessage(new NodeReportFromDatanode(datanodeDetails, nodeReportProto), publisher);
            LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager();
            LayoutVersionProto layoutInfo = toLayoutVersionProto(versionManager.getMetadataLayoutVersion(), versionManager.getSoftwareLayoutVersion());
            nodeManager.processHeartbeat(datanodeDetails, layoutInfo);
            Thread.sleep(100);
        }
        final long expectedScmUsed = usedPerHeartbeat * (heartbeatCount - 1);
        final long expectedRemaining = capacity - expectedScmUsed;
        GenericTestUtils.waitFor(() -> nodeManager.getStats().getScmUsed().get() == expectedScmUsed, 100, 4 * 1000);
        long foundCapacity = nodeManager.getStats().getCapacity().get();
        assertEquals(capacity, foundCapacity);
        long foundScmUsed = nodeManager.getStats().getScmUsed().get();
        assertEquals(expectedScmUsed, foundScmUsed);
        long foundRemaining = nodeManager.getStats().getRemaining().get();
        assertEquals(expectedRemaining, foundRemaining);
        // Test NodeManager#getNodeStats
        assertEquals(nodeCount, nodeManager.getNodeStats().size());
        long nodeCapacity = nodeManager.getNodeStat(datanodeDetails).get().getCapacity().get();
        assertEquals(capacity, nodeCapacity);
        foundScmUsed = nodeManager.getNodeStat(datanodeDetails).get().getScmUsed().get();
        assertEquals(expectedScmUsed, foundScmUsed);
        foundRemaining = nodeManager.getNodeStat(datanodeDetails).get().getRemaining().get();
        assertEquals(expectedRemaining, foundRemaining);
        // Compare the result from
        // NodeManager#getNodeStats and NodeManager#getNodeStat
        SCMNodeStat stat1 = nodeManager.getNodeStats().get(datanodeDetails);
        SCMNodeStat stat2 = nodeManager.getNodeStat(datanodeDetails).get();
        assertEquals(stat1, stat2);
        // Wait up to 4 seconds so that the node becomes stale.
        // Verify that the usage info is unchanged.
        GenericTestUtils.waitFor(() -> nodeManager.getNodeCount(NodeStatus.inServiceStale()) == 1, 100, 4 * 1000);
        assertEquals(nodeCount, nodeManager.getNodeStats().size());
        foundCapacity = nodeManager.getNodeStat(datanodeDetails).get().getCapacity().get();
        assertEquals(capacity, foundCapacity);
        foundScmUsed = nodeManager.getNodeStat(datanodeDetails).get().getScmUsed().get();
        assertEquals(expectedScmUsed, foundScmUsed);
        foundRemaining = nodeManager.getNodeStat(datanodeDetails).get().getRemaining().get();
        assertEquals(expectedRemaining, foundRemaining);
        // Wait up to 4 more seconds so that the node becomes dead.
        // Verify that the node's usage info is dropped from the aggregate stats.
        GenericTestUtils.waitFor(() -> nodeManager.getNodeCount(NodeStatus.inServiceDead()) == 1, 100, 4 * 1000);
        assertEquals(0, nodeManager.getNodeStats().size());
        foundCapacity = nodeManager.getStats().getCapacity().get();
        assertEquals(0, foundCapacity);
        foundScmUsed = nodeManager.getStats().getScmUsed().get();
        assertEquals(0, foundScmUsed);
        foundRemaining = nodeManager.getStats().getRemaining().get();
        assertEquals(0, foundRemaining);
        LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager();
        LayoutVersionProto layoutInfo = toLayoutVersionProto(versionManager.getMetadataLayoutVersion(), versionManager.getSoftwareLayoutVersion());
        nodeManager.processHeartbeat(datanodeDetails, layoutInfo);
        // Wait up to 5 seconds so that the dead node becomes healthy again.
        // Verify that the usage info is restored.
        GenericTestUtils.waitFor(() -> nodeManager.getNodeCount(NodeStatus.inServiceHealthy()) == 1, 100, 5 * 1000);
        GenericTestUtils.waitFor(() -> nodeManager.getStats().getScmUsed().get() == expectedScmUsed, 100, 4 * 1000);
        assertEquals(nodeCount, nodeManager.getNodeStats().size());
        foundCapacity = nodeManager.getNodeStat(datanodeDetails).get().getCapacity().get();
        assertEquals(capacity, foundCapacity);
        foundScmUsed = nodeManager.getNodeStat(datanodeDetails).get().getScmUsed().get();
        assertEquals(expectedScmUsed, foundScmUsed);
        foundRemaining = nodeManager.getNodeStat(datanodeDetails).get().getRemaining().get();
        assertEquals(expectedRemaining, foundRemaining);
    }
}
Also used: UpgradeUtils.toLayoutVersionProto (org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto), LayoutVersionProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto), EventPublisher (org.apache.hadoop.hdds.server.events.EventPublisher), OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), MetadataStorageReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto), StorageReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto), NodeReportFromDatanode (org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode), SCMNodeStat (org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat), MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails), MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails), MockDatanodeDetails.createDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.createDatanodeDetails), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), LayoutVersionManager (org.apache.hadoop.ozone.upgrade.LayoutVersionManager), HDDSLayoutVersionManager (org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager), UUID (java.util.UUID), NodeReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto), Test (org.junit.Test)
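The test repeats the same capacity/used/remaining assertions several times. A small hypothetical helper (not part of the original class) could condense them; it relies only on the SCMNodeStat accessors already visible in the test:

// Hypothetical helper; uses only accessors exercised in the test above.
private static void assertNodeStat(SCMNodeStat stat, long expectedCapacity,
    long expectedScmUsed, long expectedRemaining) {
    long foundCapacity = stat.getCapacity().get();
    long foundScmUsed = stat.getScmUsed().get();
    long foundRemaining = stat.getRemaining().get();
    assertEquals(expectedCapacity, foundCapacity);
    assertEquals(expectedScmUsed, foundScmUsed);
    assertEquals(expectedRemaining, foundRemaining);
}

With it, each three-line assertion block above becomes a single call such as assertNodeStat(nodeManager.getNodeStat(datanodeDetails).get(), capacity, expectedScmUsed, expectedRemaining).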

Example 5 with EventPublisher

Use of org.apache.hadoop.hdds.server.events.EventPublisher in the Apache Ozone project.

From the class TestSCMDatanodeHeartbeatDispatcher, method testNodeReportDispatcher:

@Test
public void testNodeReportDispatcher() throws IOException {
    AtomicInteger eventReceived = new AtomicInteger();
    NodeReportProto nodeReport = NodeReportProto.getDefaultInstance();
    NodeManager mockNodeManager = Mockito.mock(NodeManager.class);
    Mockito.when(mockNodeManager.isNodeRegistered(Mockito.any())).thenReturn(true);
    SCMDatanodeHeartbeatDispatcher dispatcher = new SCMDatanodeHeartbeatDispatcher(mockNodeManager, new EventPublisher() {

        @Override
        public <PAYLOAD, EVENT extends Event<PAYLOAD>> void fireEvent(EVENT event, PAYLOAD payload) {
            Assert.assertEquals(NODE_REPORT, event);
            eventReceived.incrementAndGet();
            Assert.assertEquals(nodeReport, ((NodeReportFromDatanode) payload).getReport());
        }
    });
    DatanodeDetails datanodeDetails = randomDatanodeDetails();
    SCMHeartbeatRequestProto heartbeat = SCMHeartbeatRequestProto.newBuilder()
        .setDatanodeDetails(datanodeDetails.getProtoBufMessage())
        .setNodeReport(nodeReport)
        .build();
    dispatcher.dispatch(heartbeat);
    Assert.assertEquals(1, eventReceived.get());
}
Also used: SCMHeartbeatRequestProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto), NodeManager (org.apache.hadoop.hdds.scm.node.NodeManager), EventPublisher (org.apache.hadoop.hdds.server.events.EventPublisher), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), NodeReportFromDatanode (org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode), NodeReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto), Test (org.junit.Test)

Aggregations

EventPublisher (org.apache.hadoop.hdds.server.events.EventPublisher): 7
Test (org.junit.Test): 7
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 5
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 5
MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails): 5
NodeManager (org.apache.hadoop.hdds.scm.node.NodeManager): 3
HDDSLayoutVersionManager (org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager): 3
UUID (java.util.UUID): 2
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 2
MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails): 2
MockDatanodeDetails.createDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.createDatanodeDetails): 2
NodeReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto): 2
SCMHeartbeatRequestProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto): 2
NetworkTopologyImpl (org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl): 2
NodeReportFromDatanode (org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode): 2
SCMStorageConfig (org.apache.hadoop.hdds.scm.server.SCMStorageConfig): 2
CommandForDatanode (org.apache.hadoop.ozone.protocol.commands.CommandForDatanode): 2
Field (java.lang.reflect.Field): 1
Path (java.nio.file.Path): 1
HddsProtos (org.apache.hadoop.hdds.protocol.proto.HddsProtos): 1