
Example 1 with ContainerReportFromDatanode

Use of org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode in project ozone by apache.

From class TestSCMDatanodeHeartbeatDispatcher, method testContainerReportDispatcher.

@Test
public void testContainerReportDispatcher() throws IOException {
    AtomicInteger eventReceived = new AtomicInteger();
    ContainerReportsProto containerReport = ContainerReportsProto.getDefaultInstance();
    CommandStatusReportsProto commandStatusReport = CommandStatusReportsProto.getDefaultInstance();
    NodeManager mockNodeManager = Mockito.mock(NodeManager.class);
    Mockito.when(mockNodeManager.isNodeRegistered(Mockito.any())).thenReturn(true);
    SCMDatanodeHeartbeatDispatcher dispatcher = new SCMDatanodeHeartbeatDispatcher(mockNodeManager, new EventPublisher() {

        @Override
        public <PAYLOAD, EVENT extends Event<PAYLOAD>> void fireEvent(EVENT event, PAYLOAD payload) {
            Assert.assertTrue(event.equals(CONTAINER_REPORT) || event.equals(CMD_STATUS_REPORT));
            if (payload instanceof ContainerReportFromDatanode) {
                Assert.assertEquals(containerReport, ((ContainerReportFromDatanode) payload).getReport());
            }
            if (payload instanceof CommandStatusReportFromDatanode) {
                Assert.assertEquals(commandStatusReport, ((CommandStatusReportFromDatanode) payload).getReport());
            }
            eventReceived.incrementAndGet();
        }
    });
    DatanodeDetails datanodeDetails = randomDatanodeDetails();
    SCMHeartbeatRequestProto heartbeat = SCMHeartbeatRequestProto.newBuilder()
        .setDatanodeDetails(datanodeDetails.getProtoBufMessage())
        .setContainerReport(containerReport)
        .addCommandStatusReports(commandStatusReport)
        .build();
    dispatcher.dispatch(heartbeat);
    Assert.assertEquals(2, eventReceived.get());
}
Also used: EventPublisher (org.apache.hadoop.hdds.server.events.EventPublisher), CommandStatusReportFromDatanode (org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.CommandStatusReportFromDatanode), ContainerReportFromDatanode (org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode), SCMHeartbeatRequestProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto), NodeManager (org.apache.hadoop.hdds.scm.node.NodeManager), ContainerReportsProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto), AtomicInteger (java.util.concurrent.atomic.AtomicInteger), MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), CommandStatusReportsProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto), Test (org.junit.Test)
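
For context, the CONTAINER_REPORT event asserted above is normally delivered through SCM's asynchronous EventQueue rather than an inline EventPublisher. Below is a minimal wiring sketch, not taken from the test, assuming the SCMEvents.CONTAINER_REPORT typed event and the EventQueue/EventHandler APIs behave as in other Ozone code; the ContainerReportWiring class name is made up for illustration.

import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.hdds.server.events.EventQueue;

public class ContainerReportWiring {

    public static void main(String[] args) {
        // EventQueue is the asynchronous publisher SCM normally hands to the
        // dispatcher; handlers are registered per typed event.
        EventQueue eventQueue = new EventQueue();
        eventQueue.addHandler(SCMEvents.CONTAINER_REPORT,
            (ContainerReportFromDatanode report, EventPublisher publisher) ->
                System.out.println("Container report from datanode "
                    + report.getDatanodeDetails().getUuidString()));
        // A dispatcher constructed with this eventQueue would deliver the same
        // ContainerReportFromDatanode payload that the inline publisher above
        // asserts on.
        eventQueue.close();
    }
}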

Example 2 with ContainerReportFromDatanode

Use of org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode in project ozone by apache.

From class TestIncrementalContainerReportHandler, method testICRFCRRace.

@Test
// This test reproduced the race condition described in HDDS-5249 until the
// code was changed to fix the race condition.
public void testICRFCRRace() throws IOException, NodeNotFoundException, ExecutionException, InterruptedException {
    final IncrementalContainerReportHandler reportHandler = new IncrementalContainerReportHandler(nodeManager, containerManager, scmContext);
    final ContainerReportHandler fullReportHandler = new ContainerReportHandler(nodeManager, containerManager);
    final ContainerInfo container = getContainer(LifeCycleState.CLOSED);
    final ContainerInfo containerTwo = getContainer(LifeCycleState.CLOSED);
    final DatanodeDetails datanode = randomDatanodeDetails();
    nodeManager.register(datanode, null, null);
    containerStateManager.addContainer(container.getProtobuf());
    containerStateManager.addContainer(containerTwo.getProtobuf());
    Assert.assertEquals(0, nodeManager.getContainers(datanode).size());
    final IncrementalContainerReportProto containerReport = getIncrementalContainerReportProto(container.containerID(), CLOSED, datanode.getUuidString());
    final IncrementalContainerReportFromDatanode icr = new IncrementalContainerReportFromDatanode(datanode, containerReport);
    final ContainerReportsProto fullReport = TestContainerReportHandler.getContainerReportsProto(containerTwo.containerID(), CLOSED, datanode.getUuidString());
    final ContainerReportFromDatanode fcr = new ContainerReportFromDatanode(datanode, fullReport);
    // We need to run the FCR and ICR at the same time via the executor so we
    // can try to simulate the race condition.
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(2);
    try {
        // Before the fix, the ten iterations of this loop made the test fail
        // consistently every time (reproducing the issue).
        for (int i = 0; i < 10; i++) {
            Future<?> t1 = executor.submit(() -> fullReportHandler.onMessage(fcr, publisher));
            Future<?> t2 = executor.submit(() -> reportHandler.onMessage(icr, publisher));
            t1.get();
            t2.get();
            Set<ContainerID> nmContainers = nodeManager.getContainers(datanode);
            if (nmContainers.contains(container.containerID())) {
                // If we find "container" in the NM, then we must also have it in
                // Container Manager.
                Assert.assertEquals(1, containerStateManager.getContainerReplicas(container.containerID()).size());
                Assert.assertEquals(2, nmContainers.size());
            } else {
                // If the race condition occurs as mentioned in HDDS-5249, then this
                // assert should fail. We will have found nothing for "container" in
                // NM, but have found something for it in ContainerManager, and that
                // should not happen. It should be in both, or neither.
                Assert.assertEquals(0, containerStateManager.getContainerReplicas(container.containerID()).size());
                Assert.assertEquals(1, nmContainers.size());
            }
            Assert.assertEquals(1, containerStateManager.getContainerReplicas(containerTwo.containerID()).size());
        }
    } finally {
        executor.shutdown();
    }
}
Also used: IncrementalContainerReportFromDatanode (org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode), ContainerReportFromDatanode (org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode), IncrementalContainerReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto), ContainerReportsProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto), MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor), Test (org.junit.Test)
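
The detection pattern in testICRFCRRace is more general than Ozone: submit the two competing handlers to an executor, wait on both futures, then assert an invariant that must hold under every interleaving. A self-contained sketch of that pattern follows; the RaceInvariantCheck class, the two tasks, and the container names are hypothetical stand-ins for the FCR and ICR handlers and the NodeManager/ContainerManager views.

import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class RaceInvariantCheck {

    public static void main(String[] args) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(2);
        try {
            // Repeat so an unlucky interleaving has a chance to occur,
            // mirroring the 10-iteration loop in testICRFCRRace.
            for (int i = 0; i < 10; i++) {
                // Two views of the same state; in the Ozone test these are the
                // NodeManager container set and the ContainerManager replicas.
                Set<String> nodeView = ConcurrentHashMap.newKeySet();
                Set<String> containerView = ConcurrentHashMap.newKeySet();
                Future<?> fullReport = executor.submit(() -> {
                    nodeView.add("container-1");
                    containerView.add("container-1");
                });
                Future<?> incrementalReport = executor.submit(() -> {
                    nodeView.add("container-2");
                    containerView.add("container-2");
                });
                fullReport.get();
                incrementalReport.get();
                // Invariant: both views agree once both handlers finished; a
                // container present in one view but not the other is the kind
                // of divergence HDDS-5249 describes.
                if (!nodeView.equals(containerView)) {
                    throw new AssertionError(
                        "views diverged: " + nodeView + " vs " + containerView);
                }
            }
        } finally {
            executor.shutdown();
        }
    }
}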

Example 3 with ContainerReportFromDatanode

Use of org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode in project ozone by apache.

From class TestContainerReportHandler, method testClosingToClosed.

@Test
public void testClosingToClosed() throws NodeNotFoundException, IOException {
    /*
     * The container is in CLOSING state and all the replicas are in
     * OPEN/CLOSING state.
     *
     * The datanode reports that one of the replica is now CLOSED.
     *
     * In this case SCM should mark the container as CLOSED.
     */
    final ContainerReportHandler reportHandler = new ContainerReportHandler(nodeManager, containerManager);
    final Iterator<DatanodeDetails> nodeIterator = nodeManager.getNodes(NodeStatus.inServiceHealthy()).iterator();
    final DatanodeDetails datanodeOne = nodeIterator.next();
    final DatanodeDetails datanodeTwo = nodeIterator.next();
    final DatanodeDetails datanodeThree = nodeIterator.next();
    final ContainerInfo containerOne = getContainer(LifeCycleState.CLOSING);
    final ContainerInfo containerTwo = getContainer(LifeCycleState.CLOSED);
    final Set<ContainerID> containerIDSet = Stream.of(containerOne.containerID(), containerTwo.containerID()).collect(Collectors.toSet());
    final Set<ContainerReplica> containerOneReplicas = getReplicas(containerOne.containerID(), ContainerReplicaProto.State.CLOSING, datanodeOne);
    containerOneReplicas.addAll(getReplicas(containerOne.containerID(), ContainerReplicaProto.State.OPEN, datanodeTwo, datanodeThree));
    final Set<ContainerReplica> containerTwoReplicas = getReplicas(containerTwo.containerID(), ContainerReplicaProto.State.CLOSED, datanodeOne, datanodeTwo, datanodeThree);
    nodeManager.setContainers(datanodeOne, containerIDSet);
    nodeManager.setContainers(datanodeTwo, containerIDSet);
    nodeManager.setContainers(datanodeThree, containerIDSet);
    containerStateManager.addContainer(containerOne.getProtobuf());
    containerStateManager.addContainer(containerTwo.getProtobuf());
    containerOneReplicas.forEach(r -> containerStateManager.updateContainerReplica(containerTwo.containerID(), r));
    containerTwoReplicas.forEach(r -> containerStateManager.updateContainerReplica(containerTwo.containerID(), r));
    final ContainerReportsProto containerReport = getContainerReportsProto(containerOne.containerID(), ContainerReplicaProto.State.CLOSED, datanodeOne.getUuidString());
    final ContainerReportFromDatanode containerReportFromDatanode = new ContainerReportFromDatanode(datanodeOne, containerReport);
    reportHandler.onMessage(containerReportFromDatanode, publisher);
    Assert.assertEquals(LifeCycleState.CLOSED, containerManager.getContainer(containerOne.containerID()).getState());
}
Also used: ContainerReportsProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), ContainerReportFromDatanode (org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode), Test (org.junit.Test)
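
The getContainerReportsProto helper used here is not shown on this page. As a rough sketch of what such a helper presumably assembles, the report can be built directly from the protobuf builders; the singleReplicaReport method below is hypothetical, and it assumes containerID, state, and originNodeId are the fields the report handler relies on (the real helper may also populate size, key count, and other stats).

import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;

public final class ContainerReportBuilderSketch {

    // Hypothetical stand-in for the test helper: one replica entry for the
    // given container in the given state, reported by the given datanode UUID.
    static ContainerReportsProto singleReplicaReport(
            long containerId, ContainerReplicaProto.State state, String originNodeId) {
        return ContainerReportsProto.newBuilder()
            .addReports(ContainerReplicaProto.newBuilder()
                .setContainerID(containerId)
                .setState(state)
                .setOriginNodeId(originNodeId)
                .build())
            .build();
    }

    public static void main(String[] args) {
        // Mirrors the CLOSED replica report that drives testClosingToClosed.
        System.out.println(singleReplicaReport(
            1L, ContainerReplicaProto.State.CLOSED, "datanode-uuid"));
    }
}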

Example 4 with ContainerReportFromDatanode

Use of org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode in project ozone by apache.

From class TestContainerReportHandler, method testClosingToQuasiClosed.

@Test
public void testClosingToQuasiClosed() throws NodeNotFoundException, IOException {
    /*
     * The container is in CLOSING state and all the replicas are in
     * OPEN/CLOSING state.
     *
     * The datanode reports that the replica is now QUASI_CLOSED.
     *
     * In this case SCM should move the container to QUASI_CLOSED.
     */
    final ContainerReportHandler reportHandler = new ContainerReportHandler(nodeManager, containerManager);
    final Iterator<DatanodeDetails> nodeIterator = nodeManager.getNodes(NodeStatus.inServiceHealthy()).iterator();
    final DatanodeDetails datanodeOne = nodeIterator.next();
    final DatanodeDetails datanodeTwo = nodeIterator.next();
    final DatanodeDetails datanodeThree = nodeIterator.next();
    final ContainerInfo containerOne = getContainer(LifeCycleState.CLOSING);
    final ContainerInfo containerTwo = getContainer(LifeCycleState.CLOSED);
    final Set<ContainerID> containerIDSet = Stream.of(containerOne.containerID(), containerTwo.containerID()).collect(Collectors.toSet());
    final Set<ContainerReplica> containerOneReplicas = getReplicas(containerOne.containerID(), ContainerReplicaProto.State.CLOSING, datanodeOne, datanodeTwo);
    containerOneReplicas.addAll(getReplicas(containerOne.containerID(), ContainerReplicaProto.State.OPEN, datanodeThree));
    final Set<ContainerReplica> containerTwoReplicas = getReplicas(containerTwo.containerID(), ContainerReplicaProto.State.CLOSED, datanodeOne, datanodeTwo, datanodeThree);
    nodeManager.setContainers(datanodeOne, containerIDSet);
    nodeManager.setContainers(datanodeTwo, containerIDSet);
    nodeManager.setContainers(datanodeThree, containerIDSet);
    containerStateManager.addContainer(containerOne.getProtobuf());
    containerStateManager.addContainer(containerTwo.getProtobuf());
    containerOneReplicas.forEach(r -> containerStateManager.updateContainerReplica(containerTwo.containerID(), r));
    containerTwoReplicas.forEach(r -> containerStateManager.updateContainerReplica(containerTwo.containerID(), r));
    final ContainerReportsProto containerReport = getContainerReportsProto(containerOne.containerID(), ContainerReplicaProto.State.QUASI_CLOSED, datanodeOne.getUuidString());
    final ContainerReportFromDatanode containerReportFromDatanode = new ContainerReportFromDatanode(datanodeOne, containerReport);
    reportHandler.onMessage(containerReportFromDatanode, publisher);
    Assert.assertEquals(LifeCycleState.QUASI_CLOSED, containerManager.getContainer(containerOne.containerID()).getState());
}
Also used: ContainerReportsProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), ContainerReportFromDatanode (org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode), Test (org.junit.Test)

Example 5 with ContainerReportFromDatanode

Use of org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode in project ozone by apache.

From class TestContainerReportHandler, method testStaleReplicaOfDeletedContainer.

@Test
public void testStaleReplicaOfDeletedContainer() throws NodeNotFoundException, IOException {
    final ContainerReportHandler reportHandler = new ContainerReportHandler(nodeManager, containerManager);
    final Iterator<DatanodeDetails> nodeIterator = nodeManager.getNodes(NodeStatus.inServiceHealthy()).iterator();
    final DatanodeDetails datanodeOne = nodeIterator.next();
    final ContainerInfo containerOne = getContainer(LifeCycleState.DELETED);
    final Set<ContainerID> containerIDSet = Stream.of(containerOne.containerID()).collect(Collectors.toSet());
    nodeManager.setContainers(datanodeOne, containerIDSet);
    containerStateManager.addContainer(containerOne.getProtobuf());
    // Expects the replica will be deleted.
    final ContainerReportsProto containerReport = getContainerReportsProto(containerOne.containerID(), ContainerReplicaProto.State.CLOSED, datanodeOne.getUuidString());
    final ContainerReportFromDatanode containerReportFromDatanode = new ContainerReportFromDatanode(datanodeOne, containerReport);
    reportHandler.onMessage(containerReportFromDatanode, publisher);
    Mockito.verify(publisher, Mockito.times(1)).fireEvent(Mockito.any(), Mockito.any(CommandForDatanode.class));
    Assert.assertEquals(0, containerManager.getContainerReplicas(containerOne.containerID()).size());
}
Also used: CommandForDatanode (org.apache.hadoop.ozone.protocol.commands.CommandForDatanode), ContainerReportsProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), ContainerReportFromDatanode (org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode), Test (org.junit.Test)
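
Mockito.times(1) only counts the fired event; to also check which command was sent (expected to be a delete command for the stale replica), the payload can be captured with a standard Mockito ArgumentCaptor. The sketch below is self-contained with a mocked publisher and command; the getDatanodeId() getter on CommandForDatanode is an assumption about that class.

import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;

public class CapturedCommandSketch {

    @SuppressWarnings({"unchecked", "rawtypes"})
    public static void main(String[] args) {
        // A mocked publisher receives one command, standing in for the
        // publisher used in testStaleReplicaOfDeletedContainer.
        EventPublisher publisher = Mockito.mock(EventPublisher.class);
        CommandForDatanode command = Mockito.mock(CommandForDatanode.class);
        publisher.fireEvent(null, command);

        // Capture the payload instead of only counting invocations, so the
        // command type and its target datanode can be asserted as well.
        ArgumentCaptor<CommandForDatanode> captor =
            ArgumentCaptor.forClass(CommandForDatanode.class);
        Mockito.verify(publisher, Mockito.times(1))
            .fireEvent(Mockito.any(), captor.capture());
        // Prints null here because the command itself is a mock; in the real
        // test it would be datanodeOne's UUID.
        System.out.println("Command sent to datanode "
            + captor.getValue().getDatanodeId());
    }
}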

Aggregations

ContainerReportFromDatanode (org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode): 10 usages
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 9 usages
ContainerReportsProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto): 9 usages
Test (org.junit.Test): 8 usages
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 2 usages
MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails): 2 usages
IncrementalContainerReportFromDatanode (org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode): 2 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 1 usage
CommandStatusReportsProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto): 1 usage
IncrementalContainerReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto): 1 usage
SCMHeartbeatRequestProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto): 1 usage
CommandStatusReportHandler (org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler): 1 usage
CloseContainerEventHandler (org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler): 1 usage
ContainerActionsHandler (org.apache.hadoop.hdds.scm.container.ContainerActionsHandler): 1 usage
ContainerReportHandler (org.apache.hadoop.hdds.scm.container.ContainerReportHandler): 1 usage
IncrementalContainerReportHandler (org.apache.hadoop.hdds.scm.container.IncrementalContainerReportHandler): 1 usage
CRLStatusReportHandler (org.apache.hadoop.hdds.scm.crl.CRLStatusReportHandler): 1 usage
DeadNodeHandler (org.apache.hadoop.hdds.scm.node.DeadNodeHandler): 1 usage
HealthyReadOnlyNodeHandler (org.apache.hadoop.hdds.scm.node.HealthyReadOnlyNodeHandler): 1 usage
NewNodeHandler (org.apache.hadoop.hdds.scm.node.NewNodeHandler): 1 usage