Example 1 with IncrementalContainerReportProto

use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto in project ozone by apache.

From the class TestReconIncrementalContainerReportHandler, the method getIncrementalContainerReportProto:

private static IncrementalContainerReportProto getIncrementalContainerReportProto(
        final ContainerID containerId, final State state, final String originNodeId) {
    final IncrementalContainerReportProto.Builder crBuilder =
        IncrementalContainerReportProto.newBuilder();
    final ContainerReplicaProto replicaProto = ContainerReplicaProto.newBuilder()
        .setContainerID(containerId.getId()).setState(state).setOriginNodeId(originNodeId).build();
    return crBuilder.addReport(replicaProto).build();
}
Also used : IncrementalContainerReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto) ContainerReplicaProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto)
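The helper above wraps exactly one ContainerReplicaProto per report. Where a test needed a single incremental report carrying several replica updates, the same builder calls could be chained; the following is a minimal sketch (not part of the Ozone test class, and the method name and the java.util.Map import are assumptions), reusing only the setters shown above.

// Hypothetical variant of the helper above: builds one ICR carrying a
// replica entry per (containerId, state) pair, all attributed to the same
// origin node. Uses only the builder methods visible in Example 1.
private static IncrementalContainerReportProto getBatchedIncrementalContainerReportProto(
        final Map<ContainerID, State> replicaStates, final String originNodeId) {
    final IncrementalContainerReportProto.Builder crBuilder =
        IncrementalContainerReportProto.newBuilder();
    replicaStates.forEach((containerId, state) ->
        crBuilder.addReport(ContainerReplicaProto.newBuilder()
            .setContainerID(containerId.getId())
            .setState(state)
            .setOriginNodeId(originNodeId)
            .build()));
    return crBuilder.build();
}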

Example 2 with IncrementalContainerReportProto

use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto in project ozone by apache.

From the class TestReconIncrementalContainerReportHandler, the method testProcessICRStateMismatch:

@Test
public void testProcessICRStateMismatch() throws IOException {
    // Recon container state is "OPEN".
    // Replica state could be any Non OPEN state.
    long containerId = 11;
    for (State state : Arrays.asList(State.CLOSING, State.QUASI_CLOSED, State.CLOSED)) {
        ContainerWithPipeline containerWithPipeline = getTestContainer(containerId++, OPEN);
        ContainerID containerID = containerWithPipeline.getContainerInfo().containerID();
        ReconContainerManager containerManager = getContainerManager();
        containerManager.addNewContainer(containerWithPipeline);
        DatanodeDetails datanodeDetails = containerWithPipeline.getPipeline().getFirstNode();
        NodeManager nodeManagerMock = mock(NodeManager.class);
        when(nodeManagerMock.getNodeByUuid(any())).thenReturn(datanodeDetails);
        IncrementalContainerReportFromDatanode reportMock = mock(IncrementalContainerReportFromDatanode.class);
        when(reportMock.getDatanodeDetails()).thenReturn(containerWithPipeline.getPipeline().getFirstNode());
        IncrementalContainerReportProto containerReport = getIncrementalContainerReportProto(containerID, state, datanodeDetails.getUuidString());
        when(reportMock.getReport()).thenReturn(containerReport);
        ReconIncrementalContainerReportHandler reconIcr = new ReconIncrementalContainerReportHandler(nodeManagerMock, containerManager, SCMContext.emptyContext());
        reconIcr.onMessage(reportMock, mock(EventPublisher.class));
        assertTrue(containerManager.containerExist(containerID));
        assertEquals(1, containerManager.getContainerReplicas(containerID).size());
        LifeCycleState expectedState = getContainerStateFromReplicaState(state);
        LifeCycleState actualState = containerManager.getContainer(containerID).getState();
        assertEquals(String.format("Expecting %s in container state for replica state %s", expectedState, state), expectedState, actualState);
    }
}
Also used : NodeManager(org.apache.hadoop.hdds.scm.node.NodeManager) SCMNodeManager(org.apache.hadoop.hdds.scm.node.SCMNodeManager) IncrementalContainerReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto) EventPublisher(org.apache.hadoop.hdds.server.events.EventPublisher) ContainerID(org.apache.hadoop.hdds.scm.container.ContainerID) LifeCycleState(org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState) State(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State) MockDatanodeDetails.randomDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) IncrementalContainerReportFromDatanode(org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) Test(org.junit.Test)
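The final assertion relies on a getContainerStateFromReplicaState helper whose body is not reproduced on this page. Judging by the transitions exercised in Examples 4 and 5 below (a QUASI_CLOSED replica drives the container to QUASI_CLOSED, a CLOSED replica to CLOSED), a plausible sketch of that mapping is the following; treat it as a reconstruction, not the actual Ozone source.

// Assumed reconstruction of the mapping used by the assertion above. For
// the three replica states exercised by the loop, the expected container
// state simply mirrors the replica state.
private LifeCycleState getContainerStateFromReplicaState(final State state) {
    switch (state) {
    case CLOSING:
        return LifeCycleState.CLOSING;
    case QUASI_CLOSED:
        return LifeCycleState.QUASI_CLOSED;
    case CLOSED:
        return LifeCycleState.CLOSED;
    default:
        throw new IllegalArgumentException("Unexpected replica state: " + state);
    }
}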

Example 3 with IncrementalContainerReportProto

use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto in project ozone by apache.

From the class TestIncrementalContainerReportHandler, the method testICRFCRRace:

@Test
// This test reproduced the race condition described in HDDS-5249
// until the code was changed to fix the race condition.
public void testICRFCRRace() throws IOException, NodeNotFoundException, ExecutionException, InterruptedException {
    final IncrementalContainerReportHandler reportHandler = new IncrementalContainerReportHandler(nodeManager, containerManager, scmContext);
    final ContainerReportHandler fullReportHandler = new ContainerReportHandler(nodeManager, containerManager);
    final ContainerInfo container = getContainer(LifeCycleState.CLOSED);
    final ContainerInfo containerTwo = getContainer(LifeCycleState.CLOSED);
    final DatanodeDetails datanode = randomDatanodeDetails();
    nodeManager.register(datanode, null, null);
    containerStateManager.addContainer(container.getProtobuf());
    containerStateManager.addContainer(containerTwo.getProtobuf());
    Assert.assertEquals(0, nodeManager.getContainers(datanode).size());
    final IncrementalContainerReportProto containerReport = getIncrementalContainerReportProto(container.containerID(), CLOSED, datanode.getUuidString());
    final IncrementalContainerReportFromDatanode icr = new IncrementalContainerReportFromDatanode(datanode, containerReport);
    final ContainerReportsProto fullReport = TestContainerReportHandler.getContainerReportsProto(containerTwo.containerID(), CLOSED, datanode.getUuidString());
    final ContainerReportFromDatanode fcr = new ContainerReportFromDatanode(datanode, fullReport);
    // We need to run the FCR and ICR at the same time via the executor so we
    // can try to simulate the race condition.
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(2);
    try {
        // Repeat several times; before the race condition was fixed, this
        // test failed consistently every time (reproducing the issue).
        for (int i = 0; i < 10; i++) {
            Future<?> t1 = executor.submit(() -> fullReportHandler.onMessage(fcr, publisher));
            Future<?> t2 = executor.submit(() -> reportHandler.onMessage(icr, publisher));
            t1.get();
            t2.get();
            Set<ContainerID> nmContainers = nodeManager.getContainers(datanode);
            if (nmContainers.contains(container.containerID())) {
                // If we find "container" in the NM, then we must also have it in
                // Container Manager.
                Assert.assertEquals(1, containerStateManager.getContainerReplicas(container.containerID()).size());
                Assert.assertEquals(2, nmContainers.size());
            } else {
                // If the race condition occurs as mentioned in HDDS-5249, then this
                // assert should fail. We will have found nothing for "container" in
                // NM, but have found something for it in ContainerManager, and that
                // should not happen. It should be in both, or neither.
                Assert.assertEquals(0, containerStateManager.getContainerReplicas(container.containerID()).size());
                Assert.assertEquals(1, nmContainers.size());
            }
            Assert.assertEquals(1, containerStateManager.getContainerReplicas(containerTwo.containerID()).size());
        }
    } finally {
        executor.shutdown();
    }
}
Also used : IncrementalContainerReportFromDatanode(org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode) ContainerReportFromDatanode(org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode) IncrementalContainerReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto) ContainerReportsProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto) MockDatanodeDetails.randomDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) Test(org.junit.Test)
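The race test also calls TestContainerReportHandler.getContainerReportsProto, which is not reproduced on this page. As a rough counterpart to the ICR helper in Example 1, a full container report can be assembled as sketched below; the addReports field name comes from the generated protobuf API, and the real helper may well populate more replica fields (size, key count, and so on), so read this as a sketch rather than the actual Ozone helper.

// Sketch only: builds a full ContainerReportsProto carrying a single
// replica, using the same three fields the ICR helper in Example 1 sets.
private static ContainerReportsProto getContainerReportsProtoSketch(
        final ContainerID containerId, final State state, final String originNodeId) {
    final ContainerReplicaProto replica = ContainerReplicaProto.newBuilder()
        .setContainerID(containerId.getId())
        .setState(state)
        .setOriginNodeId(originNodeId)
        .build();
    return ContainerReportsProto.newBuilder()
        .addReports(replica)
        .build();
}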

Example 4 with IncrementalContainerReportProto

use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto in project ozone by apache.

From the class TestIncrementalContainerReportHandler, the method testClosingToQuasiClosed:

@Test
public void testClosingToQuasiClosed() throws IOException {
    final IncrementalContainerReportHandler reportHandler = new IncrementalContainerReportHandler(nodeManager, containerManager, scmContext);
    final ContainerInfo container = getContainer(LifeCycleState.CLOSING);
    final DatanodeDetails datanodeOne = randomDatanodeDetails();
    final DatanodeDetails datanodeTwo = randomDatanodeDetails();
    final DatanodeDetails datanodeThree = randomDatanodeDetails();
    nodeManager.register(datanodeOne, null, null);
    nodeManager.register(datanodeTwo, null, null);
    nodeManager.register(datanodeThree, null, null);
    final Set<ContainerReplica> containerReplicas = getReplicas(container.containerID(), ContainerReplicaProto.State.CLOSING, datanodeOne, datanodeTwo, datanodeThree);
    containerStateManager.addContainer(container.getProtobuf());
    containerReplicas.forEach(r -> containerStateManager.updateContainerReplica(container.containerID(), r));
    final IncrementalContainerReportProto containerReport = getIncrementalContainerReportProto(container.containerID(), ContainerReplicaProto.State.QUASI_CLOSED, datanodeOne.getUuidString());
    final IncrementalContainerReportFromDatanode icrFromDatanode = new IncrementalContainerReportFromDatanode(datanodeOne, containerReport);
    reportHandler.onMessage(icrFromDatanode, publisher);
    Assert.assertEquals(LifeCycleState.QUASI_CLOSED, containerManager.getContainer(container.containerID()).getState());
}
Also used : IncrementalContainerReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto) MockDatanodeDetails.randomDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) IncrementalContainerReportFromDatanode(org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode) Test(org.junit.Test)
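This test and the next one build their initial replica sets with a getReplicas helper that is not shown on this page. A hypothetical version is sketched below; the ContainerReplica builder methods (setContainerID, setContainerState, setDatanodeDetails, setOriginNodeId) and the java.util.Set/HashSet imports are assumptions about the SCM ContainerReplica API rather than code confirmed by these snippets.

// Hypothetical getReplicas-style helper: one ContainerReplica per datanode,
// all in the given replica state, with each datanode's UUID as origin node.
// Builder method names are assumptions, not confirmed by this page.
private static Set<ContainerReplica> getReplicasSketch(
        final ContainerID containerId, final ContainerReplicaProto.State state,
        final DatanodeDetails... datanodes) {
    final Set<ContainerReplica> replicas = new HashSet<>();
    for (DatanodeDetails dn : datanodes) {
        replicas.add(ContainerReplica.newBuilder()
            .setContainerID(containerId)
            .setContainerState(state)
            .setDatanodeDetails(dn)
            .setOriginNodeId(dn.getUuid())
            .build());
    }
    return replicas;
}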

Example 5 with IncrementalContainerReportProto

use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto in project ozone by apache.

From the class TestIncrementalContainerReportHandler, the method testQuasiClosedToClosed:

@Test
public void testQuasiClosedToClosed() throws IOException {
    final IncrementalContainerReportHandler reportHandler = new IncrementalContainerReportHandler(nodeManager, containerManager, scmContext);
    final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED);
    final DatanodeDetails datanodeOne = randomDatanodeDetails();
    final DatanodeDetails datanodeTwo = randomDatanodeDetails();
    final DatanodeDetails datanodeThree = randomDatanodeDetails();
    nodeManager.register(datanodeOne, null, null);
    nodeManager.register(datanodeTwo, null, null);
    nodeManager.register(datanodeThree, null, null);
    final Set<ContainerReplica> containerReplicas = getReplicas(container.containerID(), ContainerReplicaProto.State.CLOSING, datanodeOne, datanodeTwo);
    containerReplicas.addAll(getReplicas(container.containerID(), ContainerReplicaProto.State.QUASI_CLOSED, datanodeThree));
    containerStateManager.addContainer(container.getProtobuf());
    containerReplicas.forEach(r -> containerStateManager.updateContainerReplica(container.containerID(), r));
    final IncrementalContainerReportProto containerReport = getIncrementalContainerReportProto(container.containerID(), CLOSED, datanodeThree.getUuidString());
    final IncrementalContainerReportFromDatanode icr = new IncrementalContainerReportFromDatanode(datanodeOne, containerReport);
    reportHandler.onMessage(icr, publisher);
    Assert.assertEquals(LifeCycleState.CLOSED, containerManager.getContainer(container.containerID()).getState());
}
Also used : IncrementalContainerReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto) MockDatanodeDetails.randomDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) IncrementalContainerReportFromDatanode(org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode) Test(org.junit.Test)

Aggregations

IncrementalContainerReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto): 10
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 8
MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails): 7
IncrementalContainerReportFromDatanode (org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode): 7
Test (org.junit.Test): 7
ContainerReplicaProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto): 2
ContainerID (org.apache.hadoop.hdds.scm.container.ContainerID): 2
NodeManager (org.apache.hadoop.hdds.scm.node.NodeManager): 2
SCMNodeManager (org.apache.hadoop.hdds.scm.node.SCMNodeManager): 2
EventPublisher (org.apache.hadoop.hdds.server.events.EventPublisher): 2
Path (java.nio.file.Path): 1
UUID (java.util.UUID): 1
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 1
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 1
LifeCycleState (org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState): 1
CommandStatusReportsProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto): 1
State (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State): 1
ContainerReportsProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto): 1
LayoutVersionProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto): 1
ContainerWithPipeline (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline): 1