
Example 1 with ContainerReportsProto

use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto in project ozone by apache.

the class RegisterEndpointTask method call.

/**
 * Computes a result, or throws an exception if unable to do so.
 *
 * @return computed result
 * @throws Exception if unable to compute a result
 */
@Override
public EndpointStateMachine.EndPointStates call() throws Exception {
    if (getDatanodeDetails() == null) {
        LOG.error("DatanodeDetails cannot be null in RegisterEndpoint task, " + "shutting down the endpoint.");
        return rpcEndPoint.setState(EndpointStateMachine.EndPointStates.SHUTDOWN);
    }
    rpcEndPoint.lock();
    try {
        if (rpcEndPoint.getState().equals(EndpointStateMachine.EndPointStates.REGISTER)) {
            LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
                    .setMetadataLayoutVersion(layoutVersionManager.getMetadataLayoutVersion())
                    .setSoftwareLayoutVersion(layoutVersionManager.getSoftwareLayoutVersion())
                    .build();
            ContainerReportsProto containerReport = datanodeContainerManager.getController().getContainerReport();
            NodeReportProto nodeReport = datanodeContainerManager.getNodeReport();
            PipelineReportsProto pipelineReportsProto = datanodeContainerManager.getPipelineReport();
            // TODO : Add responses to the command Queue.
            SCMRegisteredResponseProto response = rpcEndPoint.getEndPoint().register(
                    datanodeDetails.getExtendedProtoBufMessage(), nodeReport,
                    containerReport, pipelineReportsProto, layoutInfo);
            Preconditions.checkState(UUID.fromString(response.getDatanodeUUID()).equals(datanodeDetails.getUuid()), "Unexpected datanode ID in the response.");
            Preconditions.checkState(!StringUtils.isBlank(response.getClusterID()), "Invalid cluster ID in the response.");
            Preconditions.checkState(response.getErrorCode() == success, "DataNode has higher Software Layout Version than SCM.");
            if (response.hasHostname() && response.hasIpAddress()) {
                datanodeDetails.setHostName(response.getHostname());
                datanodeDetails.setIpAddress(response.getIpAddress());
            }
            if (response.hasNetworkName() && response.hasNetworkLocation()) {
                datanodeDetails.setNetworkName(response.getNetworkName());
                datanodeDetails.setNetworkLocation(response.getNetworkLocation());
            }
            EndpointStateMachine.EndPointStates nextState = rpcEndPoint.getState().getNextState();
            rpcEndPoint.setState(nextState);
            rpcEndPoint.zeroMissedCount();
            this.stateContext.configureHeartbeatFrequency();
        }
    } catch (IOException ex) {
        rpcEndPoint.logIfNeeded(ex);
    } finally {
        rpcEndPoint.unlock();
    }
    return rpcEndPoint.getState();
}
Also used : EndpointStateMachine(org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine) LayoutVersionProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto) ContainerReportsProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto) SCMRegisteredResponseProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto) IOException(java.io.IOException) NodeReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto) PipelineReportsProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto)
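
Since RegisterEndpointTask is a Callable that returns the endpoint state, a caller can submit it to an executor and branch on the result. A minimal driver sketch, assuming registerTask is a fully constructed RegisterEndpointTask and executor is an ExecutorService managed elsewhere (both names are illustrative, not part of the example above):

// Hedged sketch: registerTask and executor are assumed to exist; this only
// illustrates how the EndPointStates value returned by call() can be used.
Future<EndpointStateMachine.EndPointStates> pending = executor.submit(registerTask);
EndpointStateMachine.EndPointStates result = pending.get();
if (result == EndpointStateMachine.EndPointStates.SHUTDOWN) {
    // DatanodeDetails was null, so the task shut the endpoint down.
} else if (result == EndpointStateMachine.EndPointStates.REGISTER) {
    // Still in REGISTER: the RPC failed and was only logged; retry on the next cycle.
}
// Any other state means registration succeeded and the state machine advanced.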

Example 2 with ContainerReportsProto

use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto in project ozone by apache.

the class TestContainerSet method testGetContainerReport.

@Test
public void testGetContainerReport() throws IOException {
    ContainerSet containerSet = createContainerSet();
    ContainerReportsProto containerReportsRequestProto = containerSet.getContainerReport();
    assertEquals(10, containerReportsRequestProto.getReportsList().size());
}
Also used : ContainerReportsProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto) Test(org.junit.Test)
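
For reference, the report returned above can also be inspected entry by entry. A small sketch, assuming ContainerReplicaProto is imported and exposes the usual generated accessors for its containerID and state fields:

// Sketch only: getContainerID()/getState() are assumed from the proto definition.
ContainerReportsProto report = containerSet.getContainerReport();
for (ContainerReplicaProto replica : report.getReportsList()) {
    System.out.println("container " + replica.getContainerID()
        + " reported in state " + replica.getState());
}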

Example 3 with ContainerReportsProto

use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto in project ozone by apache.

the class StorageContainerDatanodeProtocolServerSideTranslatorPB method register.

public SCMRegisteredResponseProto register(SCMRegisterRequestProto request) throws IOException {
    ContainerReportsProto containerRequestProto = request.getContainerReport();
    NodeReportProto dnNodeReport = request.getNodeReport();
    PipelineReportsProto pipelineReport = request.getPipelineReports();
    LayoutVersionProto layoutInfo = null;
    if (request.hasDataNodeLayoutVersion()) {
        layoutInfo = request.getDataNodeLayoutVersion();
    } else {
        // Backward compatibility to make sure old Datanodes can still talk to
        // SCM.
        layoutInfo = toLayoutVersionProto(INITIAL_VERSION.layoutVersion(), INITIAL_VERSION.layoutVersion());
    }
    return impl.register(request.getExtendedDatanodeDetails(), dnNodeReport, containerRequestProto, pipelineReport, layoutInfo);
}
Also used : ContainerReportsProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto) UpgradeUtils.toLayoutVersionProto(org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto) LayoutVersionProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto) NodeReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto) PipelineReportsProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto)
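
The fallback branch above presumably produces the same kind of message that Example 1 builds explicitly. A sketch of an equivalent default, assuming toLayoutVersionProto simply wraps the generated builder (an assumption, not the actual implementation):

// Equivalent default layout version, built directly with the generated builder.
LayoutVersionProto defaultLayout = LayoutVersionProto.newBuilder()
        .setMetadataLayoutVersion(INITIAL_VERSION.layoutVersion())
        .setSoftwareLayoutVersion(INITIAL_VERSION.layoutVersion())
        .build();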

Example 4 with ContainerReportsProto

use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto in project ozone by apache.

the class TestSCMDatanodeHeartbeatDispatcher method testContainerReportDispatcher.

@Test
public void testContainerReportDispatcher() throws IOException {
    AtomicInteger eventReceived = new AtomicInteger();
    ContainerReportsProto containerReport = ContainerReportsProto.getDefaultInstance();
    CommandStatusReportsProto commandStatusReport = CommandStatusReportsProto.getDefaultInstance();
    NodeManager mockNodeManager = Mockito.mock(NodeManager.class);
    Mockito.when(mockNodeManager.isNodeRegistered(Mockito.any())).thenReturn(true);
    SCMDatanodeHeartbeatDispatcher dispatcher = new SCMDatanodeHeartbeatDispatcher(mockNodeManager, new EventPublisher() {

        @Override
        public <PAYLOAD, EVENT extends Event<PAYLOAD>> void fireEvent(EVENT event, PAYLOAD payload) {
            Assert.assertTrue(event.equals(CONTAINER_REPORT) || event.equals(CMD_STATUS_REPORT));
            if (payload instanceof ContainerReportFromDatanode) {
                Assert.assertEquals(containerReport, ((ContainerReportFromDatanode) payload).getReport());
            }
            if (payload instanceof CommandStatusReportFromDatanode) {
                Assert.assertEquals(commandStatusReport, ((CommandStatusReportFromDatanode) payload).getReport());
            }
            eventReceived.incrementAndGet();
        }
    });
    DatanodeDetails datanodeDetails = randomDatanodeDetails();
    SCMHeartbeatRequestProto heartbeat = SCMHeartbeatRequestProto.newBuilder()
            .setDatanodeDetails(datanodeDetails.getProtoBufMessage())
            .setContainerReport(containerReport)
            .addCommandStatusReports(commandStatusReport)
            .build();
    dispatcher.dispatch(heartbeat);
    Assert.assertEquals(2, eventReceived.get());
}
Also used : EventPublisher(org.apache.hadoop.hdds.server.events.EventPublisher) CommandStatusReportFromDatanode(org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.CommandStatusReportFromDatanode) ContainerReportFromDatanode(org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode) SCMHeartbeatRequestProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto) NodeManager(org.apache.hadoop.hdds.scm.node.NodeManager) ContainerReportsProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) MockDatanodeDetails.randomDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) CommandStatusReportsProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto) Test(org.junit.Test)
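
A hedged variation on the same test: if the mocked NodeManager reports the node as unregistered, the dispatcher is expected to drop the reports, so re-dispatching the same heartbeat should leave the counter unchanged. The expected behavior here is an assumption about the dispatcher, not part of the original test.

// Sketch: assumes the dispatcher ignores heartbeats from unregistered nodes.
Mockito.when(mockNodeManager.isNodeRegistered(Mockito.any())).thenReturn(false);
dispatcher.dispatch(heartbeat);
Assert.assertEquals(2, eventReceived.get()); // no new events were fired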

Example 5 with ContainerReportsProto

use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto in project ozone by apache.

the class TestIncrementalContainerReportHandler method testICRFCRRace.

@Test
// Reproduces the race condition described in HDDS-5249; this test failed
// until the code was changed to fix the race condition.
public void testICRFCRRace() throws IOException, NodeNotFoundException, ExecutionException, InterruptedException {
    final IncrementalContainerReportHandler reportHandler = new IncrementalContainerReportHandler(nodeManager, containerManager, scmContext);
    final ContainerReportHandler fullReportHandler = new ContainerReportHandler(nodeManager, containerManager);
    final ContainerInfo container = getContainer(LifeCycleState.CLOSED);
    final ContainerInfo containerTwo = getContainer(LifeCycleState.CLOSED);
    final DatanodeDetails datanode = randomDatanodeDetails();
    nodeManager.register(datanode, null, null);
    containerStateManager.addContainer(container.getProtobuf());
    containerStateManager.addContainer(containerTwo.getProtobuf());
    Assert.assertEquals(0, nodeManager.getContainers(datanode).size());
    final IncrementalContainerReportProto containerReport =
            getIncrementalContainerReportProto(container.containerID(), CLOSED,
                    datanode.getUuidString());
    final IncrementalContainerReportFromDatanode icr = new IncrementalContainerReportFromDatanode(datanode, containerReport);
    final ContainerReportsProto fullReport = TestContainerReportHandler
            .getContainerReportsProto(containerTwo.containerID(), CLOSED,
                    datanode.getUuidString());
    final ContainerReportFromDatanode fcr = new ContainerReportFromDatanode(datanode, fullReport);
    // We need to run the FCR and ICR at the same time via the executor so we
    // can try to simulate the race condition.
    ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(2);
    try {
        // Before the fix for HDDS-5249, this test failed consistently every
        // time (reproducing the issue).
        for (int i = 0; i < 10; i++) {
            Future<?> t1 = executor.submit(() -> fullReportHandler.onMessage(fcr, publisher));
            Future<?> t2 = executor.submit(() -> reportHandler.onMessage(icr, publisher));
            t1.get();
            t2.get();
            Set<ContainerID> nmContainers = nodeManager.getContainers(datanode);
            if (nmContainers.contains(container.containerID())) {
                // If we find "container" in the NM, then we must also have it in
                // Container Manager.
                Assert.assertEquals(1, containerStateManager.getContainerReplicas(container.containerID()).size());
                Assert.assertEquals(2, nmContainers.size());
            } else {
                // If the race condition occurs as mentioned in HDDS-5249, then this
                // assert should fail. We will have found nothing for "container" in
                // NM, but have found something for it in ContainerManager, and that
                // should not happen. It should be in both, or neither.
                Assert.assertEquals(0, containerStateManager.getContainerReplicas(container.containerID()).size());
                Assert.assertEquals(1, nmContainers.size());
            }
            Assert.assertEquals(1, containerStateManager.getContainerReplicas(containerTwo.containerID()).size());
        }
    } finally {
        executor.shutdown();
    }
}
Also used : IncrementalContainerReportFromDatanode(org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode) ContainerReportFromDatanode(org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode) IncrementalContainerReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto) ContainerReportsProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto) MockDatanodeDetails.randomDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) IncrementalContainerReportFromDatanode(org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode) Test(org.junit.Test)
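
The test leans on TestContainerReportHandler.getContainerReportsProto to fabricate a full report for a single replica. A minimal sketch of what such a helper might look like; the helper name is hypothetical, and which proto fields are required beyond those shown is an assumption:

// Hypothetical helper, not the actual TestContainerReportHandler implementation.
static ContainerReportsProto buildSingleReplicaReport(ContainerID containerId,
        ContainerReplicaProto.State state, String originNodeId) {
    ContainerReplicaProto replica = ContainerReplicaProto.newBuilder()
            .setContainerID(containerId.getId())
            .setState(state)
            .setOriginNodeId(originNodeId)
            .build();
    return ContainerReportsProto.newBuilder()
            .addReports(replica)
            .build();
}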

Aggregations

ContainerReportsProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto) 15
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails) 10
ContainerReportFromDatanode (org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode) 9
Test (org.junit.Test) 9
ContainerReplicaProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto) 3
MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails) 2
LayoutVersionProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto) 2
NodeReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto) 2
PipelineReportsProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto) 2
IOException (java.io.IOException) 1
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor) 1
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 1
CommandStatusReportsProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto) 1
IncrementalContainerReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto) 1
SCMHeartbeatRequestProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto) 1
SCMRegisteredResponseProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto) 1
NodeManager (org.apache.hadoop.hdds.scm.node.NodeManager) 1
NodeNotFoundException (org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException) 1
CommandStatusReportFromDatanode (org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.CommandStatusReportFromDatanode) 1
IncrementalContainerReportFromDatanode (org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode) 1