Search in sources :

Example 1 with DatanodeMetadata

use of org.apache.hadoop.ozone.recon.api.types.DatanodeMetadata in project ozone by apache.

From the testOpenContainerCount method of the TestOpenContainerCount class.

@Test
public void testOpenContainerCount() throws Exception {
    // Wait until the heartbeat has propagated and the datanode reports
    // 10 containers spread over 2 pipelines (pipeline may not exist yet).
    waitAndCheckConditionAfterHeartbeat(() -> {
        DatanodeMetadata current = getDatanodeMetadata();
        return current.getContainers() == 10 && current.getPipelines().size() == 2;
    });
    // Snapshot the open-container count, then close containers one at a
    // time and verify the reported count decrements by one each step.
    int remainingOpen = getDatanodeMetadata().getOpenContainers();
    for (long containerId = 1L; containerId <= 10L; containerId++) {
        closeContainer(containerId);
        remainingOpen--;
        DatanodeMetadata afterClose = getDatanodeMetadata();
        Assert.assertEquals(remainingOpen, afterClose.getOpenContainers());
    }
}
Also used : DatanodeMetadata(org.apache.hadoop.ozone.recon.api.types.DatanodeMetadata) Test(org.junit.Test)

Example 2 with DatanodeMetadata

use of org.apache.hadoop.ozone.recon.api.types.DatanodeMetadata in project ozone by apache.

From the getDatanodeMetadata method of the TestOpenContainerCount class.

/**
 * Queries the node endpoint and returns the metadata entry for
 * "host1.datanode", or {@code null} if that host is absent from the response.
 */
private DatanodeMetadata getDatanodeMetadata() {
    Response response = nodeEndpoint.getDatanodes();
    DatanodesResponse allNodes = (DatanodesResponse) response.getEntity();
    return allNodes.getDatanodes()
        .stream()
        .filter(node -> node.getHostname().equals("host1.datanode"))
        .findFirst()
        .orElse(null);
}
Also used : DatanodesResponse(org.apache.hadoop.ozone.recon.api.types.DatanodesResponse) Response(javax.ws.rs.core.Response) HttpServletResponse(javax.servlet.http.HttpServletResponse) DatanodesResponse(org.apache.hadoop.ozone.recon.api.types.DatanodesResponse) DatanodeMetadata(org.apache.hadoop.ozone.recon.api.types.DatanodeMetadata)

Example 3 with DatanodeMetadata

use of org.apache.hadoop.ozone.recon.api.types.DatanodeMetadata in project ozone by apache.

From the getDatanodes method of the NodeEndpoint class.

/**
 * Return the list of datanodes with detailed information about each datanode.
 *
 * @return {@link Response} whose entity is a {@link DatanodesResponse}
 *         containing one {@link DatanodeMetadata} per registered datanode
 */
@GET
public Response getDatanodes() {
    List<DatanodeMetadata> datanodes = new ArrayList<>();
    List<DatanodeDetails> datanodeDetails = nodeManager.getAllNodes();
    datanodeDetails.forEach(datanode -> datanodes.add(buildDatanodeMetadata(datanode)));
    DatanodesResponse datanodesResponse = new DatanodesResponse(datanodes.size(), datanodes);
    return Response.ok(datanodesResponse).build();
}

/**
 * Assembles the full {@link DatanodeMetadata} for a single datanode:
 * health state, operational state, storage report, pipeline membership,
 * leader count, and container counts.
 */
private DatanodeMetadata buildDatanodeMetadata(DatanodeDetails datanode) {
    DatanodeStorageReport storageReport = getStorageReport(datanode);
    NodeState nodeState = null;
    try {
        nodeState = nodeManager.getNodeStatus(datanode).getHealth();
    } catch (NodeNotFoundException e) {
        // Leave nodeState null; the node may have just deregistered.
        LOG.warn("Cannot get nodeState for datanode {}", datanode, e);
    }
    final NodeOperationalState nodeOpState = datanode.getPersistedOpState();

    List<DatanodePipeline> pipelines = new ArrayList<>();
    AtomicInteger leaderCount = new AtomicInteger();
    AtomicInteger openContainers = new AtomicInteger();
    collectPipelineInfo(datanode, pipelines, leaderCount, openContainers);

    DatanodeMetadata.Builder builder = DatanodeMetadata.newBuilder();
    try {
        Set<ContainerID> allContainers = nodeManager.getContainers(datanode);
        builder.withContainers(allContainers.size());
        builder.withOpenContainers(openContainers.get());
    } catch (NodeNotFoundException ex) {
        // Counts are simply omitted for an unknown node.
        LOG.warn("Cannot get containers, datanode {} not found.",
            datanode.getUuid(), ex);
    }
    DatanodeInfo dnInfo = (DatanodeInfo) datanode;
    return builder
        .withHostname(nodeManager.getHostName(datanode))
        .withDatanodeStorageReport(storageReport)
        .withLastHeartbeat(nodeManager.getLastHeartbeat(datanode))
        .withState(nodeState)
        .withOperationalState(nodeOpState)
        .withPipelines(pipelines)
        .withLeaderCount(leaderCount.get())
        .withUUid(datanode.getUuidString())
        .withVersion(nodeManager.getVersion(datanode))
        .withSetupTime(nodeManager.getSetupTime(datanode))
        .withRevision(nodeManager.getRevision(datanode))
        .withBuildDate(nodeManager.getBuildDate(datanode))
        .withLayoutVersion(dnInfo.getLastKnownLayoutVersion().getMetadataLayoutVersion())
        .build();
}

/**
 * Walks the pipelines this datanode belongs to, filling {@code pipelines}
 * with per-pipeline details, incrementing {@code leaderCount} for each
 * pipeline the node leads, and accumulating the per-pipeline open-container
 * totals into {@code openContainers}. Missing pipelines and leader-lookup
 * failures are logged and skipped.
 */
private void collectPipelineInfo(DatanodeDetails datanode,
                                 List<DatanodePipeline> pipelines,
                                 AtomicInteger leaderCount,
                                 AtomicInteger openContainers) {
    String hostname = datanode.getHostName();
    Set<PipelineID> pipelineIDs = nodeManager.getPipelines(datanode);
    pipelineIDs.forEach(pipelineID -> {
        try {
            Pipeline pipeline = pipelineManager.getPipeline(pipelineID);
            String leaderNode = pipeline.getLeaderNode().getHostName();
            DatanodePipeline datanodePipeline = new DatanodePipeline(
                pipelineID.getId(),
                pipeline.getReplicationConfig().getReplicationType().toString(),
                ReplicationConfig.getLegacyFactor(pipeline.getReplicationConfig()).getNumber(),
                leaderNode);
            pipelines.add(datanodePipeline);
            if (datanode.getUuid().equals(pipeline.getLeaderId())) {
                leaderCount.getAndIncrement();
            }
            int openContainerPerPipeline = reconContainerManager
                .getPipelineToOpenContainer()
                .getOrDefault(pipelineID, 0);
            openContainers.getAndAdd(openContainerPerPipeline);
        } catch (PipelineNotFoundException ex) {
            LOG.warn("Cannot get pipeline {} for datanode {}, pipeline not found",
                pipelineID.getId(), hostname, ex);
        } catch (IOException ioEx) {
            LOG.warn("Cannot get leader node of pipeline with id {}.",
                pipelineID.getId(), ioEx);
        }
    });
}
Also used : DatanodeInfo(org.apache.hadoop.hdds.scm.node.DatanodeInfo) NodeState(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState) DatanodesResponse(org.apache.hadoop.ozone.recon.api.types.DatanodesResponse) DatanodeStorageReport(org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport) DatanodeMetadata(org.apache.hadoop.ozone.recon.api.types.DatanodeMetadata) ArrayList(java.util.ArrayList) IOException(java.io.IOException) DatanodePipeline(org.apache.hadoop.ozone.recon.api.types.DatanodePipeline) DatanodePipeline(org.apache.hadoop.ozone.recon.api.types.DatanodePipeline) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) NodeNotFoundException(org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ContainerID(org.apache.hadoop.hdds.scm.container.ContainerID) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) NodeOperationalState(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState) PipelineID(org.apache.hadoop.hdds.scm.pipeline.PipelineID) PipelineNotFoundException(org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException) GET(javax.ws.rs.GET)

Example 4 with DatanodeMetadata

use of org.apache.hadoop.ozone.recon.api.types.DatanodeMetadata in project ozone by apache.

From the testGetDatanodes method of the TestEndpoints class.

@Test
public void testGetDatanodes() throws Exception {
    // Fetch the datanode list and verify both registered nodes are reported.
    Response initialResponse = nodeEndpoint.getDatanodes();
    DatanodesResponse initialDatanodes = (DatanodesResponse) initialResponse.getEntity();
    Assert.assertEquals(2, initialDatanodes.getTotalCount());
    Assert.assertEquals(2, initialDatanodes.getDatanodes().size());
    initialDatanodes.getDatanodes().forEach(metadata -> {
        try {
            testDatanodeResponse(metadata);
        } catch (IOException e) {
            Assert.fail(e.getMessage());
        }
    });
    // After a heartbeat, host1 should report exactly one container, which
    // is open, and the pipeline should contain exactly one container.
    waitAndCheckConditionAfterHeartbeat(() -> {
        Response refreshedResponse = nodeEndpoint.getDatanodes();
        DatanodesResponse refreshed = (DatanodesResponse) refreshedResponse.getEntity();
        DatanodeMetadata host1 = refreshed.getDatanodes()
            .stream()
            .filter(md -> md.getHostname().equals("host1.datanode"))
            .findFirst()
            .orElse(null);
        return (host1 != null
            && host1.getContainers() == 1
            && host1.getOpenContainers() == 1
            && reconScm.getPipelineManager()
                .getContainersInPipeline(pipeline.getId()).size() == 1);
    });
    // Change the node's operational state through the NodeManager.
    final NodeManager nodeManager = reconScm.getScmNodeManager();
    final DatanodeDetails dnDetailsInternal =
        nodeManager.getNodeByUuid(datanodeDetails.getUuidString());
    // Back up the current state and sanity-check it against NodeStatus.
    final NodeStatus nStatus = nodeManager.getNodeStatus(dnDetailsInternal);
    final NodeOperationalState backupOpState = dnDetailsInternal.getPersistedOpState();
    final long backupOpStateExpiry = dnDetailsInternal.getPersistedOpStateExpiryEpochSec();
    assertEquals(backupOpState, nStatus.getOperationalState());
    assertEquals(backupOpStateExpiry, nStatus.getOpStateExpiryEpochSeconds());
    dnDetailsInternal.setPersistedOpState(NodeOperationalState.DECOMMISSIONING);
    dnDetailsInternal.setPersistedOpStateExpiryEpochSec(666L);
    nodeManager.setNodeOperationalState(
        dnDetailsInternal, NodeOperationalState.DECOMMISSIONING, 666L);
    // Re-query the endpoint and confirm it reflects the change.
    Response afterChangeResponse = nodeEndpoint.getDatanodes();
    DatanodesResponse afterChange = (DatanodesResponse) afterChangeResponse.getEntity();
    // The response order is nondeterministic, so match by UUID and count hits.
    AtomicInteger matches = new AtomicInteger();
    afterChange.getDatanodes().forEach(metadata -> {
        if (metadata.getUuid().equals(dnDetailsInternal.getUuidString())) {
            matches.incrementAndGet();
            assertEquals(NodeOperationalState.DECOMMISSIONING,
                metadata.getOperationalState());
        }
    });
    assertEquals(1, matches.get());
    // Restore the original persisted state so later tests are unaffected.
    dnDetailsInternal.setPersistedOpState(backupOpState);
    dnDetailsInternal.setPersistedOpStateExpiryEpochSec(backupOpStateExpiry);
    nodeManager.setNodeOperationalState(dnDetailsInternal, backupOpState, backupOpStateExpiry);
}
Also used : ClusterStateResponse(org.apache.hadoop.ozone.recon.api.types.ClusterStateResponse) DatanodesResponse(org.apache.hadoop.ozone.recon.api.types.DatanodesResponse) Response(javax.ws.rs.core.Response) HttpServletResponse(javax.servlet.http.HttpServletResponse) PipelinesResponse(org.apache.hadoop.ozone.recon.api.types.PipelinesResponse) NodeManager(org.apache.hadoop.hdds.scm.node.NodeManager) DatanodesResponse(org.apache.hadoop.ozone.recon.api.types.DatanodesResponse) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) DatanodeMetadata(org.apache.hadoop.ozone.recon.api.types.DatanodeMetadata) MockDatanodeDetails.randomDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) NodeOperationalState(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState) IOException(java.io.IOException) NodeStatus(org.apache.hadoop.hdds.scm.node.NodeStatus) Test(org.junit.Test) AbstractReconSqlDBTest(org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest)

Aggregations

DatanodeMetadata (org.apache.hadoop.ozone.recon.api.types.DatanodeMetadata)4 DatanodesResponse (org.apache.hadoop.ozone.recon.api.types.DatanodesResponse)3 IOException (java.io.IOException)2 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)2 HttpServletResponse (javax.servlet.http.HttpServletResponse)2 Response (javax.ws.rs.core.Response)2 DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails)2 NodeOperationalState (org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState)2 Test (org.junit.Test)2 ArrayList (java.util.ArrayList)1 GET (javax.ws.rs.GET)1 MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails)1 NodeState (org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState)1 ContainerID (org.apache.hadoop.hdds.scm.container.ContainerID)1 DatanodeInfo (org.apache.hadoop.hdds.scm.node.DatanodeInfo)1 NodeManager (org.apache.hadoop.hdds.scm.node.NodeManager)1 NodeStatus (org.apache.hadoop.hdds.scm.node.NodeStatus)1 NodeNotFoundException (org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException)1 Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline)1 PipelineID (org.apache.hadoop.hdds.scm.pipeline.PipelineID)1