Search in sources :

Example 1 with DatanodeStorageReport

Use of org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport in the Apache Ozone project.

From the class ClusterStateEndpoint, method getClusterState:

/**
 * Return a summary report on the current cluster state.
 * The report aggregates datanode counts, container and pipeline totals,
 * cluster-wide storage statistics, and volume/bucket/key counts read from
 * the Recon global-stats table.
 * @return {@link Response} wrapping a {@link ClusterStateResponse}
 */
@GET
public Response getClusterState() {
    List<DatanodeDetails> datanodeDetails = nodeManager.getAllNodes();
    int containers = this.containerManager.getContainers().size();
    int pipelines = this.pipelineManager.getPipelines().size();
    // "Healthy" covers both fully healthy and healthy-read-only nodes.
    int healthyDatanodes =
        nodeManager.getNodeCount(NodeStatus.inServiceHealthy())
            + nodeManager.getNodeCount(NodeStatus.inServiceHealthyReadOnly());
    SCMNodeStat stats = nodeManager.getStats();
    DatanodeStorageReport storageReport = new DatanodeStorageReport(
        stats.getCapacity().get(),
        stats.getScmUsed().get(),
        stats.getRemaining().get());
    ClusterStateResponse.Builder builder = ClusterStateResponse.newBuilder();
    GlobalStats volumeRecord = globalStatsDao.findById(
        TableCountTask.getRowKeyFromTable(VOLUME_TABLE));
    GlobalStats bucketRecord = globalStatsDao.findById(
        TableCountTask.getRowKeyFromTable(BUCKET_TABLE));
    // Keys from OBJECT_STORE buckets.
    GlobalStats keyRecord = globalStatsDao.findById(
        TableCountTask.getRowKeyFromTable(KEY_TABLE));
    // Keys from FILE_SYSTEM_OPTIMIZED buckets.
    GlobalStats fileRecord = globalStatsDao.findById(
        TableCountTask.getRowKeyFromTable(FILE_TABLE));
    // Records may be absent before the first TableCountTask run; leave the
    // builder defaults in place when a record is missing.
    if (volumeRecord != null) {
        builder.setVolumes(volumeRecord.getValue());
    }
    if (bucketRecord != null) {
        builder.setBuckets(bucketRecord.getValue());
    }
    // Primitive long accumulator — the original boxed Long re-boxed on
    // every "+=" for no benefit.
    long totalKeys = 0L;
    if (keyRecord != null) {
        totalKeys += keyRecord.getValue();
    }
    if (fileRecord != null) {
        totalKeys += fileRecord.getValue();
    }
    builder.setKeys(totalKeys);
    ClusterStateResponse response = builder
        .setStorageReport(storageReport)
        .setPipelines(pipelines)
        .setContainers(containers)
        .setTotalDatanodes(datanodeDetails.size())
        .setHealthyDatanodes(healthyDatanodes)
        .build();
    return Response.ok(response).build();
}
Also used : GlobalStats(org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats) DatanodeStorageReport(org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) ClusterStateResponse(org.apache.hadoop.ozone.recon.api.types.ClusterStateResponse) SCMNodeStat(org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat) GET(javax.ws.rs.GET)

Example 2 with DatanodeStorageReport

use of org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport in project ozone by apache.

From the class NodeEndpoint, method getDatanodes:

/**
 * Return the list of datanodes with detailed information about each datanode.
 * For every node known to the node manager this gathers its storage report,
 * health and operational state, pipeline membership (with leader and
 * open-container counts), container totals, and version/build metadata.
 * Per-node lookup failures are logged and skipped so one bad node does not
 * fail the whole response.
 * @return {@link Response} wrapping a {@link DatanodesResponse}
 */
@GET
public Response getDatanodes() {
    List<DatanodeMetadata> datanodes = new ArrayList<>();
    List<DatanodeDetails> datanodeDetails = nodeManager.getAllNodes();
    datanodeDetails.forEach(datanode -> {
        DatanodeStorageReport storageReport = getStorageReport(datanode);
        NodeState nodeState = null;
        try {
            nodeState = nodeManager.getNodeStatus(datanode).getHealth();
        } catch (NodeNotFoundException e) {
            // Leave nodeState null; the metadata entry is still emitted.
            LOG.warn("Cannot get nodeState for datanode {}", datanode, e);
        }
        final NodeOperationalState nodeOpState = datanode.getPersistedOpState();
        String hostname = datanode.getHostName();
        Set<PipelineID> pipelineIDs = nodeManager.getPipelines(datanode);
        List<DatanodePipeline> pipelines = new ArrayList<>();
        // AtomicIntegers because these counters are mutated from inside the
        // pipeline lambda below (locals captured by a lambda must be
        // effectively final).
        AtomicInteger leaderCount = new AtomicInteger();
        AtomicInteger openContainers = new AtomicInteger();
        DatanodeMetadata.Builder builder = DatanodeMetadata.newBuilder();
        pipelineIDs.forEach(pipelineID -> {
            try {
                Pipeline pipeline = pipelineManager.getPipeline(pipelineID);
                String leaderNode = pipeline.getLeaderNode().getHostName();
                DatanodePipeline datanodePipeline = new DatanodePipeline(pipelineID.getId(), pipeline.getReplicationConfig().getReplicationType().toString(), ReplicationConfig.getLegacyFactor(pipeline.getReplicationConfig()).getNumber(), leaderNode);
                pipelines.add(datanodePipeline);
                // Count pipelines for which this datanode is the leader.
                if (datanode.getUuid().equals(pipeline.getLeaderId())) {
                    leaderCount.getAndIncrement();
                }
                // Pipelines absent from the map contribute 0 open containers.
                int openContainerPerPipeline = reconContainerManager.getPipelineToOpenContainer().getOrDefault(pipelineID, 0);
                openContainers.getAndAdd(openContainerPerPipeline);
            } catch (PipelineNotFoundException ex) {
                LOG.warn("Cannot get pipeline {} for datanode {}, pipeline not found", pipelineID.getId(), hostname, ex);
            } catch (IOException ioEx) {
                LOG.warn("Cannot get leader node of pipeline with id {}.", pipelineID.getId(), ioEx);
            }
        });
        try {
            Set<ContainerID> allContainers = nodeManager.getContainers(datanode);
            builder.withContainers(allContainers.size());
            builder.withOpenContainers(openContainers.get());
        } catch (NodeNotFoundException ex) {
            // Container counts are simply omitted for an unknown node.
            LOG.warn("Cannot get containers, datanode {} not found.", datanode.getUuid(), ex);
        }
        // NOTE(review): unchecked downcast — assumes every DatanodeDetails
        // returned by nodeManager.getAllNodes() is a DatanodeInfo; a plain
        // DatanodeDetails would throw ClassCastException here. Confirm.
        DatanodeInfo dnInfo = (DatanodeInfo) datanode;
        datanodes.add(builder.withHostname(nodeManager.getHostName(datanode)).withDatanodeStorageReport(storageReport).withLastHeartbeat(nodeManager.getLastHeartbeat(datanode)).withState(nodeState).withOperationalState(nodeOpState).withPipelines(pipelines).withLeaderCount(leaderCount.get()).withUUid(datanode.getUuidString()).withVersion(nodeManager.getVersion(datanode)).withSetupTime(nodeManager.getSetupTime(datanode)).withRevision(nodeManager.getRevision(datanode)).withBuildDate(nodeManager.getBuildDate(datanode)).withLayoutVersion(dnInfo.getLastKnownLayoutVersion().getMetadataLayoutVersion()).build());
    });
    DatanodesResponse datanodesResponse = new DatanodesResponse(datanodes.size(), datanodes);
    return Response.ok(datanodesResponse).build();
}
Also used : DatanodeInfo(org.apache.hadoop.hdds.scm.node.DatanodeInfo) NodeState(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState) DatanodesResponse(org.apache.hadoop.ozone.recon.api.types.DatanodesResponse) DatanodeStorageReport(org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport) DatanodeMetadata(org.apache.hadoop.ozone.recon.api.types.DatanodeMetadata) ArrayList(java.util.ArrayList) IOException(java.io.IOException) DatanodePipeline(org.apache.hadoop.ozone.recon.api.types.DatanodePipeline) DatanodePipeline(org.apache.hadoop.ozone.recon.api.types.DatanodePipeline) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) NodeNotFoundException(org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ContainerID(org.apache.hadoop.hdds.scm.container.ContainerID) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) NodeOperationalState(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState) PipelineID(org.apache.hadoop.hdds.scm.pipeline.PipelineID) PipelineNotFoundException(org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException) GET(javax.ws.rs.GET)

Example 3 with DatanodeStorageReport

use of org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport in project ozone by apache.

From the class NodeEndpoint, method getStorageReport:

/**
 * Build a {@link DatanodeStorageReport} from the SCM node statistics of
 * the given datanode.
 * @param datanode DatanodeDetails of the node to report on
 * @return report carrying the node's capacity, used and remaining bytes
 */
private DatanodeStorageReport getStorageReport(DatanodeDetails datanode) {
    final SCMNodeStat stat = nodeManager.getNodeStat(datanode).get();
    return new DatanodeStorageReport(
        stat.getCapacity().get(),
        stat.getScmUsed().get(),
        stat.getRemaining().get());
}
Also used : DatanodeStorageReport(org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport) SCMNodeStat(org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat)

Aggregations

DatanodeStorageReport (org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport)3 GET (javax.ws.rs.GET)2 DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails)2 SCMNodeStat (org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat)2 IOException (java.io.IOException)1 ArrayList (java.util.ArrayList)1 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)1 NodeOperationalState (org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState)1 NodeState (org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState)1 ContainerID (org.apache.hadoop.hdds.scm.container.ContainerID)1 DatanodeInfo (org.apache.hadoop.hdds.scm.node.DatanodeInfo)1 NodeNotFoundException (org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException)1 Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline)1 PipelineID (org.apache.hadoop.hdds.scm.pipeline.PipelineID)1 PipelineNotFoundException (org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException)1 ClusterStateResponse (org.apache.hadoop.ozone.recon.api.types.ClusterStateResponse)1 DatanodeMetadata (org.apache.hadoop.ozone.recon.api.types.DatanodeMetadata)1 DatanodePipeline (org.apache.hadoop.ozone.recon.api.types.DatanodePipeline)1 DatanodesResponse (org.apache.hadoop.ozone.recon.api.types.DatanodesResponse)1 GlobalStats (org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats)1