
Example 26 with DatanodeDetails

Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in the Apache Ozone project.

From the class TestContainerServer, method testClientServer.

@Test
public void testClientServer() throws Exception {
    DatanodeDetails datanodeDetails = randomDatanodeDetails();
    runTestClientServer(1,
        // Point the client at the STANDALONE port of the pipeline's first node.
        (pipeline, conf) -> conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
            pipeline.getFirstNode()
                .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()),
        XceiverClientGrpc::new,
        // Server factory: a gRPC container server for the test datanode.
        (dn, conf) -> new XceiverServerGrpc(datanodeDetails, conf,
            new TestContainerDispatcher(), caClient),
        // No extra per-datanode initialization is needed for this test.
        (dn, p) -> {
        });
}
Also used: MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), XceiverClientGrpc (org.apache.hadoop.hdds.scm.XceiverClientGrpc), XceiverServerGrpc (org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc), Test (org.junit.Test)
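For context, randomDatanodeDetails() simply fills in the DatanodeDetails builder with random values. Below is a minimal sketch of constructing an equivalent instance by hand; the exact builder signatures vary across Ozone versions (setUuid takes a String in older releases and a java.util.UUID in newer ones), so treat the calls as assumptions rather than a fixed API.

// Minimal sketch of building a DatanodeDetails by hand instead of calling
// MockDatanodeDetails.randomDatanodeDetails(). Builder signatures are
// assumptions and vary across Ozone versions.
import java.util.UUID;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;

public final class DatanodeDetailsSketch {
  public static DatanodeDetails localDatanode() {
    return DatanodeDetails.newBuilder()
        .setUuid(UUID.randomUUID())        // identity of the datanode
        .setHostName("localhost")
        .setIpAddress("127.0.0.1")
        // Register the STANDALONE port that the test above reads back via
        // getPort(DatanodeDetails.Port.Name.STANDALONE).
        .addPort(DatanodeDetails.newPort(
            DatanodeDetails.Port.Name.STANDALONE, 9859))
        .build();
  }
}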

Example 27 with DatanodeDetails

Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in the Apache Ozone project.

From the class PipelinePlacementPolicy, method getResultSet.

/**
 * Get result set based on the pipeline placement algorithm which considers
 * network topology and rack awareness.
 * @param nodesRequired - number of datanodes required
 * @param healthyNodes - list of healthy candidate datanodes to choose from
 * @return a list of chosen datanodes
 * @throws SCMException if a suitable set of nodes cannot be found
 */
@Override
public List<DatanodeDetails> getResultSet(int nodesRequired, List<DatanodeDetails> healthyNodes) throws SCMException {
    if (nodesRequired != HddsProtos.ReplicationFactor.THREE.getNumber()) {
        throw new SCMException("Nodes required number is not supported: " + nodesRequired, SCMException.ResultCodes.INVALID_CAPACITY);
    }
    // Assume rack awareness is not enabled.
    boolean rackAwareness = false;
    List<DatanodeDetails> results = new ArrayList<>(nodesRequired);
    // Since nodes are widely distributed, the results should be selected
    // based on distance in topology, rack awareness and load balancing.
    List<DatanodeDetails> exclude = new ArrayList<>();
    // First choose an anchor node.
    DatanodeDetails anchor = chooseFirstNode(healthyNodes);
    if (anchor != null) {
        results.add(anchor);
        removePeers(anchor, healthyNodes);
        exclude.add(anchor);
    } else {
        LOG.warn("Unable to find healthy node for anchor(first) node.");
        throw new SCMException("Unable to find anchor node.", SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE);
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("First node chosen: {}", anchor);
    }
    // Choose the second node on different racks from anchor.
    DatanodeDetails nextNode = chooseNodeBasedOnRackAwareness(healthyNodes, exclude, nodeManager.getClusterNetworkTopologyMap(), anchor);
    if (nextNode != null) {
        // Rack awareness is detected.
        rackAwareness = true;
        results.add(nextNode);
        removePeers(nextNode, healthyNodes);
        exclude.add(nextNode);
        if (LOG.isDebugEnabled()) {
            LOG.debug("Second node chosen: {}", nextNode);
        }
    } else {
        LOG.debug("Pipeline Placement: Unable to find 2nd node on different " + "rack based on rack awareness. anchor: {}", anchor);
    }
    // Then choose nodes close to anchor based on network topology
    int nodesToFind = nodesRequired - results.size();
    for (int x = 0; x < nodesToFind; x++) {
        // Pick remaining nodes based on the existence of rack awareness.
        DatanodeDetails pick = null;
        if (rackAwareness) {
            pick = chooseNodeBasedOnSameRack(healthyNodes, exclude, nodeManager.getClusterNetworkTopologyMap(), anchor);
        }
        // fall back protection
        if (pick == null) {
            pick = fallBackPickNodes(healthyNodes, exclude);
            if (rackAwareness) {
                LOG.debug("Failed to choose node based on topology. Fallback " + "picks node as: {}", pick);
            }
        }
        if (pick != null) {
            results.add(pick);
            removePeers(pick, healthyNodes);
            exclude.add(pick);
            LOG.debug("Remaining node chosen: {}", pick);
        } else {
            String msg = String.format("Unable to find suitable node in " + "pipeline allocation. healthyNodes size: %d, " + "excludeNodes size: %d", healthyNodes.size(), exclude.size());
            LOG.warn(msg);
            throw new SCMException(msg, SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE);
        }
    }
    if (results.size() < nodesRequired) {
        LOG.warn("Unable to find the required number of " + "healthy nodes that  meet the criteria. Required nodes: {}, " + "Found nodes: {}", nodesRequired, results.size());
        throw new SCMException("Unable to find required number of nodes.", SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE);
    }
    return results;
}
Also used: DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), ArrayList (java.util.ArrayList), SCMException (org.apache.hadoop.hdds.scm.exceptions.SCMException)
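The selection strategy above, anchor first, then a rack-aware second pick, then topology-close fills with a fallback, generalizes beyond Ozone. The following is a condensed, self-contained sketch of the same shape; Node and the helper methods are hypothetical stand-ins, not Ozone APIs.

// Condensed, self-contained sketch of the anchor / rack-aware / fallback
// selection pattern used by getResultSet above. Node and the selection
// helpers here are hypothetical stand-ins, not Ozone APIs.
import java.util.ArrayList;
import java.util.List;

final class PlacementSketch {

  static final class Node {
    final String id;
    final String rack;
    Node(String id, String rack) {
      this.id = id;
      this.rack = rack;
    }
  }

  static List<Node> pickThree(List<Node> healthy) {
    if (healthy.isEmpty()) {
      throw new IllegalStateException("no healthy nodes");
    }
    List<Node> picked = new ArrayList<>();
    // 1. Anchor: any healthy node.
    Node anchor = healthy.get(0);
    picked.add(anchor);
    // 2. Rack awareness: prefer a second node on a different rack.
    Node second = firstMatch(healthy, picked, anchor.rack, false);
    boolean rackAware = second != null;
    if (rackAware) {
      picked.add(second);
    }
    // 3. Fill the remaining slots near the anchor, falling back to any
    //    unpicked healthy node when the rack-local pool runs dry.
    while (picked.size() < 3) {
      Node pick = rackAware
          ? firstMatch(healthy, picked, anchor.rack, true)
          : null;
      if (pick == null) {
        pick = firstUnpicked(healthy, picked);   // fallback protection
      }
      if (pick == null) {
        throw new IllegalStateException("not enough suitable nodes");
      }
      picked.add(pick);
    }
    return picked;
  }

  private static Node firstMatch(List<Node> pool, List<Node> picked,
      String rack, boolean sameRack) {
    for (Node n : pool) {
      if (!picked.contains(n) && rack.equals(n.rack) == sameRack) {
        return n;
      }
    }
    return null;
  }

  private static Node firstUnpicked(List<Node> pool, List<Node> picked) {
    for (Node n : pool) {
      if (!picked.contains(n)) {
        return n;
      }
    }
    return null;
  }
}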

Example 28 with DatanodeDetails

Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in the Apache Ozone project.

From the class PipelineReportHandler, method onMessage.

@Override
public void onMessage(PipelineReportFromDatanode pipelineReportFromDatanode, EventPublisher publisher) {
    Preconditions.checkNotNull(pipelineReportFromDatanode);
    DatanodeDetails dn = pipelineReportFromDatanode.getDatanodeDetails();
    PipelineReportsProto pipelineReport = pipelineReportFromDatanode.getReport();
    Preconditions.checkNotNull(dn, "Pipeline Report is missing DatanodeDetails.");
    if (LOGGER.isTraceEnabled()) {
        LOGGER.trace("Processing pipeline report for dn: {}", dn);
    }
    for (PipelineReport report : pipelineReport.getPipelineReportList()) {
        try {
            processPipelineReport(report, dn, publisher);
        } catch (NotLeaderException ex) {
        // Swallow NotLeaderException: followers also receive pipeline
        // reports, and logging the exception there would only add noise.
        } catch (PipelineNotFoundException e) {
            LOGGER.error("Could not find pipeline {}", report.getPipelineID());
        } catch (IOException e) {
            LOGGER.error("Could not process pipeline report={} from dn={}.", report, dn, e);
        }
    }
}
Also used: PipelineReport (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport), NotLeaderException (org.apache.ratis.protocol.exceptions.NotLeaderException), DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), IOException (java.io.IOException), PipelineReportsProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto)
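The per-report try/catch above is a deliberate isolation pattern: one malformed or stale report must not abort processing of the rest of the batch. A standalone sketch of that pattern, with Report and process(...) as hypothetical stand-ins:

// Standalone sketch of per-item exception isolation: one failing report must
// not abort processing of the remaining reports in the batch. Report and
// process(...) are hypothetical stand-ins for the Ozone types above.
import java.io.IOException;
import java.util.List;
import java.util.logging.Logger;

final class BatchIsolationSketch {
  private static final Logger LOG =
      Logger.getLogger(BatchIsolationSketch.class.getName());

  interface Report {
    String id();
  }

  void process(Report report) throws IOException {
    // ... per-report work that may fail independently ...
  }

  void onBatch(List<Report> reports) {
    for (Report report : reports) {
      try {
        process(report);
      } catch (IOException e) {
        // Log and continue; the remaining reports still get processed.
        LOG.severe("Could not process report " + report.id() + ": " + e);
      }
    }
  }
}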

Example 29 with DatanodeDetails

Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in the Apache Ozone project.

From the class NodeReportHandler, method onMessage.

@Override
public void onMessage(NodeReportFromDatanode nodeReportFromDatanode, EventPublisher publisher) {
    Preconditions.checkNotNull(nodeReportFromDatanode);
    DatanodeDetails dn = nodeReportFromDatanode.getDatanodeDetails();
    Preconditions.checkNotNull(dn, "NodeReport is " + "missing DatanodeDetails.");
    nodeManager.processNodeReport(dn, nodeReportFromDatanode.getReport());
}
Also used: DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails)
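Since the handler's whole contract is to validate and then delegate to NodeManager.processNodeReport, it can be verified with a mocked NodeManager. The following is a hypothetical Mockito sketch; the handler constructor and the NodeReportFromDatanode wrapper signatures are assumptions and may differ between Ozone versions.

// Hypothetical Mockito sketch verifying the delegate-to-NodeManager behavior
// above. Constructor and wrapper signatures are assumptions and may differ
// between Ozone versions; NodeReportHandler and NodeReportFromDatanode (a
// nested class of SCMDatanodeHeartbeatDispatcher) are assumed to be in scope.
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.junit.Test;

public class TestNodeReportHandlerSketch {

  @Test
  public void forwardsReportToNodeManager() {
    NodeManager nodeManager = mock(NodeManager.class);
    NodeReportHandler handler = new NodeReportHandler(nodeManager);

    DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
    NodeReportProto report = NodeReportProto.getDefaultInstance();
    handler.onMessage(new NodeReportFromDatanode(dn, report),
        mock(EventPublisher.class));

    // Exactly one delegation, with the same datanode and report.
    verify(nodeManager).processNodeReport(dn, report);
  }
}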

Example 30 with DatanodeDetails

Use of org.apache.hadoop.hdds.protocol.DatanodeDetails in the Apache Ozone project.

From the class SCMNodeManager, method getMostOrLeastUsedDatanodes.

/**
 * Gets a sorted list of most or least used DatanodeUsageInfo containing
 * healthy, in-service nodes. If the specified mostUsed is true, the returned
 * list is in descending order of usage. Otherwise, the returned list is in
 * ascending order of usage.
 *
 * @param mostUsed true if most used, false if least used
 * @return List of DatanodeUsageInfo
 */
@Override
public List<DatanodeUsageInfo> getMostOrLeastUsedDatanodes(boolean mostUsed) {
    List<DatanodeDetails> healthyNodes = getNodes(IN_SERVICE, NodeState.HEALTHY);
    List<DatanodeUsageInfo> datanodeUsageInfoList = new ArrayList<>(healthyNodes.size());
    // Build one DatanodeUsageInfo per healthy node and add it to the list.
    for (DatanodeDetails node : healthyNodes) {
        SCMNodeStat stat = getNodeStatInternal(node);
        datanodeUsageInfoList.add(new DatanodeUsageInfo(node, stat));
    }
    // sort the list according to appropriate comparator
    if (mostUsed) {
        datanodeUsageInfoList.sort(DatanodeUsageInfo.getMostUtilized().reversed());
    } else {
        datanodeUsageInfoList.sort(DatanodeUsageInfo.getMostUtilized());
    }
    return datanodeUsageInfoList;
}
Also used: DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), ArrayList (java.util.ArrayList), SCMNodeStat (org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat)
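Note that the sort reuses a single ascending comparator and reverses it for the descending case, rather than maintaining two comparators. A self-contained JDK sketch of the same pattern, with UsageInfo as a hypothetical stand-in for DatanodeUsageInfo:

// Self-contained sketch of the single-comparator / reversed() pattern used
// above. UsageInfo is a hypothetical stand-in for DatanodeUsageInfo.
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

final class UsageSortSketch {

  static final class UsageInfo {
    final String node;
    final long usedBytes;
    UsageInfo(String node, long usedBytes) {
      this.node = node;
      this.usedBytes = usedBytes;
    }
  }

  static List<UsageInfo> sortByUsage(List<UsageInfo> infos, boolean mostUsed) {
    // Ascending by usage; reversed() flips it for "most used first".
    Comparator<UsageInfo> byUsage =
        Comparator.comparingLong(info -> info.usedBytes);
    List<UsageInfo> sorted = new ArrayList<>(infos);
    sorted.sort(mostUsed ? byUsage.reversed() : byUsage);
    return sorted;
  }
}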

Aggregations

DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 358
Test (org.junit.Test): 203
MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails): 108
ArrayList (java.util.ArrayList): 84
MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails): 77
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 65
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 62
IOException (java.io.IOException): 48
UUID (java.util.UUID): 43
ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo): 43
MockDatanodeDetails.createDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.createDatanodeDetails): 38
HddsProtos (org.apache.hadoop.hdds.protocol.proto.HddsProtos): 32
Map (java.util.Map): 27
HashMap (java.util.HashMap): 26
ContainerID (org.apache.hadoop.hdds.scm.container.ContainerID): 25
List (java.util.List): 24
File (java.io.File): 23
NodeManager (org.apache.hadoop.hdds.scm.node.NodeManager): 21
NodeNotFoundException (org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException): 21
NodeStatus (org.apache.hadoop.hdds.scm.node.NodeStatus): 20