Search in sources :

Example 16 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

The class ResourceTrackerService, method registerNodeManager.

/**
 * Handles a NodeManager registration request.
 *
 * The method applies a series of admission gates in order — minimum NM
 * version, node validity (exclude list / decommissioning state), and
 * minimum allocatable capacity — and returns a SHUTDOWN response as soon
 * as any gate fails. A node that passes all gates is inserted into the
 * RM's node map (or reconnected if already present), registered with the
 * liveness monitor, and receives the current container/NM token master
 * keys plus a NORMAL node action.
 *
 * @param request the registration request carrying node id, ports,
 *                resources, NM version, container statuses and labels
 * @return a response with either NodeAction.SHUTDOWN and a diagnostic
 *         message, or NodeAction.NORMAL plus token keys and RM identity
 * @throws YarnException on YARN-level failures
 * @throws IOException on I/O failures
 */
@SuppressWarnings("unchecked")
@Override
public RegisterNodeManagerResponse registerNodeManager(RegisterNodeManagerRequest request) throws YarnException, IOException {
    NodeId nodeId = request.getNodeId();
    String host = nodeId.getHost();
    int cmPort = nodeId.getPort();
    int httpPort = request.getHttpPort();
    Resource capability = request.getResource();
    String nodeManagerVersion = request.getNMVersion();
    Resource physicalResource = request.getPhysicalResource();
    RegisterNodeManagerResponse response = recordFactory.newRecordInstance(RegisterNodeManagerResponse.class);
    // Gate 1: reject NodeManagers older than the configured minimum version.
    // "NONE" disables the check; "EqualToRM" pins the minimum to the RM's
    // own version (note: this permanently overwrites the field on first use).
    if (!minimumNodeManagerVersion.equals("NONE")) {
        if (minimumNodeManagerVersion.equals("EqualToRM")) {
            minimumNodeManagerVersion = YarnVersionInfo.getVersion();
        }
        if ((nodeManagerVersion == null) || (VersionUtil.compareVersions(nodeManagerVersion, minimumNodeManagerVersion)) < 0) {
            String message = "Disallowed NodeManager Version " + nodeManagerVersion + ", is less than the minimum version " + minimumNodeManagerVersion + " sending SHUTDOWN signal to " + "NodeManager.";
            LOG.info(message);
            response.setDiagnosticsMessage(message);
            response.setNodeAction(NodeAction.SHUTDOWN);
            return response;
        }
    }
    // Check if this node is a 'valid' node
    // Gate 2: a host that is excluded AND not merely in the graceful
    // decommissioning state is told to shut down.
    if (!this.nodesListManager.isValidNode(host) && !isNodeInDecommissioning(nodeId)) {
        String message = "Disallowed NodeManager from  " + host + ", Sending SHUTDOWN signal to the NodeManager.";
        LOG.info(message);
        response.setDiagnosticsMessage(message);
        response.setNodeAction(NodeAction.SHUTDOWN);
        return response;
    }
    // check if node's capacity is load from dynamic-resources.xml
    // A dynamic-resources.xml entry for this node overrides the capability
    // reported by the NM; the adjusted value is echoed back in the response.
    String nid = nodeId.toString();
    Resource dynamicLoadCapability = loadNodeResourceFromDRConfiguration(nid);
    if (dynamicLoadCapability != null) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Resource for node: " + nid + " is adjusted from: " + capability + " to: " + dynamicLoadCapability + " due to settings in dynamic-resources.xml.");
        }
        capability = dynamicLoadCapability;
        // sync back with new resource.
        response.setResource(capability);
    }
    // Check if this node has minimum allocations
    // Gate 3: the (possibly overridden) capability must be able to satisfy
    // at least one minimum allocation, otherwise the node is useless to RM.
    if (capability.getMemorySize() < minAllocMb || capability.getVirtualCores() < minAllocVcores) {
        String message = "NodeManager from  " + host + " doesn't satisfy minimum allocations, Sending SHUTDOWN" + " signal to the NodeManager.";
        LOG.info(message);
        response.setDiagnosticsMessage(message);
        response.setNodeAction(NodeAction.SHUTDOWN);
        return response;
    }
    // Hand out the current secret-manager master keys so the NM can verify
    // container and NM tokens.
    response.setContainerTokenMasterKey(containerTokenSecretManager.getCurrentKey());
    response.setNMTokenMasterKey(nmTokenSecretManager.getCurrentKey());
    RMNode rmNode = new RMNodeImpl(nodeId, rmContext, host, cmPort, httpPort, resolve(host), capability, nodeManagerVersion, physicalResource);
    // putIfAbsent distinguishes a brand-new registration (oldNode == null)
    // from a reconnect of a node the RM already knows about.
    RMNode oldNode = this.rmContext.getRMNodes().putIfAbsent(nodeId, rmNode);
    if (oldNode == null) {
        this.rmContext.getDispatcher().getEventHandler().handle(new RMNodeStartedEvent(nodeId, request.getNMContainerStatuses(), request.getRunningApplications()));
    } else {
        LOG.info("Reconnect from the node at: " + host);
        // Unregister first; the node is re-registered with the liveness
        // monitor below after the reconnect event is dispatched.
        this.nmLivelinessMonitor.unregister(nodeId);
        // Reset heartbeat ID since node just restarted.
        oldNode.resetLastNodeHeartBeatResponse();
        this.rmContext.getDispatcher().getEventHandler().handle(new RMNodeReconnectEvent(nodeId, rmNode, request.getRunningApplications(), request.getNMContainerStatuses()));
    }
    // On every node manager register we will be clearing NMToken keys if
    // present for any running application.
    this.nmTokenSecretManager.removeNodeKey(nodeId);
    this.nmLivelinessMonitor.register(nodeId);
    // RMNode inserted
    // Without work-preserving recovery, reported container statuses must be
    // processed here (with it, recovery handles them elsewhere).
    if (!rmContext.isWorkPreservingRecoveryEnabled()) {
        if (!request.getNMContainerStatuses().isEmpty()) {
            LOG.info("received container statuses on node manager register :" + request.getNMContainerStatuses());
            for (NMContainerStatus status : request.getNMContainerStatuses()) {
                handleNMContainerStatus(status, nodeId);
            }
        }
    }
    // Update node's labels to RM's NodeLabelManager.
    Set<String> nodeLabels = NodeLabelsUtils.convertToStringSet(request.getNodeLabels());
    // Distributed label configuration: accept NM-reported labels, recording
    // acceptance/rejection in the response. Delegated-centralized: ask the
    // RM-side updater to refresh this node's labels instead.
    if (isDistributedNodeLabelsConf && nodeLabels != null) {
        try {
            updateNodeLabelsFromNMReport(nodeLabels, nodeId);
            response.setAreNodeLabelsAcceptedByRM(true);
        } catch (IOException ex) {
            // Ensure the exception is captured in the response
            response.setDiagnosticsMessage(ex.getMessage());
            response.setAreNodeLabelsAcceptedByRM(false);
        }
    } else if (isDelegatedCentralizedNodeLabelsConf) {
        this.rmContext.getRMDelegatedNodeLabelsUpdater().updateNodeLabels(nodeId);
    }
    // Summarize the successful registration in a single INFO log line.
    StringBuilder message = new StringBuilder();
    message.append("NodeManager from node ").append(host).append("(cmPort: ").append(cmPort).append(" httpPort: ");
    message.append(httpPort).append(") ").append("registered with capability: ").append(capability);
    message.append(", assigned nodeId ").append(nodeId);
    if (response.getAreNodeLabelsAcceptedByRM()) {
        message.append(", node labels { ").append(StringUtils.join(",", nodeLabels) + " } ");
    }
    LOG.info(message.toString());
    response.setNodeAction(NodeAction.NORMAL);
    response.setRMIdentifier(ResourceManager.getClusterTimeStamp());
    response.setRMVersion(YarnVersionInfo.getVersion());
    return response;
}
Also used : RMNodeReconnectEvent(org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeReconnectEvent) Resource(org.apache.hadoop.yarn.api.records.Resource) IOException(java.io.IOException) RMNodeStartedEvent(org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStartedEvent) RMNode(org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode) NMContainerStatus(org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus) NodeId(org.apache.hadoop.yarn.api.records.NodeId) RegisterNodeManagerResponse(org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerResponse) UnRegisterNodeManagerResponse(org.apache.hadoop.yarn.server.api.protocolrecords.UnRegisterNodeManagerResponse) RMNodeImpl(org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl)

Example 17 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

The class AdminService, method checkForDecommissioningNodes.

/**
 * Admin RPC that reports which nodes are currently decommissioning.
 *
 * Verifies the caller's admin ACLs and that the RM is active, queries the
 * nodes-list manager for the decommissioning set, audit-logs the call, and
 * returns the set in a new response record.
 *
 * @param checkForDecommissioningNodesRequest the (empty) request record
 * @return a response carrying the set of decommissioning NodeIds
 * @throws IOException on I/O failures
 * @throws YarnException if the caller is not authorized or the RM is not active
 */
@Override
public CheckForDecommissioningNodesResponse checkForDecommissioningNodes(CheckForDecommissioningNodesRequest checkForDecommissioningNodesRequest) throws IOException, YarnException {
    final String operation = "checkForDecommissioningNodes";
    final String msg = "check for decommissioning nodes.";
    // Reuse the 'operation' constant instead of repeating the literal, so the
    // ACL check, status check, and audit log can never drift out of sync.
    UserGroupInformation user = checkAcls(operation);
    checkRMStatus(user.getShortUserName(), operation, msg);
    Set<NodeId> decommissioningNodes = rmContext.getNodesListManager().checkForDecommissioningNodes();
    RMAuditLogger.logSuccess(user.getShortUserName(), operation, "AdminService");
    CheckForDecommissioningNodesResponse response = recordFactory.newRecordInstance(CheckForDecommissioningNodesResponse.class);
    response.setDecommissioningNodes(decommissioningNodes);
    return response;
}
Also used : NodeId(org.apache.hadoop.yarn.api.records.NodeId) CheckForDecommissioningNodesResponse(org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation)

Example 18 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

The class RMWebServices, method getLabelsToNodes.

/**
 * REST endpoint returning the label -&gt; nodes mapping.
 *
 * With no (or an empty) {@code labels} query parameter, mappings for all
 * known labels are returned; otherwise only the requested labels are looked
 * up. Node ids are rendered as strings in the response DAO.
 *
 * @param labels optional set of label names to filter by
 * @return a {@link LabelsToNodesInfo} DAO populated from the node label manager
 * @throws IOException on I/O failures during initialization or lookup
 */
@GET
@Path("/label-mappings")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public LabelsToNodesInfo getLabelsToNodes(@QueryParam("labels") Set<String> labels) throws IOException {
    init();
    LabelsToNodesInfo lts = new LabelsToNodesInfo();
    Map<NodeLabelInfo, NodeIDsInfo> ltsMap = lts.getLabelsToNodes();
    // Use isEmpty() (idiomatic) and assign directly instead of a redundant
    // null initializer followed by reassignment.
    Map<NodeLabel, Set<NodeId>> labelsToNodeId;
    if (labels == null || labels.isEmpty()) {
        // No filter supplied: return mappings for every known label.
        labelsToNodeId = rm.getRMContext().getNodeLabelManager().getLabelsInfoToNodes();
    } else {
        labelsToNodeId = rm.getRMContext().getNodeLabelManager().getLabelsInfoToNodes(labels);
    }
    for (Entry<NodeLabel, Set<NodeId>> entry : labelsToNodeId.entrySet()) {
        // Presize the list to avoid incremental ArrayList growth.
        List<String> nodeIdStrList = new ArrayList<String>(entry.getValue().size());
        for (NodeId nodeId : entry.getValue()) {
            nodeIdStrList.add(nodeId.toString());
        }
        ltsMap.put(new NodeLabelInfo(entry.getKey()), new NodeIDsInfo(nodeIdStrList));
    }
    return lts;
}
Also used : LabelsToNodesInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.LabelsToNodesInfo) NodeLabel(org.apache.hadoop.yarn.api.records.NodeLabel) EnumSet(java.util.EnumSet) Set(java.util.Set) HashSet(java.util.HashSet) NodeLabelInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelInfo) ArrayList(java.util.ArrayList) NodeId(org.apache.hadoop.yarn.api.records.NodeId) Path(javax.ws.rs.Path) Produces(javax.ws.rs.Produces) GET(javax.ws.rs.GET)

Example 19 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

The class RMWebServices, method replaceLabelsOnNode.

/**
 * REST endpoint that replaces the labels on a single node.
 *
 * Parses the node id from the path (defaulting the port when absent),
 * wraps it with the requested label set into a single-entry map, and
 * delegates to the shared multi-node replace-labels implementation.
 *
 * @param newNodeLabelsName the labels to place on the node
 * @param hsr the incoming HTTP request (used by the delegate for auth)
 * @param nodeId the node id path parameter, e.g. "host:port" or "host"
 * @return the delegate's HTTP response
 * @throws Exception propagated from the delegate
 */
@POST
@Path("/nodes/{nodeId}/replace-labels")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public Response replaceLabelsOnNode(@QueryParam("labels") Set<String> newNodeLabelsName, @Context HttpServletRequest hsr, @PathParam("nodeId") String nodeId) throws Exception {
    // Resolve the textual path parameter into a NodeId record.
    NodeId parsedNodeId = ConverterUtils.toNodeIdWithDefaultPort(nodeId);
    // Defensive-copy the label set and build the single-node mapping the
    // shared helper expects.
    Map<NodeId, Set<String>> labelsForNode = new HashMap<NodeId, Set<String>>();
    labelsForNode.put(parsedNodeId, new HashSet<String>(newNodeLabelsName));
    return replaceLabelsOnNode(labelsForNode, hsr, "/nodes/nodeid/replace-labels");
}
Also used : EnumSet(java.util.EnumSet) Set(java.util.Set) HashSet(java.util.HashSet) HashMap(java.util.HashMap) NodeId(org.apache.hadoop.yarn.api.records.NodeId) Path(javax.ws.rs.Path) POST(javax.ws.rs.POST) Produces(javax.ws.rs.Produces)

Example 20 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

The class RMWebServices, method getNodeToLabels.

/**
 * REST endpoint returning the node -&gt; labels mapping.
 *
 * Reads the full node-to-labels view from the node label manager and
 * copies it into the {@link NodeToLabelsInfo} DAO, keyed by the string
 * form of each NodeId.
 *
 * @param hsr the incoming HTTP request
 * @return a {@link NodeToLabelsInfo} DAO populated from the node label manager
 * @throws IOException on I/O failures during initialization or lookup
 */
@GET
@Path("/get-node-to-labels")
@Produces({ MediaType.APPLICATION_JSON + "; " + JettyUtils.UTF_8, MediaType.APPLICATION_XML + "; " + JettyUtils.UTF_8 })
public NodeToLabelsInfo getNodeToLabels(@Context HttpServletRequest hsr) throws IOException {
    init();
    NodeToLabelsInfo nodeToLabelsInfo = new NodeToLabelsInfo();
    HashMap<String, NodeLabelsInfo> resultMap = nodeToLabelsInfo.getNodeToLabels();
    Map<NodeId, Set<NodeLabel>> labelsByNode = rm.getRMContext().getNodeLabelManager().getNodeLabelsInfo();
    for (Map.Entry<NodeId, Set<NodeLabel>> nodeEntry : labelsByNode.entrySet()) {
        // Snapshot the label set into a list for the DAO constructor.
        List<NodeLabel> labelList = new ArrayList<NodeLabel>(nodeEntry.getValue());
        resultMap.put(nodeEntry.getKey().toString(), new NodeLabelsInfo(labelList));
    }
    return nodeToLabelsInfo;
}
Also used : EnumSet(java.util.EnumSet) Set(java.util.Set) HashSet(java.util.HashSet) NodeLabel(org.apache.hadoop.yarn.api.records.NodeLabel) NodeLabelsInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelsInfo) NodeId(org.apache.hadoop.yarn.api.records.NodeId) ArrayList(java.util.ArrayList) NodeToLabelsInfo(org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeToLabelsInfo) Map(java.util.Map) HashMap(java.util.HashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) Path(javax.ws.rs.Path) Produces(javax.ws.rs.Produces) GET(javax.ws.rs.GET)

Aggregations

NodeId (org.apache.hadoop.yarn.api.records.NodeId)257 Test (org.junit.Test)137 Resource (org.apache.hadoop.yarn.api.records.Resource)89 ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId)74 ContainerId (org.apache.hadoop.yarn.api.records.ContainerId)59 YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)46 Container (org.apache.hadoop.yarn.api.records.Container)44 ArrayList (java.util.ArrayList)43 Configuration (org.apache.hadoop.conf.Configuration)40 ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)40 RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer)40 HashSet (java.util.HashSet)39 Set (java.util.Set)36 HashMap (java.util.HashMap)35 FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode)34 Priority (org.apache.hadoop.yarn.api.records.Priority)32 FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp)31 IOException (java.io.IOException)29 ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits)29 RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode)28