
Example 86 with NodeId

Use of org.apache.hadoop.yarn.api.records.NodeId in the Apache Hadoop project.

From the class TestDistributedShellWithNodeLabels, method initializeNodeLabels:

private void initializeNodeLabels() throws IOException {
    RMContext rmContext = distShellTest.yarnCluster.getResourceManager(0).getRMContext();
    // Setup node labels
    RMNodeLabelsManager labelsMgr = rmContext.getNodeLabelManager();
    Set<String> labels = new HashSet<String>();
    labels.add("x");
    labelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(labels);
    // Setup queue access to node labels
    distShellTest.conf.set(PREFIX + "root.accessible-node-labels", "x");
    distShellTest.conf.set(PREFIX + "root.accessible-node-labels.x.capacity", "100");
    distShellTest.conf.set(PREFIX + "root.default.accessible-node-labels", "x");
    distShellTest.conf.set(PREFIX + "root.default.accessible-node-labels.x.capacity", "100");
    rmContext.getScheduler().reinitialize(distShellTest.conf, rmContext);
    // Fetch node-ids from yarn cluster
    NodeId[] nodeIds = new NodeId[NUM_NMS];
    for (int i = 0; i < NUM_NMS; i++) {
        NodeManager mgr = distShellTest.yarnCluster.getNodeManager(i);
        nodeIds[i] = mgr.getNMContext().getNodeId();
    }
    // Set label x to NM[1]
    labelsMgr.addLabelsToNode(ImmutableMap.of(nodeIds[1], labels));
}
Also used: RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext), NodeManager (org.apache.hadoop.yarn.server.nodemanager.NodeManager), NodeId (org.apache.hadoop.yarn.api.records.NodeId), RMNodeLabelsManager (org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager), HashSet (java.util.HashSet)
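
Here the NodeIds are read back from the running MiniYARNCluster, but a NodeId can also be constructed directly with NodeId.newInstance(host, port). Below is a minimal sketch of the same label setup against a hand-built NodeId; the helper name, host, and port are illustrative, and labelsMgr is assumed to be the RMNodeLabelsManager from the test above.

private void addLabelXToNode(RMNodeLabelsManager labelsMgr) throws IOException {
    // Hypothetical helper; the host and port below are illustrative only.
    NodeId nodeId = NodeId.newInstance("node-1.example.com", 45454);
    Set<String> labels = ImmutableSet.of("x");
    // Register the label with the cluster first, then map it onto the node,
    // mirroring the order used by initializeNodeLabels() above.
    labelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(labels);
    labelsMgr.addLabelsToNode(ImmutableMap.of(nodeId, labels));
}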

Example 87 with NodeId

Use of org.apache.hadoop.yarn.api.records.NodeId in the Apache Hadoop project.

From the class CheckForDecommissioningNodesResponsePBImpl, method addDecommissioningNodesToProto:

private void addDecommissioningNodesToProto() {
    maybeInitBuilder();
    builder.clearDecommissioningNodes();
    if (this.decommissioningNodes == null)
        return;
    Set<NodeIdProto> nodeIdProtos = new HashSet<NodeIdProto>();
    for (NodeId nodeId : decommissioningNodes) {
        nodeIdProtos.add(convertToProtoFormat(nodeId));
    }
    builder.addAllDecommissioningNodes(nodeIdProtos);
}
Also used: NodeId (org.apache.hadoop.yarn.api.records.NodeId), NodeIdProto (org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto), HashSet (java.util.HashSet)
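
The listing above relies on a convertToProtoFormat(NodeId) helper that it does not show. In Hadoop's PBImpl record classes that conversion is conventionally just a cast to the protobuf-backed implementation; a sketch of that typical form:

private NodeIdProto convertToProtoFormat(NodeId t) {
    // Records created through the records factory are backed by NodeIdPBImpl,
    // so converting to the wire format is just unwrapping the inner proto.
    return ((NodeIdPBImpl) t).getProto();
}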

Example 88 with NodeId

Use of org.apache.hadoop.yarn.api.records.NodeId in the Apache Hadoop project.

From the class CheckForDecommissioningNodesResponsePBImpl, method initNodesDecommissioning:

private void initNodesDecommissioning() {
    if (this.decommissioningNodes != null) {
        return;
    }
    CheckForDecommissioningNodesResponseProtoOrBuilder p = viaProto ? proto : builder;
    List<NodeIdProto> nodeIds = p.getDecommissioningNodesList();
    this.decommissioningNodes = new HashSet<NodeId>();
    for (NodeIdProto nodeIdProto : nodeIds) {
        this.decommissioningNodes.add(convertFromProtoFormat(nodeIdProto));
    }
}
Also used: CheckForDecommissioningNodesResponseProtoOrBuilder (org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.CheckForDecommissioningNodesResponseProtoOrBuilder), NodeId (org.apache.hadoop.yarn.api.records.NodeId), NodeIdProto (org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto)
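
Going the other way, the convertFromProtoFormat helper used above typically wraps each NodeIdProto back into a record object; a sketch of the conventional form, assuming the standard NodeIdPBImpl wrapper:

private NodeIdPBImpl convertFromProtoFormat(NodeIdProto p) {
    // Wrap the proto message so callers only ever see the
    // org.apache.hadoop.yarn.api.records.NodeId API, never the protobuf type.
    return new NodeIdPBImpl(p);
}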

Example 89 with NodeId

Use of org.apache.hadoop.yarn.api.records.NodeId in the Apache Hadoop project.

From the class UpdateNodeResourceRequestPBImpl, method addNodeResourceMap:

private void addNodeResourceMap() {
    maybeInitBuilder();
    builder.clearNodeResourceMap();
    if (nodeResourceMap == null) {
        return;
    }
    Iterable<? extends NodeResourceMapProto> values = new Iterable<NodeResourceMapProto>() {

        @Override
        public Iterator<NodeResourceMapProto> iterator() {
            return new Iterator<NodeResourceMapProto>() {

                Iterator<NodeId> nodeIterator = nodeResourceMap.keySet().iterator();

                @Override
                public boolean hasNext() {
                    return nodeIterator.hasNext();
                }

                @Override
                public NodeResourceMapProto next() {
                    NodeId nodeId = nodeIterator.next();
                    return NodeResourceMapProto.newBuilder()
                            .setNodeId(convertToProtoFormat(nodeId))
                            .setResourceOption(convertToProtoFormat(nodeResourceMap.get(nodeId)))
                            .build();
                }

                @Override
                public void remove() {
                    throw new UnsupportedOperationException();
                }
            };
        }
    };
    this.builder.addAllNodeResourceMap(values);
}
Also used: NodeResourceMapProto (org.apache.hadoop.yarn.proto.YarnProtos.NodeResourceMapProto), Iterator (java.util.Iterator), NodeId (org.apache.hadoop.yarn.api.records.NodeId)
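
The lazy Iterable above serializes a nodeResourceMap that an admin client would normally populate through the public request factory. Below is a sketch of building such a request; the helper name, host, port, and resource sizes are illustrative only.

private UpdateNodeResourceRequest buildUpdateRequest() {
    // Hypothetical helper: host, port, and sizes are made up for illustration.
    NodeId nodeId = NodeId.newInstance("node-1.example.com", 45454);
    // 8192 MB and 8 vcores as the node's new capacity.
    Resource newCapacity = Resource.newInstance(8192, 8);
    ResourceOption option = ResourceOption.newInstance(newCapacity,
            ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT);
    // The request's nodeResourceMap is what addNodeResourceMap() above
    // converts into NodeResourceMapProto entries.
    return UpdateNodeResourceRequest.newInstance(ImmutableMap.of(nodeId, option));
}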

Example 90 with NodeId

Use of org.apache.hadoop.yarn.api.records.NodeId in the Apache Hadoop project.

From the class CommonNodeLabelsManager, method checkRemoveLabelsFromNode:

protected void checkRemoveLabelsFromNode(Map<NodeId, Set<String>> removeLabelsFromNode) throws IOException {
    // check all labels being added existed
    Set<String> knownLabels = labelCollections.keySet();
    for (Entry<NodeId, Set<String>> entry : removeLabelsFromNode.entrySet()) {
        NodeId nodeId = entry.getKey();
        Set<String> labels = entry.getValue();
        if (!knownLabels.containsAll(labels)) {
            String msg = "Not all labels being removed contained by known " + "label collections, please check" + ", removed labels=[" + StringUtils.join(labels, ",") + "]";
            LOG.error(msg);
            throw new IOException(msg);
        }
        Set<String> originalLabels = null;
        boolean nodeExisted = false;
        if (WILDCARD_PORT != nodeId.getPort()) {
            Node nm = getNMInNodeSet(nodeId);
            if (nm != null) {
                originalLabels = nm.labels;
                nodeExisted = true;
            }
        } else {
            Host host = nodeCollections.get(nodeId.getHost());
            if (null != host) {
                originalLabels = host.labels;
                nodeExisted = true;
            }
        }
        if (!nodeExisted) {
            String msg = "Try to remove labels from NM=" + nodeId + ", but the NM doesn't existed";
            LOG.error(msg);
            throw new IOException(msg);
        }
        // the labels will never be null
        if (labels.isEmpty()) {
            continue;
        }
        // because when a Node is created, Node.labels can be null.
        if (originalLabels == null || !originalLabels.containsAll(labels)) {
            String msg = "Try to remove labels = [" + StringUtils.join(labels, ",") + "], but not all labels contained by NM=" + nodeId;
            LOG.error(msg);
            throw new IOException(msg);
        }
    }
}
Also used: HashSet (java.util.HashSet), EnumSet (java.util.EnumSet), ImmutableSet (com.google.common.collect.ImmutableSet), Set (java.util.Set), NodeId (org.apache.hadoop.yarn.api.records.NodeId), IOException (java.io.IOException)
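
The wildcard-port branch above means a NodeId with port 0 (CommonNodeLabelsManager.WILDCARD_PORT) addresses every NodeManager on a host, while a concrete port addresses a single NodeManager. A sketch of a removal request exercising both forms; the helper name and host names are illustrative, and labelsMgr is assumed to be a CommonNodeLabelsManager instance.

private void removeLabelX(CommonNodeLabelsManager labelsMgr) throws IOException {
    Set<String> labels = ImmutableSet.of("x");
    // Port 0 targets every NM registered on node-1; the explicit port
    // targets only the NM listening on node-2:45454.
    labelsMgr.removeLabelsFromNode(ImmutableMap.of(
            NodeId.newInstance("node-1.example.com", 0), labels,
            NodeId.newInstance("node-2.example.com", 45454), labels));
}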

Aggregations

NodeId (org.apache.hadoop.yarn.api.records.NodeId): 257
Test (org.junit.Test): 137
Resource (org.apache.hadoop.yarn.api.records.Resource): 89
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 74
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 59
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 46
Container (org.apache.hadoop.yarn.api.records.Container): 44
ArrayList (java.util.ArrayList): 43
Configuration (org.apache.hadoop.conf.Configuration): 40
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 40
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 40
HashSet (java.util.HashSet): 39
Set (java.util.Set): 36
HashMap (java.util.HashMap): 35
FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode): 34
Priority (org.apache.hadoop.yarn.api.records.Priority): 32
FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp): 31
IOException (java.io.IOException): 29
ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits): 29
RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode): 28