Use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache: class TestDistributedShellWithNodeLabels, method initializeNodeLabels.
private void initializeNodeLabels() throws IOException {
  RMContext rmContext = distShellTest.yarnCluster.getResourceManager(0).getRMContext();
  // Setup node labels
  RMNodeLabelsManager labelsMgr = rmContext.getNodeLabelManager();
  Set<String> labels = new HashSet<String>();
  labels.add("x");
  labelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(labels);
  // Setup queue access to node labels
  distShellTest.conf.set(PREFIX + "root.accessible-node-labels", "x");
  distShellTest.conf.set(PREFIX + "root.accessible-node-labels.x.capacity", "100");
  distShellTest.conf.set(PREFIX + "root.default.accessible-node-labels", "x");
  distShellTest.conf.set(PREFIX + "root.default.accessible-node-labels.x.capacity", "100");
  rmContext.getScheduler().reinitialize(distShellTest.conf, rmContext);
  // Fetch node-ids from yarn cluster
  NodeId[] nodeIds = new NodeId[NUM_NMS];
  for (int i = 0; i < NUM_NMS; i++) {
    NodeManager mgr = distShellTest.yarnCluster.getNodeManager(i);
    nodeIds[i] = mgr.getNMContext().getNodeId();
  }
  // Set label x to NM[1]
  labelsMgr.addLabelsToNode(ImmutableMap.of(nodeIds[1], labels));
}
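A NodeId can also be constructed directly through its public factory when no NodeManager context is at hand. The following is a minimal sketch of building the Map<NodeId, Set<String>> that addLabelsToNode expects; the host name and port are hypothetical, and labelsMgr is assumed to be an RMNodeLabelsManager obtained as in the test above.

import java.util.Collections;
import java.util.Map;
import java.util.Set;

import com.google.common.collect.ImmutableMap;
import org.apache.hadoop.yarn.api.records.NodeId;

// Sketch: label a node identified only by host:port.
// NodeId.newInstance(host, port) is the public factory for NodeId records;
// "worker-1.example.com" and 45454 are hypothetical values.
static Map<NodeId, Set<String>> singleNodeLabelMapping() {
  NodeId nodeId = NodeId.newInstance("worker-1.example.com", 45454);
  Set<String> labels = Collections.singleton("x");
  return ImmutableMap.of(nodeId, labels);
}

Passing the returned map to labelsMgr.addLabelsToNode(...) mirrors the last line of the test method above.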
Use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache: class CheckForDecommissioningNodesResponsePBImpl, method addDecommissioningNodesToProto.
private void addDecommissioningNodesToProto() {
  maybeInitBuilder();
  builder.clearDecommissioningNodes();
  if (this.decommissioningNodes == null)
    return;
  Set<NodeIdProto> nodeIdProtos = new HashSet<NodeIdProto>();
  for (NodeId nodeId : decommissioningNodes) {
    nodeIdProtos.add(convertToProtoFormat(nodeId));
  }
  builder.addAllDecommissioningNodes(nodeIdProtos);
}
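The private convertToProtoFormat helper is not shown in this snippet. In YARN's protobuf record implementations it typically bridges through the NodeIdPBImpl wrapper; the sketch below shows that conversion pattern under the assumption that the NodeId instance is backed by NodeIdPBImpl.

import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;

// Sketch of the usual NodeId <-> NodeIdProto bridge used by PBImpl classes.
// Assumes the NodeId is (or can be wrapped as) a NodeIdPBImpl.
static NodeIdProto toProto(NodeId nodeId) {
  return ((NodeIdPBImpl) nodeId).getProto();
}

static NodeId fromProto(NodeIdProto proto) {
  return new NodeIdPBImpl(proto);
}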
Use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache: class CheckForDecommissioningNodesResponsePBImpl, method initNodesDecommissioning.
private void initNodesDecommissioning() {
  if (this.decommissioningNodes != null) {
    return;
  }
  CheckForDecommissioningNodesResponseProtoOrBuilder p = viaProto ? proto : builder;
  List<NodeIdProto> nodeIds = p.getDecommissioningNodesList();
  this.decommissioningNodes = new HashSet<NodeId>();
  for (NodeIdProto nodeIdProto : nodeIds) {
    this.decommissioningNodes.add(convertFromProtoFormat(nodeIdProto));
  }
}
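On the caller side, the lazily-initialized set surfaces through the response's getter. A minimal sketch of consuming it, assuming a CheckForDecommissioningNodesResponse instance obtained from the ResourceManager admin protocol is already in hand:

import java.util.Set;

import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.server.api.protocolrecords.CheckForDecommissioningNodesResponse;

// Sketch: report which node managers are still draining containers.
static void printDecommissioningNodes(CheckForDecommissioningNodesResponse response) {
  Set<NodeId> nodes = response.getDecommissioningNodes();
  for (NodeId nodeId : nodes) {
    System.out.println("Still decommissioning: " + nodeId.getHost() + ":" + nodeId.getPort());
  }
}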
Use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache: class UpdateNodeResourceRequestPBImpl, method addNodeResourceMap.
private void addNodeResourceMap() {
  maybeInitBuilder();
  builder.clearNodeResourceMap();
  if (nodeResourceMap == null) {
    return;
  }
  Iterable<? extends NodeResourceMapProto> values = new Iterable<NodeResourceMapProto>() {

    @Override
    public Iterator<NodeResourceMapProto> iterator() {
      return new Iterator<NodeResourceMapProto>() {

        Iterator<NodeId> nodeIterator = nodeResourceMap.keySet().iterator();

        @Override
        public boolean hasNext() {
          return nodeIterator.hasNext();
        }

        @Override
        public NodeResourceMapProto next() {
          NodeId nodeId = nodeIterator.next();
          return NodeResourceMapProto.newBuilder()
              .setNodeId(convertToProtoFormat(nodeId))
              .setResourceOption(convertToProtoFormat(nodeResourceMap.get(nodeId)))
              .build();
        }

        @Override
        public void remove() {
          throw new UnsupportedOperationException();
        }
      };
    }
  };
  this.builder.addAllNodeResourceMap(values);
}
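For context, the Map<NodeId, ResourceOption> serialized above is normally populated on the client side through the record factory methods. The sketch below builds such a request; the host, port, and resource sizes are hypothetical, and -1 is used as the over-commit timeout (treated as "no timeout").

import java.util.Map;

import com.google.common.collect.ImmutableMap;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceOption;
import org.apache.hadoop.yarn.server.api.protocolrecords.UpdateNodeResourceRequest;

// Sketch: resize a single node to 8 GB / 4 vcores.
// Host, port, and resource values are hypothetical.
static UpdateNodeResourceRequest buildResizeRequest() {
  NodeId nodeId = NodeId.newInstance("worker-1.example.com", 45454);
  ResourceOption option =
      ResourceOption.newInstance(Resource.newInstance(8192, 4), -1);
  Map<NodeId, ResourceOption> nodeResourceMap = ImmutableMap.of(nodeId, option);
  return UpdateNodeResourceRequest.newInstance(nodeResourceMap);
}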
Use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache: class CommonNodeLabelsManager, method checkRemoveLabelsFromNode.
protected void checkRemoveLabelsFromNode(Map<NodeId, Set<String>> removeLabelsFromNode) throws IOException {
  // check that all labels being removed exist in the known label collections
  Set<String> knownLabels = labelCollections.keySet();
  for (Entry<NodeId, Set<String>> entry : removeLabelsFromNode.entrySet()) {
    NodeId nodeId = entry.getKey();
    Set<String> labels = entry.getValue();
    if (!knownLabels.containsAll(labels)) {
      String msg = "Not all labels being removed contained by known "
          + "label collections, please check"
          + ", removed labels=[" + StringUtils.join(labels, ",") + "]";
      LOG.error(msg);
      throw new IOException(msg);
    }
    Set<String> originalLabels = null;
    boolean nodeExisted = false;
    if (WILDCARD_PORT != nodeId.getPort()) {
      Node nm = getNMInNodeSet(nodeId);
      if (nm != null) {
        originalLabels = nm.labels;
        nodeExisted = true;
      }
    } else {
      Host host = nodeCollections.get(nodeId.getHost());
      if (null != host) {
        originalLabels = host.labels;
        nodeExisted = true;
      }
    }
    if (!nodeExisted) {
      String msg = "Try to remove labels from NM=" + nodeId + ", but the NM doesn't existed";
      LOG.error(msg);
      throw new IOException(msg);
    }
    // the labels set will never be null
    if (labels.isEmpty()) {
      continue;
    }
    // originalLabels may be null, because Node.labels can be null when a Node is created
    if (originalLabels == null || !originalLabels.containsAll(labels)) {
      String msg = "Try to remove labels = [" + StringUtils.join(labels, ",")
          + "], but not all labels contained by NM=" + nodeId;
      LOG.error(msg);
      throw new IOException(msg);
    }
  }
}
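The WILDCARD_PORT branch above means callers can target either one specific NodeManager (host plus real port) or every NodeManager on a host. The sketch below shows both call shapes; it assumes a CommonNodeLabelsManager instance is available and uses hypothetical host names and port.

import java.io.IOException;
import java.util.Collections;
import java.util.Set;

import com.google.common.collect.ImmutableMap;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager;

// Sketch: remove label "x" from one NM and from every NM on a host.
static void removeLabelX(CommonNodeLabelsManager labelsMgr) throws IOException {
  Set<String> labels = Collections.singleton("x");
  // Specific node manager: host + real port.
  NodeId oneNm = NodeId.newInstance("worker-1.example.com", 45454);
  labelsMgr.removeLabelsFromNode(ImmutableMap.of(oneNm, labels));
  // Whole host: the wildcard port routes to the Host entry in nodeCollections.
  NodeId wholeHost =
      NodeId.newInstance("worker-2.example.com", CommonNodeLabelsManager.WILDCARD_PORT);
  labelsMgr.removeLabelsFromNode(ImmutableMap.of(wholeHost, labels));
}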