use of org.apache.hadoop.yarn.api.records.NodeLabel in project hadoop by apache.
the class GetClusterNodeLabelsResponsePBImpl method getNodeLabels.
/**
 * @deprecated Use {@link #getNodeLabelList()} instead.
 */
@Override
@Deprecated
public synchronized Set<String> getNodeLabels() {
  Set<String> set = new HashSet<>();
  List<NodeLabel> labelList = getNodeLabelList();
  if (labelList != null) {
    for (NodeLabel label : labelList) {
      set.add(label.getName());
    }
  }
  return set;
}
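A caller migrating off the deprecated accessor can derive the same name set from the full label records. A minimal caller-side sketch, assuming a GetClusterNodeLabelsResponse named response is already in hand:

// Prefer getNodeLabelList(), which returns full NodeLabel records (name plus
// exclusivity), over the deprecated name-only view.
List<NodeLabel> labels = response.getNodeLabelList();
Set<String> names = new HashSet<>();
for (NodeLabel label : labels) {
  names.add(label.getName()); // same strings the deprecated getNodeLabels() returned
}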
use of org.apache.hadoop.yarn.api.records.NodeLabel in project hadoop by apache.
the class FileSystemNodeLabelsStore method loadFromMirror.
protected void loadFromMirror(Path newMirrorPath, Path oldMirrorPath)
    throws IOException {
  // If mirror.new exists, read from mirror.new; otherwise fall back to the old mirror.
  FSDataInputStream is = null;
  try {
    is = fs.open(newMirrorPath);
  } catch (FileNotFoundException e) {
    try {
      is = fs.open(oldMirrorPath);
    } catch (FileNotFoundException ignored) {
      // Neither mirror exists yet; there is nothing to load.
    }
  }
  if (null != is) {
    List<NodeLabel> labels = new AddToClusterNodeLabelsRequestPBImpl(
        AddToClusterNodeLabelsRequestProto.parseDelimitedFrom(is)).getNodeLabels();
    mgr.addToCluserNodeLabels(labels);
    if (mgr.isCentralizedConfiguration()) {
      // Only load the node-to-labels mapping when using centralized configuration.
      Map<NodeId, Set<String>> nodeToLabels = new ReplaceLabelsOnNodeRequestPBImpl(
          ReplaceLabelsOnNodeRequestProto.parseDelimitedFrom(is)).getNodeToLabels();
      mgr.replaceLabelsOnNode(nodeToLabels);
    }
    is.close();
  }
}
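The two delimited records restored here feed directly into the node-labels manager. A small sketch of the shapes involved, using the same factory methods that appear in the other snippets on this page (the label and host names are made up for illustration):

// Hypothetical data a mirror might contain after a round trip: a list of
// NodeLabel records for addToCluserNodeLabels, and a node-to-label-names map
// for replaceLabelsOnNode.
List<NodeLabel> labels = Arrays.asList(
    NodeLabel.newInstance("gpu", false),   // non-exclusive label
    NodeLabel.newInstance("licensed"));    // exclusivity defaults to true
Map<NodeId, Set<String>> nodeToLabels = new HashMap<>();
nodeToLabels.put(NodeId.newInstance("host1", 1234), ImmutableSet.of("gpu"));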
use of org.apache.hadoop.yarn.api.records.NodeLabel in project hadoop by apache.
the class NonAppendableFSNodeLabelStore method writeNewMirror.
private void writeNewMirror() throws IOException {
  ReentrantReadWriteLock.ReadLock readLock = mgr.readLock;
  try {
    // Acquire the read lock so the cluster node labels and the
    // node-to-labels mapping are captured atomically.
    readLock.lock();
    List<NodeLabel> nodeLabels = mgr.getClusterNodeLabels();
    Map<NodeId, Set<String>> nodeToLabels = mgr.getNodeLabels();
    // Write the mirror to the mirror.new.tmp file.
    Path newTmpPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".new.tmp");
    FSDataOutputStream os = fs.create(newTmpPath, true);
    ((AddToClusterNodeLabelsRequestPBImpl) AddToClusterNodeLabelsRequest
        .newInstance(nodeLabels)).getProto().writeDelimitedTo(os);
    if (mgr.isCentralizedConfiguration()) {
      // Only save the node-to-labels mapping when using centralized configuration.
      ((ReplaceLabelsOnNodeRequestPBImpl) ReplaceLabelsOnNodeRequest
          .newInstance(nodeToLabels)).getProto().writeDelimitedTo(os);
    }
    os.close();
    // Rename mirror.new.tmp to mirror.new (removing mirror.new first if it exists).
    Path newPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".new");
    fs.delete(newPath, false);
    fs.rename(newTmpPath, newPath);
    // Remove the existing mirror and rename mirror.new to mirror.
    Path mirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME);
    fs.delete(mirrorPath, false);
    fs.rename(newPath, mirrorPath);
  } finally {
    readLock.unlock();
  }
}
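One detail worth noting: the snippet takes the read lock inside the try block, while the conventional ReentrantReadWriteLock idiom acquires the lock before entering try so that finally never unlocks a lock that was never obtained. A minimal sketch of that idiom, not the Hadoop implementation above:

// Standard lock-before-try pattern for ReentrantReadWriteLock.
ReentrantReadWriteLock.ReadLock readLock = mgr.readLock;
readLock.lock();
try {
  // Read the cluster node labels and node-to-labels mapping atomically.
} finally {
  readLock.unlock();
}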
use of org.apache.hadoop.yarn.api.records.NodeLabel in project hadoop by apache.
the class NodeHeartbeatRequestPBImpl method initNodeLabels.
private void initNodeLabels() {
  if (this.labels != null) {
    return;
  }
  NodeHeartbeatRequestProtoOrBuilder p = viaProto ? proto : builder;
  if (!p.hasNodeLabels()) {
    labels = null;
    return;
  }
  NodeLabelsProto nodeLabels = p.getNodeLabels();
  labels = new HashSet<NodeLabel>();
  for (NodeLabelProto nlp : nodeLabels.getNodeLabelsList()) {
    labels.add(convertFromProtoFormat(nlp));
  }
}
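convertFromProtoFormat is not shown in this snippet; in the PBImpl record classes it is typically a one-line wrap of the proto message into the corresponding record implementation. A sketch of what it most likely looks like (an assumption, not copied from the Hadoop source):

private NodeLabel convertFromProtoFormat(NodeLabelProto p) {
  // Wrap the proto message in the proto-backed NodeLabel implementation.
  return new NodeLabelPBImpl(p);
}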
use of org.apache.hadoop.yarn.api.records.NodeLabel in project hadoop by apache.
the class TestClientRMService method testGetNodeLabels.
@Test
public void testGetNodeLabels() throws Exception {
  MockRM rm = new MockRM() {
    protected ClientRMService createClientRMService() {
      return new ClientRMService(this.rmContext, scheduler, this.rmAppManager,
          this.applicationACLsManager, this.queueACLsManager,
          this.getRMContext().getRMDelegationTokenSecretManager());
    }
  };
  rm.start();
  NodeLabel labelX = NodeLabel.newInstance("x", false);
  NodeLabel labelY = NodeLabel.newInstance("y");
  RMNodeLabelsManager labelsMgr = rm.getRMContext().getNodeLabelManager();
  labelsMgr.addToCluserNodeLabels(ImmutableSet.of(labelX, labelY));
  NodeId node1 = NodeId.newInstance("host1", 1234);
  NodeId node2 = NodeId.newInstance("host2", 1234);
  Map<NodeId, Set<String>> map = new HashMap<NodeId, Set<String>>();
  map.put(node1, ImmutableSet.of("x"));
  map.put(node2, ImmutableSet.of("y"));
  labelsMgr.replaceLabelsOnNode(map);
  // Create a client.
  Configuration conf = new Configuration();
  YarnRPC rpc = YarnRPC.create(conf);
  InetSocketAddress rmAddress = rm.getClientRMService().getBindAddress();
  LOG.info("Connecting to ResourceManager at " + rmAddress);
  ApplicationClientProtocol client = (ApplicationClientProtocol) rpc.getProxy(
      ApplicationClientProtocol.class, rmAddress, conf);
  // Get the node labels collection.
  GetClusterNodeLabelsResponse response = client.getClusterNodeLabels(
      GetClusterNodeLabelsRequest.newInstance());
  Assert.assertTrue(response.getNodeLabelList().containsAll(
      Arrays.asList(labelX, labelY)));
  // Get the node-to-labels mapping.
  GetNodesToLabelsResponse response1 = client.getNodeToLabels(
      GetNodesToLabelsRequest.newInstance());
  Map<NodeId, Set<String>> nodeToLabels = response1.getNodeToLabels();
  Assert.assertTrue(nodeToLabels.keySet().containsAll(Arrays.asList(node1, node2)));
  Assert.assertTrue(nodeToLabels.get(node1).containsAll(
      Arrays.asList(labelX.getName())));
  Assert.assertTrue(nodeToLabels.get(node2).containsAll(
      Arrays.asList(labelY.getName())));
  // Below label "x" is not present in the response as exclusivity is true.
  Assert.assertFalse(nodeToLabels.get(node1).containsAll(
      Arrays.asList(NodeLabel.newInstance("x"))));
  rpc.stopProxy(client, conf);
  rm.stop();
}
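The NodeLabel objects returned by getNodeLabelList() carry exclusivity in addition to the name, which the plain string sets in the node-to-labels map do not. A small follow-on check along those lines, assumed to run in the same test with the response obtained above:

// Hypothetical extra assertions: "x" was registered as non-exclusive,
// "y" took the default exclusivity (true).
for (NodeLabel label : response.getNodeLabelList()) {
  if (label.getName().equals("x")) {
    Assert.assertFalse(label.isExclusive());
  } else if (label.getName().equals("y")) {
    Assert.assertTrue(label.isExclusive());
  }
}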