Example 6 with NodeLabel

use of org.apache.hadoop.yarn.api.records.NodeLabel in project hadoop by apache.

Class GetClusterNodeLabelsResponsePBImpl, method getNodeLabels.

/**
   * @deprecated Use {@link #getNodeLabelList()} instead.
   */
@Override
@Deprecated
public synchronized Set<String> getNodeLabels() {
    Set<String> set = new HashSet<>();
    List<NodeLabel> labelList = getNodeLabelList();
    if (labelList != null) {
        for (NodeLabel label : labelList) {
            set.add(label.getName());
        }
    }
    return set;
}
Also used : NodeLabel(org.apache.hadoop.yarn.api.records.NodeLabel) HashSet(java.util.HashSet)
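
Callers outside the PBImpl can build the same name set without touching the deprecated accessor. The helper below is a hypothetical sketch (the class and method names are illustrative, not part of Hadoop); it relies only on NodeLabel.getName(), the same projection the deprecated method performs.

import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.NodeLabel;

// Hypothetical helper, not part of Hadoop: project a List<NodeLabel> (from the
// non-deprecated getNodeLabelList()) down to the Set<String> that the deprecated
// getNodeLabels() used to return.
public final class NodeLabelNames {
    private NodeLabelNames() {
    }

    public static Set<String> toNames(List<NodeLabel> labels) {
        Set<String> names = new HashSet<>();
        if (labels != null) {
            for (NodeLabel label : labels) {
                names.add(label.getName());
            }
        }
        return names;
    }
}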

Example 7 with NodeLabel

use of org.apache.hadoop.yarn.api.records.NodeLabel in project hadoop by apache.

Class FileSystemNodeLabelsStore, method loadFromMirror.

protected void loadFromMirror(Path newMirrorPath, Path oldMirrorPath) throws IOException {
    // If mirror.new exists, read from mirror.new; otherwise fall back to the old mirror file.
    FSDataInputStream is = null;
    try {
        is = fs.open(newMirrorPath);
    } catch (FileNotFoundException e) {
        try {
            is = fs.open(oldMirrorPath);
        } catch (FileNotFoundException ignored) {
        }
    }
    if (null != is) {
        List<NodeLabel> labels = new AddToClusterNodeLabelsRequestPBImpl(AddToClusterNodeLabelsRequestProto.parseDelimitedFrom(is)).getNodeLabels();
        mgr.addToCluserNodeLabels(labels);
        if (mgr.isCentralizedConfiguration()) {
            // Only load node to labels mapping while using centralized configuration
            Map<NodeId, Set<String>> nodeToLabels = new ReplaceLabelsOnNodeRequestPBImpl(ReplaceLabelsOnNodeRequestProto.parseDelimitedFrom(is)).getNodeToLabels();
            mgr.replaceLabelsOnNode(nodeToLabels);
        }
        is.close();
    }
}
Also used : ReplaceLabelsOnNodeRequestPBImpl(org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeRequestPBImpl) NodeLabel(org.apache.hadoop.yarn.api.records.NodeLabel) Set(java.util.Set) AddToClusterNodeLabelsRequestPBImpl(org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl) FileNotFoundException(java.io.FileNotFoundException) NodeId(org.apache.hadoop.yarn.api.records.NodeId) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream)
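
The open-with-fallback logic above can be read on its own: prefer mirror.new, fall back to the previous mirror, and tolerate a first start where neither file exists yet. A minimal stand-alone sketch of that pattern, assuming only the standard FileSystem.open() API; the class and method names here are illustrative, not Hadoop's.

import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative utility (not part of Hadoop): open mirror.new if present,
// otherwise the previous mirror, otherwise return null (fresh cluster, no state yet).
final class MirrorOpener {
    static FSDataInputStream openNewestMirror(FileSystem fs, Path newMirror, Path oldMirror)
            throws IOException {
        try {
            return fs.open(newMirror);
        } catch (FileNotFoundException e) {
            try {
                return fs.open(oldMirror);
            } catch (FileNotFoundException ignored) {
                return null;
            }
        }
    }
}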

Example 8 with NodeLabel

use of org.apache.hadoop.yarn.api.records.NodeLabel in project hadoop by apache.

Class NonAppendableFSNodeLabelStore, method writeNewMirror.

private void writeNewMirror() throws IOException {
    ReentrantReadWriteLock.ReadLock readLock = mgr.readLock;
    try {
        // Acquire readlock to make sure we get cluster node labels and
        // node-to-labels mapping atomically.
        readLock.lock();
        List<NodeLabel> nodeLabels = mgr.getClusterNodeLabels();
        Map<NodeId, Set<String>> nodeToLabels = mgr.getNodeLabels();
        // Write mirror to mirror.new.tmp file
        Path newTmpPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".new.tmp");
        FSDataOutputStream os = fs.create(newTmpPath, true);
        ((AddToClusterNodeLabelsRequestPBImpl) AddToClusterNodeLabelsRequest.newInstance(nodeLabels)).getProto().writeDelimitedTo(os);
        if (mgr.isCentralizedConfiguration()) {
            // Only save node-to-labels mapping while using centralized configuration
            ((ReplaceLabelsOnNodeRequestPBImpl) ReplaceLabelsOnNodeRequest.newInstance(nodeToLabels)).getProto().writeDelimitedTo(os);
        }
        os.close();
        // Rename mirror.new.tmp to mirror.new (delete any existing .new first, since rename does not overwrite)
        Path newPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".new");
        fs.delete(newPath, false);
        fs.rename(newTmpPath, newPath);
        // Remove existing mirror and rename mirror.new to mirror
        Path mirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME);
        fs.delete(mirrorPath, false);
        fs.rename(newPath, mirrorPath);
    } finally {
        readLock.unlock();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NodeLabel(org.apache.hadoop.yarn.api.records.NodeLabel) Set(java.util.Set) NodeId(org.apache.hadoop.yarn.api.records.NodeId) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock)
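
The tail of writeNewMirror is a three-file promotion: write to .new.tmp, rename it to .new, then rename that to the plain mirror name, deleting each target first because FileSystem.rename() does not overwrite. A hedged sketch of just that promotion step, with illustrative names (not Hadoop code):

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch: promote a fully written .new.tmp file into place.
// If the process dies between steps, loadFromMirror() can still recover from
// whichever of mirror.new or mirror survived.
final class MirrorPromoter {
    static void promote(FileSystem fs, Path workingDir, String mirrorName) throws IOException {
        Path tmp = new Path(workingDir, mirrorName + ".new.tmp");
        Path next = new Path(workingDir, mirrorName + ".new");
        Path current = new Path(workingDir, mirrorName);
        // Drop any stale .new left behind by an earlier crash, then publish the new image.
        fs.delete(next, false);
        fs.rename(tmp, next);
        // Retire the old mirror and make the new image authoritative.
        fs.delete(current, false);
        fs.rename(next, current);
    }
}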

Example 9 with NodeLabel

use of org.apache.hadoop.yarn.api.records.NodeLabel in project hadoop by apache.

Class NodeHeartbeatRequestPBImpl, method initNodeLabels.

private void initNodeLabels() {
    if (this.labels != null) {
        return;
    }
    NodeHeartbeatRequestProtoOrBuilder p = viaProto ? proto : builder;
    if (!p.hasNodeLabels()) {
        labels = null;
        return;
    }
    NodeLabelsProto nodeLabels = p.getNodeLabels();
    labels = new HashSet<NodeLabel>();
    for (NodeLabelProto nlp : nodeLabels.getNodeLabelsList()) {
        labels.add(convertFromProtoFormat(nlp));
    }
}
Also used : NodeLabelsProto(org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeLabelsProto) NodeLabel(org.apache.hadoop.yarn.api.records.NodeLabel) NodeLabelProto(org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto) NodeHeartbeatRequestProtoOrBuilder(org.apache.hadoop.yarn.proto.YarnServerCommonServiceProtos.NodeHeartbeatRequestProtoOrBuilder)
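
initNodeLabels() leans on convertFromProtoFormat to turn each NodeLabelProto into a NodeLabel record. In YARN's PBImpl classes that conversion is usually a thin wrapper around the proto; the sketch below assumes that pattern (NodeLabelPBImpl wrapping the proto) rather than quoting the actual method.

import org.apache.hadoop.yarn.api.records.NodeLabel;
import org.apache.hadoop.yarn.api.records.impl.pb.NodeLabelPBImpl;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto;

// Assumed shape of the proto-to-record conversion used above: wrap the proto
// in its PBImpl rather than copying individual fields.
final class ProtoConversion {
    static NodeLabel convertFromProtoFormat(NodeLabelProto proto) {
        return new NodeLabelPBImpl(proto);
    }
}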

Example 10 with NodeLabel

use of org.apache.hadoop.yarn.api.records.NodeLabel in project hadoop by apache.

Class TestClientRMService, method testGetNodeLabels.

@Test
public void testGetNodeLabels() throws Exception {
    MockRM rm = new MockRM() {

        protected ClientRMService createClientRMService() {
            return new ClientRMService(this.rmContext, scheduler, this.rmAppManager, this.applicationACLsManager, this.queueACLsManager, this.getRMContext().getRMDelegationTokenSecretManager());
        }
    };
    rm.start();
    NodeLabel labelX = NodeLabel.newInstance("x", false);
    NodeLabel labelY = NodeLabel.newInstance("y");
    RMNodeLabelsManager labelsMgr = rm.getRMContext().getNodeLabelManager();
    labelsMgr.addToCluserNodeLabels(ImmutableSet.of(labelX, labelY));
    NodeId node1 = NodeId.newInstance("host1", 1234);
    NodeId node2 = NodeId.newInstance("host2", 1234);
    Map<NodeId, Set<String>> map = new HashMap<NodeId, Set<String>>();
    map.put(node1, ImmutableSet.of("x"));
    map.put(node2, ImmutableSet.of("y"));
    labelsMgr.replaceLabelsOnNode(map);
    // Create a client.
    Configuration conf = new Configuration();
    YarnRPC rpc = YarnRPC.create(conf);
    InetSocketAddress rmAddress = rm.getClientRMService().getBindAddress();
    LOG.info("Connecting to ResourceManager at " + rmAddress);
    ApplicationClientProtocol client = (ApplicationClientProtocol) rpc.getProxy(ApplicationClientProtocol.class, rmAddress, conf);
    // Get node labels collection
    GetClusterNodeLabelsResponse response = client.getClusterNodeLabels(GetClusterNodeLabelsRequest.newInstance());
    Assert.assertTrue(response.getNodeLabelList().containsAll(Arrays.asList(labelX, labelY)));
    // Get node labels mapping
    GetNodesToLabelsResponse response1 = client.getNodeToLabels(GetNodesToLabelsRequest.newInstance());
    Map<NodeId, Set<String>> nodeToLabels = response1.getNodeToLabels();
    Assert.assertTrue(nodeToLabels.keySet().containsAll(Arrays.asList(node1, node2)));
    Assert.assertTrue(nodeToLabels.get(node1).containsAll(Arrays.asList(labelX.getName())));
    Assert.assertTrue(nodeToLabels.get(node2).containsAll(Arrays.asList(labelY.getName())));
    // Below label "x" is not present in the response as exclusivity is true
    Assert.assertFalse(nodeToLabels.get(node1).containsAll(Arrays.asList(NodeLabel.newInstance("x"))));
    rpc.stopProxy(client, conf);
    rm.stop();
}
Also used : EnumSet(java.util.EnumSet) Set(java.util.Set) ImmutableSet(com.google.common.collect.ImmutableSet) HashSet(java.util.HashSet) CapacitySchedulerConfiguration(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration) Configuration(org.apache.hadoop.conf.Configuration) YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) InetSocketAddress(java.net.InetSocketAddress) Matchers.anyString(org.mockito.Matchers.anyString) YarnRPC(org.apache.hadoop.yarn.ipc.YarnRPC) GetClusterNodeLabelsResponse(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse) ApplicationClientProtocol(org.apache.hadoop.yarn.api.ApplicationClientProtocol) NodeLabel(org.apache.hadoop.yarn.api.records.NodeLabel) NodeId(org.apache.hadoop.yarn.api.records.NodeId) GetNodesToLabelsResponse(org.apache.hadoop.yarn.api.protocolrecords.GetNodesToLabelsResponse) RMNodeLabelsManager(org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager) Test(org.junit.Test)
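
The test exercises both NodeLabel factory overloads: the two-argument form sets exclusivity explicitly, while the one-argument form creates an exclusive label by default. A small stand-alone sketch of that difference (the class name is illustrative):

import org.apache.hadoop.yarn.api.records.NodeLabel;

// Illustrative demo of the two newInstance overloads used in the test above.
public final class NodeLabelFactoryDemo {
    public static void main(String[] args) {
        NodeLabel shared = NodeLabel.newInstance("x", false); // non-exclusive (shared) partition
        NodeLabel exclusive = NodeLabel.newInstance("y");     // one-arg overload: exclusive by default
        System.out.println(shared.getName() + " exclusive=" + shared.isExclusive());
        System.out.println(exclusive.getName() + " exclusive=" + exclusive.isExclusive());
    }
}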

Aggregations

NodeLabel (org.apache.hadoop.yarn.api.records.NodeLabel): 25 usages
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 9 usages
HashSet (java.util.HashSet): 7 usages
Set (java.util.Set): 7 usages
ArrayList (java.util.ArrayList): 6 usages
NodeLabelProto (org.apache.hadoop.yarn.proto.YarnProtos.NodeLabelProto): 6 usages
EnumSet (java.util.EnumSet): 5 usages
Test (org.junit.Test): 5 usages
HashMap (java.util.HashMap): 4 usages
GET (javax.ws.rs.GET): 4 usages
Path (javax.ws.rs.Path): 4 usages
Produces (javax.ws.rs.Produces): 4 usages
ImmutableSet (com.google.common.collect.ImmutableSet): 3 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 3 usages
NodeLabelsInfo (org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeLabelsInfo): 3 usages
InetSocketAddress (java.net.InetSocketAddress): 2 usages
Configuration (org.apache.hadoop.conf.Configuration): 2 usages
ApplicationClientProtocol (org.apache.hadoop.yarn.api.ApplicationClientProtocol): 2 usages
GetClusterNodeLabelsResponse (org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodeLabelsResponse): 2 usages
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 2 usages