Example 91 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

the class CommonNodeLabelsManager method checkAddLabelsToNode.

protected void checkAddLabelsToNode(Map<NodeId, Set<String>> addedLabelsToNode) throws IOException {
    if (null == addedLabelsToNode || addedLabelsToNode.isEmpty()) {
        return;
    }
    // check that all labels being added already exist
    Set<String> knownLabels = labelCollections.keySet();
    for (Entry<NodeId, Set<String>> entry : addedLabelsToNode.entrySet()) {
        NodeId nodeId = entry.getKey();
        Set<String> labels = entry.getValue();
        if (!knownLabels.containsAll(labels)) {
            String msg = "Not all labels being added contained by known " + "label collections, please check" + ", added labels=[" + StringUtils.join(labels, ",") + "]";
            LOG.error(msg);
            throw new IOException(msg);
        }
        // merge with any labels already on the same host
        if (!labels.isEmpty()) {
            Set<String> newLabels = new HashSet<String>(getLabelsByNode(nodeId));
            newLabels.addAll(labels);
            // we don't allow a node to end up with more than one label after the add
            if (newLabels.size() > 1) {
                String msg = String.format("%d labels specified on host=%s after add labels to node" + ", please note that we do not support specifying multiple" + " labels on a single host for now.", newLabels.size(), nodeId.getHost());
                LOG.error(msg);
                throw new IOException(msg);
            }
        }
    }
}
Also used : HashSet(java.util.HashSet) EnumSet(java.util.EnumSet) ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) NodeId(org.apache.hadoop.yarn.api.records.NodeId) IOException(java.io.IOException)
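
The single-label-per-host rule above is easy to exercise on its own. The sketch below reimplements the same two checks outside the manager, so it is illustrative rather than Hadoop's method: KNOWN_LABELS and CURRENT are hypothetical stand-ins for the manager's labelCollections and node-to-labels state (runnable with the hadoop-yarn-api and hadoop-yarn-common jars on the classpath).

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.yarn.api.records.NodeId;

public class AddLabelsCheckSketch {

    // Hypothetical stand-ins for the manager's state, not part of Hadoop
    private static final Set<String> KNOWN_LABELS = new HashSet<>(Arrays.asList("gpu", "ssd"));
    private static final Map<NodeId, Set<String>> CURRENT = new HashMap<>();

    static void check(Map<NodeId, Set<String>> added) throws IOException {
        if (added == null || added.isEmpty()) {
            return;
        }
        for (Map.Entry<NodeId, Set<String>> e : added.entrySet()) {
            // every label being added must already be known to the cluster
            if (!KNOWN_LABELS.containsAll(e.getValue())) {
                throw new IOException("unknown label in " + e.getValue());
            }
            // merging with the node's existing labels must leave at most one label
            Set<String> merged = new HashSet<>(
                    CURRENT.getOrDefault(e.getKey(), Collections.<String>emptySet()));
            merged.addAll(e.getValue());
            if (merged.size() > 1) {
                throw new IOException(merged.size() + " labels on host " + e.getKey().getHost());
            }
        }
    }

    public static void main(String[] args) throws IOException {
        NodeId node = NodeId.newInstance("host1", 8041);
        // passes: one known label on the host
        check(Collections.singletonMap(node, Collections.singleton("gpu")));
        try {
            // fails: two labels on the same host
            check(Collections.singletonMap(node, new HashSet<>(Arrays.asList("gpu", "ssd"))));
        } catch (IOException expected) {
            System.out.println("rejected as expected: " + expected.getMessage());
        }
    }
}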

Example 92 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

the class CommonNodeLabelsManager method normalizeNodeIdToLabels.

protected Map<NodeId, Set<String>> normalizeNodeIdToLabels(Map<NodeId, Set<String>> nodeIdToLabels) {
    Map<NodeId, Set<String>> newMap = new TreeMap<NodeId, Set<String>>();
    for (Entry<NodeId, Set<String>> entry : nodeIdToLabels.entrySet()) {
        NodeId id = entry.getKey();
        Set<String> labels = entry.getValue();
        newMap.put(id, normalizeLabels(labels));
    }
    return newMap;
}
Also used : HashSet(java.util.HashSet) EnumSet(java.util.EnumSet) ImmutableSet(com.google.common.collect.ImmutableSet) Set(java.util.Set) NodeId(org.apache.hadoop.yarn.api.records.NodeId) TreeMap(java.util.TreeMap)
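
Since NodeId is Comparable (ordered by host, then port), the TreeMap above returns the normalized mapping in deterministic order. Below is a minimal sketch of the same pass, under the assumption that label normalization amounts to trimming whitespace; the real normalizeLabels in CommonNodeLabelsManager may apply further rules.

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

import org.apache.hadoop.yarn.api.records.NodeId;

public class NormalizeSketch {

    // hypothetical normalizer: assumes normalization just trims each label
    static Set<String> normalizeLabels(Set<String> labels) {
        Set<String> out = new HashSet<>();
        for (String label : labels) {
            out.add(label.trim());
        }
        return out;
    }

    public static void main(String[] args) {
        Map<NodeId, Set<String>> in = new HashMap<>();
        in.put(NodeId.newInstance("b-host", 8041), new HashSet<>(Arrays.asList(" gpu ")));
        in.put(NodeId.newInstance("a-host", 8041), new HashSet<>(Arrays.asList("ssd")));

        // TreeMap sorts entries by NodeId's natural host:port ordering
        Map<NodeId, Set<String>> out = new TreeMap<>();
        for (Map.Entry<NodeId, Set<String>> e : in.entrySet()) {
            out.put(e.getKey(), normalizeLabels(e.getValue()));
        }
        System.out.println(out); // a-host:8041 first, labels trimmed
    }
}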

Example 93 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

the class FileSystemNodeLabelsStore method loadFromMirror.

protected void loadFromMirror(Path newMirrorPath, Path oldMirrorPath) throws IOException {
    // If mirror.new exists, read from mirror.new; otherwise fall back to the old mirror
    FSDataInputStream is = null;
    try {
        is = fs.open(newMirrorPath);
    } catch (FileNotFoundException e) {
        try {
            is = fs.open(oldMirrorPath);
        } catch (FileNotFoundException ignored) {
        }
    }
    if (null != is) {
        List<NodeLabel> labels = new AddToClusterNodeLabelsRequestPBImpl(AddToClusterNodeLabelsRequestProto.parseDelimitedFrom(is)).getNodeLabels();
        // note: "addToCluserNodeLabels" is the method's actual (misspelled) name in the Hadoop API
        mgr.addToCluserNodeLabels(labels);
        if (mgr.isCentralizedConfiguration()) {
            // Only load node to labels mapping while using centralized configuration
            Map<NodeId, Set<String>> nodeToLabels = new ReplaceLabelsOnNodeRequestPBImpl(ReplaceLabelsOnNodeRequestProto.parseDelimitedFrom(is)).getNodeToLabels();
            mgr.replaceLabelsOnNode(nodeToLabels);
        }
        is.close();
    }
}
Also used : ReplaceLabelsOnNodeRequestPBImpl(org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.ReplaceLabelsOnNodeRequestPBImpl) NodeLabel(org.apache.hadoop.yarn.api.records.NodeLabel) Set(java.util.Set) AddToClusterNodeLabelsRequestPBImpl(org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.AddToClusterNodeLabelsRequestPBImpl) FileNotFoundException(java.io.FileNotFoundException) NodeId(org.apache.hadoop.yarn.api.records.NodeId) FSDataInputStream(org.apache.hadoop.fs.FSDataInputStream)
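
The recovery order here (prefer mirror.new, fall back to the old mirror, tolerate neither existing on a fresh cluster) can be sketched independently of the protobuf payload. The helper name and file names below are illustrative, not Hadoop API.

import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MirrorFallbackSketch {

    // Open newPath if present, otherwise oldPath, otherwise return null
    static FSDataInputStream openNewestMirror(FileSystem fs, Path newPath, Path oldPath)
            throws IOException {
        try {
            return fs.open(newPath);
        } catch (FileNotFoundException e) {
            try {
                return fs.open(oldPath);
            } catch (FileNotFoundException alsoMissing) {
                return null; // fresh cluster: no mirror has been written yet
            }
        }
    }

    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        Path dir = new Path("/tmp/node-labels"); // hypothetical working directory
        FSDataInputStream is = openNewestMirror(fs,
                new Path(dir, "nodelabel.mirror.new"), new Path(dir, "nodelabel.mirror"));
        System.out.println(is == null ? "no mirror found" : "mirror opened");
        if (is != null) {
            is.close();
        }
    }
}

Note that the two delimited protobuf messages are read back with parseDelimitedFrom in the same order that writeNewMirror (Example 94) writes them, which is why the node-to-labels mapping can be skipped consistently on both sides when configuration is not centralized.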

Example 94 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

the class NonAppendableFSNodeLabelStore method writeNewMirror.

private void writeNewMirror() throws IOException {
    ReentrantReadWriteLock.ReadLock readLock = mgr.readLock;
    try {
        // Acquire readlock to make sure we get cluster node labels and
        // node-to-labels mapping atomically.
        readLock.lock();
        List<NodeLabel> nodeLabels = mgr.getClusterNodeLabels();
        Map<NodeId, Set<String>> nodeToLabels = mgr.getNodeLabels();
        // Write mirror to mirror.new.tmp file
        Path newTmpPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".new.tmp");
        FSDataOutputStream os = fs.create(newTmpPath, true);
        ((AddToClusterNodeLabelsRequestPBImpl) AddToClusterNodeLabelsRequest.newInstance(nodeLabels)).getProto().writeDelimitedTo(os);
        if (mgr.isCentralizedConfiguration()) {
            // Only save node-to-labels mapping while using centralized configuration
            ((ReplaceLabelsOnNodeRequestPBImpl) ReplaceLabelsOnNodeRequest.newInstance(nodeToLabels)).getProto().writeDelimitedTo(os);
        }
        os.close();
        // Rename mirror.new.tmp to mirror.new (removing any existing mirror.new first)
        Path newPath = new Path(fsWorkingPath, MIRROR_FILENAME + ".new");
        fs.delete(newPath, false);
        fs.rename(newTmpPath, newPath);
        // Remove existing mirror and rename mirror.new to mirror
        Path mirrorPath = new Path(fsWorkingPath, MIRROR_FILENAME);
        fs.delete(mirrorPath, false);
        fs.rename(newPath, mirrorPath);
    } finally {
        readLock.unlock();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) NodeLabel(org.apache.hadoop.yarn.api.records.NodeLabel) Set(java.util.Set) NodeId(org.apache.hadoop.yarn.api.records.NodeId) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) ReentrantReadWriteLock(java.util.concurrent.locks.ReentrantReadWriteLock)
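
The delete-then-rename pairs exist because Hadoop's FileSystem.rename does not overwrite an existing destination. The tmp -> .new -> final promotion means a crash at any point leaves either the old mirror or a complete mirror.new, never a half-written file, and loadFromMirror (Example 93) prefers mirror.new on recovery. A generic sketch of the pattern, with writeViaTmp as a hypothetical helper:

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TwoStepRenameSketch {

    // Write data to file via file.new.tmp and file.new, so readers never
    // observe a partially written final file
    static void writeViaTmp(FileSystem fs, Path file, byte[] data) throws IOException {
        Path tmp = file.suffix(".new.tmp");
        Path promoted = file.suffix(".new");
        try (FSDataOutputStream os = fs.create(tmp, true)) {
            os.write(data);
        }
        fs.delete(promoted, false); // rename does not overwrite, so clear the way
        fs.rename(tmp, promoted);
        fs.delete(file, false);
        fs.rename(promoted, file);
    }

    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        writeViaTmp(fs, new Path("/tmp/node-labels/nodelabel.mirror"),
                "demo".getBytes(StandardCharsets.UTF_8));
    }
}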

Example 95 with NodeId

use of org.apache.hadoop.yarn.api.records.NodeId in project hadoop by apache.

the class MockResourceManagerFacade method allocate.

@SuppressWarnings("deprecation")
@Override
public AllocateResponse allocate(AllocateRequest request) throws YarnException, IOException {
    if (request.getAskList() != null && request.getAskList().size() > 0 && request.getReleaseList() != null && request.getReleaseList().size() > 0) {
        Assert.fail("The mock RM implementation does not support receiving " + "askList and releaseList in the same heartbeat");
    }
    String amrmToken = getAppIdentifier();
    ArrayList<Container> containerList = new ArrayList<Container>();
    if (request.getAskList() != null) {
        for (ResourceRequest rr : request.getAskList()) {
            for (int i = 0; i < rr.getNumContainers(); i++) {
                ContainerId containerId = ContainerId.newInstance(getApplicationAttemptId(1), containerIndex.incrementAndGet());
                Container container = Records.newRecord(Container.class);
                container.setId(containerId);
                container.setPriority(rr.getPriority());
                // We don't use the node for running containers in the test cases,
                // so it is OK to hard-code it to a dummy value
                NodeId nodeId = NodeId.newInstance(!Strings.isNullOrEmpty(rr.getResourceName()) ? rr.getResourceName() : "dummy", 1000);
                container.setNodeId(nodeId);
                container.setResource(rr.getCapability());
                containerList.add(container);
                synchronized (applicationContainerIdMap) {
                    // Keep track of the containers returned to this application;
                    // we will need them later
                    Assert.assertTrue("The application id is Not registered before allocate(): " + amrmToken, applicationContainerIdMap.containsKey(amrmToken));
                    List<ContainerId> ids = applicationContainerIdMap.get(amrmToken);
                    ids.add(containerId);
                    this.allocatedContainerMap.put(containerId, container);
                }
            }
        }
    }
    if (request.getReleaseList() != null && request.getReleaseList().size() > 0) {
        Log.getLog().info("Releasing containers: " + request.getReleaseList().size());
        synchronized (applicationContainerIdMap) {
            Assert.assertTrue("The application id is not registered before allocate(): " + amrmToken, applicationContainerIdMap.containsKey(amrmToken));
            List<ContainerId> ids = applicationContainerIdMap.get(amrmToken);
            for (ContainerId id : request.getReleaseList()) {
                boolean found = false;
                for (ContainerId c : ids) {
                    if (c.equals(id)) {
                        found = true;
                        break;
                    }
                }
                Assert.assertTrue("ContainerId " + id + " being released is not valid for application: " + conf.get("AMRMTOKEN"), found);
                ids.remove(id);
                // Return the released containers to the AM with new fake IDs. The
                // test cases do not care about the IDs; they are faked because the
                // LRM would otherwise throw a duplicate-identifier exception. Fake
                // containers are returned ONLY for testing purposes, so the test
                // code gets confirmation that the sub-cluster resource managers
                // received the release request
                ContainerId fakeContainerId = ContainerId.newInstance(getApplicationAttemptId(1), containerIndex.incrementAndGet());
                Container fakeContainer = allocatedContainerMap.get(id);
                fakeContainer.setId(fakeContainerId);
                containerList.add(fakeContainer);
            }
        }
    }
    Log.getLog().info("Allocating containers: " + containerList.size() + " for application attempt: " + conf.get("AMRMTOKEN"));
    // Always issue a new AMRMToken as if RM rolled master key
    Token newAMRMToken = Token.newInstance(new byte[0], "", new byte[0], "");
    return AllocateResponse.newInstance(0, new ArrayList<ContainerStatus>(), containerList, new ArrayList<NodeReport>(), null, AMCommand.AM_RESYNC, 1, null, new ArrayList<NMToken>(), newAMRMToken, new ArrayList<UpdatedContainer>());
}
Also used : NMToken(org.apache.hadoop.yarn.api.records.NMToken) ArrayList(java.util.ArrayList) Token(org.apache.hadoop.yarn.api.records.Token) UpdatedContainer(org.apache.hadoop.yarn.api.records.UpdatedContainer) Container(org.apache.hadoop.yarn.api.records.Container) ContainerStatus(org.apache.hadoop.yarn.api.records.ContainerStatus) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) NodeId(org.apache.hadoop.yarn.api.records.NodeId) ResourceRequest(org.apache.hadoop.yarn.api.records.ResourceRequest) NodeReport(org.apache.hadoop.yarn.api.records.NodeReport)
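
For context on the dummy NodeId above: NodeId.newInstance(host, port) is the standard factory, the record prints as host:port, and NodeId is Comparable by that pair. A short sketch; the round-trip at the end assumes a Hadoop release recent enough to provide NodeId.fromString.

import org.apache.hadoop.yarn.api.records.NodeId;

public class NodeIdSketch {

    public static void main(String[] args) {
        // same fallback as the mock RM: the ask's resource name if set, else "dummy"
        String resourceName = ""; // hypothetical empty resource name, forcing the fallback
        NodeId nodeId = NodeId.newInstance(
                resourceName == null || resourceName.isEmpty() ? "dummy" : resourceName, 1000);

        System.out.println(nodeId);           // dummy:1000
        System.out.println(nodeId.getHost()); // dummy
        System.out.println(nodeId.getPort()); // 1000

        // parse back from the host:port form
        NodeId parsed = NodeId.fromString("dummy:1000");
        System.out.println(nodeId.equals(parsed)); // true
    }
}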

Aggregations

NodeId (org.apache.hadoop.yarn.api.records.NodeId): 257 usages
Test (org.junit.Test): 137 usages
Resource (org.apache.hadoop.yarn.api.records.Resource): 89 usages
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 74 usages
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 59 usages
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 46 usages
Container (org.apache.hadoop.yarn.api.records.Container): 44 usages
ArrayList (java.util.ArrayList): 43 usages
Configuration (org.apache.hadoop.conf.Configuration): 40 usages
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 40 usages
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 40 usages
HashSet (java.util.HashSet): 39 usages
Set (java.util.Set): 36 usages
HashMap (java.util.HashMap): 35 usages
FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode): 34 usages
Priority (org.apache.hadoop.yarn.api.records.Priority): 32 usages
FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp): 31 usages
IOException (java.io.IOException): 29 usages
ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits): 29 usages
RMNode (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode): 28 usages