Example usage of org.apache.hadoop.yarn.api.records.NodeId in the Apache Hadoop project: the findNodeToUnreserve method of class FiCaSchedulerApp.
@VisibleForTesting
public RMContainer findNodeToUnreserve(Resource clusterResource, FiCaSchedulerNode node, SchedulerRequestKey schedulerKey, Resource minimumUnreservedResource) {
// Find a reserved container of this app that can be unreserved so the
// given node can host the application instead; returns null when no
// suitable reservation exists.
//
// Acquire the lock BEFORE the try block: if lock() were inside the try
// and failed, the finally clause would call unlock() on a lock we never
// acquired, throwing IllegalMonitorStateException and masking the
// original failure.
readLock.lock();
try {
// need to unreserve some other container first
NodeId idToUnreserve = getNodeIdToUnreserve(schedulerKey, minimumUnreservedResource, rc, clusterResource);
if (idToUnreserve == null) {
if (LOG.isDebugEnabled()) {
LOG.debug("checked to see if could unreserve for app but nothing " + "reserved that matches for this app");
}
return null;
}
FiCaSchedulerNode nodeToUnreserve = ((CapacityScheduler) scheduler).getNode(idToUnreserve);
if (nodeToUnreserve == null) {
// The reservation points at a node the scheduler no longer knows about.
LOG.error("node to unreserve doesn't exist, nodeid: " + idToUnreserve);
return null;
}
if (LOG.isDebugEnabled()) {
LOG.debug("unreserving for app: " + getApplicationId() + " on nodeId: " + idToUnreserve + " in order to replace reserved application and place it on node: " + node.getNodeID() + " needing: " + minimumUnreservedResource);
}
// headroom: give back the reserved resource to this app's headroom since
// the reservation is about to be released.
Resources.addTo(getHeadroom(), nodeToUnreserve.getReservedContainer().getReservedResource());
return nodeToUnreserve.getReservedContainer();
} finally {
readLock.unlock();
}
}
Example usage of org.apache.hadoop.yarn.api.records.NodeId in the Apache Hadoop project: the sortNodes method of class NodeQueueLoadMonitor.
/**
 * Returns the ids of all cluster nodes, ordered by the monitor's load
 * comparator, under the cluster-nodes read lock.
 */
private List<NodeId> sortNodes() {
ReentrantReadWriteLock.ReadLock readLock = clusterNodesLock.readLock();
readLock.lock();
try {
// Snapshot the node set so sorting happens on a private copy while we
// only hold the read lock. Using a typed list (instead of the previous
// raw ArrayList + raw Comparator cast + Object[] copy) keeps type
// safety; ArrayList.sort sorts its backing array in place, so there is
// no extra write-back iteration to avoid.
List<ClusterNode> nodes = new ArrayList<>(this.clusterNodes.values());
nodes.sort(comparator);
List<NodeId> retList = new ArrayList<>(nodes.size());
for (ClusterNode n : nodes) {
retList.add(n.nodeId);
}
return retList;
} finally {
readLock.unlock();
}
}
Example usage of org.apache.hadoop.yarn.api.records.NodeId in the Apache Hadoop project: the createContainer method of class FSAppAttempt.
/**
 * Builds a container record representing an allocation of {@code capability}
 * for this application attempt on {@code node}, carrying the priority and
 * allocation-request id taken from {@code schedulerKey}.
 *
 * @param node Node
 * @param capability Capability
 * @param schedulerKey Scheduler Key
 * @return Container
 */
private Container createContainer(FSSchedulerNode node, Resource capability, SchedulerRequestKey schedulerKey) {
// Mint a fresh container id under this attempt, then assemble the
// container pointing at the target node.
ContainerId containerId = BuilderUtils.newContainerId(getApplicationAttemptId(), getNewContainerId());
NodeId nodeId = node.getRMNode().getNodeID();
return BuilderUtils.newContainer(containerId, nodeId, node.getRMNode().getHttpAddress(), capability, schedulerKey.getPriority(), null, schedulerKey.getAllocationRequestId());
}
Example usage of org.apache.hadoop.yarn.api.records.NodeId in the Apache Hadoop project: the assignContainer method of class FifoScheduler.
/**
 * Assigns up to {@code assignableContainers} containers of size
 * {@code capability} from {@code node} to {@code application}, bounded by
 * the node's unallocated memory.
 *
 * @return the number of containers actually assigned (0 when the node has
 *         no room or the request is invalid)
 */
private int assignContainer(FiCaSchedulerNode node, FifoAppAttempt application, SchedulerRequestKey schedulerKey, int assignableContainers, Resource capability, NodeType type) {
// Guard the debug concatenation so the (large) message is only built when
// debug logging is actually enabled, matching the convention used
// elsewhere in the schedulers.
if (LOG.isDebugEnabled()) {
LOG.debug("assignContainers:" + " node=" + node.getRMNode().getNodeAddress() + " application=" + application.getApplicationId().getId() + " priority=" + schedulerKey.getPriority().getPriority() + " assignableContainers=" + assignableContainers + " capability=" + capability + " type=" + type);
}
// A zero (or negative) memory request would make the division below throw
// ArithmeticException and crash the scheduler — the defect the old TODO
// warned about. Treat such a request as unassignable instead.
long requestedMemory = capability.getMemorySize();
if (requestedMemory <= 0) {
return 0;
}
int availableContainers = (int) (node.getUnallocatedResource().getMemorySize() / requestedMemory);
int assignedContainers = Math.min(assignableContainers, availableContainers);
if (assignedContainers > 0) {
for (int i = 0; i < assignedContainers; ++i) {
NodeId nodeId = node.getRMNode().getNodeID();
ContainerId containerId = BuilderUtils.newContainerId(application.getApplicationAttemptId(), application.getNewContainerId());
// Create the container
Container container = BuilderUtils.newContainer(containerId, nodeId, node.getRMNode().getHttpAddress(), capability, schedulerKey.getPriority(), null, schedulerKey.getAllocationRequestId());
// Allocate!
// Inform the application
RMContainer rmContainer = application.allocate(type, node, schedulerKey, container);
// Inform the node
node.allocateContainer(rmContainer);
// Update usage for this container
increaseUsedResources(rmContainer);
}
}
return assignedContainers;
}
Example usage of org.apache.hadoop.yarn.api.records.NodeId in the Apache Hadoop project: the createAndGetNMToken method of class NMTokenSecretManagerInRM.
/**
 * Returns a fresh NMToken for the node hosting {@code container} if this
 * attempt has not yet been issued a token for that node; returns null when
 * the attempt is unknown or a token for the node was already issued.
 *
 * @param applicationSubmitter user the token is created for
 * @param appAttemptId attempt requesting the token
 * @param container container whose node the token targets
 */
public NMToken createAndGetNMToken(String applicationSubmitter, ApplicationAttemptId appAttemptId, Container container) {
// Acquire the lock BEFORE the try block: if lock() were inside the try
// and failed, the finally clause would call unlock() on a lock we never
// acquired, masking the original failure.
this.writeLock.lock();
try {
HashSet<NodeId> nodeSet = this.appAttemptToNodeKeyMap.get(appAttemptId);
NMToken nmToken = null;
// nodeSet == null means this attempt is not registered; issue nothing.
if (nodeSet != null) {
if (!nodeSet.contains(container.getNodeId())) {
LOG.info("Sending NMToken for nodeId : " + container.getNodeId() + " for container : " + container.getId());
Token token = createNMToken(container.getId().getApplicationAttemptId(), container.getNodeId(), applicationSubmitter);
nmToken = NMToken.newInstance(container.getNodeId(), token);
// Remember the node so subsequent containers on it skip token creation.
nodeSet.add(container.getNodeId());
}
}
return nmToken;
} finally {
this.writeLock.unlock();
}
}
Aggregations