Usage example of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk in the Apache Hadoop project: class FifoScheduler, method getMaxAllocatableContainers.
/**
 * Computes an upper bound on how many containers can be allocated to the
 * application for the given scheduler key on this node, based on the pending
 * asks at the requested locality level. The off-switch (ANY) ask always caps
 * the result; rack-local and node-local asks tighten it when present.
 */
private int getMaxAllocatableContainers(FifoAppAttempt application, SchedulerRequestKey schedulerKey, FiCaSchedulerNode node, NodeType type) {
PendingAsk anyAsk = application.getPendingAsk(schedulerKey, ResourceRequest.ANY);
int limit = anyAsk.getCount();
if (type == NodeType.OFF_SWITCH) {
// Off-switch allocation is bounded only by the ANY ask.
return limit;
}
if (type == NodeType.RACK_LOCAL) {
PendingAsk rackAsk = application.getPendingAsk(schedulerKey, node.getRackName());
int rackCount = rackAsk.getCount();
// With no rack-local ask, fall back to the off-switch limit;
// otherwise take the tighter of the two bounds.
return rackCount <= 0 ? limit : Math.min(limit, rackCount);
}
if (type == NodeType.NODE_LOCAL) {
PendingAsk hostAsk = application.getPendingAsk(schedulerKey, node.getRMNode().getHostName());
int hostCount = hostAsk.getCount();
if (hostCount > 0) {
limit = Math.min(limit, hostCount);
}
}
return limit;
}
Usage example of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk in the Apache Hadoop project: class AppSchedulingInfo, method move.
/**
 * Moves this application's pending-resource accounting from its current
 * queue to {@code newQueue}: shifts pending counts between the two queues'
 * metrics and pending-resource trackers, transfers the app between the
 * queues' user managers, and finally repoints {@code this.queue}.
 *
 * @param newQueue the queue this application is moving to
 */
public void move(Queue newQueue) {
  // Acquire the lock BEFORE the try block: if lock() itself failed, the
  // finally clause must not attempt to unlock a lock we never acquired
  // (which would throw IllegalMonitorStateException and mask the failure).
  this.writeLock.lock();
  try {
    QueueMetrics oldMetrics = queue.getMetrics();
    QueueMetrics newMetrics = newQueue.getMetrics();
    for (SchedulingPlacementSet ps : schedulerKeyToPlacementSets.values()) {
      PendingAsk ask = ps.getPendingAsk(ResourceRequest.ANY);
      if (ask.getCount() > 0) {
        // Shift the pending-resource counters from the old queue's metrics
        // to the new queue's metrics.
        oldMetrics.decrPendingResources(user, ask.getCount(), ask.getPerAllocationResource());
        newMetrics.incrPendingResources(user, ask.getCount(), ask.getPerAllocationResource());
        Resource delta = Resources.multiply(ask.getPerAllocationResource(), ask.getCount());
        // Update the pending resources tracked directly on each Queue.
        queue.decPendingResource(ps.getPrimaryRequestedNodePartition(), delta);
        newQueue.incPendingResource(ps.getPrimaryRequestedNodePartition(), delta);
      }
    }
    oldMetrics.moveAppFrom(this);
    newMetrics.moveAppTo(this);
    // Re-register this application with the new queue's users manager.
    abstractUsersManager.deactivateApplication(user, applicationId);
    abstractUsersManager = newQueue.getAbstractUsersManager();
    abstractUsersManager.activateApplication(user, applicationId);
    this.queue = newQueue;
  } finally {
    this.writeLock.unlock();
  }
}
Usage example of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk in the Apache Hadoop project: class FSAppAttempt, method assignContainer.
/**
 * Attempts to assign a container on the offered node for this application,
 * trying scheduler keys in order and honoring locality delay. If the node
 * was offered because of an existing reservation, only that reservation's
 * scheduler key is tried.
 *
 * @param node the node being offered to this application
 * @param reserved whether the node carries a reservation for this app
 * @return the resources assigned, or {@code Resources.none()} if nothing
 *         could be assigned
 */
private Resource assignContainer(FSSchedulerNode node, boolean reserved) {
if (LOG.isTraceEnabled()) {
LOG.trace("Node offered to app: " + getName() + " reserved: " + reserved);
}
// For a reserved node, only the reserved key is eligible; otherwise try
// every outstanding scheduler key for this attempt.
Collection<SchedulerRequestKey> keysToTry = (reserved) ? Collections.singletonList(node.getReservedContainer().getReservedSchedulerKey()) : getSchedulerKeys();
// (not scheduled) in order to promote better locality.
// Acquire the lock BEFORE the try block so the finally clause never
// attempts to unlock a lock that was not successfully acquired.
writeLock.lock();
try {
// implementation.
for (SchedulerRequestKey schedulerKey : keysToTry) {
// we already check it in isValidReservation.
if (!reserved && !hasContainerForNode(schedulerKey, node)) {
continue;
}
addSchedulingOpportunity(schedulerKey);
PendingAsk rackLocalPendingAsk = getPendingAsk(schedulerKey, node.getRackName());
PendingAsk nodeLocalPendingAsk = getPendingAsk(schedulerKey, node.getNodeName());
if (nodeLocalPendingAsk.getCount() > 0 && !appSchedulingInfo.canDelayTo(schedulerKey, node.getNodeName())) {
LOG.warn("Relax locality off is not supported on local request: " + nodeLocalPendingAsk);
}
// Determine how far locality may be relaxed for this key, either by
// elapsed time (continuous scheduling) or by missed opportunities.
NodeType allowedLocality;
if (scheduler.isContinuousSchedulingEnabled()) {
allowedLocality = getAllowedLocalityLevelByTime(schedulerKey, scheduler.getNodeLocalityDelayMs(), scheduler.getRackLocalityDelayMs(), scheduler.getClock().getTime());
} else {
allowedLocality = getAllowedLocalityLevel(schedulerKey, scheduler.getNumClusterNodes(), scheduler.getNodeLocalityThreshold(), scheduler.getRackLocalityThreshold());
}
// Prefer a node-local assignment when both node- and rack-level asks exist.
if (rackLocalPendingAsk.getCount() > 0 && nodeLocalPendingAsk.getCount() > 0) {
if (LOG.isTraceEnabled()) {
LOG.trace("Assign container on " + node.getNodeName() + " node, assignType: NODE_LOCAL" + ", allowedLocality: " + allowedLocality + ", priority: " + schedulerKey.getPriority() + ", app attempt id: " + this.attemptId);
}
return assignContainer(node, nodeLocalPendingAsk, NodeType.NODE_LOCAL, reserved, schedulerKey);
}
if (!appSchedulingInfo.canDelayTo(schedulerKey, node.getRackName())) {
continue;
}
// Rack-local assignment, allowed only once locality has relaxed to
// RACK_LOCAL or OFF_SWITCH.
if (rackLocalPendingAsk.getCount() > 0 && (allowedLocality.equals(NodeType.RACK_LOCAL) || allowedLocality.equals(NodeType.OFF_SWITCH))) {
if (LOG.isTraceEnabled()) {
LOG.trace("Assign container on " + node.getNodeName() + " node, assignType: RACK_LOCAL" + ", allowedLocality: " + allowedLocality + ", priority: " + schedulerKey.getPriority() + ", app attempt id: " + this.attemptId);
}
return assignContainer(node, rackLocalPendingAsk, NodeType.RACK_LOCAL, reserved, schedulerKey);
}
PendingAsk offswitchAsk = getPendingAsk(schedulerKey, ResourceRequest.ANY);
if (!appSchedulingInfo.canDelayTo(schedulerKey, ResourceRequest.ANY)) {
continue;
}
// Off-switch assignment: taken when the request has no meaningful
// locality preference (<= 1 unique location) or locality has fully relaxed.
if (offswitchAsk.getCount() > 0) {
if (getSchedulingPlacementSet(schedulerKey).getUniqueLocationAsks() <= 1 || allowedLocality.equals(NodeType.OFF_SWITCH)) {
if (LOG.isTraceEnabled()) {
LOG.trace("Assign container on " + node.getNodeName() + " node, assignType: OFF_SWITCH" + ", allowedLocality: " + allowedLocality + ", priority: " + schedulerKey.getPriority() + ", app attempt id: " + this.attemptId);
}
return assignContainer(node, offswitchAsk, NodeType.OFF_SWITCH, reserved, schedulerKey);
}
}
if (LOG.isTraceEnabled()) {
LOG.trace("Can't assign container on " + node.getNodeName() + " node, allowedLocality: " + allowedLocality + ", priority: " + schedulerKey.getPriority() + ", app attempt id: " + this.attemptId);
}
}
} finally {
writeLock.unlock();
}
return Resources.none();
}
Usage example of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk in the Apache Hadoop project: class FSAppAttempt, method hasContainerForNode.
/**
 * Whether this app has container requests that could be satisfied on the
 * given node, if the node had full space.
 */
private boolean hasContainerForNode(SchedulerRequestKey key, FSSchedulerNode node) {
PendingAsk offswitchAsk = getPendingAsk(key, ResourceRequest.ANY);
Resource resource = offswitchAsk.getPerAllocationResource();
boolean hasRequestForOffswitch = offswitchAsk.getCount() > 0;
boolean hasRequestForRack = getOutstandingAsksCount(key, node.getRackName()) > 0;
boolean hasRequestForNode = getOutstandingAsksCount(key, node.getNodeName()) > 0;
// There must be an outstanding off-switch (ANY) request at this priority.
if (!hasRequestForOffswitch) {
return false;
}
// When locality cannot be relaxed at the ANY level, a non-zero request
// for the node's rack is required.
if (!appSchedulingInfo.canDelayTo(key, ResourceRequest.ANY) && !hasRequestForRack) {
return false;
}
// When a rack request exists but cannot be relaxed past the rack, a
// non-zero request at the node itself is required.
if (hasRequestForRack && !appSchedulingInfo.canDelayTo(key, node.getRackName()) && !hasRequestForNode) {
return false;
}
// The requested container must be able to fit on the node.
if (!Resources.lessThanOrEqual(RESOURCE_CALCULATOR, null, resource, node.getRMNode().getTotalCapability())) {
return false;
}
// The requested container must also fit within the queue's maximum share.
if (!getQueue().fitsInMaxShare(resource)) {
updateAMDiagnosticMsg(resource, " exceeds current queue or its parents maximum resource allowed).");
return false;
}
return true;
}
Usage example of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk in the Apache Hadoop project: class AppSchedulingInfo, method stop.
/**
 * Stops this application's scheduling info: decrements all remaining
 * pending-resource metrics on the queue, records the attempt finish in the
 * queue metrics, and clears all outstanding requests.
 */
public void stop() {
  // clear pending resources metrics for the application
  // Acquire the lock BEFORE the try block: if lock() itself failed, the
  // finally clause must not attempt to unlock a lock we never acquired.
  this.writeLock.lock();
  try {
    QueueMetrics metrics = queue.getMetrics();
    for (SchedulingPlacementSet ps : schedulerKeyToPlacementSets.values()) {
      PendingAsk ask = ps.getPendingAsk(ResourceRequest.ANY);
      if (ask.getCount() > 0) {
        metrics.decrPendingResources(user, ask.getCount(), ask.getPerAllocationResource());
        // Update the pending resources tracked directly on the Queue.
        queue.decPendingResource(ps.getPrimaryRequestedNodePartition(), Resources.multiply(ask.getPerAllocationResource(), ask.getCount()));
      }
    }
    metrics.finishAppAttempt(applicationId, pending, user);
    // Clear requests themselves
    clearRequests();
  } finally {
    this.writeLock.unlock();
  }
}
Aggregations