Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits in project hadoop by apache.
The class ParentQueue, method updateClusterResource.
@Override
public void updateClusterResource(Resource clusterResource,
    ResourceLimits resourceLimits) {
  try {
    writeLock.lock();
    // Update all children
    for (CSQueue childQueue : childQueues) {
      // Get ResourceLimits of child queue before assign containers
      ResourceLimits childLimits = getResourceLimitsOfChild(childQueue,
          clusterResource, resourceLimits.getLimit(),
          RMNodeLabelsManager.NO_LABEL);
      childQueue.updateClusterResource(clusterResource, childLimits);
    }
    CSQueueUtils.updateQueueStatistics(resourceCalculator, clusterResource,
        minimumAllocation, this, labelManager, null);
  } finally {
    writeLock.unlock();
  }
}
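For context, ResourceLimits is essentially a small mutable holder for the enforceable limit a parent hands down to its children; the getLimit()/setLimit() pair is all the code above relies on. A minimal sketch of its use in isolation (the 8 GB/8-vcore and 4 GB/4-vcore values are made up for illustration):

ResourceLimits limits = new ResourceLimits(Resource.newInstance(8 * 1024, 8));
Resource current = limits.getLimit();                // 8 GB, 8 vcores
limits.setLimit(Resource.newInstance(4 * 1024, 4));  // parent tightened the limit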
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits in project hadoop by apache.
The class ParentQueue, method getResourceLimitsOfChild.
private ResourceLimits getResourceLimitsOfChild(CSQueue child,
    Resource clusterResource, Resource parentLimits, String nodePartition) {
  // Set resource-limit of a given child, child.limit =
  // min(my.limit - my.used + child.used, child.max)

  // Parent available resource = parent-limit - parent-used-resource
  Resource parentMaxAvailableResource = Resources.subtract(parentLimits,
      queueUsage.getUsed(nodePartition));
  // Deduct killable from used
  Resources.addTo(parentMaxAvailableResource,
      getTotalKillableResource(nodePartition));

  // Child's limit = parent-available-resource + child-used
  Resource childLimit = Resources.add(parentMaxAvailableResource,
      child.getQueueResourceUsage().getUsed(nodePartition));

  // Get child's max resource
  Resource childConfiguredMaxResource = Resources.multiplyAndNormalizeDown(
      resourceCalculator,
      labelManager.getResourceByLabel(nodePartition, clusterResource),
      child.getQueueCapacities().getAbsoluteMaximumCapacity(nodePartition),
      minimumAllocation);

  // Child's limit should be capped by child configured max resource
  childLimit = Resources.min(resourceCalculator, clusterResource, childLimit,
      childConfiguredMaxResource);

  // Normalize before return
  childLimit = Resources.roundDown(resourceCalculator, childLimit,
      minimumAllocation);
  return new ResourceLimits(childLimit);
}
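A worked example of the formula above, with hypothetical numbers (memory only for brevity, and a 1 GB minimumAllocation), to show how a child's limit is derived:

// parentLimits (handed down from the grandparent)   = 80 GB
// parent used, queueUsage.getUsed(partition)        = 50 GB
// killable on the partition                         = 10 GB
// child used                                        = 20 GB
// child configured max (absolute)                   = 60 GB
//
// parentMaxAvailableResource = 80 - 50 + 10  = 40 GB
// childLimit                 = 40 + 20       = 60 GB
// capped by child max        = min(60, 60)   = 60 GB
// rounded down to a 1 GB multiple            = 60 GB
ResourceLimits childLimits =
    new ResourceLimits(Resource.newInstance(60 * 1024, 60)); // vcores arbitrary here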
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits in project hadoop by apache.
The class CapacityScheduler, method addNode.
private void addNode(RMNode nodeManager) {
  try {
    writeLock.lock();
    FiCaSchedulerNode schedulerNode = new FiCaSchedulerNode(nodeManager,
        usePortForNodeName, nodeManager.getNodeLabels());
    nodeTracker.addNode(schedulerNode);

    // update this node to node label manager
    if (labelManager != null) {
      labelManager.activateNode(nodeManager.getNodeID(),
          schedulerNode.getTotalResource());
    }

    Resource clusterResource = getClusterResource();
    getRootQueue().updateClusterResource(clusterResource,
        new ResourceLimits(clusterResource));

    LOG.info("Added node " + nodeManager.getNodeAddress()
        + " clusterResource: " + clusterResource);

    if (scheduleAsynchronously && getNumClusterNodes() == 1) {
      for (AsyncScheduleThread t : asyncSchedulerThreads) {
        t.beginSchedule();
      }
    }
  } finally {
    writeLock.unlock();
  }
}
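The interesting line for ResourceLimits here is the updateClusterResource call: whenever a node joins and the cluster resource grows, the root queue's limit is simply re-seeded with the entire cluster resource and pushed down the hierarchy, where ParentQueue.getResourceLimitsOfChild tightens it per child. A standalone sketch of that seeding (the cluster size is hypothetical):

// e.g. 100 nodes of 128 GB / 32 vcores each -- illustrative numbers only
Resource clusterResource = Resource.newInstance(100 * 128 * 1024, 100 * 32);
getRootQueue().updateClusterResource(clusterResource,
    new ResourceLimits(clusterResource));   // root's limit == whole cluster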
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits in project hadoop by apache.
The class CapacityScheduler, method allocateContainerOnSingleNode.
/*
 * Logic for allocating a container on a single node (old behavior).
 */
private CSAssignment allocateContainerOnSingleNode(
    PlacementSet<FiCaSchedulerNode> ps, FiCaSchedulerNode node,
    boolean withNodeHeartbeat) {
  // Backward-compatible check so that the previous behavior, where
  // allocation is driven by node heartbeat, keeps working.
  if (getNode(node.getNodeID()) != node) {
    LOG.error("Trying to schedule on a removed node, please double check.");
    return null;
  }
  CSAssignment assignment;
  // Assign new containers...
  // 1. Check for reserved applications
  // 2. Schedule if there are no reservations
  RMContainer reservedContainer = node.getReservedContainer();
  if (reservedContainer != null) {
    FiCaSchedulerApp reservedApplication = getCurrentAttemptForContainer(
        reservedContainer.getContainerId());
    // Try to fulfill the reservation
    LOG.info("Trying to fulfill reservation for application "
        + reservedApplication.getApplicationId() + " on node: "
        + node.getNodeID());
    LeafQueue queue = ((LeafQueue) reservedApplication.getQueue());
    assignment = queue.assignContainers(getClusterResource(), ps,
        // This limit only covers non-labeled resources; labeled
        // resources should be considered as well.
        new ResourceLimits(labelManager.getResourceByLabel(
            RMNodeLabelsManager.NO_LABEL, getClusterResource())),
        SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    if (assignment.isFulfilledReservation()) {
      if (withNodeHeartbeat) {
        // Only update SchedulerHealth in sync scheduling; the existing
        // SchedulerHealth data structures still need to be updated for
        // async mode.
        updateSchedulerHealth(lastNodeUpdateTime, node.getNodeID(),
            assignment);
      }
      schedulerHealth.updateSchedulerFulfilledReservationCounts(1);
      ActivitiesLogger.QUEUE.recordQueueActivity(activitiesManager, node,
          queue.getParent().getQueueName(), queue.getQueueName(),
          ActivityState.ACCEPTED, ActivityDiagnosticConstant.EMPTY);
      ActivitiesLogger.NODE.finishAllocatedNodeAllocation(activitiesManager,
          node, reservedContainer.getContainerId(),
          AllocationState.ALLOCATED_FROM_RESERVED);
    } else {
      ActivitiesLogger.QUEUE.recordQueueActivity(activitiesManager, node,
          queue.getParent().getQueueName(), queue.getQueueName(),
          ActivityState.ACCEPTED, ActivityDiagnosticConstant.EMPTY);
      ActivitiesLogger.NODE.finishAllocatedNodeAllocation(activitiesManager,
          node, reservedContainer.getContainerId(), AllocationState.SKIPPED);
    }
    assignment.setSchedulingMode(SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
    submitResourceCommitRequest(getClusterResource(), assignment);
  }
  // Do not schedule if there are any reservations to fulfill on the node
  if (node.getReservedContainer() != null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Skipping scheduling since node " + node.getNodeID()
          + " is reserved by application " + node.getReservedContainer()
          .getContainerId().getApplicationAttemptId());
    }
    return null;
  }

  // Check whether the node has any available or killable resource
  if (calculator.computeAvailableContainers(
      Resources.add(node.getUnallocatedResource(),
          node.getTotalKillableResources()), minimumAllocation) <= 0) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("This node or this node partition doesn't have available or "
          + "killable resource");
    }
    return null;
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug("Trying to schedule on node: " + node.getNodeName()
        + ", available: " + node.getUnallocatedResource());
  }
  return allocateOrReserveNewContainers(ps, withNodeHeartbeat);
}
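Note the limit passed to assignContainers when fulfilling a reservation: it is simply the entire NO_LABEL partition of the cluster, wrapped in a fresh ResourceLimits; queue maximum capacity and user limits are applied inside the LeafQueue. The equivalent construction in isolation (field names taken from the snippet above):

Resource partitionResource = labelManager.getResourceByLabel(
    RMNodeLabelsManager.NO_LABEL, getClusterResource());
ResourceLimits reservationLimits = new ResourceLimits(partitionResource);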
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits in project hadoop by apache.
The class LeafQueue, method updateCurrentResourceLimits.
private void updateCurrentResourceLimits(ResourceLimits currentResourceLimits,
    Resource clusterResource) {
  // TODO: need to consider non-empty node labels once resource limits
  // support node labels.
  // Even though ParentQueue sets limits respecting the child's max queue
  // capacity, CapacityScheduler does not do this when allocating a reserved
  // container, so the limits must be capped by the queue's max capacity here.
  this.cachedResourceLimitsForHeadroom =
      new ResourceLimits(currentResourceLimits.getLimit());
  Resource queueMaxResource = Resources.multiplyAndNormalizeDown(
      resourceCalculator,
      labelManager.getResourceByLabel(RMNodeLabelsManager.NO_LABEL,
          clusterResource),
      queueCapacities.getAbsoluteMaximumCapacity(RMNodeLabelsManager.NO_LABEL),
      minimumAllocation);
  this.cachedResourceLimitsForHeadroom.setLimit(Resources.min(
      resourceCalculator, clusterResource, queueMaxResource,
      currentResourceLimits.getLimit()));
}
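A hedged sketch of the cap at the end, with hypothetical 40 GB and 32 GB figures: Resources.min returns whichever whole Resource the configured ResourceCalculator considers smaller (compared by memory for DefaultResourceCalculator), so the cached headroom limit can never exceed the queue's absolute maximum capacity for the default partition.

Resource fromParent = Resource.newInstance(40 * 1024, 40);  // limit from parent
Resource queueMax   = Resource.newInstance(32 * 1024, 48);  // abs max capacity
Resource capped = Resources.min(resourceCalculator, clusterResource,
    fromParent, queueMax);   // the 32 GB / 48 vcore vector with DefaultResourceCalculator
cachedResourceLimitsForHeadroom.setLimit(capped);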