Example use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UsersManager.User in the Apache Hadoop project: the LeafQueue method getTotalPendingResourcesConsideringUserLimit.
/**
 * Get total pending resource considering user limit for the leaf queue. This
 * will be used for calculating pending resources in the preemption monitor.
 *
 * Consider the headroom for each user in the queue.
 * Total pending for the queue =
 * sum(for each user(min((user's headroom), sum(user's pending requests))))
 * NOTE: each user's headroom is consumed as pending is attributed to it, so
 * later applications of the same user see the reduced remaining headroom.
 * @param clusterResources clusterResource
 * @param partition node partition
 * @param deductReservedFromPending When a container is reserved in CS,
 *                                  pending resource will not be deducted.
 *                                  This could lead to double accounting when
 *                                  doing preemption:
 *                                  In normal cases, we should deduct reserved
 *                                  resource from pending to avoid
 *                                  excessive preemption.
 * @return Total pending resource considering user limit
 */
public Resource getTotalPendingResourcesConsideringUserLimit(Resource clusterResources, String partition, boolean deductReservedFromPending) {
    // Acquire the lock OUTSIDE the try: if lock() were to fail inside the
    // try, the finally would unlock() a lock we never held and throw
    // IllegalMonitorStateException, masking the original error.
    readLock.lock();
    try {
        // Remaining headroom per user, computed lazily on first app seen.
        Map<String, Resource> userNameToHeadroom = new HashMap<>();
        Resource totalPendingConsideringUserLimit = Resource.newInstance(0, 0);
        for (FiCaSchedulerApp app : getApplications()) {
            String userName = app.getUser();
            if (!userNameToHeadroom.containsKey(userName)) {
                User user = getUser(userName);
                // headroom = user limit - resources already used by the user.
                Resource headroom = Resources.subtract(getResourceLimitForActiveUsers(userName, clusterResources, partition, SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), user.getUsed(partition));
                // Make sure headroom is not negative.
                headroom = Resources.componentwiseMax(headroom, Resources.none());
                userNameToHeadroom.put(userName, headroom);
            }
            // Check if we need to deduct reserved from pending
            Resource pending = app.getAppAttemptResourceUsage().getPending(partition);
            if (deductReservedFromPending) {
                pending = Resources.subtract(pending, app.getAppAttemptResourceUsage().getReserved(partition));
            }
            // Reserved may exceed pending; clamp each component at zero.
            pending = Resources.componentwiseMax(pending, Resources.none());
            // Count only the pending that fits in the user's remaining
            // headroom, then consume that much of the headroom.
            Resource minpendingConsideringUserLimit = Resources.componentwiseMin(userNameToHeadroom.get(userName), pending);
            Resources.addTo(totalPendingConsideringUserLimit, minpendingConsideringUserLimit);
            Resources.subtractFrom(userNameToHeadroom.get(userName), minpendingConsideringUserLimit);
        }
        return totalPendingConsideringUserLimit;
    } finally {
        readLock.unlock();
    }
}
Example use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UsersManager.User in the Apache Hadoop project: the LeafQueue method computeUserLimitAndSetHeadroom.
// Holding the application's lock is not strictly required here.
@Lock({ LeafQueue.class })
Resource computeUserLimitAndSetHeadroom(FiCaSchedulerApp application, Resource clusterResource, String nodePartition, SchedulingMode schedulingMode) {
    String user = application.getUser();
    User userEntry = getUser(user);
    // Compute the user limit with respect to the requested labels.
    // TODO: headroom should also take labels into account.
    Resource userLimit = getResourceLimitForActiveUsers(user, clusterResource, nodePartition, schedulingMode);
    // Refresh the cached queue-level limits before deriving headroom from them.
    setQueueResourceLimitsInfo(clusterResource);
    Resource headroom = getHeadroom(userEntry, cachedResourceLimitsForHeadroom.getLimit(), clusterResource, userLimit, nodePartition);
    if (LOG.isDebugEnabled()) {
        LOG.debug("Headroom calculation for user " + user + ": " + " userLimit=" + userLimit + " queueMaxAvailRes=" + cachedResourceLimitsForHeadroom.getLimit() + " consumed=" + userEntry.getUsed() + " headroom=" + headroom + " partition=" + nodePartition);
    }
    // Attach a provider so the application's headroom stays current, and
    // expose the computed headroom through queue metrics.
    application.setHeadroomProvider(new CapacityHeadroomProvider(userEntry, this, application, queueResourceLimitsInfo));
    metrics.setAvailableResourcesToUser(user, headroom);
    return userLimit;
}
Aggregations