Usage example of org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue from the Apache Hadoop project:
the SLSCapacityScheduler class, method initQueueMetrics.
/**
 * Recursively initializes metrics for the queue hierarchy rooted at
 * {@code queue}. For each leaf queue this registers the four per-queue
 * counters (pending/allocated memory and cores) and starts tracking the
 * queue in the scheduler metrics; for a parent queue it recurses into
 * every child.
 *
 * @param queue root of the queue (sub)hierarchy to initialize
 */
private void initQueueMetrics(CSQueue queue) {
if (queue instanceof LeafQueue) {
String queueName = queue.getQueueName();
String[] names = new String[] {
    QUEUE_COUNTER_PREFIX + queueName + ".pending.memory",
    QUEUE_COUNTER_PREFIX + queueName + ".pending.cores",
    QUEUE_COUNTER_PREFIX + queueName + ".allocated.memory",
    QUEUE_COUNTER_PREFIX + queueName + ".allocated.cores" };
// MetricRegistry#counter(name) is get-or-create, so there is no need to
// snapshot getCounters() and probe containsKey() before each call (the
// original re-fetched the whole counter map after every registration,
// which builds a fresh sorted copy each time).
for (String name : names) {
metrics.counter(name);
}
// queueLock guards the check-then-track pair so a queue is only
// registered with the scheduler metrics once.
queueLock.lock();
try {
if (!schedulerMetrics.isTracked(queueName)) {
schedulerMetrics.trackQueue(queueName);
}
} finally {
queueLock.unlock();
}
return;
}
// Non-leaf queue: initialize metrics for each child recursively.
for (CSQueue child : queue.getChildQueues()) {
initQueueMetrics(child);
}
}
Usage example of org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue from the Apache Hadoop project:
the IntraQueueCandidatesSelector class, method selectCandidates.
/**
 * Selects containers for intra-queue preemption. Computes per-queue
 * preemption demand, deducts resources already claimed by
 * higher-priority selectors, then walks every partition's under-served
 * queues (most under-served first) and preempts from the least-starved
 * applications of each eligible leaf queue.
 *
 * @param selectedCandidates containers already chosen by earlier
 *        selectors; this map is updated in place and returned
 * @param clusterResource total resources of the cluster
 * @param totalPreemptedResourceAllowed upper bound on resources that
 *        may be preempted in this round
 * @return the (mutated) {@code selectedCandidates} map
 */
@Override
public Map<ApplicationAttemptId, Set<RMContainer>> selectCandidates(Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates, Resource clusterResource, Resource totalPreemptedResourceAllowed) {
// 1. Calculate the abnormality within each queue one by one.
computeIntraQueuePreemptionDemand(clusterResource, totalPreemptedResourceAllowed, selectedCandidates);
// 2. Previous selectors (with higher priority) could have already
// selected containers. We need to deduct pre-emptable resources
// based on already selected candidates.
CapacitySchedulerPreemptionUtils.deductPreemptableResourcesBasedSelectedCandidates(preemptionContext, selectedCandidates);
// 3. Loop through all partitions to select containers for preemption.
for (String partition : preemptionContext.getAllPartitions()) {
LinkedHashSet<String> queueNames = preemptionContext.getUnderServedQueuesPerPartition(partition);
// Error check to handle non-mapped labels to queue.
if (null == queueNames) {
continue;
}
// 4. Iterate from most under-served queue in order.
for (String queueName : queueNames) {
LeafQueue leafQueue = preemptionContext.getQueueByPartition(queueName, RMNodeLabelsManager.NO_LABEL).leafQueue;
// skip if not a leafqueue
if (null == leafQueue) {
continue;
}
// Don't preempt if disabled for this queue.
if (leafQueue.getPreemptionDisabled()) {
continue;
}
// 5. Calculate the resource to obtain per partition
Map<String, Resource> resToObtainByPartition = fifoPreemptionComputePlugin.getResourceDemandFromAppsPerQueue(queueName, partition);
// 6. Based on the demand per partition, select preemption candidates
// among the queue's apps, least-starved first.
// Acquire the lock BEFORE entering the try block: if lock() were
// inside the try and failed, the finally clause would attempt to
// unlock a lock that was never held (IllegalMonitorStateException).
leafQueue.getReadLock().lock();
try {
Iterator<FiCaSchedulerApp> desc = leafQueue.getOrderingPolicy().getPreemptionIterator();
while (desc.hasNext()) {
FiCaSchedulerApp app = desc.next();
preemptFromLeastStarvedApp(selectedCandidates, clusterResource, totalPreemptedResourceAllowed, resToObtainByPartition, leafQueue, app);
}
} finally {
leafQueue.getReadLock().unlock();
}
}
}
return selectedCandidates;
}
Aggregations