Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.
The class FifoIntraQueuePreemptionPlugin, method calculateUsedAMResourcesPerQueue.
private Resource calculateUsedAMResourcesPerQueue(String partition,
    LeafQueue leafQueue, Map<String, Resource> perUserAMUsed) {
  Collection<FiCaSchedulerApp> runningApps = leafQueue.getApplications();
  Resource amUsed = Resources.createResource(0, 0);
  for (FiCaSchedulerApp app : runningApps) {
    Resource userAMResource = perUserAMUsed.get(app.getUser());
    if (null == userAMResource) {
      userAMResource = Resources.createResource(0, 0);
      perUserAMUsed.put(app.getUser(), userAMResource);
    }
    // Accumulate this app's AM resource both per user and for the whole queue.
    Resources.addTo(userAMResource, app.getAMResource(partition));
    Resources.addTo(amUsed, app.getAMResource(partition));
  }
  return amUsed;
}
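For context: Resources.addTo mutates its first argument in place, which is why the method can thread the same userAMResource object through the map while also accumulating the queue-wide total. A minimal standalone sketch of that accumulation pattern, using made-up AM resource values (not from the source):

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class ResourceAccumulationSketch {
  public static void main(String[] args) {
    // Empty accumulator, as in calculateUsedAMResourcesPerQueue.
    Resource amUsed = Resources.createResource(0, 0);

    // Hypothetical AM resources of two running apps: (memory MB, vcores).
    Resource am1 = Resources.createResource(1024, 1);
    Resource am2 = Resources.createResource(2048, 2);

    // addTo mutates its first argument in place.
    Resources.addTo(amUsed, am1);
    Resources.addTo(amUsed, am2);

    System.out.println(amUsed); // <memory:3072, vCores:3>
  }
}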
Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.
The class IntraQueueCandidatesSelector, method computeIntraQueuePreemptionDemand.
private void computeIntraQueuePreemptionDemand(Resource clusterResource,
    Resource totalPreemptedResourceAllowed,
    Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates) {
  // 1. Iterate through all partitions to calculate demand within each one.
  for (String partition : context.getAllPartitions()) {
    LinkedHashSet<String> queueNames =
        context.getUnderServedQueuesPerPartition(partition);
    if (null == queueNames) {
      continue;
    }
    // 2. It's better to fetch the partition-based resource limit before
    // starting the calculation.
    Resource partitionBasedResource = context.getPartitionResource(partition);
    // 3. Loop through all queues corresponding to the partition.
    for (String queueName : queueNames) {
      TempQueuePerPartition tq =
          context.getQueueByPartition(queueName, partition);
      LeafQueue leafQueue = tq.leafQueue;
      // Skip if it is a parent queue.
      if (null == leafQueue) {
        continue;
      }
      // 4. Consider reassignableResource as (used - actuallyToBePreempted).
      // This provides an upper limit for splitting the apps' quota in a queue.
      Resource queueReassignableResource =
          Resources.subtract(tq.getUsed(), tq.getActuallyToBePreempted());
      // 5. Skip the queue unless its used capacity is above a certain limit;
      // only then is it considered for intra-queue preemption.
      if (leafQueue.getQueueCapacities().getUsedCapacity(partition)
          < context.getMinimumThresholdForIntraQueuePreemption()) {
        continue;
      }
      // 6. Compute the ideal allocation of all apps based on the queue's
      // unallocated capacity.
      fifoPreemptionComputePlugin.computeAppsIdealAllocation(clusterResource,
          partitionBasedResource, tq, selectedCandidates,
          totalPreemptedResourceAllowed, queueReassignableResource,
          context.getMaxAllowableLimitForIntraQueuePreemption());
    }
  }
}
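The step-4 computation above is a component-wise subtraction via Resources.subtract. A short sketch with hypothetical used and to-be-preempted figures (the values are illustrative, not from the source):

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class ReassignableResourceSketch {
  public static void main(String[] args) {
    // Hypothetical queue state: 8 GB / 8 vcores used,
    // 2 GB / 2 vcores already marked for preemption.
    Resource used = Resources.createResource(8 * 1024, 8);
    Resource actuallyToBePreempted = Resources.createResource(2 * 1024, 2);

    // Upper limit available when splitting the apps' quota in the queue.
    Resource queueReassignableResource =
        Resources.subtract(used, actuallyToBePreempted);

    System.out.println(queueReassignableResource); // <memory:6144, vCores:6>
  }
}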
Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.
The class PreemptableResourceCalculator, method calculateResToObtainByPartitionForLeafQueues.
private void calculateResToObtainByPartitionForLeafQueues(
    Set<String> leafQueueNames, Resource clusterResource) {
  // Loop over all leaf queues.
  for (String queueName : leafQueueNames) {
    // Check whether preemption is disabled for the queue.
    if (context.getQueueByPartition(queueName,
        RMNodeLabelsManager.NO_LABEL).preemptionDisabled) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("skipping from queue=" + queueName
            + " because it's a non-preemptable queue");
      }
      continue;
    }
    // Compute resToObtainByPartition, considering inter-queue preemption.
    for (TempQueuePerPartition qT : context.getQueuePartitions(queueName)) {
      // maxIgnoredOverCapacity
      if (Resources.greaterThan(rc, clusterResource, qT.getUsed(),
          Resources.multiply(qT.getGuaranteed(),
              1.0 + context.getMaxIgnoreOverCapacity()))) {
        /*
         * We introduce a dampening factor naturalTerminationFactor that
         * accounts for natural termination of containers.
         *
         * It is added to control the pace of preemption. For example, if
         * the preemption policy calculated that a queue *should be*
         * preempted 20 GB, and natural_termination_factor is set to 0.1,
         * the policy will select 20 GB * 0.1 = 2 GB of containers to be
         * preempted.
         *
         * However, this doesn't work for YARN-4390:
         * for example, if a queue needs 20 GB preempted for *one single*
         * large container, preempting 10% of that resource isn't useful.
         * So, to keep it simple, natural_termination_factor is only applied
         * when the selector is not reservedPreemptionCandidatesSelector.
         */
        Resource resToObtain = qT.toBePreempted;
        if (!isReservedPreemptionCandidatesSelector) {
          resToObtain = Resources.multiply(qT.toBePreempted,
              context.getNaturalTerminationFactor());
        }
        // Only log resToObtain when it is positive.
        if (Resources.greaterThan(rc, clusterResource, resToObtain,
            Resources.none())) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("Queue=" + queueName + " partition=" + qT.partition
                + " resource-to-obtain=" + resToObtain);
          }
        }
        qT.setActuallyToBePreempted(Resources.clone(resToObtain));
      } else {
        qT.setActuallyToBePreempted(Resources.none());
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug(qT);
      }
    }
  }
}
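The dampening described in the block comment is a scalar multiply of the to-be-preempted amount. A small sketch reproducing the 20 GB * 0.1 = 2 GB arithmetic from the comment:

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class NaturalTerminationFactorSketch {
  public static void main(String[] args) {
    // 20 GB / 20 vcores flagged by the policy as "should be preempted".
    Resource toBePreempted = Resources.createResource(20 * 1024, 20);
    double naturalTerminationFactor = 0.1;

    // Dampened amount actually targeted this round: 2 GB / 2 vcores.
    Resource resToObtain =
        Resources.multiply(toBePreempted, naturalTerminationFactor);

    System.out.println(resToObtain); // <memory:2048, vCores:2>
  }
}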
Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.
The class TestContainerResourceUsage, method calculateContainerResourceMetrics.
private AggregateAppResourceUsage calculateContainerResourceMetrics(
    RMContainer rmContainer) {
  Resource resource = rmContainer.getContainer().getResource();
  long usedMillis = rmContainer.getFinishTime() - rmContainer.getCreationTime();
  // Convert the container's lifetime into memory-seconds and vcore-seconds.
  long memorySeconds = resource.getMemorySize() * usedMillis
      / DateUtils.MILLIS_PER_SECOND;
  long vcoreSeconds = resource.getVirtualCores() * usedMillis
      / DateUtils.MILLIS_PER_SECOND;
  return new AggregateAppResourceUsage(memorySeconds, vcoreSeconds);
}
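The conversion here is the standard resource-seconds metric: resource amount times lifetime in seconds. A self-contained worked example with hypothetical container values:

public class ResourceSecondsSketch {
  public static void main(String[] args) {
    // Hypothetical container: 2048 MB, 4 vcores, alive for 30,000 ms.
    long memoryMb = 2048;
    int vcores = 4;
    long usedMillis = 30_000;
    long millisPerSecond = 1000; // same constant as DateUtils.MILLIS_PER_SECOND

    long memorySeconds = memoryMb * usedMillis / millisPerSecond; // 61440
    long vcoreSeconds = vcores * usedMillis / millisPerSecond;    // 120

    System.out.println(memorySeconds + " MB-seconds, "
        + vcoreSeconds + " vcore-seconds");
  }
}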
Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.
The class TestApplicationClientProtocolOnHA, method testSubmitApplicationOnHA.
@Test(timeout = 15000)
public void testSubmitApplicationOnHA() throws Exception {
  ApplicationSubmissionContext appContext =
      Records.newRecord(ApplicationSubmissionContext.class);
  appContext.setApplicationId(cluster.createFakeAppId());
  ContainerLaunchContext amContainer =
      Records.newRecord(ContainerLaunchContext.class);
  appContext.setAMContainerSpec(amContainer);
  Resource capability = Records.newRecord(Resource.class);
  capability.setMemorySize(10);
  capability.setVirtualCores(1);
  appContext.setResource(capability);
  ApplicationId appId = client.submitApplication(appContext);
  Assert.assertTrue(
      getActiveRM().getRMContext().getRMApps().containsKey(appId));
}
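The test builds its Resource through the Records factory rather than a constructor, since Resource is an abstract record backed by a generated implementation. A minimal sketch of that pattern on its own (assumes the YARN api/common jars are on the classpath):

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.Records;

public class ResourceRecordSketch {
  public static void main(String[] args) {
    // Records.newRecord instantiates the concrete implementation
    // registered for the abstract Resource record.
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemorySize(10); // MB
    capability.setVirtualCores(1);

    System.out.println(capability); // <memory:10, vCores:1>
  }
}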