Search in sources :

Example 61 with Resource

use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.

The class FifoIntraQueuePreemptionPlugin defines the method calculateUsedAMResourcesPerQueue.

/**
 * Sums the AM resource usage of every running application in the given leaf
 * queue for the given partition. As a side effect, accumulates each user's
 * AM usage into {@code perUserAMUsed} (entries are created on demand).
 *
 * @param partition     node-label partition whose AM usage is summed
 * @param leafQueue     queue whose running applications are inspected
 * @param perUserAMUsed map updated in place with per-user AM usage
 * @return the total AM resource used in this queue for the partition
 */
private Resource calculateUsedAMResourcesPerQueue(String partition, LeafQueue leafQueue, Map<String, Resource> perUserAMUsed) {
    Resource totalAMUsed = Resources.createResource(0, 0);
    for (FiCaSchedulerApp app : leafQueue.getApplications()) {
        // Lazily create this user's accumulator the first time the user is seen.
        Resource userAMResource = perUserAMUsed.computeIfAbsent(app.getUser(), user -> Resources.createResource(0, 0));
        Resource appAMResource = app.getAMResource(partition);
        Resources.addTo(userAMResource, appAMResource);
        Resources.addTo(totalAMUsed, appAMResource);
    }
    return totalAMUsed;
}
Also used : FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) Resource(org.apache.hadoop.yarn.api.records.Resource)

Example 62 with Resource

use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.

The class IntraQueueCandidatesSelector defines the method computeIntraQueuePreemptionDemand.

/**
 * Computes the intra-queue preemption demand for every under-served leaf
 * queue, partition by partition, delegating the per-application ideal
 * allocation to the FIFO preemption compute plugin.
 *
 * @param clusterResource                total cluster resource
 * @param totalPreemptedResourceAllowed  overall cap on resources that may be preempted
 * @param selectedCandidates             containers already selected for preemption
 */
private void computeIntraQueuePreemptionDemand(Resource clusterResource, Resource totalPreemptedResourceAllowed, Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates) {
    // Walk every partition; each may have its own set of under-served queues.
    for (String partition : context.getAllPartitions()) {
        LinkedHashSet<String> underServedQueues = context.getUnderServedQueuesPerPartition(partition);
        if (underServedQueues == null) {
            continue;
        }
        // Resolve the partition-wide resource limit once, before the queue loop.
        Resource partitionResource = context.getPartitionResource(partition);
        for (String queueName : underServedQueues) {
            TempQueuePerPartition tq = context.getQueueByPartition(queueName, partition);
            // Parent queues carry no applications; only leaf queues are considered.
            if (tq.leafQueue == null) {
                continue;
            }
            // Queues below the minimum usage threshold are not candidates for
            // intra-queue preemption.
            if (tq.leafQueue.getQueueCapacities().getUsedCapacity(partition) < context.getMinimumThresholdForIntraQueuePreemption()) {
                continue;
            }
            // Reassignable resource = used - actuallyToBePreempted; this is the
            // upper bound when splitting application quotas within the queue.
            Resource reassignable = Resources.subtract(tq.getUsed(), tq.getActuallyToBePreempted());
            // Compute each application's ideal allocation from the queue's
            // unallocated capacity.
            fifoPreemptionComputePlugin.computeAppsIdealAllocation(clusterResource, partitionResource, tq, selectedCandidates, totalPreemptedResourceAllowed, reassignable, context.getMaxAllowableLimitForIntraQueuePreemption());
        }
    }
}
Also used : Resource(org.apache.hadoop.yarn.api.records.Resource) LeafQueue(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.LeafQueue)

Example 63 with Resource

use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.

The class PreemptableResourceCalculator defines the method calculateResToObtainByPartitionForLeafQueues.

/**
 * For every preemptable leaf queue, and for each of its partitions, computes
 * the resource amount that should actually be preempted from it and records
 * it on the {@link TempQueuePerPartition} via setActuallyToBePreempted.
 *
 * @param leafQueueNames  names of all leaf queues to evaluate
 * @param clusterResource total cluster resource, used for resource comparisons
 */
private void calculateResToObtainByPartitionForLeafQueues(Set<String> leafQueueNames, Resource clusterResource) {
    // Loop all leaf queues
    for (String queueName : leafQueueNames) {
        // check if preemption disabled for the queue
        if (context.getQueueByPartition(queueName, RMNodeLabelsManager.NO_LABEL).preemptionDisabled) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("skipping from queue=" + queueName + " because it's a non-preemptable queue");
            }
            continue;
        }
        // compute resToObtainByPartition considered inter-queue preemption
        for (TempQueuePerPartition qT : context.getQueuePartitions(queueName)) {
            // Only act when usage exceeds the guarantee by more than the
            // configured maxIgnoredOverCapacity tolerance.
            if (Resources.greaterThan(rc, clusterResource, qT.getUsed(), Resources.multiply(qT.getGuaranteed(), 1.0 + context.getMaxIgnoreOverCapacity()))) {
                /*
           * We introduce a dampening factor naturalTerminationFactor that
           * accounts for natural termination of containers.
           *
           * This is added to control pace of preemption, let's say:
           * If preemption policy calculated a queue *should be* preempted 20 GB
           * And the nature_termination_factor set to 0.1. As a result, preemption
           * policy will select 20 GB * 0.1 = 2GB containers to be preempted.
           *
           * However, it doesn't work for YARN-4390:
           * For example, if a queue needs to be preempted 20GB for *one single*
           * large container, preempt 10% of such resource isn't useful.
           * So to make it simple, only apply nature_termination_factor when
           * selector is not reservedPreemptionCandidatesSelector.
           */
                Resource resToObtain = qT.toBePreempted;
                if (!isReservedPreemptionCandidatesSelector) {
                    // Dampen by the natural-termination factor (see note above).
                    resToObtain = Resources.multiply(qT.toBePreempted, context.getNaturalTerminationFactor());
                }
                // Only add resToObtain when it >= 0
                // NOTE(review): despite the comment above, only the debug logging
                // below is guarded by this positivity check — the subsequent
                // setActuallyToBePreempted call runs unconditionally. Confirm
                // whether the set was meant to be inside this guard.
                if (Resources.greaterThan(rc, clusterResource, resToObtain, Resources.none())) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Queue=" + queueName + " partition=" + qT.partition + " resource-to-obtain=" + resToObtain);
                    }
                }
                qT.setActuallyToBePreempted(Resources.clone(resToObtain));
            } else {
                // Usage within tolerance: nothing to preempt from this partition.
                qT.setActuallyToBePreempted(Resources.none());
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug(qT);
            }
        }
    }
}
Also used : Resource(org.apache.hadoop.yarn.api.records.Resource)

Example 64 with Resource

use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.

The class TestContainerResourceUsage defines the method calculateContainerResourceMetrics.

/**
 * Derives aggregate memory-seconds and vcore-seconds for a finished container
 * from its allocated resource and its lifetime (finish time minus creation time).
 *
 * @param rmContainer completed container whose usage is measured
 * @return the aggregated memory/vcore seconds for the container
 */
private AggregateAppResourceUsage calculateContainerResourceMetrics(RMContainer rmContainer) {
    final Resource allocated = rmContainer.getContainer().getResource();
    final long lifetimeMillis = rmContainer.getFinishTime() - rmContainer.getCreationTime();
    // Convert milliseconds of occupancy into resource-seconds.
    return new AggregateAppResourceUsage(
        allocated.getMemorySize() * lifetimeMillis / DateUtils.MILLIS_PER_SECOND,
        allocated.getVirtualCores() * lifetimeMillis / DateUtils.MILLIS_PER_SECOND);
}
Also used : Resource(org.apache.hadoop.yarn.api.records.Resource) AggregateAppResourceUsage(org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AggregateAppResourceUsage)

Example 65 with Resource

use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.

The class TestApplicationClientProtocolOnHA defines the method testSubmitApplicationOnHA.

@Test(timeout = 15000)
public void testSubmitApplicationOnHA() throws Exception {
    // Build a minimal submission context around a fake application id.
    ApplicationSubmissionContext submissionContext = Records.newRecord(ApplicationSubmissionContext.class);
    submissionContext.setApplicationId(cluster.createFakeAppId());
    submissionContext.setAMContainerSpec(Records.newRecord(ContainerLaunchContext.class));
    // Request a tiny AM container: 10 MB of memory and a single vcore.
    Resource amCapability = Records.newRecord(Resource.class);
    amCapability.setMemorySize(10);
    amCapability.setVirtualCores(1);
    submissionContext.setResource(amCapability);
    // Submitting through the HA-aware client must register the app with the active RM.
    ApplicationId appId = client.submitApplication(submissionContext);
    Assert.assertTrue(getActiveRM().getRMContext().getRMApps().containsKey(appId));
}
Also used : ApplicationSubmissionContext(org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext) Resource(org.apache.hadoop.yarn.api.records.Resource) ContainerLaunchContext(org.apache.hadoop.yarn.api.records.ContainerLaunchContext) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) Test(org.junit.Test)

Aggregations

Resource (org.apache.hadoop.yarn.api.records.Resource)500 Test (org.junit.Test)190 NodeId (org.apache.hadoop.yarn.api.records.NodeId)89 ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId)82 Priority (org.apache.hadoop.yarn.api.records.Priority)80 ContainerId (org.apache.hadoop.yarn.api.records.ContainerId)67 HashMap (java.util.HashMap)62 ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)57 RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer)55 FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp)53 ArrayList (java.util.ArrayList)49 ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits)48 FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode)45 YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration)43 Container (org.apache.hadoop.yarn.api.records.Container)42 ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest)42 Configuration (org.apache.hadoop.conf.Configuration)34 IOException (java.io.IOException)33 LocalResource (org.apache.hadoop.yarn.api.records.LocalResource)33 Map (java.util.Map)29