
Example 71 with Resource

Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.

The class TestLogAggregationService, method createContainer:

private ContainerId createContainer(ApplicationAttemptId appAttemptId1, long cId, ContainerType containerType) {
    ContainerId containerId = BuilderUtils.newContainerId(appAttemptId1, cId);
    // The container token advertises a 1024 MB / 1 vcore resource for the given container type.
    Resource r = BuilderUtils.newResource(1024, 1);
    ContainerTokenIdentifier containerToken = new ContainerTokenIdentifier(containerId, context.getNodeId().toString(), user, r, System.currentTimeMillis() + 100000L, 123, DUMMY_RM_IDENTIFIER, Priority.newInstance(0), 0, null, null, containerType);
    // Register a mocked Container in the NM context and stub it to return the token and id.
    Container container = mock(Container.class);
    context.getContainers().put(containerId, container);
    when(container.getContainerTokenIdentifier()).thenReturn(containerToken);
    when(container.getContainerId()).thenReturn(containerId);
    return containerId;
}
Also used : Container(org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) Resource(org.apache.hadoop.yarn.api.records.Resource) LocalResource(org.apache.hadoop.yarn.api.records.LocalResource) ContainerTokenIdentifier(org.apache.hadoop.yarn.security.ContainerTokenIdentifier)
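
Example 71 only builds the Resource through the test-side BuilderUtils helper. Below is a minimal, hedged sketch of what that object amounts to in the public records API; the class name, the printed message, and the getMemorySize() accessor (present in Hadoop 2.8+, with getMemory() on older releases) are assumptions of this illustration, not part of the test above.

import org.apache.hadoop.yarn.api.records.Resource;

public class ResourceSketch {
    public static void main(String[] args) {
        // Roughly what BuilderUtils.newResource(1024, 1) produces: 1024 MB of memory, 1 virtual core.
        Resource r = Resource.newInstance(1024, 1);
        // getMemorySize() assumes Hadoop 2.8+; older releases expose getMemory() instead.
        System.out.println("memory=" + r.getMemorySize() + " MB, vcores=" + r.getVirtualCores());
    }
}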

Example 72 with Resource

Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.

The class AbstractCSQueue, method accept:

@Override
public boolean accept(Resource cluster, ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode> request) {
    // Do we need to check parent queue before making this decision?
    boolean checkParentQueue = false;
    ContainerAllocationProposal<FiCaSchedulerApp, FiCaSchedulerNode> allocation = request.getFirstAllocatedOrReservedContainer();
    SchedulerContainer<FiCaSchedulerApp, FiCaSchedulerNode> schedulerContainer = allocation.getAllocatedOrReservedContainer();
    // Do not check when allocating new container from a reserved container
    if (allocation.getAllocateFromReservedContainer() == null) {
        Resource required = allocation.getAllocatedOrReservedResource();
        Resource netAllocated = Resources.subtract(required, request.getTotalReleasedResource());
        try {
            readLock.lock();
            String partition = schedulerContainer.getNodePartition();
            Resource maxResourceLimit;
            if (allocation.getSchedulingMode() == SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY) {
                maxResourceLimit = getQueueMaxResource(partition, cluster);
            } else {
                maxResourceLimit = labelManager.getResourceByLabel(schedulerContainer.getNodePartition(), cluster);
            }
            if (!Resources.fitsIn(resourceCalculator, cluster, Resources.add(queueUsage.getUsed(partition), netAllocated), maxResourceLimit)) {
                if (LOG.isDebugEnabled()) {
                    LOG.debug("Used resource=" + queueUsage.getUsed(partition) + " exceeded maxResourceLimit of the queue =" + maxResourceLimit);
                }
                return false;
            }
        } finally {
            readLock.unlock();
        }
        // Only check parent queue when something new allocated or reserved.
        checkParentQueue = true;
    }
    if (parent != null && checkParentQueue) {
        return parent.accept(cluster, request);
    }
    return true;
}
Also used : FiCaSchedulerNode(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) Resource(org.apache.hadoop.yarn.api.records.Resource)
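
The core of accept() is the fitsIn check: the proposed usage, used(partition) plus netAllocated, must stay within the queue's maximum for that partition. Here is a standalone sketch of that arithmetic, assuming the same Resources/ResourceCalculator API shown in the snippet; the class name and the numbers are invented for illustration.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;

public class QueueLimitCheckSketch {
    public static void main(String[] args) {
        ResourceCalculator rc = new DefaultResourceCalculator();
        Resource cluster = Resource.newInstance(16384, 16);     // total cluster resource (illustrative)
        Resource used = Resource.newInstance(6144, 6);          // current usage of the partition in this queue
        Resource netAllocated = Resource.newInstance(2048, 2);  // newly allocated/reserved minus released
        Resource maxLimit = Resource.newInstance(8192, 8);      // queue max-resource limit for the partition

        // Same shape as the check in accept(): used + netAllocated must fit under the limit.
        boolean fits = Resources.fitsIn(rc, cluster, Resources.add(used, netAllocated), maxLimit);
        System.out.println("proposal accepted at this queue: " + fits); // true with these numbers
    }
}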

Example 73 with Resource

Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.

The class AbstractCSQueue, method canAssignToThisQueue:

boolean canAssignToThisQueue(Resource clusterResource, String nodePartition, ResourceLimits currentResourceLimits, Resource resourceCouldBeUnreserved, SchedulingMode schedulingMode) {
    try {
        readLock.lock();
        // Get current limited resource:
        // - When doing RESPECT_PARTITION_EXCLUSIVITY allocation, we will respect
        // queues' max capacity.
        // - When doing IGNORE_PARTITION_EXCLUSIVITY allocation, we will not respect
        // queue's max capacity; the queue's max capacity on the partition is
        // treated as 100%, i.e. the queue can use all resources in the
        // partition.
        // We do this because, for non-exclusive allocation, we only make sure there is
        // idle resource on the partition; to avoid wastage, such resource is
        // leveraged as much as possible, and the preemption policy will reclaim it
        // back when a partitioned resource request comes back.
        Resource currentLimitResource = getCurrentLimitResource(nodePartition, clusterResource, currentResourceLimits, schedulingMode);
        Resource nowTotalUsed = queueUsage.getUsed(nodePartition);
        // Set headroom for currentResourceLimits:
        // When queue is a parent queue: Headroom = limit - used + killable
        // When queue is a leaf queue: Headroom = limit - used (leaf queue cannot preempt itself)
        Resource usedExceptKillable = nowTotalUsed;
        if (null != getChildQueues() && !getChildQueues().isEmpty()) {
            usedExceptKillable = Resources.subtract(nowTotalUsed, getTotalKillableResource(nodePartition));
        }
        currentResourceLimits.setHeadroom(Resources.subtract(currentLimitResource, usedExceptKillable));
        if (Resources.greaterThanOrEqual(resourceCalculator, clusterResource, usedExceptKillable, currentLimitResource)) {
            // TODO, now only consider reservation cases when the node has no label
            if (this.reservationsContinueLooking && nodePartition.equals(RMNodeLabelsManager.NO_LABEL) && Resources.greaterThan(resourceCalculator, clusterResource, resourceCouldBeUnreserved, Resources.none())) {
                // resource-without-reserved = used - reserved
                Resource newTotalWithoutReservedResource = Resources.subtract(usedExceptKillable, resourceCouldBeUnreserved);
                // have chance to allocate on this node by unreserving some containers
                if (Resources.lessThan(resourceCalculator, clusterResource, newTotalWithoutReservedResource, currentLimitResource)) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("try to use reserved: " + getQueueName() + " usedResources: " + queueUsage.getUsed() + ", clusterResources: " + clusterResource + ", reservedResources: " + resourceCouldBeUnreserved + ", capacity-without-reserved: " + newTotalWithoutReservedResource + ", maxLimitCapacity: " + currentLimitResource);
                    }
                    return true;
                }
            }
            // Cannot assign to this queue
            if (LOG.isDebugEnabled()) {
                LOG.debug("Failed to assign to queue: " + getQueueName() + " nodePartition: " + nodePartition + ", usedResources: " + queueUsage.getUsed(nodePartition) + ", clusterResources: " + clusterResource + ", reservedResources: " + resourceCouldBeUnreserved + ", maxLimitCapacity: " + currentLimitResource + ", currTotalUsed: " + usedExceptKillable);
            }
            return false;
        }
        if (LOG.isDebugEnabled()) {
            LOG.debug("Check assign to queue: " + getQueueName() + " nodePartition: " + nodePartition + ", usedResources: " + queueUsage.getUsed(nodePartition) + ", clusterResources: " + clusterResource + ", currentUsedCapacity: " + Resources.divide(resourceCalculator, clusterResource, queueUsage.getUsed(nodePartition), labelManager.getResourceByLabel(nodePartition, clusterResource)) + ", max-capacity: " + queueCapacities.getAbsoluteMaximumCapacity(nodePartition));
        }
        return true;
    } finally {
        readLock.unlock();
    }
}
Also used : Resource(org.apache.hadoop.yarn.api.records.Resource)
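
The headroom bookkeeping above is plain component-wise subtraction: limit minus usage, with killable (preemptable) containers excluded from usage when the queue has children. A hedged sketch with invented numbers, using only the Resources helpers already visible in the snippet:

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class HeadroomSketch {
    public static void main(String[] args) {
        Resource currentLimit = Resource.newInstance(10240, 10); // current limit on the partition
        Resource used = Resource.newInstance(8192, 8);           // current usage on the partition
        Resource killable = Resource.newInstance(1024, 1);       // killable containers (parent queues only)

        // Parent queue: killable containers can be reclaimed, so they do not count against headroom.
        Resource usedExceptKillable = Resources.subtract(used, killable);
        Resource headroom = Resources.subtract(currentLimit, usedExceptKillable);
        System.out.println("headroom=" + headroom); // 3072 MB / 3 vcores with these numbers
    }
}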

Example 74 with Resource

Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.

The class CSQueueUtils, method getMaxAvailableResourceToQueue:

private static Resource getMaxAvailableResourceToQueue(final ResourceCalculator rc, RMNodeLabelsManager nlm, CSQueue queue, Resource cluster) {
    Set<String> nodeLabels = queue.getNodeLabelsForQueue();
    Resource totalAvailableResource = Resources.createResource(0, 0);
    for (String partition : nodeLabels) {
        // Calculate the guaranteed resource for a label in a queue as:
        // (total label resource) * (absolute capacity of the label in that queue)
        Resource queueGuaranteedResource = Resources.multiply(nlm.getResourceByLabel(partition, cluster), queue.getQueueCapacities().getAbsoluteCapacity(partition));
        // Available resource in the queue for a specific label is calculated as
        // {(guaranteed resource for the label in the queue) -
        // (resource usage of that label in the queue)},
        // clamped at zero; finally, accumulate this available resource into the total.
        Resource available = Resources.greaterThan(rc, cluster, queueGuaranteedResource, queue.getQueueResourceUsage().getUsed(partition))
            ? Resources.componentwiseMax(Resources.subtractFrom(queueGuaranteedResource, queue.getQueueResourceUsage().getUsed(partition)), Resources.none())
            : Resources.none();
        Resources.addTo(totalAvailableResource, available);
    }
    return totalAvailableResource;
}
Also used : Resource(org.apache.hadoop.yarn.api.records.Resource)
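
The per-label formula in Example 74 is guaranteed = (total label resource) * (absolute capacity), and available = max(guaranteed - used, 0), clamped per component. A hedged sketch with invented numbers, which also shows why the componentwiseMax clamp matters when one dimension is over-used:

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class QueueAvailableSketch {
    public static void main(String[] args) {
        Resource labelResource = Resource.newInstance(102400, 100); // total resource of the label (illustrative)
        double absoluteCapacity = 0.25;                             // queue's absolute capacity on the label
        Resource used = Resource.newInstance(20480, 30);            // queue usage on the label

        // guaranteed = (total label resource) * (absolute capacity of the label in the queue)
        Resource guaranteed = Resources.multiply(labelResource, absoluteCapacity); // 25600 MB / 25 vcores
        // available = guaranteed - used, clamped at zero per component; vcores are over-used here,
        // so the clamp keeps that dimension at 0 instead of letting it go negative.
        Resource available = Resources.componentwiseMax(Resources.subtract(guaranteed, used), Resources.none());
        System.out.println("guaranteed=" + guaranteed + ", available=" + available); // available: 5120 MB / 0 vcores
    }
}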

Example 75 with Resource

Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache.

The class ParentQueue, method killContainersToEnforceMaxQueueCapacity:

private void killContainersToEnforceMaxQueueCapacity(String partition, Resource clusterResource) {
    Iterator<RMContainer> killableContainerIter = getKillableContainers(partition);
    if (!killableContainerIter.hasNext()) {
        return;
    }
    Resource partitionResource = labelManager.getResourceByLabel(partition, null);
    // Absolute max-capacity of this queue on the partition, expressed as a Resource.
    Resource maxResource = Resources.multiply(partitionResource, getQueueCapacities().getAbsoluteMaximumCapacity(partition));
    // Keep preempting killable containers while the queue's usage still exceeds its max-capacity.
    while (Resources.greaterThan(resourceCalculator, partitionResource, queueUsage.getUsed(partition), maxResource)) {
        RMContainer toKillContainer = killableContainerIter.next();
        FiCaSchedulerApp attempt = csContext.getApplicationAttempt(toKillContainer.getContainerId().getApplicationAttemptId());
        FiCaSchedulerNode node = csContext.getNode(toKillContainer.getAllocatedNode());
        if (null != attempt && null != node) {
            LeafQueue lq = attempt.getCSLeafQueue();
            lq.completedContainer(clusterResource, attempt, node, toKillContainer, SchedulerUtils.createPreemptedContainerStatus(toKillContainer.getContainerId(), SchedulerUtils.PREEMPTED_CONTAINER), RMContainerEventType.KILL, null, false);
            LOG.info("Killed container=" + toKillContainer.getContainerId() + " from queue=" + lq.getQueueName() + " to make queue=" + this.getQueueName() + "'s max-capacity enforced");
        }
        if (!killableContainerIter.hasNext()) {
            break;
        }
    }
}
Also used : FiCaSchedulerNode(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) FiCaSchedulerApp(org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) Resource(org.apache.hadoop.yarn.api.records.Resource) RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer)
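
The loop's driving predicate is a single comparison: keep preempting while usage on the partition exceeds maxResource = partitionResource * absoluteMaxCapacity. A hedged, standalone sketch of that threshold check; the calculator choice, class name, and numbers are invented for illustration.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;

public class MaxCapacityEnforcementSketch {
    public static void main(String[] args) {
        ResourceCalculator rc = new DominantResourceCalculator();
        Resource partitionResource = Resource.newInstance(40960, 40);      // total resource of the partition
        Resource maxResource = Resources.multiply(partitionResource, 0.5); // absolute max-capacity of 50%
        Resource used = Resource.newInstance(24576, 18);                   // current usage of the queue

        // Same predicate that drives the kill loop: usage above max-capacity means containers must go.
        boolean mustKill = Resources.greaterThan(rc, partitionResource, used, maxResource);
        System.out.println("must preempt killable containers: " + mustKill); // true: memory share 0.6 > 0.5
    }
}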

Aggregations

Resource (org.apache.hadoop.yarn.api.records.Resource) 500
Test (org.junit.Test) 190
NodeId (org.apache.hadoop.yarn.api.records.NodeId) 89
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId) 82
Priority (org.apache.hadoop.yarn.api.records.Priority) 80
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId) 67
HashMap (java.util.HashMap) 62
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId) 57
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) 55
FiCaSchedulerApp (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp) 53
ArrayList (java.util.ArrayList) 49
ResourceLimits (org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceLimits) 48
FiCaSchedulerNode (org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode) 45
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration) 43
Container (org.apache.hadoop.yarn.api.records.Container) 42
ResourceRequest (org.apache.hadoop.yarn.api.records.ResourceRequest) 42
Configuration (org.apache.hadoop.conf.Configuration) 34
IOException (java.io.IOException) 33
LocalResource (org.apache.hadoop.yarn.api.records.LocalResource) 33
Map (java.util.Map) 29