
Example 26 with Private

Use of org.apache.hadoop.classification.InterfaceAudience.Private in project hadoop by apache.

The class CapacitySchedulerConfiguration, method getQueueOrderingPolicy.

@Private
public QueueOrderingPolicy getQueueOrderingPolicy(String queue, String parentPolicy) {
    String defaultPolicy = parentPolicy;
    if (null == defaultPolicy) {
        defaultPolicy = DEFAULT_QUEUE_ORDERING_POLICY;
    }
    String policyType = get(getQueuePrefix(queue) + ORDERING_POLICY, defaultPolicy);
    QueueOrderingPolicy qop;
    if (policyType.trim().equals(QUEUE_UTILIZATION_ORDERING_POLICY)) {
        // Doesn't respect priority
        qop = new PriorityUtilizationQueueOrderingPolicy(false);
    } else if (policyType.trim().equals(QUEUE_PRIORITY_UTILIZATION_ORDERING_POLICY)) {
        qop = new PriorityUtilizationQueueOrderingPolicy(true);
    } else {
        String message = "Unable to construct queue ordering policy=" + policyType + " queue=" + queue;
        throw new YarnRuntimeException(message);
    }
    return qop;
}
Also used : YarnRuntimeException(org.apache.hadoop.yarn.exceptions.YarnRuntimeException) PriorityUtilizationQueueOrderingPolicy(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.policy.PriorityUtilizationQueueOrderingPolicy) QueueOrderingPolicy(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.policy.QueueOrderingPolicy) Private(org.apache.hadoop.classification.InterfaceAudience.Private)
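For context, the method above resolves the ordering policy from the queue's configuration prefix. The sketch below is not part of the Hadoop source; it assumes the standard "yarn.scheduler.capacity.<queue-path>." prefix, the property suffix "ordering-policy", and the value "priority-utilization" matching the constants referenced in the method, plus an import of CapacitySchedulerConfiguration alongside the imports listed above. Treat those names as assumptions.

static QueueOrderingPolicy orderingPolicySketch() {
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    // Property name assumed from getQueuePrefix(queue) + ORDERING_POLICY; queue "root.a" is illustrative.
    csConf.set("yarn.scheduler.capacity.root.a.ordering-policy", "priority-utilization");
    // Passing null as parentPolicy falls back to DEFAULT_QUEUE_ORDERING_POLICY.
    return csConf.getQueueOrderingPolicy("root.a", null);
}

With the value "priority-utilization", the returned policy is a PriorityUtilizationQueueOrderingPolicy constructed with respectPriority set to true.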

Example 27 with Private

Use of org.apache.hadoop.classification.InterfaceAudience.Private in project hadoop by apache.

The class LeafQueue, method canAssignToUser.

@Private
protected boolean canAssignToUser(Resource clusterResource, String userName, Resource limit, FiCaSchedulerApp application, String nodePartition, ResourceLimits currentResourceLimits) {
    try {
        readLock.lock();
        User user = getUser(userName);
        currentResourceLimits.setAmountNeededUnreserve(Resources.none());
        // Usage may slightly exceed the limit (e.g. due to the overhead of the AM),
        // but this is a > check, not a >= check, so a user sitting exactly at the
        // limit can still be assigned.
        if (Resources.greaterThan(resourceCalculator, clusterResource, user.getUsed(nodePartition), limit)) {
            // If reservations-continue-looking is enabled, check whether the user
            // would fit within the limit after unreserving the application's
            // reserved containers.
            if (this.reservationsContinueLooking && nodePartition.equals(CommonNodeLabelsManager.NO_LABEL)) {
                if (Resources.lessThanOrEqual(resourceCalculator, clusterResource, Resources.subtract(user.getUsed(), application.getCurrentReservation()), limit)) {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("User " + userName + " in queue " + getQueueName() + " will exceed limit based on reservations - " + " consumed: " + user.getUsed() + " reserved: " + application.getCurrentReservation() + " limit: " + limit);
                    }
                    Resource amountNeededToUnreserve = Resources.subtract(user.getUsed(nodePartition), limit);
                    // we can only acquire a new container if we unreserve first to
                    // respect user-limit
                    currentResourceLimits.setAmountNeededUnreserve(amountNeededToUnreserve);
                    return true;
                }
            }
            if (LOG.isDebugEnabled()) {
                LOG.debug("User " + userName + " in queue " + getQueueName() + " will exceed limit - " + " consumed: " + user.getUsed(nodePartition) + " limit: " + limit);
            }
            return false;
        }
        return true;
    } finally {
        readLock.unlock();
    }
}
Also used : User(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UsersManager.User) Resource(org.apache.hadoop.yarn.api.records.Resource) Private(org.apache.hadoop.classification.InterfaceAudience.Private)
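The user-limit check above leans on the Resources comparison helpers. The short sketch below is not part of the Hadoop source; the memory/vcore values are illustrative, and it assumes imports of ResourceCalculator, DefaultResourceCalculator and Resources from org.apache.hadoop.yarn.util.resource in addition to Resource listed above.

static void userLimitSketch() {
    ResourceCalculator rc = new DefaultResourceCalculator(); // compares by memory only
    Resource clusterResource = Resource.newInstance(16 * 1024, 16);
    Resource used = Resource.newInstance(6 * 1024, 6);   // user's current usage
    Resource limit = Resource.newInstance(4 * 1024, 4);  // computed user limit
    if (Resources.greaterThan(rc, clusterResource, used, limit)) {
        // Same arithmetic as amountNeededToUnreserve in the snippet above.
        Resource toUnreserve = Resources.subtract(used, limit); // <2048 MB, 2 vcores>
        System.out.println("need to unreserve: " + toUnreserve);
    }
}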

Example 28 with Private

Use of org.apache.hadoop.classification.InterfaceAudience.Private in project hadoop by apache.

The class LeafQueue, method getNumPendingApplications.

@Private
public int getNumPendingApplications(String user) {
    try {
        readLock.lock();
        User u = getUser(user);
        if (null == u) {
            return 0;
        }
        return u.getPendingApplications();
    } finally {
        readLock.unlock();
    }
}
Also used : User(org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.UsersManager.User) Private(org.apache.hadoop.classification.InterfaceAudience.Private)
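The snippet acquires the read lock inside the try block; the more common Java idiom is to acquire it immediately before the try, so that a failed lock() cannot trigger an unmatched unlock(). Below is a standalone sketch of that pattern, not Hadoop code: the field names rwLock and pendingApplications are hypothetical, and it assumes imports of java.util.concurrent.locks.ReentrantReadWriteLock, java.util.Map and java.util.HashMap.

private final ReentrantReadWriteLock rwLock = new ReentrantReadWriteLock();
private final Map<String, Integer> pendingApplications = new HashMap<>();

public int getPendingApplications(String user) {
    rwLock.readLock().lock();   // acquire before try, release in finally
    try {
        Integer pending = pendingApplications.get(user);
        return pending == null ? 0 : pending;
    } finally {
        rwLock.readLock().unlock();
    }
}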

Example 29 with Private

Use of org.apache.hadoop.classification.InterfaceAudience.Private in project hadoop by apache.

The class ServiceAuthorizationManager, method refreshWithLoadedConfiguration.

@Private
public void refreshWithLoadedConfiguration(Configuration conf, PolicyProvider provider) {
    final Map<Class<?>, AccessControlList[]> newAcls = new IdentityHashMap<Class<?>, AccessControlList[]>();
    final Map<Class<?>, MachineList[]> newMachineLists = new IdentityHashMap<Class<?>, MachineList[]>();
    String defaultAcl = conf.get(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL, AccessControlList.WILDCARD_ACL_VALUE);
    String defaultBlockedAcl = conf.get(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_BLOCKED_ACL, "");
    String defaultServiceHostsKey = getHostKey(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL);
    String defaultMachineList = conf.get(defaultServiceHostsKey, MachineList.WILDCARD_VALUE);
    String defaultBlockedMachineList = conf.get(defaultServiceHostsKey + BLOCKED, "");
    // Parse the config file
    Service[] services = provider.getServices();
    if (services != null) {
        for (Service service : services) {
            AccessControlList acl = new AccessControlList(conf.get(service.getServiceKey(), defaultAcl));
            AccessControlList blockedAcl = new AccessControlList(conf.get(service.getServiceKey() + BLOCKED, defaultBlockedAcl));
            newAcls.put(service.getProtocol(), new AccessControlList[] { acl, blockedAcl });
            String serviceHostsKey = getHostKey(service.getServiceKey());
            MachineList machineList = new MachineList(conf.get(serviceHostsKey, defaultMachineList));
            MachineList blockedMachineList = new MachineList(conf.get(serviceHostsKey + BLOCKED, defaultBlockedMachineList));
            newMachineLists.put(service.getProtocol(), new MachineList[] { machineList, blockedMachineList });
        }
    }
    // Flip to the newly parsed permissions
    protocolToAcls = newAcls;
    protocolToMachineLists = newMachineLists;
}
Also used : MachineList(org.apache.hadoop.util.MachineList) IdentityHashMap(java.util.IdentityHashMap) Private(org.apache.hadoop.classification.InterfaceAudience.Private)
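refreshWithLoadedConfiguration expects the caller to supply a PolicyProvider whose Service entries map ACL configuration keys to protocol classes. The sketch below is illustrative only: MyProtocol and the key "security.my.protocol.acl" are hypothetical, the ACL value follows the usual "users groups" format of AccessControlList, and imports of Configuration, PolicyProvider, Service and ServiceAuthorizationManager are assumed.

static void refreshSketch() {
    PolicyProvider provider = new PolicyProvider() {
        @Override
        public Service[] getServices() {
            // MyProtocol and the ACL key are hypothetical placeholders.
            return new Service[] { new Service("security.my.protocol.acl", MyProtocol.class) };
        }
    };
    Configuration conf = new Configuration();
    conf.set("security.my.protocol.acl", "alice,bob admins"); // users, then groups
    ServiceAuthorizationManager manager = new ServiceAuthorizationManager();
    manager.refreshWithLoadedConfiguration(conf, provider);
    // Subsequent authorization checks for MyProtocol use the newly parsed ACLs.
}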

Example 30 with Private

Use of org.apache.hadoop.classification.InterfaceAudience.Private in project hadoop by apache.

The class RMContainerAllocator, method preemptReducesIfNeeded.

@Private
@VisibleForTesting
boolean preemptReducesIfNeeded() {
    if (reduceResourceRequest.equals(Resources.none())) {
        // no reduces
        return false;
    }
    if (assignedRequests.maps.size() > 0) {
        // there are assigned mappers
        return false;
    }
    if (scheduledRequests.maps.size() <= 0) {
        // there are no pending requests for mappers
        return false;
    }
    // we have pending mappers and all assigned resources are taken by reducers
    if (reducerUnconditionalPreemptionDelayMs >= 0) {
        // preempt reducers irrespective of what the headroom is.
        if (preemptReducersForHangingMapRequests(reducerUnconditionalPreemptionDelayMs)) {
            return true;
        }
    }
    // The pending mappers haven't been waiting for too long. Let us see if
    // there are enough resources for a mapper to run. This is calculated by
    // excluding scheduled reducers from headroom and comparing it against
    // resources required to run one mapper.
    Resource scheduledReducesResource = Resources.multiply(reduceResourceRequest, scheduledRequests.reduces.size());
    Resource availableResourceForMap = Resources.subtract(getAvailableResources(), scheduledReducesResource);
    if (ResourceCalculatorUtils.computeAvailableContainers(availableResourceForMap, mapResourceRequest, getSchedulerResourceTypes()) > 0) {
        // Enough room to run a mapper
        return false;
    }
    // Not enough headroom to run a mapper. See if we should hold off before
    // preempting reducers, and preempt if okay.
    return preemptReducersForHangingMapRequests(reducerNoHeadroomPreemptionDelayMs);
}
Also used : Resource(org.apache.hadoop.yarn.api.records.Resource) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Private(org.apache.hadoop.classification.InterfaceAudience.Private)
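The headroom arithmetic in the middle of the method is easier to see with concrete numbers. The sketch below is not part of the Hadoop source; the resource sizes are illustrative, and it assumes an import of org.apache.hadoop.yarn.util.resource.Resources alongside Resource listed above.

static void headroomSketch() {
    // Illustrative sizes: 20 GB headroom, 4 scheduled reducers of 4 GB each, 6 GB mappers.
    Resource headroom = Resource.newInstance(20 * 1024, 20);
    Resource reduceRequest = Resource.newInstance(4 * 1024, 1);
    Resource scheduledReduces = Resources.multiply(reduceRequest, 4);          // <16384 MB, 4 vcores>
    Resource availableForMap = Resources.subtract(headroom, scheduledReduces); // <4096 MB, 16 vcores>
    // A 6 GB mapper does not fit in the remaining 4 GB, so the method would go on
    // to consider preempting reducers after the no-headroom delay.
    System.out.println("available for maps: " + availableForMap);
}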

Aggregations

Private (org.apache.hadoop.classification.InterfaceAudience.Private): 52 uses
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 15 uses
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 12 uses
IOException (java.io.IOException): 9 uses
FileStatus (org.apache.hadoop.fs.FileStatus): 8 uses
ArrayList (java.util.ArrayList): 6 uses
Path (org.apache.hadoop.fs.Path): 6 uses
DataInputStream (java.io.DataInputStream): 5 uses
EOFException (java.io.EOFException): 5 uses
PrintStream (java.io.PrintStream): 5 uses
LogReader (org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader): 5 uses
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 4 uses
ContainerId (org.apache.hadoop.yarn.api.records.ContainerId): 4 uses
Resource (org.apache.hadoop.yarn.api.records.Resource): 4 uses
YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException): 4 uses
ByteString (com.google.protobuf.ByteString): 2 uses
FileNotFoundException (java.io.FileNotFoundException): 2 uses
AccessDeniedException (java.nio.file.AccessDeniedException): 2 uses
HashSet (java.util.HashSet): 2 uses
FileSystem (org.apache.hadoop.fs.FileSystem): 2 uses