Use of org.apache.hadoop.classification.InterfaceAudience.Private in project hadoop by apache.
The class CapacitySchedulerConfiguration, method getQueueOrderingPolicy.
@Private
public QueueOrderingPolicy getQueueOrderingPolicy(String queue,
    String parentPolicy) {
  String defaultPolicy = parentPolicy;
  if (null == defaultPolicy) {
    defaultPolicy = DEFAULT_QUEUE_ORDERING_POLICY;
  }

  String policyType = get(getQueuePrefix(queue) + ORDERING_POLICY,
      defaultPolicy);

  QueueOrderingPolicy qop;
  if (policyType.trim().equals(QUEUE_UTILIZATION_ORDERING_POLICY)) {
    // Doesn't respect priority
    qop = new PriorityUtilizationQueueOrderingPolicy(false);
  } else if (policyType.trim().equals(
      QUEUE_PRIORITY_UTILIZATION_ORDERING_POLICY)) {
    qop = new PriorityUtilizationQueueOrderingPolicy(true);
  } else {
    String message = "Unable to construct queue ordering policy="
        + policyType + " queue=" + queue;
    throw new YarnRuntimeException(message);
  }
  return qop;
}
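A minimal, hypothetical test-style sketch of how this method is driven by configuration. The key format yarn.scheduler.capacity.<queue-path>.ordering-policy and the values "utilization" / "priority-utilization" are assumptions based on the constants referenced above (ORDERING_POLICY, QUEUE_UTILIZATION_ORDERING_POLICY, QUEUE_PRIORITY_UTILIZATION_ORDERING_POLICY):

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.policy.QueueOrderingPolicy;

public class OrderingPolicySketch {
  public static void main(String[] args) {
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();

    // Assumed key: yarn.scheduler.capacity.<queue-path>.ordering-policy
    csConf.set("yarn.scheduler.capacity.root.a.ordering-policy",
        "priority-utilization");

    // root.a is configured explicitly; root.b falls back to the default.
    // No parent policy is supplied here, so parentPolicy is null.
    QueueOrderingPolicy policyA = csConf.getQueueOrderingPolicy("root.a", null);
    QueueOrderingPolicy policyB = csConf.getQueueOrderingPolicy("root.b", null);

    // Both values map to PriorityUtilizationQueueOrderingPolicy; the boolean
    // constructor argument above decides whether queue priority is respected.
    System.out.println(policyA.getClass().getSimpleName());
    System.out.println(policyB.getClass().getSimpleName());
  }
}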
Use of org.apache.hadoop.classification.InterfaceAudience.Private in project hadoop by apache.
The class LeafQueue, method canAssignToUser.
@Private
protected boolean canAssignToUser(Resource clusterResource, String userName,
    Resource limit, FiCaSchedulerApp application, String nodePartition,
    ResourceLimits currentResourceLimits) {
  try {
    readLock.lock();
    User user = getUser(userName);

    currentResourceLimits.setAmountNeededUnreserve(Resources.none());

    // The computed limit can include the overhead of the AM, but this is a
    // > check, not a >= check, so usage exactly at the limit still passes.
    if (Resources.greaterThan(resourceCalculator, clusterResource,
        user.getUsed(nodePartition), limit)) {
      // With continue-looking reservations enabled, the limit may be exceeded
      // at the expense of a reserved node if the application has reserved
      // containers that could be unreserved.
      if (this.reservationsContinueLooking
          && nodePartition.equals(CommonNodeLabelsManager.NO_LABEL)) {
        if (Resources.lessThanOrEqual(resourceCalculator, clusterResource,
            Resources.subtract(user.getUsed(),
                application.getCurrentReservation()), limit)) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("User " + userName + " in queue " + getQueueName()
                + " will exceed limit based on reservations - "
                + " consumed: " + user.getUsed()
                + " reserved: " + application.getCurrentReservation()
                + " limit: " + limit);
          }

          Resource amountNeededToUnreserve =
              Resources.subtract(user.getUsed(nodePartition), limit);
          // we can only acquire a new container if we unreserve first to
          // respect user-limit
          currentResourceLimits.setAmountNeededUnreserve(
              amountNeededToUnreserve);
          return true;
        }
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("User " + userName + " in queue " + getQueueName()
            + " will exceed limit - "
            + " consumed: " + user.getUsed(nodePartition)
            + " limit: " + limit);
      }
      return false;
    }
    return true;
  } finally {
    readLock.unlock();
  }
}
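A standalone sketch of the Resources arithmetic behind this check, with made-up numbers (the real method additionally handles node partitions, the queue read lock, and the ResourceLimits plumbing): if current usage exceeds the user limit, assignment is only possible when subtracting the application's reserved resources brings usage back under the limit, and the difference is what must be unreserved first.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
import org.apache.hadoop.yarn.util.resource.Resources;

public class UserLimitSketch {
  public static void main(String[] args) {
    ResourceCalculator rc = new DefaultResourceCalculator();
    Resource cluster = Resource.newInstance(64 * 1024, 32);

    Resource used = Resource.newInstance(20 * 1024, 10);    // user's current usage
    Resource limit = Resource.newInstance(16 * 1024, 8);    // computed user limit
    Resource reserved = Resource.newInstance(6 * 1024, 3);  // app's reserved containers

    if (Resources.greaterThan(rc, cluster, used, limit)) {
      // Over the limit: only assignable if unreserving would bring the user
      // back within the limit, mirroring the reservationsContinueLooking branch.
      if (Resources.lessThanOrEqual(rc, cluster,
          Resources.subtract(used, reserved), limit)) {
        Resource amountNeededToUnreserve = Resources.subtract(used, limit);
        System.out.println("Assignable after unreserving "
            + amountNeededToUnreserve);
      } else {
        System.out.println("Not assignable: user limit exceeded");
      }
    } else {
      System.out.println("Assignable: within user limit");
    }
  }
}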
Use of org.apache.hadoop.classification.InterfaceAudience.Private in project hadoop by apache.
The class LeafQueue, method getNumPendingApplications.
@Private
public int getNumPendingApplications(String user) {
  try {
    readLock.lock();
    User u = getUser(user);
    if (null == u) {
      return 0;
    }
    return u.getPendingApplications();
  } finally {
    readLock.unlock();
  }
}
Use of org.apache.hadoop.classification.InterfaceAudience.Private in project hadoop by apache.
The class ServiceAuthorizationManager, method refreshWithLoadedConfiguration.
@Private
public void refreshWithLoadedConfiguration(Configuration conf,
    PolicyProvider provider) {
  final Map<Class<?>, AccessControlList[]> newAcls =
      new IdentityHashMap<Class<?>, AccessControlList[]>();
  final Map<Class<?>, MachineList[]> newMachineLists =
      new IdentityHashMap<Class<?>, MachineList[]>();

  String defaultAcl = conf.get(
      CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL,
      AccessControlList.WILDCARD_ACL_VALUE);
  String defaultBlockedAcl = conf.get(
      CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_BLOCKED_ACL,
      "");

  String defaultServiceHostsKey = getHostKey(
      CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_AUTHORIZATION_DEFAULT_ACL);
  String defaultMachineList = conf.get(defaultServiceHostsKey,
      MachineList.WILDCARD_VALUE);
  String defaultBlockedMachineList = conf.get(
      defaultServiceHostsKey + BLOCKED, "");

  // Parse the config file
  Service[] services = provider.getServices();
  if (services != null) {
    for (Service service : services) {
      AccessControlList acl = new AccessControlList(
          conf.get(service.getServiceKey(), defaultAcl));
      AccessControlList blockedAcl = new AccessControlList(
          conf.get(service.getServiceKey() + BLOCKED, defaultBlockedAcl));
      newAcls.put(service.getProtocol(),
          new AccessControlList[] { acl, blockedAcl });

      String serviceHostsKey = getHostKey(service.getServiceKey());
      MachineList machineList = new MachineList(
          conf.get(serviceHostsKey, defaultMachineList));
      MachineList blockedMachineList = new MachineList(
          conf.get(serviceHostsKey + BLOCKED, defaultBlockedMachineList));
      newMachineLists.put(service.getProtocol(),
          new MachineList[] { machineList, blockedMachineList });
    }
  }

  // Flip to the newly parsed permissions
  protocolToAcls = newAcls;
  protocolToMachineLists = newMachineLists;
}
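A hypothetical usage sketch: register a single service through an anonymous PolicyProvider and refresh. The derived key names (the ".blocked" suffix appended to the service key, and the ".hosts" key produced by getHostKey) are assumptions read off the code above, and MyProtocol is an invented placeholder interface, not a real Hadoop protocol.

import java.net.InetAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;
import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;

public class ServiceAclRefreshSketch {
  // Hypothetical protocol interface used only to key the ACL map.
  interface MyProtocol {}

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Allow only users alice and bob for this (made-up) service key.
    conf.set("security.my.protocol.acl", "alice,bob");
    // Deny user mallory via the assumed ".blocked" suffix on the service key.
    conf.set("security.my.protocol.acl.blocked", "mallory");

    PolicyProvider provider = new PolicyProvider() {
      @Override
      public Service[] getServices() {
        return new Service[] {
            new Service("security.my.protocol.acl", MyProtocol.class) };
      }
    };

    ServiceAuthorizationManager manager = new ServiceAuthorizationManager();
    manager.refreshWithLoadedConfiguration(conf, provider);

    UserGroupInformation alice =
        UserGroupInformation.createRemoteUser("alice");
    // authorize throws AuthorizationException for blocked or unlisted users.
    manager.authorize(alice, MyProtocol.class, conf,
        InetAddress.getLoopbackAddress());
    System.out.println("alice authorized for MyProtocol");
  }
}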
Use of org.apache.hadoop.classification.InterfaceAudience.Private in project hadoop by apache.
The class RMContainerAllocator, method preemptReducesIfNeeded.
@Private
@VisibleForTesting
boolean preemptReducesIfNeeded() {
  if (reduceResourceRequest.equals(Resources.none())) {
    // no reduces
    return false;
  }

  if (assignedRequests.maps.size() > 0) {
    // there are assigned mappers
    return false;
  }

  if (scheduledRequests.maps.size() <= 0) {
    // there are no pending requests for mappers
    return false;
  }

  // we have pending mappers and all assigned resources are taken by reducers
  if (reducerUnconditionalPreemptionDelayMs >= 0) {
    // preempt reducers irrespective of what the headroom is.
    if (preemptReducersForHangingMapRequests(
        reducerUnconditionalPreemptionDelayMs)) {
      return true;
    }
  }

  // The pending mappers haven't been waiting for too long. Let us see if
  // there are enough resources for a mapper to run. This is calculated by
  // excluding scheduled reducers from headroom and comparing it against
  // resources required to run one mapper.
  Resource scheduledReducesResource = Resources.multiply(
      reduceResourceRequest, scheduledRequests.reduces.size());
  Resource availableResourceForMap = Resources.subtract(
      getAvailableResources(), scheduledReducesResource);
  if (ResourceCalculatorUtils.computeAvailableContainers(
      availableResourceForMap, mapResourceRequest,
      getSchedulerResourceTypes()) > 0) {
    // Enough room to run a mapper
    return false;
  }

  // Not enough headroom for a mapper. See if we should hold
  // off before preempting reducers and preempt if okay.
  return preemptReducersForHangingMapRequests(
      reducerNoHeadroomPreemptionDelayMs);
}
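A standalone sketch of the headroom arithmetic with made-up numbers; a plain memory/vcore comparison stands in for ResourceCalculatorUtils.computeAvailableContainers, so this is a simplification rather than the allocator's actual fit check.

import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.util.resource.Resources;

public class MapHeadroomSketch {
  public static void main(String[] args) {
    Resource headroom = Resource.newInstance(8 * 1024, 8);            // reported by the RM
    Resource reduceResourceRequest = Resource.newInstance(3 * 1024, 1);
    Resource mapResourceRequest = Resource.newInstance(2 * 1024, 1);
    int scheduledReduces = 3;

    // Exclude resources already promised to scheduled reducers ...
    Resource scheduledReducesResource =
        Resources.multiply(reduceResourceRequest, scheduledReduces);
    Resource availableResourceForMap =
        Resources.subtract(headroom, scheduledReducesResource);

    // ... and check whether at least one mapper still fits (simplified
    // stand-in for ResourceCalculatorUtils.computeAvailableContainers).
    boolean mapperFits =
        availableResourceForMap.getMemorySize() >= mapResourceRequest.getMemorySize()
            && availableResourceForMap.getVirtualCores() >= mapResourceRequest.getVirtualCores();

    System.out.println("availableResourceForMap = " + availableResourceForMap);
    System.out.println(mapperFits
        ? "Enough room for a mapper; no reducer preemption needed"
        : "No room for a mapper; consider preempting reducers");
  }
}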