Use of org.apache.hadoop.yarn.api.records.Priority in project alluxio by Alluxio.
The class ContainerAllocator, method requestContainers.
private void requestContainers() throws Exception {
  String[] hosts;
  boolean relaxLocality;
  // YARN requires that the priority of relaxed-locality requests differs from
  // that of strict-locality requests.
  Priority priority;
  if (mPreferredHost != null) {
    hosts = new String[] {mPreferredHost};
    relaxLocality = mPreferredHost.equals("any");
    priority = Priority.newInstance(100);
  } else {
    hosts = getPotentialWorkerHosts();
    relaxLocality = true;
    priority = Priority.newInstance(101);
  }
  int numContainersToRequest = mTargetNumContainers - mAllocatedContainers.size();
  LOG.info("Requesting {} {} containers", numContainersToRequest, mContainerName);
  if (hosts.length * mMaxContainersPerHost < numContainersToRequest) {
    throw new RuntimeException(ExceptionMessage.YARN_NOT_ENOUGH_HOSTS
        .getMessage(numContainersToRequest, mContainerName, hosts.length));
  }
  ContainerRequest containerRequest =
      new ContainerRequest(mResource, hosts, null /* any racks */, priority, relaxLocality);
  LOG.info("Making {} resource request(s) for Alluxio {}s with cpu {} memory {}MB on hosts {}",
      numContainersToRequest, mContainerName, mResource.getVirtualCores(),
      mResource.getMemory(), hosts);
  for (int i = 0; i < numContainersToRequest; i++) {
    mRMClient.addContainerRequest(containerRequest);
  }
}
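
The comment about distinct priorities reflects an AMRMClient invariant: every outstanding request at a given priority must use the same relaxLocality setting, so strict and relaxed asks must go out at different priority values. A minimal, hypothetical sketch of that rule (host name, resource sizes, and registration parameters are invented for illustration):

import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class LocalityPriorityExample {
  public static void main(String[] args) throws Exception {
    AMRMClient<ContainerRequest> rmClient = AMRMClient.createAMRMClient();
    rmClient.init(new YarnConfiguration());
    rmClient.start();
    rmClient.registerApplicationMaster("", 0, "");
    Resource resource = Resource.newInstance(1024, 1); // 1 GB, 1 vcore (illustrative)
    // Strict locality: only the named host is acceptable.
    rmClient.addContainerRequest(new ContainerRequest(
        resource, new String[] {"worker-host-1"}, null, Priority.newInstance(100), false));
    // The relaxed-locality ask must use a different priority; reusing priority 100
    // here would make AMRMClient throw InvalidContainerRequestException.
    rmClient.addContainerRequest(new ContainerRequest(
        resource, new String[] {"worker-host-1"}, null, Priority.newInstance(101), true));
  }
}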
Use of org.apache.hadoop.yarn.api.records.Priority in project asterixdb by apache.
The class AsterixApplicationMaster, method hostToRequest.
/**
 * Asks the RM for a particular host, nicely.
 *
 * @param host
 *          The host to request
 * @param cc
 *          Whether or not the host is the CC
 * @return A container request that is (hopefully) for the host we asked for.
 */
private ContainerRequest hostToRequest(String host, boolean cc) throws UnknownHostException {
  InetAddress hostIp = InetAddress.getByName(host);
  Priority pri = Records.newRecord(Priority.class);
  pri.setPriority(0);
  Resource capability = Records.newRecord(Resource.class);
  if (cc) {
    capability.setMemory(ccMem);
  } else {
    capability.setMemory(ncMem);
  }
  // We don't set anything else because we don't care about it, and YARN doesn't honor it yet.
  String[] hosts = new String[1];
  // TODO this is silly
  hosts[0] = hostIp.getHostName();
  LOG.info("IP addr: " + host + " resolved to " + hostIp.getHostName());
  ContainerRequest request = new ContainerRequest(capability, hosts, null, pri, false);
  LOG.info("Requested host ask: " + request.getNodes());
  return request;
}
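
The resolve-then-request pattern above (canonicalize the host name before handing it to YARN, then pin the request with relaxLocality=false) can stand alone. A minimal sketch, with the class name and memory parameter invented for illustration:

import java.net.InetAddress;
import java.net.UnknownHostException;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
import org.apache.hadoop.yarn.util.Records;

public final class StrictHostRequest {
  // Builds a strict-locality request for one host. Resolving first helps ensure
  // the name handed to the RM matches the node's registered hostname.
  static ContainerRequest forHost(String host, int memoryMb) throws UnknownHostException {
    String canonical = InetAddress.getByName(host).getHostName();
    Priority pri = Records.newRecord(Priority.class);
    pri.setPriority(0);
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(memoryMb);
    // relaxLocality=false: the RM may place this container only on `canonical`.
    return new ContainerRequest(capability, new String[] {canonical}, null, pri, false);
  }
}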
Use of org.apache.hadoop.yarn.api.records.Priority in project hive by apache.
The class LlapTaskSchedulerService, method schedulePendingTasks.
@VisibleForTesting
protected void schedulePendingTasks() throws InterruptedException {
  Ref<TaskInfo> downgradedTask = new Ref<>(null);
  writeLock.lock();
  try {
    if (LOG.isDebugEnabled()) {
      LOG.debug("ScheduleRun: {}", constructPendingTaskCountsLogMessage());
    }
    Iterator<Entry<Priority, List<TaskInfo>>> pendingIterator =
        pendingTasks.entrySet().iterator();
    Resource totalResource = getTotalResources();
    while (pendingIterator.hasNext()) {
      Entry<Priority, List<TaskInfo>> entry = pendingIterator.next();
      List<TaskInfo> taskListAtPriority = entry.getValue();
      Iterator<TaskInfo> taskIter = taskListAtPriority.iterator();
      boolean scheduledAllAtPriority = true;
      while (taskIter.hasNext()) {
        // TODO Optimization: Add a check to see if there's any capacity available. No point in
        // walking through all active nodes if they don't have potential capacity.
        TaskInfo taskInfo = taskIter.next();
        if (taskInfo.getNumPreviousAssignAttempts() == 1) {
          dagStats.registerDelayedAllocation();
        }
        taskInfo.triedAssigningTask();
        ScheduleResult scheduleResult = scheduleTask(taskInfo, totalResource, downgradedTask);
        // Note: we must handle downgradedTask after this. We do it at the end, outside the lock.
        if (LOG.isDebugEnabled()) {
          LOG.debug("ScheduleResult for Task: {} = {}", taskInfo, scheduleResult);
        }
        if (scheduleResult == ScheduleResult.SCHEDULED) {
          taskIter.remove();
        } else {
          if (scheduleResult == ScheduleResult.INADEQUATE_TOTAL_RESOURCES) {
            LOG.info("Inadequate total resources before scheduling pending tasks."
                + " Signalling scheduler timeout monitor thread to start timer.");
            startTimeoutMonitor();
            // TODO Nothing else should be done for this task. Move on.
          }
          // Try preempting a task so that a higher-priority task can take its place.
          // Preempt only if there are no pending preemptions, to avoid preempting twice for a task.
          String[] potentialHosts;
          if (scheduleResult == ScheduleResult.DELAYED_LOCALITY) {
            // Add the task to the delayed task queue if it does not already exist.
            maybeAddToDelayedTaskQueue(taskInfo);
            // Try preempting a lower-priority task in any case,
            // but only on specific hosts, and only if no preemptions already exist on those.
            potentialHosts = taskInfo.requestedHosts;
            // Protect against a bad location being requested.
            if (potentialHosts == null || potentialHosts.length == 0) {
              potentialHosts = null;
            }
          } else {
            // Preempt on any host.
            potentialHosts = null;
          }
          // At this point we're dealing with all return types except ScheduleResult.SCHEDULED.
          if (potentialHosts != null) {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Attempting to preempt on requested host for task={}, potentialHosts={}",
                  taskInfo, Arrays.toString(potentialHosts));
            }
            // Preempt on specific hosts.
            boolean shouldPreempt = true;
            for (String host : potentialHosts) {
              // Preempt only if there are no pending preemptions on the same host.
              // When the preemption registers, the request at the highest priority will be given
              // the slot, even if the initial preemption was caused by some other task.
              // TODO Maybe register which task the preemption was for, to avoid a bad
              // non-local allocation.
              MutableInt pendingHostPreemptions = pendingPreemptionsPerHost.get(host);
              if (pendingHostPreemptions != null && pendingHostPreemptions.intValue() > 0) {
                shouldPreempt = false;
                LOG.debug("Not preempting for task={}. Found an existing preemption request on"
                        + " host={}, pendingPreemptionCount={}",
                    taskInfo.task, host, pendingHostPreemptions.intValue());
                break;
              }
            }
            if (shouldPreempt) {
              if (LOG.isDebugEnabled()) {
                LOG.debug("Attempting to preempt for {} on potential hosts={}."
                        + " TotalPendingPreemptions={}",
                    taskInfo.task, Arrays.toString(potentialHosts), pendingPreemptions.get());
              }
              preemptTasks(entry.getKey().getPriority(), vertexNum(taskInfo), 1, potentialHosts);
            } else {
              if (LOG.isDebugEnabled()) {
                LOG.debug("Not preempting for {} on potential hosts={}. An existing preemption"
                        + " request exists",
                    taskInfo.task, Arrays.toString(potentialHosts));
              }
            }
          } else {
            // Either DELAYED_RESOURCES, or DELAYED_LOCALITY with an unknown requested host.
            // Request a preemption if there's none pending. If a single preemption is pending
            // and this is the next task to be assigned, it will be assigned once that slot
            // becomes available.
            LOG.debug("Attempting to preempt on any host for task={}, pendingPreemptions={}",
                taskInfo.task, pendingPreemptions.get());
            if (pendingPreemptions.get() == 0) {
              if (LOG.isDebugEnabled()) {
                LOG.debug("Attempting to preempt for task={}, priority={} on any available host",
                    taskInfo.task, taskInfo.priority);
              }
              preemptTasks(entry.getKey().getPriority(), vertexNum(taskInfo), 1, null);
            } else {
              if (LOG.isDebugEnabled()) {
                LOG.debug("Skipping preemption since there are {} pending preemption requests."
                    + " For task={}", pendingPreemptions.get(), taskInfo);
              }
            }
          }
          // Since there was an allocation failure, don't try assigning tasks at the next priority.
          scheduledAllAtPriority = false;
          // Don't break if this allocation failure was the result of a locality delay;
          // others could still be allocated.
          if (scheduleResult != ScheduleResult.DELAYED_LOCALITY) {
            break;
          }
        } // end of else - i.e. could not allocate
      } // end of loop over pending tasks
      if (taskListAtPriority.isEmpty()) {
        // Remove the entry if there's nothing left at this priority level.
        pendingIterator.remove();
      }
      if (!scheduledAllAtPriority) {
        LOG.debug("Unable to schedule all requests at priority={}."
            + " Skipping subsequent priority levels", entry.getKey());
        // Don't attempt scheduling for additional priorities.
        break;
      }
    }
  } finally {
    writeLock.unlock();
  }
  if (downgradedTask.value != null) {
    WM_LOG.info("Downgrading " + downgradedTask.value.attemptId);
    checkAndSendGuaranteedStateUpdate(downgradedTask.value);
  }
}
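
A detail the outer loop depends on: pendingTasks is iterated from most to least urgent priority, which is what lets a failure at one level break out and skip everything below it. A sorted map keyed by Priority with an explicit ascending comparator gives exactly that order (YARN treats lower numbers as more urgent). A small, hypothetical demonstration, with string task names standing in for Hive's TaskInfo:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.TreeMap;
import org.apache.hadoop.yarn.api.records.Priority;

public class PendingTaskOrder {
  public static void main(String[] args) {
    // An explicit comparator makes the intended ascending numeric order unambiguous.
    TreeMap<Priority, List<String>> pendingTasks =
        new TreeMap<>(Comparator.comparingInt(Priority::getPriority));
    pendingTasks.computeIfAbsent(Priority.newInstance(3), p -> new ArrayList<>())
        .add("vertex-b-task");
    pendingTasks.computeIfAbsent(Priority.newInstance(1), p -> new ArrayList<>())
        .add("vertex-a-task");
    // Prints priority 1 before priority 3, regardless of insertion order.
    pendingTasks.forEach((pri, tasks) ->
        System.out.println("priority=" + pri.getPriority() + " -> " + tasks));
  }
}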
Use of org.apache.hadoop.yarn.api.records.Priority in project incubator-gobblin by apache.
The class YarnService, method requestContainer.
private void requestContainer(Optional<String> preferredNode) {
  Priority priority = Records.newRecord(Priority.class);
  priority.setPriority(0);
  Resource capability = Records.newRecord(Resource.class);
  int maxMemoryCapacity = this.maxResourceCapacity.get().getMemory();
  capability.setMemory(this.requestedContainerMemoryMbs <= maxMemoryCapacity
      ? this.requestedContainerMemoryMbs : maxMemoryCapacity);
  int maxCoreCapacity = this.maxResourceCapacity.get().getVirtualCores();
  capability.setVirtualCores(this.requestedContainerCores <= maxCoreCapacity
      ? this.requestedContainerCores : maxCoreCapacity);
  String[] preferredNodes = preferredNode.isPresent()
      ? new String[] { preferredNode.get() } : null;
  this.amrmClientAsync.addContainerRequest(
      new AMRMClient.ContainerRequest(capability, preferredNodes, null, priority));
}
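
Clamping the ask against the cluster maximum matters because the ResourceManager rejects container requests larger than the scheduler's maximum allocation, which the AM learns at registration time. The same clamp as a standalone helper, a minimal sketch with an invented class name:

import org.apache.hadoop.yarn.api.records.Resource;

public final class ResourceClamp {
  // Caps a requested Resource at the maximum allocation reported when the AM
  // registered (hypothetical helper; asks above the maximum are rejected).
  static Resource clamp(Resource requested, Resource max) {
    return Resource.newInstance(
        Math.min(requested.getMemory(), max.getMemory()),
        Math.min(requested.getVirtualCores(), max.getVirtualCores()));
  }
}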
Use of org.apache.hadoop.yarn.api.records.Priority in project drill by axbaretto.
The class ContainerRequestSpec, method makeRequest.
/**
 * Create a YARN ContainerRequest object from the information in this object.
 *
 * @return the populated YARN container request
 */
public ContainerRequest makeRequest() {
  assert memoryMb != 0;
  Priority priorityRec = Records.newRecord(Priority.class);
  priorityRec.setPriority(priority);
  Resource capability = Records.newRecord(Resource.class);
  capability.setMemory(memoryMb);
  capability.setVirtualCores(vCores);
  DoYUtil.callSetDiskIfExists(capability, disks);
  boolean relaxLocality = true;
  String[] nodeArr = null;
  if (!hosts.isEmpty()) {
    nodeArr = new String[hosts.size()];
    hosts.toArray(nodeArr);
    relaxLocality = false;
  }
  String[] rackArr = null;
  if (!racks.isEmpty()) {
    rackArr = new String[racks.size()];
    racks.toArray(rackArr);
    relaxLocality = false;
  }
  String nodeExpr = null;
  if (!DoYUtil.isBlank(nodeLabelExpr)) {
    nodeExpr = nodeLabelExpr;
    LOG.info("Requesting a container using node expression: " + nodeExpr);
  }
  return new ContainerRequest(capability, nodeArr, rackArr, priorityRec, relaxLocality, nodeExpr);
}