Use of org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl in project hadoop by apache.
The class TestClientRMService, method getRMApp.
private RMAppImpl getRMApp(RMContext rmContext, YarnScheduler yarnScheduler,
    ApplicationId applicationId3, YarnConfiguration config, String queueName,
    final long memorySeconds, final long vcoreSeconds,
    String appNodeLabelExpression, String amNodeLabelExpression) {
  ApplicationSubmissionContext asContext = mock(ApplicationSubmissionContext.class);
  when(asContext.getMaxAppAttempts()).thenReturn(1);
  when(asContext.getNodeLabelExpression()).thenReturn(appNodeLabelExpression);
  when(asContext.getPriority()).thenReturn(Priority.newInstance(0));
  RMAppImpl app = spy(new RMAppImpl(applicationId3, rmContext, config, null, null,
      queueName, asContext, yarnScheduler, null, System.currentTimeMillis(), "YARN",
      null, BuilderUtils.newResourceRequest(RMAppAttemptImpl.AM_CONTAINER_PRIORITY,
          ResourceRequest.ANY, Resource.newInstance(1024, 1), 1)) {

    @Override
    public ApplicationReport createAndGetApplicationReport(String clientUserName,
        boolean allowAccess) {
      ApplicationReport report =
          super.createAndGetApplicationReport(clientUserName, allowAccess);
      ApplicationResourceUsageReport usageReport =
          report.getApplicationResourceUsageReport();
      usageReport.setMemorySeconds(memorySeconds);
      usageReport.setVcoreSeconds(vcoreSeconds);
      report.setApplicationResourceUsageReport(usageReport);
      return report;
    }
  });
  app.getAMResourceRequest().setNodeLabelExpression(amNodeLabelExpression);
  ApplicationAttemptId attemptId =
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(123456, 1), 1);
  RMAppAttemptImpl rmAppAttemptImpl = spy(new RMAppAttemptImpl(attemptId, rmContext,
      yarnScheduler, null, asContext, config, null, app));
  Container container = Container.newInstance(
      ContainerId.newContainerId(attemptId, 1), null, "", null, null, null);
  RMContainerImpl containerimpl = spy(new RMContainerImpl(container,
      SchedulerRequestKey.extractFrom(container), attemptId, null, "", rmContext));
  Map<ApplicationAttemptId, RMAppAttempt> attempts =
      new HashMap<ApplicationAttemptId, RMAppAttempt>();
  attempts.put(attemptId, rmAppAttemptImpl);
  when(app.getCurrentAppAttempt()).thenReturn(rmAppAttemptImpl);
  when(app.getAppAttempts()).thenReturn(attempts);
  when(app.getApplicationPriority()).thenReturn(Priority.newInstance(0));
  when(rmAppAttemptImpl.getMasterContainer()).thenReturn(container);
  ResourceScheduler rs = mock(ResourceScheduler.class);
  when(rmContext.getScheduler()).thenReturn(rs);
  when(rmContext.getScheduler().getRMContainer(any(ContainerId.class)))
      .thenReturn(containerimpl);
  SchedulerAppReport sAppReport = mock(SchedulerAppReport.class);
  when(rmContext.getScheduler().getSchedulerAppInfo(any(ApplicationAttemptId.class)))
      .thenReturn(sAppReport);
  List<RMContainer> rmContainers = new ArrayList<RMContainer>();
  rmContainers.add(containerimpl);
  when(rmContext.getScheduler().getSchedulerAppInfo(attemptId).getLiveContainers())
      .thenReturn(rmContainers);
  ContainerStatus cs = mock(ContainerStatus.class);
  when(containerimpl.completed()).thenReturn(false);
  when(containerimpl.getDiagnosticsInfo()).thenReturn("N/A");
  when(containerimpl.getContainerExitStatus()).thenReturn(0);
  when(containerimpl.getContainerState()).thenReturn(ContainerState.COMPLETE);
  return app;
}
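For reference, the RMContainerImpl construction that the test spies on above can also be exercised standalone. The following is a minimal sketch, not taken from TestClientRMService: the test method name, the deep-stubbed RMContext (used here to satisfy the dispatcher, history writer, and configuration the constructor dereferences), and the host, resource, priority, and user values are all assumptions; mock, RETURNS_DEEP_STUBS, and assertEquals are assumed to be statically imported as usual in Hadoop test code.

@Test
public void constructsRmContainerInNewState() {
  // Assumption: a deep-stubbed RMContext stands in for the dispatcher,
  // history writer and configuration the RMContainerImpl constructor touches.
  RMContext rmContext = mock(RMContext.class, RETURNS_DEEP_STUBS);
  ApplicationAttemptId attemptId =
      ApplicationAttemptId.newInstance(ApplicationId.newInstance(123456, 1), 1);
  // Illustrative container: host, resource and priority values are made up.
  Container container = Container.newInstance(
      ContainerId.newContainerId(attemptId, 1),
      NodeId.newInstance("host", 1234), "host:2345",
      Resource.newInstance(1024, 1), Priority.newInstance(0), null);
  RMContainer rmContainer = new RMContainerImpl(container,
      SchedulerRequestKey.extractFrom(container), attemptId,
      container.getNodeId(), "test-user", rmContext);
  // A freshly constructed RMContainerImpl starts in the NEW state.
  assertEquals(RMContainerState.NEW, rmContainer.getState());
}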
Use of org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl in project hadoop by apache.
The class RegularContainerAllocator, method doAllocation.
ContainerAllocation doAllocation(ContainerAllocation allocationResult,
    FiCaSchedulerNode node, SchedulerRequestKey schedulerKey,
    RMContainer reservedContainer) {
  // Create the container if necessary
  Container container = getContainer(reservedContainer, node,
      allocationResult.getResourceToBeAllocated(), schedulerKey);
  // Something went wrong getting/creating the container
  if (container == null) {
    application.updateAppSkipNodeDiagnostics("Scheduling of container failed. ");
    LOG.warn("Couldn't get container for allocation!");
    ActivitiesLogger.APP.recordAppActivityWithoutAllocation(activitiesManager, node,
        application, schedulerKey.getPriority(),
        ActivityDiagnosticConstant.COULD_NOT_GET_CONTAINER, ActivityState.REJECTED);
    return ContainerAllocation.APP_SKIPPED;
  }
  if (allocationResult.getAllocationState() == AllocationState.ALLOCATED) {
    // When allocating container
    allocationResult = handleNewContainerAllocation(allocationResult, node,
        schedulerKey, container);
  } else {
    // When reserving container: reuse the existing RMContainer, or wrap the
    // new Container in an RMContainerImpl on the first reservation.
    RMContainer updatedContainer = reservedContainer;
    if (updatedContainer == null) {
      updatedContainer = new RMContainerImpl(container, schedulerKey,
          application.getApplicationAttemptId(), node.getNodeID(),
          application.getAppSchedulingInfo().getUser(), rmContext);
    }
    allocationResult.updatedContainer = updatedContainer;
  }
  // Only reset scheduling opportunities the first time this container is
  // allocated (when reservedContainer != null, it's not the first time).
  if (reservedContainer == null) {
    // Don't reset scheduling opportunities for off-switch assignments;
    // this helps apps with many off-cluster requests schedule faster.
    if (allocationResult.containerNodeType != NodeType.OFF_SWITCH) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Resetting scheduling opportunities");
      }
      // Reset only for NODE_LOCAL allocations unless the queue is configured
      // for a full reset on rack-local allocations; otherwise keep scheduling
      // RACK_LOCAL without delay.
      if (allocationResult.containerNodeType == NodeType.NODE_LOCAL
          || application.getCSLeafQueue().getRackLocalityFullReset()) {
        application.resetSchedulingOpportunities(schedulerKey);
      }
    }
    // Reset the missed non-partitioned-request counter on the default
    // partition so that unlabeled resource requests are most likely
    // allocated on non-labeled nodes first.
    if (StringUtils.equals(node.getPartition(), RMNodeLabelsManager.NO_LABEL)) {
      application.resetMissedNonPartitionedRequestSchedulingOpportunity(schedulerKey);
    }
  }
  return allocationResult;
}
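The only direct RMContainerImpl use here is in the reservation branch: an RMContainerImpl is created on the first reservation and reused on re-reservations. The hypothetical helper below condenses just that decision; the method name containerForReservation is not part of RegularContainerAllocator, and it assumes the same application and rmContext fields the method above relies on.

// Hypothetical helper (not in RegularContainerAllocator): pick the RMContainer
// to attach to the allocation result when reserving on a node.
private RMContainer containerForReservation(RMContainer reservedContainer,
    Container container, SchedulerRequestKey schedulerKey, FiCaSchedulerNode node) {
  if (reservedContainer != null) {
    // Re-reservation: the RMContainer already exists, keep using it.
    return reservedContainer;
  }
  // First reservation on this node: wrap the new Container in an RMContainerImpl.
  return new RMContainerImpl(container, schedulerKey,
      application.getApplicationAttemptId(), node.getNodeID(),
      application.getAppSchedulingInfo().getUser(), rmContext);
}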
Use of org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl in project hadoop by apache.
The class SchedulerApplicationAttempt, method reserve.
public RMContainer reserve(SchedulerNode node, SchedulerRequestKey schedulerKey,
    RMContainer rmContainer, Container container) {
  try {
    writeLock.lock();
    // Create RMContainer if necessary
    if (rmContainer == null) {
      rmContainer = new RMContainerImpl(container, schedulerKey,
          getApplicationAttemptId(), node.getNodeID(),
          appSchedulingInfo.getUser(), rmContext);
    }
    if (rmContainer.getState() == RMContainerState.NEW) {
      attemptResourceUsage.incReserved(node.getPartition(), container.getResource());
      ((RMContainerImpl) rmContainer).setQueueName(this.getQueueName());
      // Reset the re-reservation count
      resetReReservations(schedulerKey);
    } else {
      // Note down the re-reservation
      addReReservation(schedulerKey);
    }
    commonReserve(node, schedulerKey, rmContainer, container.getResource());
    return rmContainer;
  } finally {
    writeLock.unlock();
  }
}
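Callers pass either an existing RMContainer (a re-reservation) or null together with a freshly built Container (a first reservation, in which case reserve() constructs the RMContainerImpl itself and tags it with the attempt's queue). Below is a hedged sketch of a first-time call; the attempt, node, and schedulerKey variables are assumed to come from the surrounding scheduler code, and the container id and resource size are illustrative.

// Sketch: first-time reservation, so no RMContainer exists yet and reserve()
// will construct the RMContainerImpl internally. All inputs are illustrative.
Container container = Container.newInstance(
    ContainerId.newContainerId(attempt.getApplicationAttemptId(), 2),
    node.getNodeID(), node.getHttpAddress(),
    Resource.newInstance(2048, 1), schedulerKey.getPriority(), null);
RMContainer reserved = attempt.reserve(node, schedulerKey, null, container);
// On this first reservation the new RMContainerImpl is tagged with the
// attempt's queue and the reserved resource is added to the attempt's usage.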
Use of org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl in project hadoop by apache.
The class SchedulerApplicationAttempt, method pullNewlyUpdatedContainers.
/**
 * A container is promoted if its executionType is changed from
 * OPPORTUNISTIC to GUARANTEED. It is demoted if the change is from
 * GUARANTEED to OPPORTUNISTIC.
 * @return Newly promoted and demoted containers
 */
private List<Container> pullNewlyUpdatedContainers(
    Map<ContainerId, RMContainer> newlyUpdatedContainers,
    ContainerUpdateType updateTpe) {
  List<Container> updatedContainers = new ArrayList<>();
  if (oppContainerContext == null
      && (ContainerUpdateType.DEMOTE_EXECUTION_TYPE == updateTpe
          || ContainerUpdateType.PROMOTE_EXECUTION_TYPE == updateTpe)) {
    return updatedContainers;
  }
  try {
    writeLock.lock();
    Iterator<Map.Entry<ContainerId, RMContainer>> i =
        newlyUpdatedContainers.entrySet().iterator();
    while (i.hasNext()) {
      Map.Entry<ContainerId, RMContainer> entry = i.next();
      ContainerId matchedContainerId = entry.getKey();
      RMContainer tempRMContainer = entry.getValue();
      RMContainer existingRMContainer = getRMContainer(matchedContainerId);
      if (existingRMContainer != null) {
        // Swap containers
        existingRMContainer = getUpdateContext().swapContainer(tempRMContainer,
            existingRMContainer, updateTpe);
        getUpdateContext().removeFromOutstandingUpdate(
            tempRMContainer.getAllocatedSchedulerKey(),
            existingRMContainer.getContainer());
        Container updatedContainer =
            updateContainerAndNMToken(existingRMContainer, updateTpe);
        updatedContainers.add(updatedContainer);
      }
      tempContainerToKill.add(tempRMContainer);
      i.remove();
    }
    // Release all temporary containers
    Iterator<RMContainer> tempIter = tempContainerToKill.iterator();
    while (tempIter.hasNext()) {
      RMContainer c = tempIter.next();
      // Mark container for release (set RRs to null, so RM does not think
      // it is a recoverable container)
      ((RMContainerImpl) c).setResourceRequests(null);
      ((AbstractYarnScheduler) rmContext.getScheduler()).completedContainer(c,
          SchedulerUtils.createAbnormalContainerStatus(c.getContainerId(),
              SchedulerUtils.UPDATED_CONTAINER), RMContainerEventType.KILL);
      tempIter.remove();
    }
    return updatedContainers;
  } finally {
    writeLock.unlock();
  }
}
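The promotions and demotions drained here originate from container update requests carried in the application master's allocate call. The following is a minimal client-side sketch of requesting a promotion, assuming a Hadoop release with the container update API (the container variable, lastResponseId, and the surrounding allocate plumbing are illustrative assumptions):

// Ask the RM to promote an OPPORTUNISTIC container to GUARANTEED; once the
// scheduler swaps the containers, pullNewlyUpdatedContainers() hands the
// promoted container back for the allocate response.
UpdateContainerRequest promote = UpdateContainerRequest.newInstance(
    container.getVersion(), container.getId(),
    ContainerUpdateType.PROMOTE_EXECUTION_TYPE,
    null, ExecutionType.GUARANTEED);
AllocateRequest allocateRequest = AllocateRequest.newInstance(
    lastResponseId, 0.0f,
    Collections.<ResourceRequest>emptyList(),
    Collections.<ContainerId>emptyList(), null);
allocateRequest.setUpdateRequests(Collections.singletonList(promote));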
Use of org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerImpl in project hadoop by apache.
The class SchedulerApplicationAttempt, method move.
public void move(Queue newQueue) {
  try {
    writeLock.lock();
    QueueMetrics oldMetrics = queue.getMetrics();
    QueueMetrics newMetrics = newQueue.getMetrics();
    String newQueueName = newQueue.getQueueName();
    String user = getUser();
    for (RMContainer liveContainer : liveContainers.values()) {
      Resource resource = liveContainer.getContainer().getResource();
      ((RMContainerImpl) liveContainer).setQueueName(newQueueName);
      oldMetrics.releaseResources(user, 1, resource);
      newMetrics.allocateResources(user, 1, resource, false);
    }
    for (Map<NodeId, RMContainer> map : reservedContainers.values()) {
      for (RMContainer reservedContainer : map.values()) {
        ((RMContainerImpl) reservedContainer).setQueueName(newQueueName);
        Resource resource = reservedContainer.getReservedResource();
        oldMetrics.unreserveResource(user, resource);
        newMetrics.reserveResource(user, resource);
      }
    }
    if (!isStopped) {
      appSchedulingInfo.move(newQueue);
    }
    this.queue = newQueue;
  } finally {
    writeLock.unlock();
  }
}
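A queue move like this is typically triggered from outside the scheduler, for example through the YarnClient API or the equivalent `yarn application -movetoqueue` CLI; the scheduler then invokes move(Queue) on the application's attempt, re-tagging every live and reserved RMContainerImpl as shown above. A brief sketch, with the applicationId and target queue name assumed to be known:

// Sketch: request that a running application be moved to another queue.
// The scheduler ends up calling SchedulerApplicationAttempt#move(Queue),
// which relabels each RMContainerImpl with the new queue name.
YarnClient yarnClient = YarnClient.createYarnClient();
yarnClient.init(new YarnConfiguration());
yarnClient.start();
try {
  yarnClient.moveApplicationAcrossQueues(applicationId, "targetQueue");
} finally {
  yarnClient.stop();
}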