Use of org.apache.hadoop.yarn.api.records.ContainerId in project hadoop by apache.
The class RMContainerRequestor, method makeRemoteRequest:
protected AllocateResponse makeRemoteRequest() throws YarnException, IOException {
  applyRequestLimits();
  ResourceBlacklistRequest blacklistRequest =
      ResourceBlacklistRequest.newInstance(new ArrayList<String>(blacklistAdditions),
          new ArrayList<String>(blacklistRemovals));
  AllocateRequest allocateRequest =
      AllocateRequest.newInstance(lastResponseID, super.getApplicationProgress(),
          new ArrayList<ResourceRequest>(ask), new ArrayList<ContainerId>(release),
          blacklistRequest);
  AllocateResponse allocateResponse = scheduler.allocate(allocateRequest);
  lastResponseID = allocateResponse.getResponseId();
  availableResources = allocateResponse.getAvailableResources();
  lastClusterNmCount = clusterNmCount;
  clusterNmCount = allocateResponse.getNumClusterNodes();
  int numCompletedContainers = allocateResponse.getCompletedContainersStatuses().size();
  if (ask.size() > 0 || release.size() > 0) {
    LOG.info("getResources() for " + applicationId + ":" + " ask=" + ask.size()
        + " release= " + release.size()
        + " newContainers=" + allocateResponse.getAllocatedContainers().size()
        + " finishedContainers=" + numCompletedContainers
        + " resourcelimit=" + availableResources + " knownNMs=" + clusterNmCount);
  }
  ask.clear();
  release.clear();
  if (numCompletedContainers > 0) {
    // re-send limited requests when a container completes to trigger asking
    // for more containers
    requestLimitsToUpdate.addAll(requestLimits.keySet());
  }
  if (blacklistAdditions.size() > 0 || blacklistRemovals.size() > 0) {
    LOG.info("Update the blacklist for " + applicationId
        + ": blacklistAdditions=" + blacklistAdditions.size()
        + " blacklistRemovals=" + blacklistRemovals.size());
  }
  blacklistAdditions.clear();
  blacklistRemovals.clear();
  return allocateResponse;
}
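For context, a minimal, self-contained sketch of assembling the same kind of AllocateRequest by hand. The priority, capability, and response-id values below are illustrative placeholders, not values taken from RMContainerRequestor:

import java.util.ArrayList;
import java.util.Collections;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest;
import org.apache.hadoop.yarn.api.records.ResourceRequest;

public class AllocateRequestSketch {
  public static AllocateRequest build(int lastResponseID, float progress) {
    // One location-agnostic ask: two containers of 1 GB / 1 vcore each.
    ResourceRequest ask = ResourceRequest.newInstance(
        Priority.newInstance(0), ResourceRequest.ANY,
        Resource.newInstance(1024, 1), 2);
    // Empty blacklist deltas, mirroring the additions/removals lists above.
    ResourceBlacklistRequest blacklist = ResourceBlacklistRequest.newInstance(
        Collections.<String>emptyList(), Collections.<String>emptyList());
    // No containers to release in this sketch.
    return AllocateRequest.newInstance(lastResponseID, progress,
        new ArrayList<ResourceRequest>(Collections.singletonList(ask)),
        new ArrayList<ContainerId>(), blacklist);
  }
}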
Use of org.apache.hadoop.yarn.api.records.ContainerId in project hadoop by apache.
The class CheckpointAMPreemptionPolicy, method preempt:
@Override
public void preempt(Context ctxt, PreemptionMessage preemptionRequests) {
  if (preemptionRequests != null) {
    // handling non-negotiable preemption
    StrictPreemptionContract cStrict = preemptionRequests.getStrictContract();
    if (cStrict != null && cStrict.getContainers() != null
        && cStrict.getContainers().size() > 0) {
      LOG.info("strict preemption: "
          + preemptionRequests.getStrictContract().getContainers().size()
          + " containers to kill");
      // handle strict preemptions. These containers are non-negotiable
      for (PreemptionContainer c : preemptionRequests.getStrictContract().getContainers()) {
        ContainerId reqCont = c.getId();
        TaskAttemptId reqTask = ctxt.getTaskAttempt(reqCont);
        if (reqTask != null) {
          // ignore requests for preempting containers running maps
          if (org.apache.hadoop.mapreduce.v2.api.records.TaskType.REDUCE
              .equals(reqTask.getTaskId().getTaskType())) {
            toBePreempted.add(reqTask);
            LOG.info("preempting " + reqCont + " running task: " + reqTask);
          } else {
            LOG.info("NOT preempting " + reqCont + " running task: " + reqTask);
          }
        }
      }
    }
    // handling negotiable preemption
    PreemptionContract cNegot = preemptionRequests.getContract();
    if (cNegot != null && cNegot.getResourceRequest() != null
        && cNegot.getResourceRequest().size() > 0
        && cNegot.getContainers() != null && cNegot.getContainers().size() > 0) {
      LOG.info("negotiable preemption: "
          + preemptionRequests.getContract().getResourceRequest().size()
          + " resourceReq, "
          + preemptionRequests.getContract().getContainers().size() + " containers");
      // handle fungible preemption. Here we only look at the total amount of
      // resources to be preempted and pick enough of our containers to
      // satisfy that. We only support checkpointing for reducers for now.
      List<PreemptionResourceRequest> reqResources =
          preemptionRequests.getContract().getResourceRequest();
      // compute the total amount of pending preemptions (to be discounted
      // from the current request)
      int pendingPreemptionRam = 0;
      int pendingPreemptionCores = 0;
      for (Resource r : pendingFlexiblePreemptions.values()) {
        pendingPreemptionRam += r.getMemorySize();
        pendingPreemptionCores += r.getVirtualCores();
      }
      // discount the preemption request based on currently pending preemptions
      for (PreemptionResourceRequest rr : reqResources) {
        ResourceRequest reqRsrc = rr.getResourceRequest();
        if (!ResourceRequest.ANY.equals(reqRsrc.getResourceName())) {
          // for now, only respond to aggregate requests and ignore locality
          continue;
        }
        LOG.info("ResourceRequest:" + reqRsrc);
        int reqCont = reqRsrc.getNumContainers();
        long reqMem = reqRsrc.getCapability().getMemorySize();
        long totalMemoryToRelease = reqCont * reqMem;
        int reqCores = reqRsrc.getCapability().getVirtualCores();
        int totalCoresToRelease = reqCont * reqCores;
        // subtract resources that are already pending preemption
        if (pendingPreemptionRam > 0) {
          // if this goes negative we simply exit
          totalMemoryToRelease -= pendingPreemptionRam;
          // decrement pending resources; if zero or negative we will
          // ignore it while processing the next PreemptionResourceRequest
          pendingPreemptionRam -= totalMemoryToRelease;
        }
        if (pendingPreemptionCores > 0) {
          totalCoresToRelease -= pendingPreemptionCores;
          pendingPreemptionCores -= totalCoresToRelease;
        }
        // reverse order of allocation (for now)
        List<Container> listOfCont = ctxt.getContainers(TaskType.REDUCE);
        Collections.sort(listOfCont, new Comparator<Container>() {
          @Override
          public int compare(final Container o1, final Container o2) {
            return o2.getId().compareTo(o1.getId());
          }
        });
        // preempt reducers first
        for (Container cont : listOfCont) {
          if (totalMemoryToRelease <= 0 && totalCoresToRelease <= 0) {
            break;
          }
          TaskAttemptId reduceId = ctxt.getTaskAttempt(cont.getId());
          int cMem = (int) cont.getResource().getMemorySize();
          int cCores = cont.getResource().getVirtualCores();
          if (!toBePreempted.contains(reduceId)) {
            totalMemoryToRelease -= cMem;
            totalCoresToRelease -= cCores;
            toBePreempted.add(reduceId);
            pendingFlexiblePreemptions.put(reduceId, cont.getResource());
          }
          LOG.info("ResourceRequest:" + reqRsrc + " satisfied preempting " + reduceId);
        }
        // if maps were preemptable we would add them to toBePreempted here
      }
    }
  }
}
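The anonymous Comparator above sorts the reducers' containers in descending ContainerId order, so the most recently allocated containers are preempted first. On Java 8+ the same ordering can be written more compactly; a sketch relying only on ContainerId implementing Comparable:

// Equivalent descending sort: newest container ids first.
listOfCont.sort(Comparator.comparing(Container::getId).reversed());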
Use of org.apache.hadoop.yarn.api.records.ContainerId in project hadoop by apache.
The class KillAMPreemptionPolicy, method killContainer:
@SuppressWarnings("unchecked")
private void killContainer(Context ctxt, PreemptionContainer c) {
  ContainerId reqCont = c.getId();
  TaskAttemptId reqTask = ctxt.getTaskAttempt(reqCont);
  LOG.info("Evicting " + reqTask);
  dispatcher.handle(new TaskAttemptEvent(reqTask, TaskAttemptEventType.TA_KILL));
  // add preemption to counters
  JobCounterUpdateEvent jce = new JobCounterUpdateEvent(reqTask.getTaskId().getJobId());
  jce.addCounterUpdate(JobCounter.TASKS_REQ_PREEMPT, 1);
  dispatcher.handle(jce);
}
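killContainer is a per-container helper. A hedged sketch of the driving loop that might invoke it for every container named in a PreemptionMessage; the actual preempt implementation in KillAMPreemptionPolicy may differ in detail:

// Sketch: kill every container named in either contract of the message.
public void preempt(Context ctxt, PreemptionMessage preemptionRequests) {
  if (preemptionRequests == null) {
    return;
  }
  if (preemptionRequests.getStrictContract() != null) {
    for (PreemptionContainer c : preemptionRequests.getStrictContract().getContainers()) {
      killContainer(ctxt, c);
    }
  }
  if (preemptionRequests.getContract() != null) {
    for (PreemptionContainer c : preemptionRequests.getContract().getContainers()) {
      killContainer(ctxt, c);
    }
  }
}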
Use of org.apache.hadoop.yarn.api.records.ContainerId in project hadoop by apache.
The class TestLocalContainerAllocator, method testAllocatedContainerResourceIsNotNull:
@Test
public void testAllocatedContainerResourceIsNotNull() {
  ArgumentCaptor<TaskAttemptContainerAssignedEvent> containerAssignedCaptor =
      ArgumentCaptor.forClass(TaskAttemptContainerAssignedEvent.class);
  @SuppressWarnings("unchecked")
  EventHandler<Event> eventHandler = mock(EventHandler.class);
  AppContext context = mock(AppContext.class);
  when(context.getEventHandler()).thenReturn(eventHandler);
  ContainerId containerId = ContainerId.fromString("container_1427562107907_0002_01_000001");
  LocalContainerAllocator containerAllocator = new LocalContainerAllocator(
      mock(ClientService.class), context, "localhost", -1, -1, containerId);
  ContainerAllocatorEvent containerAllocatorEvent = createContainerRequestEvent();
  containerAllocator.handle(containerAllocatorEvent);
  verify(eventHandler, times(1)).handle(containerAssignedCaptor.capture());
  Container container = containerAssignedCaptor.getValue().getContainer();
  Resource containerResource = container.getResource();
  Assert.assertNotNull(containerResource);
  Assert.assertEquals(0, containerResource.getMemorySize());
  Assert.assertEquals(0, containerResource.getVirtualCores());
}
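The hard-coded id above follows ContainerId's string layout, container_<clusterTimestamp>_<appId>_<attemptId>_<containerId>. A round-trip sketch showing the same id built programmatically (the assertion is illustrative):

// Build the same id from its parts and confirm it parses back identically.
ApplicationId appId = ApplicationId.newInstance(1427562107907L, 2);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
ContainerId built = ContainerId.newContainerId(attemptId, 1);
Assert.assertEquals(built, ContainerId.fromString("container_1427562107907_0002_01_000001"));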
Use of org.apache.hadoop.yarn.api.records.ContainerId in project hadoop by apache.
The class TestContainerLauncher, method testPoolSize:
@Test(timeout = 10000)
public void testPoolSize() throws InterruptedException {
  ApplicationId appId = ApplicationId.newInstance(12345, 67);
  ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 3);
  JobId jobId = MRBuilderUtils.newJobId(appId, 8);
  TaskId taskId = MRBuilderUtils.newTaskId(jobId, 9, TaskType.MAP);
  AppContext context = mock(AppContext.class);
  CustomContainerLauncher containerLauncher = new CustomContainerLauncher(context);
  containerLauncher.init(new Configuration());
  containerLauncher.start();
  ThreadPoolExecutor threadPool = containerLauncher.getThreadPool();
  // No events yet
  Assert.assertEquals(MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE,
      containerLauncher.initialPoolSize);
  Assert.assertEquals(0, threadPool.getPoolSize());
  Assert.assertEquals(containerLauncher.initialPoolSize, threadPool.getCorePoolSize());
  Assert.assertNull(containerLauncher.foundErrors);
  containerLauncher.expectedCorePoolSize = containerLauncher.initialPoolSize;
  for (int i = 0; i < 10; i++) {
    ContainerId containerId = ContainerId.newContainerId(appAttemptId, i);
    TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, i);
    containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId, containerId,
        "host" + i + ":1234", null, ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
  }
  waitForEvents(containerLauncher, 10);
  Assert.assertEquals(10, threadPool.getPoolSize());
  Assert.assertNull(containerLauncher.foundErrors);
  // Same set of hosts, so no change
  containerLauncher.finishEventHandling = true;
  int timeOut = 0;
  while (containerLauncher.numEventsProcessed.get() < 10 && timeOut++ < 200) {
    LOG.info("Waiting for number of events processed to become " + 10
        + ". It is now " + containerLauncher.numEventsProcessed.get()
        + ". Timeout is " + timeOut);
    Thread.sleep(1000);
  }
  Assert.assertEquals(10, containerLauncher.numEventsProcessed.get());
  containerLauncher.finishEventHandling = false;
  for (int i = 0; i < 10; i++) {
    ContainerId containerId = ContainerId.newContainerId(appAttemptId, i + 10);
    TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, i + 10);
    containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId, containerId,
        "host" + i + ":1234", null, ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
  }
  waitForEvents(containerLauncher, 20);
  Assert.assertEquals(10, threadPool.getPoolSize());
  Assert.assertNull(containerLauncher.foundErrors);
  // A new host, so the core pool size should grow to 21 (11 hosts + 10 buffer),
  // while the live pool size should be only 11.
  containerLauncher.expectedCorePoolSize = 11 + containerLauncher.initialPoolSize;
  containerLauncher.finishEventHandling = false;
  ContainerId containerId = ContainerId.newContainerId(appAttemptId, 21);
  TaskAttemptId taskAttemptId = MRBuilderUtils.newTaskAttemptId(taskId, 21);
  containerLauncher.handle(new ContainerLauncherEvent(taskAttemptId, containerId,
      "host11:1234", null, ContainerLauncher.EventType.CONTAINER_REMOTE_LAUNCH));
  waitForEvents(containerLauncher, 21);
  Assert.assertEquals(11, threadPool.getPoolSize());
  Assert.assertNull(containerLauncher.foundErrors);
  containerLauncher.stop();
  // Change the MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE configuration
  // and verify the initialPoolSize value.
  Configuration conf = new Configuration();
  conf.setInt(MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREADPOOL_INITIAL_SIZE, 20);
  containerLauncher = new CustomContainerLauncher(context);
  containerLauncher.init(conf);
  Assert.assertEquals(20, containerLauncher.initialPoolSize);
}
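The 21 in the comment above comes from the launcher's pool-growth rule. A hedged sketch of that arithmetic; the real ContainerLauncherImpl logic may differ in detail, and limitOnPoolSize, distinctHosts, and initialPoolSize are illustrative parameter names:

// Core pool grows toward the number of distinct hosts plus the initial buffer,
// capped by the configured limit: min(limit, min(limit, hosts) + initial).
static int newCorePoolSize(int limitOnPoolSize, int distinctHosts, int initialPoolSize) {
  int idealPoolSize = Math.min(limitOnPoolSize, distinctHosts);
  return Math.min(limitOnPoolSize, idealPoolSize + initialPoolSize);
}
// e.g. newCorePoolSize(500, 11, 10) == 21, matching the test's expectedCorePoolSize.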