Use of org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest in project hadoop by apache: class TestContainerSchedulerQueuing, method testKillMultipleOpportunisticContainers.
/**
 * Submits three OPPORTUNISTIC containers that can run concurrently, and one
 * GUARANTEED container that needs two of the OPPORTUNISTIC ones to be killed
 * before it can run.
 * @throws Exception
 */
@Test
public void testKillMultipleOpportunisticContainers() throws Exception {
  containerManager.start();

  ContainerLaunchContext containerLaunchContext =
      recordFactory.newRecordInstance(ContainerLaunchContext.class);

  List<StartContainerRequest> list = new ArrayList<>();
  list.add(StartContainerRequest.newInstance(
      containerLaunchContext,
      createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
          context.getNodeId(), user, BuilderUtils.newResource(512, 1),
          context.getContainerTokenSecretManager(), null,
          ExecutionType.OPPORTUNISTIC)));
  list.add(StartContainerRequest.newInstance(
      containerLaunchContext,
      createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
          context.getNodeId(), user, BuilderUtils.newResource(512, 1),
          context.getContainerTokenSecretManager(), null,
          ExecutionType.OPPORTUNISTIC)));
  list.add(StartContainerRequest.newInstance(
      containerLaunchContext,
      createContainerToken(createContainerId(2), DUMMY_RM_IDENTIFIER,
          context.getNodeId(), user, BuilderUtils.newResource(512, 1),
          context.getContainerTokenSecretManager(), null,
          ExecutionType.OPPORTUNISTIC)));

  StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);

  list = new ArrayList<>();
  list.add(StartContainerRequest.newInstance(
      containerLaunchContext,
      createContainerToken(createContainerId(3), DUMMY_RM_IDENTIFIER,
          context.getNodeId(), user, BuilderUtils.newResource(1500, 1),
          context.getContainerTokenSecretManager(), null,
          ExecutionType.GUARANTEED)));

  allRequests = StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);

  BaseContainerManagerTest.waitForNMContainerState(containerManager,
      createContainerId(0),
      Arrays.asList(ContainerState.DONE,
          ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL), 40);
  Thread.sleep(5000);

  // Get container statuses. Two of the three OPPORTUNISTIC containers should
  // have been killed to make room for the GUARANTEED container.
  int killedContainers = 0;
  List<ContainerId> statList = new ArrayList<ContainerId>();
  for (int i = 0; i < 4; i++) {
    statList.add(createContainerId(i));
  }
  GetContainerStatusesRequest statRequest =
      GetContainerStatusesRequest.newInstance(statList);
  List<ContainerStatus> containerStatuses = containerManager
      .getContainerStatuses(statRequest).getContainerStatuses();
  for (ContainerStatus status : containerStatuses) {
    if (status.getDiagnostics().contains(
        "Container Killed to make room for Guaranteed Container")) {
      killedContainers++;
    }
    System.out.println("\nStatus : [" + status + "]\n");
  }

  Assert.assertEquals(2, killedContainers);
}
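The request-building boilerplate above (and in the two examples below) differs only in container id, memory size and ExecutionType. As a minimal sketch, a hypothetical helper like createStartRequest could factor it out; it is not part of TestContainerSchedulerQueuing, and it assumes the same fixture members used in these tests (context, user, DUMMY_RM_IDENTIFIER, and the createContainerId/createContainerToken helpers inherited from BaseContainerManagerTest).

// Hypothetical helper, not part of the Hadoop test class: builds a single
// StartContainerRequest for the given container id, memory size and
// execution type, reusing the fixture fields visible in the tests above.
private StartContainerRequest createStartRequest(
    ContainerLaunchContext launchContext, int id, int memoryMB,
    ExecutionType executionType) throws Exception {
  return StartContainerRequest.newInstance(
      launchContext,
      createContainerToken(createContainerId(id), DUMMY_RM_IDENTIFIER,
          context.getNodeId(), user, BuilderUtils.newResource(memoryMB, 1),
          context.getContainerTokenSecretManager(), null, executionType));
}

With such a helper, the three OPPORTUNISTIC requests above would reduce to createStartRequest(containerLaunchContext, i, 512, ExecutionType.OPPORTUNISTIC) for i = 0..2, and the GUARANTEED request to createStartRequest(containerLaunchContext, 3, 1500, ExecutionType.GUARANTEED).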
Use of org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest in project hadoop by apache: class TestContainerSchedulerQueuing, method testStartAndQueueMultipleContainers.
/**
 * Starts one OPPORTUNISTIC container that takes up the whole node's
 * resources, and submits two more that will be queued.
 * @throws Exception
 */
@Test
public void testStartAndQueueMultipleContainers() throws Exception {
  containerManager.start();

  ContainerLaunchContext containerLaunchContext =
      recordFactory.newRecordInstance(ContainerLaunchContext.class);

  List<StartContainerRequest> list = new ArrayList<>();
  list.add(StartContainerRequest.newInstance(
      containerLaunchContext,
      createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
          context.getNodeId(), user, BuilderUtils.newResource(2048, 1),
          context.getContainerTokenSecretManager(), null,
          ExecutionType.OPPORTUNISTIC)));
  list.add(StartContainerRequest.newInstance(
      containerLaunchContext,
      createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
          context.getNodeId(), user, BuilderUtils.newResource(1024, 1),
          context.getContainerTokenSecretManager(), null,
          ExecutionType.OPPORTUNISTIC)));
  list.add(StartContainerRequest.newInstance(
      containerLaunchContext,
      createContainerToken(createContainerId(2), DUMMY_RM_IDENTIFIER,
          context.getNodeId(), user, BuilderUtils.newResource(1024, 1),
          context.getContainerTokenSecretManager(), null,
          ExecutionType.OPPORTUNISTIC)));

  StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);

  Thread.sleep(5000);

  // Ensure the first container is running and the others are queued.
  // Request the statuses of all three containers, not just the first one,
  // so the queued containers can be verified as SCHEDULED below.
  List<ContainerId> statList = new ArrayList<ContainerId>();
  for (int i = 0; i < 3; i++) {
    statList.add(createContainerId(i));
  }
  GetContainerStatusesRequest statRequest =
      GetContainerStatusesRequest.newInstance(statList);
  List<ContainerStatus> containerStatuses = containerManager
      .getContainerStatuses(statRequest).getContainerStatuses();
  for (ContainerStatus status : containerStatuses) {
    if (status.getContainerId().equals(createContainerId(0))) {
      Assert.assertEquals(
          org.apache.hadoop.yarn.api.records.ContainerState.RUNNING,
          status.getState());
    } else {
      Assert.assertEquals(
          org.apache.hadoop.yarn.api.records.ContainerState.SCHEDULED,
          status.getState());
    }
  }

  ContainerScheduler containerScheduler =
      containerManager.getContainerScheduler();
  // Ensure two containers are properly queued.
  Assert.assertEquals(2, containerScheduler.getNumQueuedContainers());
  Assert.assertEquals(0,
      containerScheduler.getNumQueuedGuaranteedContainers());
  Assert.assertEquals(2,
      containerScheduler.getNumQueuedOpportunisticContainers());
}
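The fixed Thread.sleep(5000) above leaves the test both slow and timing-sensitive. Below is a minimal sketch of a polling alternative, using only the ContainerScheduler accessors already shown in this test; the 10-second deadline and 100 ms poll interval are illustrative values, not taken from the Hadoop source.

// Sketch: wait until the scheduler reports two queued containers (or a
// deadline passes) instead of sleeping for a fixed five seconds.
ContainerScheduler scheduler = containerManager.getContainerScheduler();
long deadline = System.currentTimeMillis() + 10000L;
while (scheduler.getNumQueuedContainers() < 2
    && System.currentTimeMillis() < deadline) {
  Thread.sleep(100);
}
Assert.assertEquals(2, scheduler.getNumQueuedContainers());

The status assertions that follow would then run against a known queue state rather than whatever happens to hold after five seconds.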
Use of org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest in project hadoop by apache: class TestContainerSchedulerQueuing, method testStopQueuedContainer.
/**
 * Starts one GUARANTEED container and queues two OPPORTUNISTIC ones,
 * then stops one of the two queued containers.
 * @throws Exception
 */
@Test
public void testStopQueuedContainer() throws Exception {
  containerManager.start();

  ContainerLaunchContext containerLaunchContext =
      recordFactory.newRecordInstance(ContainerLaunchContext.class);

  List<StartContainerRequest> list = new ArrayList<>();
  list.add(StartContainerRequest.newInstance(
      containerLaunchContext,
      createContainerToken(createContainerId(0), DUMMY_RM_IDENTIFIER,
          context.getNodeId(), user, BuilderUtils.newResource(2048, 1),
          context.getContainerTokenSecretManager(), null,
          ExecutionType.GUARANTEED)));
  list.add(StartContainerRequest.newInstance(
      containerLaunchContext,
      createContainerToken(createContainerId(1), DUMMY_RM_IDENTIFIER,
          context.getNodeId(), user, BuilderUtils.newResource(512, 1),
          context.getContainerTokenSecretManager(), null,
          ExecutionType.OPPORTUNISTIC)));
  list.add(StartContainerRequest.newInstance(
      containerLaunchContext,
      createContainerToken(createContainerId(2), DUMMY_RM_IDENTIFIER,
          context.getNodeId(), user, BuilderUtils.newResource(512, 1),
          context.getContainerTokenSecretManager(), null,
          ExecutionType.OPPORTUNISTIC)));

  StartContainersRequest allRequests = StartContainersRequest.newInstance(list);
  containerManager.startContainers(allRequests);

  Thread.sleep(2000);

  // Assert there is initially one container running and two queued.
  int runningContainersNo = 0;
  int queuedContainersNo = 0;
  List<ContainerId> statList = new ArrayList<ContainerId>();
  for (int i = 0; i < 3; i++) {
    statList.add(createContainerId(i));
  }
  GetContainerStatusesRequest statRequest =
      GetContainerStatusesRequest.newInstance(statList);
  List<ContainerStatus> containerStatuses = containerManager
      .getContainerStatuses(statRequest).getContainerStatuses();
  for (ContainerStatus status : containerStatuses) {
    if (status.getState()
        == org.apache.hadoop.yarn.api.records.ContainerState.RUNNING) {
      runningContainersNo++;
    } else if (status.getState()
        == org.apache.hadoop.yarn.api.records.ContainerState.SCHEDULED) {
      queuedContainersNo++;
    }
    System.out.println("\nStatus : [" + status + "]\n");
  }

  Assert.assertEquals(1, runningContainersNo);
  Assert.assertEquals(2, queuedContainersNo);

  // Stop one of the two queued containers.
  StopContainersRequest stopRequest =
      StopContainersRequest.newInstance(Arrays.asList(createContainerId(1)));
  containerManager.stopContainers(stopRequest);

  Thread.sleep(2000);

  // Assert the queued container got properly stopped.
  statList.clear();
  for (int i = 0; i < 3; i++) {
    statList.add(createContainerId(i));
  }
  statRequest = GetContainerStatusesRequest.newInstance(statList);
  HashMap<org.apache.hadoop.yarn.api.records.ContainerState, ContainerStatus>
      map = new HashMap<>();
  for (int i = 0; i < 10; i++) {
    containerStatuses = containerManager
        .getContainerStatuses(statRequest).getContainerStatuses();
    for (ContainerStatus status : containerStatuses) {
      System.out.println("\nStatus : [" + status + "]\n");
      map.put(status.getState(), status);
      if (map.containsKey(
              org.apache.hadoop.yarn.api.records.ContainerState.RUNNING)
          && map.containsKey(
              org.apache.hadoop.yarn.api.records.ContainerState.SCHEDULED)
          && map.containsKey(
              org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE)) {
        break;
      }
      Thread.sleep(1000);
    }
  }

  Assert.assertEquals(createContainerId(0),
      map.get(org.apache.hadoop.yarn.api.records.ContainerState.RUNNING)
          .getContainerId());
  Assert.assertEquals(createContainerId(1),
      map.get(org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE)
          .getContainerId());
  Assert.assertEquals(createContainerId(2),
      map.get(org.apache.hadoop.yarn.api.records.ContainerState.SCHEDULED)
          .getContainerId());
}
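The HashMap keyed by ContainerState above verifies the outcome indirectly. A more direct sketch is to poll only the stopped container (container 1) until it reports COMPLETE, again using only the GetContainerStatusesRequest API shown in these examples; the ten-attempt, one-second retry loop mirrors the original loop and is illustrative rather than taken from the Hadoop source.

// Sketch: poll container 1 until it reaches COMPLETE after the stop request.
GetContainerStatusesRequest singleRequest =
    GetContainerStatusesRequest.newInstance(
        Arrays.asList(createContainerId(1)));
org.apache.hadoop.yarn.api.records.ContainerState state = null;
for (int attempt = 0; attempt < 10; attempt++) {
  state = containerManager.getContainerStatuses(singleRequest)
      .getContainerStatuses().get(0).getState();
  if (state == org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE) {
    break;
  }
  Thread.sleep(1000);
}
Assert.assertEquals(
    org.apache.hadoop.yarn.api.records.ContainerState.COMPLETE, state);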