Use of org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse in project hadoop by apache.
The class TestApplicationMasterService, method testInvalidIncreaseDecreaseRequest.
@Test(timeout = 60000)
public void testInvalidIncreaseDecreaseRequest() throws Exception {
  conf = new YarnConfiguration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
      ResourceScheduler.class);
  MockRM rm = new MockRM(conf);
  try {
    rm.start();
    // Register node1
    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
    // Submit an application
    RMApp app1 = rm.submitApp(1024);
    // Kick the scheduling
    nm1.nodeHeartbeat(true);
    RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
    MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
    RegisterApplicationMasterResponse registerResponse =
        am1.registerAppAttempt();
    sentRMContainerLaunched(rm,
        ContainerId.newContainerId(am1.getApplicationAttemptId(), 1));
    // Asking for a normal increase should be successful
    am1.sendContainerResizingRequest(Arrays.asList(
        UpdateContainerRequest.newInstance(0,
            ContainerId.newContainerId(attempt1.getAppAttemptId(), 1),
            ContainerUpdateType.INCREASE_RESOURCE,
            Resources.createResource(2048), null)));
    // Target resource is negative, should fail
    AllocateResponse response = am1.sendContainerResizingRequest(Arrays.asList(
        UpdateContainerRequest.newInstance(0,
            ContainerId.newContainerId(attempt1.getAppAttemptId(), 1),
            ContainerUpdateType.INCREASE_RESOURCE,
            Resources.createResource(-1), null)));
    Assert.assertEquals(1, response.getUpdateErrors().size());
    Assert.assertEquals("RESOURCE_OUTSIDE_ALLOWED_RANGE",
        response.getUpdateErrors().get(0).getReason());
    // Target resource is more than maxAllocation, should fail
    response = am1.sendContainerResizingRequest(Arrays.asList(
        UpdateContainerRequest.newInstance(0,
            ContainerId.newContainerId(attempt1.getAppAttemptId(), 1),
            ContainerUpdateType.INCREASE_RESOURCE,
            Resources.add(registerResponse.getMaximumResourceCapability(),
                Resources.createResource(1)), null)));
    Assert.assertEquals(1, response.getUpdateErrors().size());
    Assert.assertEquals("RESOURCE_OUTSIDE_ALLOWED_RANGE",
        response.getUpdateErrors().get(0).getReason());
    // Contains multiple increase/decrease requests for the same containerId
    response = am1.sendContainerResizingRequest(Arrays.asList(
        UpdateContainerRequest.newInstance(0,
            ContainerId.newContainerId(attempt1.getAppAttemptId(), 1),
            ContainerUpdateType.INCREASE_RESOURCE,
            Resources.createResource(2048, 4), null),
        UpdateContainerRequest.newInstance(0,
            ContainerId.newContainerId(attempt1.getAppAttemptId(), 1),
            ContainerUpdateType.DECREASE_RESOURCE,
            Resources.createResource(1024, 1), null)));
    Assert.assertEquals(1, response.getUpdateErrors().size());
    Assert.assertEquals("UPDATE_OUTSTANDING_ERROR",
        response.getUpdateErrors().get(0).getReason());
  } finally {
    if (rm != null) {
      rm.close();
    }
  }
}
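The pattern worth noting in this test is that invalid resize requests are not surfaced as exceptions; they come back inside the AllocateResponse via getUpdateErrors(). Below is a minimal sketch of how an ApplicationMaster might check for such errors after a heartbeat. The helper class name, the slf4j logger, and the UpdateContainerError import path are assumptions for illustration, not part of the test above.

import java.util.List;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.UpdateContainerError;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class UpdateErrorLogger {
  private static final Logger LOG =
      LoggerFactory.getLogger(UpdateErrorLogger.class);

  /** Logs every rejected container-update request carried by the response. */
  public static boolean hasUpdateErrors(AllocateResponse response) {
    List<UpdateContainerError> errors = response.getUpdateErrors();
    for (UpdateContainerError error : errors) {
      // Reasons observed in the test above include
      // RESOURCE_OUTSIDE_ALLOWED_RANGE and UPDATE_OUTSTANDING_ERROR.
      LOG.warn("Container update rejected: {}", error.getReason());
    }
    return !errors.isEmpty();
  }
}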
Use of org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse in project hadoop by apache.
The class TestApplicationMasterService, method testRMIdentifierOnContainerAllocation.
@Test(timeout = 3000000)
public void testRMIdentifierOnContainerAllocation() throws Exception {
  MockRM rm = new MockRM(conf);
  rm.start();
  // Register node1
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
  // Submit an application
  RMApp app1 = rm.submitApp(2048);
  // Kick the scheduling
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();
  am1.addRequests(new String[] { "127.0.0.1" }, GB, 1, 1);
  // Send the request
  AllocateResponse alloc1Response = am1.schedule();
  // Kick the scheduler
  nm1.nodeHeartbeat(true);
  while (alloc1Response.getAllocatedContainers().size() < 1) {
    LOG.info("Waiting for containers to be created for app 1...");
    sleep(1000);
    alloc1Response = am1.schedule();
  }
  // Assert that the RMIdentifier is set properly in allocated containers
  Container allocatedContainer =
      alloc1Response.getAllocatedContainers().get(0);
  ContainerTokenIdentifier tokenId = BuilderUtils
      .newContainerTokenIdentifier(allocatedContainer.getContainerToken());
  Assert.assertEquals(MockRM.getClusterTimeStamp(), tokenId.getRMIdentifier());
  rm.stop();
}
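The assertion at the end decodes the container token to verify that the RM's cluster timestamp was stamped into it. A minimal sketch of that decoding step in isolation follows; the helper class name is an assumption, and it relies on the server-side BuilderUtils utility being on the classpath, as it is in this test.

import java.io.IOException;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;

public class ContainerTokenInspector {
  /** Returns the RM identifier (cluster timestamp) embedded in the container token. */
  public static long readRMIdentifier(Container container) throws IOException {
    ContainerTokenIdentifier tokenId =
        BuilderUtils.newContainerTokenIdentifier(container.getContainerToken());
    return tokenId.getRMIdentifier();
  }
}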
Use of org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse in project hadoop by apache.
The class TestApplicationMasterService, method testInvalidContainerReleaseRequest.
@Test(timeout = 600000)
public void testInvalidContainerReleaseRequest() throws Exception {
  MockRM rm = new MockRM(conf);
  try {
    rm.start();
    // Register node1
    MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);
    // Submit an application
    RMApp app1 = rm.submitApp(1024);
    // Kick the scheduling
    nm1.nodeHeartbeat(true);
    RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
    MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
    am1.registerAppAttempt();
    am1.addRequests(new String[] { "127.0.0.1" }, GB, 1, 1);
    // Send the request
    AllocateResponse alloc1Response = am1.schedule();
    // Kick the scheduler
    nm1.nodeHeartbeat(true);
    while (alloc1Response.getAllocatedContainers().size() < 1) {
      LOG.info("Waiting for containers to be created for app 1...");
      sleep(1000);
      alloc1Response = am1.schedule();
    }
    Assert.assertTrue(alloc1Response.getAllocatedContainers().size() > 0);
    RMApp app2 = rm.submitApp(1024);
    nm1.nodeHeartbeat(true);
    RMAppAttempt attempt2 = app2.getCurrentAppAttempt();
    MockAM am2 = rm.sendAMLaunched(attempt2.getAppAttemptId());
    am2.registerAppAttempt();
    // Now try to release the container allocated for app1 -> appAttempt1.
    ContainerId cId = alloc1Response.getAllocatedContainers().get(0).getId();
    am2.addContainerToBeReleased(cId);
    try {
      am2.schedule();
      Assert.fail("Exception was expected!!");
    } catch (InvalidContainerReleaseException e) {
      StringBuilder sb = new StringBuilder("Cannot release container : ");
      sb.append(cId.toString());
      sb.append(" not belonging to this application attempt : ");
      sb.append(attempt2.getAppAttemptId().toString());
      Assert.assertTrue(e.getMessage().contains(sb.toString()));
    }
  } finally {
    if (rm != null) {
      rm.stop();
    }
  }
}
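The release path exercised here is the ordinary allocate call: an AM lists the ContainerIds it wants freed, and the RM throws InvalidContainerReleaseException only when an id does not belong to the calling application attempt. A minimal sketch of building such a release-only request with the same AllocateRequest.newInstance factory used later in this page is shown below; the helper class name and the fixed response id of 0 are illustrative assumptions.

import java.util.ArrayList;
import java.util.Collections;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ResourceRequest;

public class ReleaseRequestBuilder {
  /** Builds an allocate request whose only purpose is to release one container. */
  public static AllocateRequest releaseOnly(ContainerId containerId) {
    return AllocateRequest.newInstance(0, 0F,
        new ArrayList<ResourceRequest>(),
        Collections.singletonList(containerId), null);
  }
}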
Use of org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse in project hadoop by apache.
The class TestApplicationCleanup, method testProcessingNMContainerStatusesOnNMRestart.
// The test verifies processing of NMContainerStatuses which are sent during
// NM registration.
// 1. Start the cluster - RM, NM; submit an app with 1024MB; launch & register the AM.
// 2. AM sends a ResourceRequest for 1 container with 2048MB of memory.
// 3. Verify the number of containers allocated by the RM.
// 4. Verify the memory usage of the cluster; it should be 3072 (AM memory +
//    requested memory: 1024 + 2048 = 3072).
// 5. Re-register the NM, sending a completed container status.
// 6. Verify the memory used; it should be 1024.
// 7. Send an AM heartbeat to the RM. The allocate response should contain the
//    completed container.
@Test(timeout = 60000)
public void testProcessingNMContainerStatusesOnNMRestart() throws Exception {
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);
  // 1. Start the cluster - RM, NM; submit an app with 1024MB; launch & register the AM.
  MockRM rm1 = new MockRM(conf, memStore);
  rm1.start();
  int nmMemory = 8192;
  int amMemory = 1024;
  int containerMemory = 2048;
  MockNM nm1 =
      new MockNM("127.0.0.1:1234", nmMemory, rm1.getResourceTrackerService());
  nm1.registerNode();
  RMApp app0 = rm1.submitApp(amMemory);
  MockAM am0 = MockRM.launchAndRegisterAM(app0, rm1, nm1);
  // 2. AM sends a ResourceRequest for 1 container with 2048MB of memory.
  int noOfContainers = 1;
  List<Container> allocateContainers =
      am0.allocateAndWaitForContainers(noOfContainers, containerMemory, nm1);
  // 3. Verify the number of containers allocated by the RM.
  Assert.assertEquals(noOfContainers, allocateContainers.size());
  Container container = allocateContainers.get(0);
  nm1.nodeHeartbeat(am0.getApplicationAttemptId(), 1, ContainerState.RUNNING);
  nm1.nodeHeartbeat(am0.getApplicationAttemptId(),
      container.getId().getContainerId(), ContainerState.RUNNING);
  rm1.waitForState(app0.getApplicationId(), RMAppState.RUNNING);
  // 4. Verify the memory usage of the cluster; it should be 3072 (AM memory +
  //    requested memory: 1024 + 2048 = 3072).
  ResourceScheduler rs = rm1.getRMContext().getScheduler();
  long allocatedMB = rs.getRootQueueMetrics().getAllocatedMB();
  Assert.assertEquals(amMemory + containerMemory, allocatedMB);
  // 5. Re-register the NM, sending a completed container status.
  List<NMContainerStatus> nMContainerStatusForApp =
      createNMContainerStatusForApp(am0);
  nm1.registerNode(nMContainerStatusForApp,
      Arrays.asList(app0.getApplicationId()));
  waitForClusterMemory(nm1, rs, amMemory);
  // 6. Verify the memory used; it should be 1024.
  Assert.assertEquals(amMemory, rs.getRootQueueMetrics().getAllocatedMB());
  // 7. Send an AM heartbeat to the RM. The allocate response should contain
  //    the completed container.
  AllocateRequest req = AllocateRequest.newInstance(0, 0F,
      new ArrayList<ResourceRequest>(), new ArrayList<ContainerId>(), null);
  AllocateResponse allocate = am0.allocate(req);
  List<ContainerStatus> completedContainersStatuses =
      allocate.getCompletedContainersStatuses();
  Assert.assertEquals(noOfContainers, completedContainersStatuses.size());
  // Application cleanup should happen once cluster memory used is 0.
  nm1.nodeHeartbeat(am0.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
  waitForClusterMemory(nm1, rs, 0);
  rm1.stop();
}
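Step 7 is the part that touches AllocateResponse: completed containers, including those reported by a restarted NM, reach the AM through getCompletedContainersStatuses() on the next heartbeat. A minimal sketch of draining that list from the AM's side is shown below; the helper class and the slf4j logger are assumptions, not part of the test.

import java.util.List;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class CompletedContainerDrainer {
  private static final Logger LOG =
      LoggerFactory.getLogger(CompletedContainerDrainer.class);

  /** Logs every container the RM reports as completed in this heartbeat. */
  public static void drain(AllocateResponse response) {
    List<ContainerStatus> completed = response.getCompletedContainersStatuses();
    for (ContainerStatus status : completed) {
      LOG.info("Container {} completed with exit status {}: {}",
          status.getContainerId(), status.getExitStatus(),
          status.getDiagnostics());
    }
  }
}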
Use of org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse in project hadoop by apache.
The class TestRecordFactory, method testPbRecordFactory.
@Test
public void testPbRecordFactory() {
  RecordFactory pbRecordFactory = RecordFactoryPBImpl.get();
  try {
    AllocateResponse response =
        pbRecordFactory.newRecordInstance(AllocateResponse.class);
    Assert.assertEquals(AllocateResponsePBImpl.class, response.getClass());
  } catch (YarnRuntimeException e) {
    e.printStackTrace();
    Assert.fail("Failed to create record");
  }
  try {
    AllocateRequest request =
        pbRecordFactory.newRecordInstance(AllocateRequest.class);
    Assert.assertEquals(AllocateRequestPBImpl.class, request.getClass());
  } catch (YarnRuntimeException e) {
    e.printStackTrace();
    Assert.fail("Failed to create record");
  }
}
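This test drives the record factory directly; application code more commonly goes through the org.apache.hadoop.yarn.util.Records helper, which wraps the same factory lookup. A brief, hedged sketch of that shortcut follows; the class below is illustrative only and is not part of the test.

import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.util.Records;

public class RecordCreationExample {
  public static void main(String[] args) {
    // Both calls resolve to the protobuf-backed implementations verified above.
    AllocateRequest request = Records.newRecord(AllocateRequest.class);
    AllocateResponse response = Records.newRecord(AllocateResponse.class);
    System.out.println(request.getClass().getName());
    System.out.println(response.getClass().getName());
  }
}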