use of org.apache.hadoop.yarn.api.records.ResourceRequest in project hadoop by apache.
the class TestNodeLabelContainerAllocation method testResourceRequestUpdateNodePartitions.
/**
 * JIRA YARN-4140: in a resource request set, the node label is set only on the
 * ANY request; the label expression of the RACK/NODE-local and default
 * requests has to be updated to match. This test case verifies that the label
 * expression is updated based on the ANY request.
 *
 * @throws Exception
 */
@Test
public void testResourceRequestUpdateNodePartitions() throws Exception {
  // set node -> label
  mgr.addToCluserNodeLabels(ImmutableSet.of(NodeLabel.newInstance("x"),
      NodeLabel.newInstance("y", false), NodeLabel.newInstance("z", false)));
  mgr.addLabelsToNode(ImmutableMap.of(NodeId.newInstance("h1", 0), toSet("y")));
  // inject node label manager
  MockRM rm1 = new MockRM(getConfigurationWithQueueLabels(conf)) {
    @Override
    public RMNodeLabelsManager createNodeLabelManager() {
      return mgr;
    }
  };
  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  // label = y
  MockNM nm2 = rm1.registerNode("h2:1234", 40 * GB);
  // launch an app to queue b1 (label = y), AM container should be launched in
  // nm2
  RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "b1");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm2);
  // Create a request set where the requests before ANY carry no label while
  // the ANY requests carry label y.
  List<ResourceRequest> resourceRequest = new ArrayList<ResourceRequest>();
  resourceRequest.add(am1.createResourceReq("/default-rack", 1024, 3, 1,
      RMNodeLabelsManager.NO_LABEL));
  resourceRequest.add(am1.createResourceReq("*", 1024, 3, 5, "y"));
  resourceRequest.add(am1.createResourceReq("h1:1234", 1024, 3, 2,
      RMNodeLabelsManager.NO_LABEL));
  resourceRequest.add(am1.createResourceReq("*", 1024, 2, 3, "y"));
  resourceRequest.add(am1.createResourceReq("h2:1234", 1024, 2, 4, null));
  resourceRequest.add(am1.createResourceReq("*", 1024, 4, 3, null));
  resourceRequest.add(am1.createResourceReq("h2:1234", 1024, 4, 4, null));
  am1.allocate(resourceRequest, new ArrayList<ContainerId>());
  CapacityScheduler cs = (CapacityScheduler) rm1.getRMContext().getScheduler();
  FiCaSchedulerApp app = cs.getApplicationAttempt(am1.getApplicationAttemptId());
  checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 2, "y");
  checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 3, "y");
  checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 4,
      RMNodeLabelsManager.NO_LABEL);
  // The previous ANY request used label y; update it to z while the requests
  // before ANY carry a null label.
  List<ResourceRequest> newReq = new ArrayList<ResourceRequest>();
  newReq.add(am1.createResourceReq("h2:1234", 1024, 3, 4, null));
  newReq.add(am1.createResourceReq("*", 1024, 3, 5, "z"));
  newReq.add(am1.createResourceReq("h1:1234", 1024, 3, 4, null));
  newReq.add(am1.createResourceReq("*", 1024, 4, 5, "z"));
  am1.allocate(newReq, new ArrayList<ContainerId>());
  checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 3, "z");
  checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 4, "z");
  checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 2, "y");
  // Both the requests before ANY and the ANY requests have a null label
  // expression; the requests should be stored with the empty label.
  List<ResourceRequest> resourceRequest1 = new ArrayList<ResourceRequest>();
  resourceRequest1.add(am1.createResourceReq("/default-rack", 1024, 3, 1, null));
  resourceRequest1.add(am1.createResourceReq("*", 1024, 3, 5, null));
  resourceRequest1.add(am1.createResourceReq("h1:1234", 1024, 3, 2,
      RMNodeLabelsManager.NO_LABEL));
  resourceRequest1.add(am1.createResourceReq("/default-rack", 1024, 2, 1, null));
  resourceRequest1.add(am1.createResourceReq("*", 1024, 2, 3,
      RMNodeLabelsManager.NO_LABEL));
  resourceRequest1.add(am1.createResourceReq("h2:1234", 1024, 2, 4, null));
  am1.allocate(resourceRequest1, new ArrayList<ContainerId>());
  checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 3,
      RMNodeLabelsManager.NO_LABEL);
  checkNodePartitionOfRequestedPriority(app.getAppSchedulingInfo(), 2,
      RMNodeLabelsManager.NO_LABEL);
}
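
The behaviour exercised above can be pictured as a small normalization step that copies the ANY ("*") request's node label expression onto the NODE/RACK/default requests of the same priority. The sketch below is only an illustration of the YARN-4140 idea, not the scheduler's actual code; applyLabelOfAnyRequest is a hypothetical helper name, and the snippet assumes the java.util and YARN record imports already used by the test.

// Hypothetical sketch: propagate the ANY request's node label expression to
// the other requests of the same priority, as YARN-4140 describes.
private static void applyLabelOfAnyRequest(List<ResourceRequest> requests) {
  // Find the label expression carried by the ANY ("*") request per priority.
  Map<Priority, String> anyLabels = new HashMap<Priority, String>();
  for (ResourceRequest req : requests) {
    if (ResourceRequest.ANY.equals(req.getResourceName())) {
      anyLabels.put(req.getPriority(), req.getNodeLabelExpression());
    }
  }
  // NODE/RACK/default requests inherit the label of their ANY request;
  // a missing label falls back to the empty (default) partition.
  for (ResourceRequest req : requests) {
    if (!ResourceRequest.ANY.equals(req.getResourceName())) {
      String label = anyLabels.get(req.getPriority());
      req.setNodeLabelExpression(
          label == null ? RMNodeLabelsManager.NO_LABEL : label);
    }
  }
}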
use of org.apache.hadoop.yarn.api.records.ResourceRequest in project weave by continuuity.
the class AMRMClientImpl method decResourceRequest.
private void decResourceRequest(Priority priority, String resourceName,
    Resource capability, int containerCount) {
  Map<String, Map<Resource, ResourceRequest>> remoteRequests =
      this.remoteRequestsTable.get(priority);
  if (remoteRequests == null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Not decrementing resource as priority " + priority
          + " is not present in request table");
    }
    return;
  }
  Map<Resource, ResourceRequest> reqMap = remoteRequests.get(resourceName);
  if (reqMap == null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Not decrementing resource as " + resourceName
          + " is not present in request table");
    }
    return;
  }
  ResourceRequest remoteRequest = reqMap.get(capability);
  if (LOG.isDebugEnabled()) {
    LOG.debug("BEFORE decResourceRequest:" + " applicationId=" + appAttemptId
        + " priority=" + priority.getPriority() + " resourceName=" + resourceName
        + " numContainers=" + remoteRequest.getNumContainers()
        + " #asks=" + ask.size());
  }
  remoteRequest.setNumContainers(remoteRequest.getNumContainers() - containerCount);
  if (remoteRequest.getNumContainers() < 0) {
    // guard against spurious removals
    remoteRequest.setNumContainers(0);
  }
  // Send the ResourceRequest to the RM even if it is 0, because it needs to
  // override a previously sent value. If the ResourceRequest was not sent
  // previously, then sending 0 ought to be a no-op on the RM.
  addResourceRequestToAsk(remoteRequest);
  // Delete entries from the maps if no longer needed.
  if (remoteRequest.getNumContainers() == 0) {
    reqMap.remove(capability);
    if (reqMap.size() == 0) {
      remoteRequests.remove(resourceName);
    }
    if (remoteRequests.size() == 0) {
      remoteRequestsTable.remove(priority);
    }
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("AFTER decResourceRequest:" + " applicationId=" + appAttemptId
        + " priority=" + priority.getPriority() + " resourceName=" + resourceName
        + " numContainers=" + remoteRequest.getNumContainers()
        + " #asks=" + ask.size());
  }
}
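
The remoteRequestsTable walked above is a nested map keyed by priority, then resource name (node, rack, or ANY), then capability. For context, the matching increment path might look like the sketch below; it reuses the field and helper names visible in the snippet (remoteRequestsTable, addResourceRequestToAsk) but is otherwise an assumed reconstruction, not the project's actual code.

// Assumed sketch of the increment counterpart: create the nested maps on
// demand and bump the container count for (priority, resourceName, capability).
private void incResourceRequest(Priority priority, String resourceName,
    Resource capability, int containerCount) {
  Map<String, Map<Resource, ResourceRequest>> remoteRequests =
      this.remoteRequestsTable.get(priority);
  if (remoteRequests == null) {
    remoteRequests = new HashMap<String, Map<Resource, ResourceRequest>>();
    this.remoteRequestsTable.put(priority, remoteRequests);
  }
  Map<Resource, ResourceRequest> reqMap = remoteRequests.get(resourceName);
  if (reqMap == null) {
    reqMap = new HashMap<Resource, ResourceRequest>();
    remoteRequests.put(resourceName, reqMap);
  }
  ResourceRequest remoteRequest = reqMap.get(capability);
  if (remoteRequest == null) {
    // Create a fresh request record for this combination.
    remoteRequest = Records.newRecord(ResourceRequest.class);
    remoteRequest.setPriority(priority);
    remoteRequest.setResourceName(resourceName);
    remoteRequest.setCapability(capability);
    remoteRequest.setNumContainers(0);
    reqMap.put(capability, remoteRequest);
  }
  remoteRequest.setNumContainers(remoteRequest.getNumContainers() + containerCount);
  // Queue the updated request so the next allocate() call sends it to the RM.
  addResourceRequestToAsk(remoteRequest);
}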
use of org.apache.hadoop.yarn.api.records.ResourceRequest in project hadoop by apache.
the class TestOpportunisticContainerAllocatorAMService method createService.
private OpportunisticContainerAllocatorAMService createService(
    final RecordFactory factory, final RMContext rmContext, final Container c) {
  return new OpportunisticContainerAllocatorAMService(rmContext, null) {

    @Override
    public RegisterApplicationMasterResponse registerApplicationMaster(
        RegisterApplicationMasterRequest request) throws YarnException, IOException {
      RegisterApplicationMasterResponse resp =
          factory.newRecordInstance(RegisterApplicationMasterResponse.class);
      // Dummy Entry to Assert that we get this object back
      resp.setQueue("dummyQueue");
      return resp;
    }

    @Override
    public FinishApplicationMasterResponse finishApplicationMaster(
        FinishApplicationMasterRequest request) throws YarnException, IOException {
      FinishApplicationMasterResponse resp =
          factory.newRecordInstance(FinishApplicationMasterResponse.class);
      // Dummy Entry to Assert that we get this object back
      resp.setIsUnregistered(false);
      return resp;
    }

    @Override
    public AllocateResponse allocate(AllocateRequest request)
        throws YarnException, IOException {
      AllocateResponse response = factory.newRecordInstance(AllocateResponse.class);
      response.setNumClusterNodes(12345);
      response.setAllocatedContainers(Arrays.asList(c));
      return response;
    }

    @Override
    public RegisterDistributedSchedulingAMResponse
        registerApplicationMasterForDistributedScheduling(
            RegisterApplicationMasterRequest request) throws YarnException, IOException {
      RegisterDistributedSchedulingAMResponse resp =
          factory.newRecordInstance(RegisterDistributedSchedulingAMResponse.class);
      resp.setContainerIdStart(54321L);
      resp.setMaxContainerResource(Resource.newInstance(4096, 4));
      resp.setMinContainerResource(Resource.newInstance(1024, 1));
      resp.setIncrContainerResource(Resource.newInstance(2048, 2));
      return resp;
    }

    @Override
    public DistributedSchedulingAllocateResponse allocateForDistributedScheduling(
        DistributedSchedulingAllocateRequest request) throws YarnException, IOException {
      List<ResourceRequest> askList = request.getAllocateRequest().getAskList();
      List<Container> allocatedContainers = request.getAllocatedContainers();
      Assert.assertEquals(1, allocatedContainers.size());
      Assert.assertEquals(ExecutionType.OPPORTUNISTIC,
          allocatedContainers.get(0).getExecutionType());
      Assert.assertEquals(1, askList.size());
      Assert.assertTrue(askList.get(0).getExecutionTypeRequest().getEnforceExecutionType());
      DistributedSchedulingAllocateResponse resp =
          factory.newRecordInstance(DistributedSchedulingAllocateResponse.class);
      resp.setNodesForScheduling(Arrays.asList(
          RemoteNode.newInstance(NodeId.newInstance("h1", 1234), "http://h1:4321")));
      return resp;
    }
  };
}
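
The overridden allocateForDistributedScheduling above expects exactly one ask whose ExecutionTypeRequest enforces OPPORTUNISTIC execution. A hedged sketch of how the AM side of such a test might build that ask is shown below; the priority, capability, and response-id values are illustrative, and the snippet assumes java.util.Collections plus the YARN record classes already imported by the test.

// Illustrative AM-side ask: one OPPORTUNISTIC request with execution-type
// enforcement, matching the assertions in the override above.
ResourceRequest opportunisticAsk = ResourceRequest.newInstance(
    Priority.newInstance(1), ResourceRequest.ANY,
    Resource.newInstance(1024, 1), 1);
opportunisticAsk.setExecutionTypeRequest(
    ExecutionTypeRequest.newInstance(ExecutionType.OPPORTUNISTIC, true));
AllocateRequest allocateRequest = AllocateRequest.newInstance(0, 0.0f,
    Collections.singletonList(opportunisticAsk),
    Collections.<ContainerId>emptyList(), null);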
use of org.apache.hadoop.yarn.api.records.ResourceRequest in project hadoop by apache.
the class TestAppManager method testQueueSubmitWithNoPermission.
@Test
public void testQueueSubmitWithNoPermission() throws IOException {
  YarnConfiguration conf = new YarnConfiguration();
  conf.set(YarnConfiguration.RM_SCHEDULER,
      CapacityScheduler.class.getCanonicalName());
  conf.set(PREFIX + "root.acl_submit_applications", " ");
  conf.set(PREFIX + "root.acl_administer_queue", " ");
  conf.set(PREFIX + "root.default.acl_submit_applications", " ");
  conf.set(PREFIX + "root.default.acl_administer_queue", " ");
  conf.set(YarnConfiguration.YARN_ACL_ENABLE, "true");
  MockRM mockRM = new MockRM(conf);
  ClientRMService rmService = mockRM.getClientRMService();
  SubmitApplicationRequest req = Records.newRecord(SubmitApplicationRequest.class);
  ApplicationSubmissionContext sub =
      Records.newRecord(ApplicationSubmissionContext.class);
  sub.setApplicationId(appId);
  ResourceRequest resReg = ResourceRequest.newInstance(Priority.newInstance(0),
      ResourceRequest.ANY, Resource.newInstance(1024, 1), 1);
  sub.setAMContainerResourceRequest(resReg);
  req.setApplicationSubmissionContext(sub);
  sub.setAMContainerSpec(mock(ContainerLaunchContext.class));
  try {
    rmService.submitApplication(req);
  } catch (Exception e) {
    e.printStackTrace();
    if (e instanceof YarnException) {
      Assert.assertTrue(e.getCause() instanceof AccessControlException);
    } else {
      Assert.fail("Yarn exception is expected : " + e.getMessage());
    }
  } finally {
    mockRM.close();
  }
}
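
The blank (" ") ACL values above deny submission to every user, which is why submitApplication is expected to fail with an AccessControlException wrapped in a YarnException. For contrast, and assuming PREFIX is the capacity-scheduler property prefix ("yarn.scheduler.capacity."), granting a single user submit access on the default queue might look roughly like the sketch below; the user name is made up.

// Assumption: PREFIX == "yarn.scheduler.capacity.".
// A single space means "nobody"; a user/group list grants access.
conf.set(YarnConfiguration.YARN_ACL_ENABLE, "true");
conf.set(PREFIX + "root.acl_submit_applications", " ");
// Hypothetical user "user1" may submit to root.default.
conf.set(PREFIX + "root.default.acl_submit_applications", "user1");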
use of org.apache.hadoop.yarn.api.records.ResourceRequest in project hadoop by apache.
the class TestApplicationCleanup method testProcessingNMContainerStatusesOnNMRestart.
// The test verifies processing of the NMContainerStatuses that are sent during
// NM registration.
// 1. Start the cluster (RM, NM), submit an app with 1024 MB, launch and
//    register the AM
// 2. AM sends a ResourceRequest for 1 container with 2048 MB memory
// 3. Verify the number of containers allocated by the RM
// 4. Verify the memory used by the cluster; it should be 3072 MB
//    (AM memory + requested memory = 1024 + 2048 = 3072)
// 5. Re-register the NM, sending completed container statuses
// 6. Verify the memory used; it should be 1024 MB
// 7. Send an AM heartbeat to the RM; the allocate response should contain the
//    completed container
@Test(timeout = 60000)
public void testProcessingNMContainerStatusesOnNMRestart() throws Exception {
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 1);
  MemoryRMStateStore memStore = new MemoryRMStateStore();
  memStore.init(conf);
  // 1. Start the cluster (RM, NM), submit an app with 1024 MB, launch and
  // register the AM
  MockRM rm1 = new MockRM(conf, memStore);
  rm1.start();
  int nmMemory = 8192;
  int amMemory = 1024;
  int containerMemory = 2048;
  MockNM nm1 = new MockNM("127.0.0.1:1234", nmMemory, rm1.getResourceTrackerService());
  nm1.registerNode();
  RMApp app0 = rm1.submitApp(amMemory);
  MockAM am0 = MockRM.launchAndRegisterAM(app0, rm1, nm1);
  // 2. AM sends a ResourceRequest for 1 container with 2048 MB memory.
  int noOfContainers = 1;
  List<Container> allocateContainers =
      am0.allocateAndWaitForContainers(noOfContainers, containerMemory, nm1);
  // 3. Verify the number of containers allocated by the RM.
  Assert.assertEquals(noOfContainers, allocateContainers.size());
  Container container = allocateContainers.get(0);
  nm1.nodeHeartbeat(am0.getApplicationAttemptId(), 1, ContainerState.RUNNING);
  nm1.nodeHeartbeat(am0.getApplicationAttemptId(),
      container.getId().getContainerId(), ContainerState.RUNNING);
  rm1.waitForState(app0.getApplicationId(), RMAppState.RUNNING);
  // 4. Verify the memory used by the cluster; it should be 3072 MB
  // (AM memory + requested memory = 1024 + 2048 = 3072).
  ResourceScheduler rs = rm1.getRMContext().getScheduler();
  long allocatedMB = rs.getRootQueueMetrics().getAllocatedMB();
  Assert.assertEquals(amMemory + containerMemory, allocatedMB);
  // 5. Re-register the NM, sending completed container statuses.
  List<NMContainerStatus> nMContainerStatusForApp = createNMContainerStatusForApp(am0);
  nm1.registerNode(nMContainerStatusForApp, Arrays.asList(app0.getApplicationId()));
  waitForClusterMemory(nm1, rs, amMemory);
  // 6. Verify the memory used; it should be 1024 MB.
  Assert.assertEquals(amMemory, rs.getRootQueueMetrics().getAllocatedMB());
  // 7. Send an AM heartbeat to the RM; the allocate response should contain
  // the completed container.
  AllocateRequest req = AllocateRequest.newInstance(0, 0F,
      new ArrayList<ResourceRequest>(), new ArrayList<ContainerId>(), null);
  AllocateResponse allocate = am0.allocate(req);
  List<ContainerStatus> completedContainersStatuses =
      allocate.getCompletedContainersStatuses();
  Assert.assertEquals(noOfContainers, completedContainersStatuses.size());
  // Application clean-up should happen and the cluster memory used should drop to 0.
  nm1.nodeHeartbeat(am0.getApplicationAttemptId(), 1, ContainerState.COMPLETE);
  waitForClusterMemory(nm1, rs, 0);
  rm1.stop();
}
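
waitForClusterMemory(...) and createNMContainerStatusForApp(...) are helpers from the same test class that are not shown in this snippet. A plausible shape for the former, assumed rather than taken from the actual source, is a simple poll that heartbeats the NM until the scheduler's allocated memory reaches the expected value:

// Assumed sketch of the helper used above: heartbeat the NM and poll the
// scheduler metrics until the allocated memory matches the expected value.
private void waitForClusterMemory(MockNM nm, ResourceScheduler rs,
    long expectedMemoryMB) throws Exception {
  int attempts = 0;
  while (rs.getRootQueueMetrics().getAllocatedMB() != expectedMemoryMB) {
    nm.nodeHeartbeat(true);
    Thread.sleep(100);
    if (++attempts > 100) {
      Assert.fail("Timed out waiting for cluster memory to reach "
          + expectedMemoryMB + " MB");
    }
  }
}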
Aggregations