use of org.apache.hadoop.yarn.api.records.ResourceRequest in project hadoop by apache.
the class TestSignalContainer method testSignalRequestDeliveryToNM.
@Test
public void testSignalRequestDeliveryToNM() throws Exception {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  MockRM rm = new MockRM();
  rm.start();
  MockNM nm1 = rm.registerNode("h1:1234", 5000);
  RMApp app = rm.submitApp(2000);
  // kick the scheduling
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt = app.getCurrentAppAttempt();
  MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
  am.registerAppAttempt();
  // request for containers
  final int request = 2;
  am.allocate("h1", 1000, request, new ArrayList<ContainerId>());
  // kick the scheduler
  nm1.nodeHeartbeat(true);
  List<Container> conts = null;
  int contReceived = 0;
  int waitCount = 0;
  while (contReceived < request && waitCount++ < 200) {
    LOG.info("Got " + contReceived + " containers. Waiting to get " + request);
    Thread.sleep(100);
    conts = am.allocate(new ArrayList<ResourceRequest>(),
        new ArrayList<ContainerId>()).getAllocatedContainers();
    contReceived += conts.size();
  }
  Assert.assertEquals(request, contReceived);
  for (Container container : conts) {
    rm.signalToContainer(container.getId(),
        SignalContainerCommand.OUTPUT_THREAD_DUMP);
  }
  NodeHeartbeatResponse resp;
  List<SignalContainerRequest> contsToSignal;
  int signaledConts = 0;
  waitCount = 0;
  while (signaledConts < request && waitCount++ < 200) {
    LOG.info("Waiting to get signalcontainer events.. signaledConts: "
        + signaledConts);
    resp = nm1.nodeHeartbeat(true);
    contsToSignal = resp.getContainersToSignalList();
    signaledConts += contsToSignal.size();
    Thread.sleep(100);
  }
  // Verify NM receives the expected number of signal container requests.
  Assert.assertEquals(request, signaledConts);
  am.unregisterAppAttempt();
  nm1.nodeHeartbeat(attempt.getAppAttemptId(), 1, ContainerState.COMPLETE);
  rm.waitForState(am.getApplicationAttemptId(), RMAppAttemptState.FINISHED);
  rm.stop();
}
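For comparison, here is a minimal sketch (not part of the original test) of building the same ask explicitly with ResourceRequest.newInstance and passing it through the list-based MockAM#allocate overload already used in the polling loop above; the priority and capability values are illustrative.
Priority priority = Priority.newInstance(1);
Resource capability = Resource.newInstance(1000, 1);
List<ResourceRequest> explicitAsk = new ArrayList<ResourceRequest>();
// A request pinned to host "h1" plus a matching ANY request so the
// scheduler can also place containers off that host.
explicitAsk.add(ResourceRequest.newInstance(priority, "h1", capability, request));
explicitAsk.add(
    ResourceRequest.newInstance(priority, ResourceRequest.ANY, capability, request));
AllocateResponse response =
    am.allocate(explicitAsk, new ArrayList<ContainerId>());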
use of org.apache.hadoop.yarn.api.records.ResourceRequest in project hadoop by apache.
the class TestUtils method toSchedulerKey.
public static SchedulerRequestKey toSchedulerKey(Priority pri,
    long allocationRequestId) {
  ResourceRequest req = ResourceRequest.newInstance(pri, null, null, 0);
  req.setAllocationRequestId(allocationRequestId);
  return SchedulerRequestKey.create(req);
}
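A short usage sketch (assumed, not taken from the test suite): the resulting SchedulerRequestKey simply echoes back the priority and allocation request id it was built from.
// Sketch only: exercise TestUtils.toSchedulerKey with arbitrary values.
Priority priority = Priority.newInstance(3);
SchedulerRequestKey key = TestUtils.toSchedulerKey(priority, 42L);
Assert.assertEquals(priority, key.getPriority());
Assert.assertEquals(42L, key.getAllocationRequestId());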
use of org.apache.hadoop.yarn.api.records.ResourceRequest in project hadoop by apache.
the class FairSchedulerTestBase method createSchedulingRequest.
protected ApplicationAttemptId createSchedulingRequest(int memory, int vcores,
    String queueId, String userId, int numContainers, int priority) {
  ApplicationAttemptId id =
      createAppAttemptId(this.APP_ID++, this.ATTEMPT_ID++);
  scheduler.addApplication(id.getApplicationId(), queueId, userId, false);
  // The submission may have been rejected (e.g. by queue ACLs), in which case
  // no app is added; only add an attempt if the app actually exists.
  if (scheduler.getSchedulerApplications().containsKey(id.getApplicationId())) {
    scheduler.addApplicationAttempt(id, false, false);
  }
  List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
  ResourceRequest request = createResourceRequest(memory, vcores,
      ResourceRequest.ANY, priority, numContainers, true);
  ask.add(request);
  RMApp rmApp = mock(RMApp.class);
  RMAppAttempt rmAppAttempt = mock(RMAppAttempt.class);
  when(rmApp.getCurrentAppAttempt()).thenReturn(rmAppAttempt);
  when(rmAppAttempt.getRMAppAttemptMetrics()).thenReturn(
      new RMAppAttemptMetrics(id, resourceManager.getRMContext()));
  ApplicationSubmissionContext submissionContext =
      mock(ApplicationSubmissionContext.class);
  when(submissionContext.getUnmanagedAM()).thenReturn(false);
  when(rmAppAttempt.getSubmissionContext()).thenReturn(submissionContext);
  Container container = mock(Container.class);
  when(rmAppAttempt.getMasterContainer()).thenReturn(container);
  resourceManager.getRMContext().getRMApps().put(id.getApplicationId(), rmApp);
  scheduler.allocate(id, ask, new ArrayList<ContainerId>(), null, null,
      NULL_UPDATE_REQUESTS);
  scheduler.update();
  return id;
}
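A hedged usage sketch of this helper: it assumes the test has already added a node (node1 below) with free resources to the FairScheduler, so that a subsequent node update can place the requested container.
// Sketch only: ask for one 1 GB container, then let a node update
// drive the actual assignment.
ApplicationAttemptId attemptId =
    createSchedulingRequest(1024, 1, "queueA", "user1", 1, 1);
scheduler.handle(new NodeUpdateSchedulerEvent(node1));
FSAppAttempt app = scheduler.getSchedulerApp(attemptId);
Assert.assertEquals(1, app.getLiveContainers().size());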
use of org.apache.hadoop.yarn.api.records.ResourceRequest in project hadoop by apache.
the class TestContinuousScheduling method testSortedNodes.
@Test(timeout = 10000)
public void testSortedNodes() throws Exception {
  // Add two nodes
  RMNode node1 = MockNodes.newNodeInfo(1,
      Resources.createResource(8 * 1024, 8), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  RMNode node2 = MockNodes.newNodeInfo(1,
      Resources.createResource(8 * 1024, 8), 2, "127.0.0.2");
  NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2);
  scheduler.handle(nodeEvent2);
  // Verify the aggregate cluster resource
  Assert.assertEquals(16 * 1024, scheduler.getClusterResource().getMemorySize());
  Assert.assertEquals(16, scheduler.getClusterResource().getVirtualCores());
  // Send an application request
  ApplicationAttemptId appAttemptId =
      createAppAttemptId(this.APP_ID++, this.ATTEMPT_ID++);
  createMockRMApp(appAttemptId);
  scheduler.addApplication(appAttemptId.getApplicationId(), "queue11",
      "user11", false);
  scheduler.addApplicationAttempt(appAttemptId, false, false);
  List<ResourceRequest> ask = new ArrayList<>();
  ResourceRequest request =
      createResourceRequest(1024, 1, ResourceRequest.ANY, 1, 1, true);
  ask.add(request);
  scheduler.allocate(appAttemptId, ask, new ArrayList<ContainerId>(), null,
      null, NULL_UPDATE_REQUESTS);
  triggerSchedulingAttempt();
  FSAppAttempt app = scheduler.getSchedulerApp(appAttemptId);
  checkAppConsumption(app, Resources.createResource(1024, 1));
  // Another request, at a different priority
  request = createResourceRequest(1024, 1, ResourceRequest.ANY, 2, 1, true);
  ask.clear();
  ask.add(request);
  scheduler.allocate(appAttemptId, ask, new ArrayList<ContainerId>(), null,
      null, NULL_UPDATE_REQUESTS);
  triggerSchedulingAttempt();
  checkAppConsumption(app, Resources.createResource(2048, 2));
  // The two containers should be spread across the two nodes
  Set<NodeId> nodes = new HashSet<>();
  for (RMContainer rmContainer : app.getLiveContainers()) {
    nodes.add(rmContainer.getContainer().getNodeId());
  }
  Assert.assertEquals(2, nodes.size());
}
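Since the ask parameter of allocate is a list, the two requests above could also be submitted in one call; a small sketch using the same test-base helpers:
// Sketch only: the priority-1 and priority-2 asks combined into one call.
List<ResourceRequest> combinedAsk = new ArrayList<>();
combinedAsk.add(createResourceRequest(1024, 1, ResourceRequest.ANY, 1, 1, true));
combinedAsk.add(createResourceRequest(1024, 1, ResourceRequest.ANY, 2, 1, true));
scheduler.allocate(appAttemptId, combinedAsk, new ArrayList<ContainerId>(),
    null, null, NULL_UPDATE_REQUESTS);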
use of org.apache.hadoop.yarn.api.records.ResourceRequest in project hadoop by apache.
the class TestContinuousScheduling method testBasic.
@Test(timeout = 60000)
public void testBasic() throws InterruptedException {
  // Add one node
  String host = "127.0.0.1";
  RMNode node1 =
      MockNodes.newNodeInfo(1, Resources.createResource(4096, 4), 1, host);
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  NodeUpdateSchedulerEvent nodeUpdateEvent = new NodeUpdateSchedulerEvent(node1);
  scheduler.handle(nodeUpdateEvent);
  ApplicationAttemptId appAttemptId =
      createAppAttemptId(this.APP_ID++, this.ATTEMPT_ID++);
  createMockRMApp(appAttemptId);
  scheduler.addApplication(appAttemptId.getApplicationId(), "queue11",
      "user11", false);
  scheduler.addApplicationAttempt(appAttemptId, false, false);
  List<ResourceRequest> ask = new ArrayList<>();
  ask.add(createResourceRequest(1024, 1, ResourceRequest.ANY, 1, 1, true));
  scheduler.allocate(appAttemptId, ask, new ArrayList<ContainerId>(), null,
      null, NULL_UPDATE_REQUESTS);
  FSAppAttempt app = scheduler.getSchedulerApp(appAttemptId);
  triggerSchedulingAttempt();
  checkAppConsumption(app, Resources.createResource(1024, 1));
}
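The empty ArrayList<ContainerId> passed to allocate in these tests is the release list; here is a hedged sketch of returning an allocated container through it, with the container ids taken from the app's live containers.
// Sketch only: release everything the app currently holds, with an empty ask.
List<ContainerId> release = new ArrayList<>();
for (RMContainer rmContainer : app.getLiveContainers()) {
  release.add(rmContainer.getContainerId());
}
scheduler.allocate(appAttemptId, new ArrayList<ResourceRequest>(), release,
    null, null, NULL_UPDATE_REQUESTS);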