Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent in project hadoop by apache.
From the class TestContainerAllocation, method testReservedContainerMetricsOnDecommisionedNode:
@Test(timeout = 60000)
public void testReservedContainerMetricsOnDecommisionedNode() throws Exception {
  /**
   * Test case: submit two applications (app1/app2) to the default queue of a
   * cluster with two 8G nodes; all scheduling activity is driven on nm1. Each
   * AM takes 1G, and each application then asks for one 4G container.
   *
   * The first node heartbeat allocates app1's 4G container; the second can
   * only reserve app2's 4G container, because just 2G is left on nm1. The
   * node is then removed, which must release all live and reserved containers
   * and bring the queue's used/reserved metrics back to zero.
   */
  // inject node label manager
  MockRM rm1 = new MockRM();
  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 8 * GB);
  MockNM nm2 = rm1.registerNode("h2:1234", 8 * GB);
  // launch an app to the queue, AM container should be launched on nm1
  RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "default");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
  // launch another app to the queue, AM container should be launched on nm1
  RMApp app2 = rm1.submitApp(1 * GB, "app", "user", null, "default");
  MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
  am1.allocate("*", 4 * GB, 1, new ArrayList<ContainerId>());
  am2.allocate("*", 4 * GB, 1, new ArrayList<ContainerId>());
  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
  LeafQueue leafQueue = (LeafQueue) cs.getQueue("default");
  // Do two node heartbeats: the first allocates a container for app1, the
  // second reserves a container for app2
  cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
  cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
  FiCaSchedulerApp schedulerApp1 =
      cs.getApplicationAttempt(am1.getApplicationAttemptId());
  FiCaSchedulerApp schedulerApp2 =
      cs.getApplicationAttempt(am2.getApplicationAttemptId());
  // app1 holds its AM plus the 4G container; app2 holds only its AM plus a
  // 4G reservation
  Assert.assertEquals(2, schedulerApp1.getLiveContainers().size());
  Assert.assertEquals(1, schedulerApp2.getLiveContainers().size());
  Assert.assertTrue(schedulerApp2.getReservedContainers().size() > 0);
  // NM1 has available resource = 2G (8G - 2 * 1G - 4G)
  Assert.assertEquals(2 * GB,
      cs.getNode(nm1.getNodeId()).getUnallocatedResource().getMemorySize());
  Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
  // Usage of queue = 4G + 2 * 1G + 4G (reserved)
  Assert.assertEquals(10 * GB,
      cs.getRootQueue().getQueueResourceUsage().getUsed().getMemorySize());
  Assert.assertEquals(4 * GB,
      cs.getRootQueue().getQueueResourceUsage().getReserved().getMemorySize());
  Assert.assertEquals(4 * GB,
      leafQueue.getQueueResourceUsage().getReserved().getMemorySize());
  // Remove the node
  cs.handle(new NodeRemovedSchedulerEvent(rmNode1));
  // Check that all containers are cancelled for app1 and app2
  Assert.assertEquals(0, schedulerApp1.getLiveContainers().size());
  Assert.assertEquals(0, schedulerApp2.getLiveContainers().size());
  Assert.assertFalse(schedulerApp2.getReservedContainers().size() > 0);
  // Used and reserved capacity of the queue are back to 0
  Assert.assertEquals(0 * GB,
      cs.getRootQueue().getQueueResourceUsage().getUsed().getMemorySize());
  Assert.assertEquals(0 * GB,
      cs.getRootQueue().getQueueResourceUsage().getReserved().getMemorySize());
  Assert.assertEquals(0 * GB,
      leafQueue.getQueueResourceUsage().getReserved().getMemorySize());
  rm1.close();
}
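The decommission check above boils down to a small pattern: dispatch a NodeRemovedSchedulerEvent for the node and verify that queue accounting is empty afterwards. The helper below is a sketch of that pattern, not part of TestContainerAllocation; it reuses only calls already exercised in the test, and the helper name removeNodeAndAssertQueueIdle is my own.
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceUsage;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
import org.junit.Assert;

// Hypothetical helper: decommission a node and assert that the root queue's
// used and reserved memory both return to zero.
private static void removeNodeAndAssertQueueIdle(CapacityScheduler cs, RMNode node) {
  cs.handle(new NodeRemovedSchedulerEvent(node));
  ResourceUsage rootUsage = cs.getRootQueue().getQueueResourceUsage();
  Assert.assertEquals(0, rootUsage.getUsed().getMemorySize());
  Assert.assertEquals(0, rootUsage.getReserved().getMemorySize());
}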
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent in project hadoop by apache.
From the class TestSchedulingUpdate, method testSchedulingUpdateOnNodeJoinLeave:
@Test(timeout = 3000)
public void testSchedulingUpdateOnNodeJoinLeave() throws InterruptedException {
  verifyNoCalls();
  // Add one node
  String host = "127.0.0.1";
  final int memory = 4096;
  final int cores = 4;
  RMNode node1 = MockNodes.newNodeInfo(1,
      Resources.createResource(memory, cores), 1, host);
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1);
  scheduler.handle(nodeEvent1);
  long expectedCalls = 1;
  verifyExpectedCalls(expectedCalls, memory, cores);
  // Remove the node
  NodeRemovedSchedulerEvent nodeEvent2 = new NodeRemovedSchedulerEvent(node1);
  scheduler.handle(nodeEvent2);
  expectedCalls = 2;
  verifyExpectedCalls(expectedCalls, 0, 0);
}
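Beyond the update-call counting above, the same pair of events also drives the scheduler's aggregate bookkeeping. The snippet below is a companion sketch (not part of TestSchedulingUpdate), written under the assumption that getClusterResource() and getNumClusterNodes() reflect the nodes registered through these events; it reuses the MockNodes helper from the test.
// Companion sketch: a node joining and leaving should be visible in the totals.
RMNode node = MockNodes.newNodeInfo(1, Resources.createResource(4096, 4), 1, "127.0.0.1");
scheduler.handle(new NodeAddedSchedulerEvent(node));
Assert.assertEquals(1, scheduler.getNumClusterNodes());
Assert.assertEquals(4096, scheduler.getClusterResource().getMemorySize());
scheduler.handle(new NodeRemovedSchedulerEvent(node));
Assert.assertEquals(0, scheduler.getNumClusterNodes());
Assert.assertEquals(0, scheduler.getClusterResource().getMemorySize());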
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent in project hadoop by apache.
From the class TestFifoScheduler, method testReconnectedNode:
@Test(timeout = 50000)
public void testReconnectedNode() throws Exception {
  CapacitySchedulerConfiguration conf = new CapacitySchedulerConfiguration();
  conf.setQueues("default", new String[] { "default" });
  conf.setCapacity("default", 100);
  FifoScheduler fs = new FifoScheduler();
  fs.init(conf);
  fs.start();
  // mock rmContext to avoid NPE.
  RMContext context = mock(RMContext.class);
  fs.reinitialize(conf, null);
  fs.setRMContext(context);
  RMNode n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1, "127.0.0.2");
  RMNode n2 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 2, "127.0.0.3");
  fs.handle(new NodeAddedSchedulerEvent(n1));
  fs.handle(new NodeAddedSchedulerEvent(n2));
  fs.handle(new NodeUpdateSchedulerEvent(n1));
  Assert.assertEquals(6 * GB, fs.getRootQueueMetrics().getAvailableMB());
  // reconnect n1 with downgraded memory
  n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 1, "127.0.0.2");
  fs.handle(new NodeRemovedSchedulerEvent(n1));
  fs.handle(new NodeAddedSchedulerEvent(n1));
  fs.handle(new NodeUpdateSchedulerEvent(n1));
  Assert.assertEquals(4 * GB, fs.getRootQueueMetrics().getAvailableMB());
  fs.stop();
}
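The reconnect above is modelled as a fixed sequence of three events against the same node id: remove the scheduler's old view of the node, add it again with its new capability, then send a node update so metrics refresh. A hypothetical helper (my naming, not part of TestFifoScheduler) that captures that sequence:
// Hypothetical helper: simulate a NodeManager re-registering with a changed
// capability. Both RMNode arguments must carry the same node id.
private static void reconnectNode(FifoScheduler fs, RMNode previous, RMNode reconnected) {
  fs.handle(new NodeRemovedSchedulerEvent(previous));
  fs.handle(new NodeAddedSchedulerEvent(reconnected));
  fs.handle(new NodeUpdateSchedulerEvent(reconnected));
}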
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent in project hadoop by apache.
From the class TestCapacityScheduler, method testNumClusterNodes:
@Test
public void testNumClusterNodes() throws Exception {
  YarnConfiguration conf = new YarnConfiguration();
  CapacityScheduler cs = new CapacityScheduler();
  cs.setConf(conf);
  RMContext rmContext = TestUtils.getMockRMContext();
  cs.setRMContext(rmContext);
  CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
  setupQueueConfiguration(csConf);
  cs.init(csConf);
  cs.start();
  assertEquals(0, cs.getNumClusterNodes());
  RMNode n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(4 * GB), 1);
  RMNode n2 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 2);
  cs.handle(new NodeAddedSchedulerEvent(n1));
  cs.handle(new NodeAddedSchedulerEvent(n2));
  assertEquals(2, cs.getNumClusterNodes());
  cs.handle(new NodeRemovedSchedulerEvent(n1));
  assertEquals(1, cs.getNumClusterNodes());
  cs.handle(new NodeAddedSchedulerEvent(n1));
  assertEquals(2, cs.getNumClusterNodes());
  cs.handle(new NodeRemovedSchedulerEvent(n2));
  cs.handle(new NodeRemovedSchedulerEvent(n1));
  assertEquals(0, cs.getNumClusterNodes());
  cs.stop();
}
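The node count checked above should also be mirrored by the aggregate cluster resource. The lines below are a follow-on sketch (not part of TestCapacityScheduler), assuming the same setup before cs.stop(), that getClusterResource() sums the capabilities of registered nodes, and that GB is the same 1024 MB constant used in the test.
// Follow-on sketch: replay the add/remove sequence while watching the totals.
cs.handle(new NodeAddedSchedulerEvent(n1));    // +4 GB
cs.handle(new NodeAddedSchedulerEvent(n2));    // +2 GB
assertEquals(6 * GB, cs.getClusterResource().getMemorySize());
cs.handle(new NodeRemovedSchedulerEvent(n1));  // -4 GB
assertEquals(2 * GB, cs.getClusterResource().getMemorySize());
cs.handle(new NodeRemovedSchedulerEvent(n2));  // back to an empty cluster
assertEquals(0, cs.getClusterResource().getMemorySize());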
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent in project hadoop by apache.
From the class TestAbstractYarnScheduler, method testMaximumAllocationMemoryHelper:
private void testMaximumAllocationMemoryHelper(YarnScheduler scheduler,
    final int node1MaxMemory, final int node2MaxMemory, final int node3MaxMemory,
    final int... expectedMaxMemory) throws Exception {
  Assert.assertEquals(6, expectedMaxMemory.length);
  Assert.assertEquals(0, scheduler.getNumClusterNodes());
  long maxMemory = scheduler.getMaximumResourceCapability().getMemorySize();
  Assert.assertEquals(expectedMaxMemory[0], maxMemory);
  RMNode node1 = MockNodes.newNodeInfo(0,
      Resources.createResource(node1MaxMemory), 1, "127.0.0.2");
  scheduler.handle(new NodeAddedSchedulerEvent(node1));
  Assert.assertEquals(1, scheduler.getNumClusterNodes());
  maxMemory = scheduler.getMaximumResourceCapability().getMemorySize();
  Assert.assertEquals(expectedMaxMemory[1], maxMemory);
  scheduler.handle(new NodeRemovedSchedulerEvent(node1));
  Assert.assertEquals(0, scheduler.getNumClusterNodes());
  maxMemory = scheduler.getMaximumResourceCapability().getMemorySize();
  Assert.assertEquals(expectedMaxMemory[2], maxMemory);
  RMNode node2 = MockNodes.newNodeInfo(0,
      Resources.createResource(node2MaxMemory), 2, "127.0.0.3");
  scheduler.handle(new NodeAddedSchedulerEvent(node2));
  Assert.assertEquals(1, scheduler.getNumClusterNodes());
  maxMemory = scheduler.getMaximumResourceCapability().getMemorySize();
  Assert.assertEquals(expectedMaxMemory[3], maxMemory);
  RMNode node3 = MockNodes.newNodeInfo(0,
      Resources.createResource(node3MaxMemory), 3, "127.0.0.4");
  scheduler.handle(new NodeAddedSchedulerEvent(node3));
  Assert.assertEquals(2, scheduler.getNumClusterNodes());
  maxMemory = scheduler.getMaximumResourceCapability().getMemorySize();
  Assert.assertEquals(expectedMaxMemory[4], maxMemory);
  scheduler.handle(new NodeRemovedSchedulerEvent(node3));
  Assert.assertEquals(1, scheduler.getNumClusterNodes());
  maxMemory = scheduler.getMaximumResourceCapability().getMemorySize();
  Assert.assertEquals(expectedMaxMemory[5], maxMemory);
  scheduler.handle(new NodeRemovedSchedulerEvent(node2));
  Assert.assertEquals(0, scheduler.getNumClusterNodes());
}
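A usage sketch for the helper follows. The concrete numbers are illustrative assumptions only: they presume a configured maximum allocation of 10240 MB and that the scheduler caps the effective maximum at the largest live node, falling back to the configured value for an empty cluster. They are not taken from the actual callers in TestAbstractYarnScheduler.
// Illustrative call: the six expected values line up with the helper's checkpoints.
testMaximumAllocationMemoryHelper(scheduler,
    2048, 4096, 8192,  // capabilities of node1, node2, node3 in MB (assumed)
    10240,             // [0] empty cluster: configured maximum (assumed 10240 MB)
    2048,              // [1] node1 (2 GB) is the only node
    10240,             // [2] node1 removed: back to the configured maximum
    4096,              // [3] node2 (4 GB) is the only node
    8192,              // [4] node3 (8 GB) joins and becomes the largest node
    4096);             // [5] node3 removed: node2 (4 GB) remains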