use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
the class TestProportionalCapacityPreemptionPolicy method mockLeafQueue.
@SuppressWarnings("rawtypes")
LeafQueue mockLeafQueue(ParentQueue p, Resource tot, int i, Resource[] abs, Resource[] used, Resource[] pending, Resource[] reserved, int[] apps, Resource[] gran) {
LeafQueue lq = mock(LeafQueue.class);
ResourceCalculator rc = mCS.getResourceCalculator();
List<ApplicationAttemptId> appAttemptIdList = new ArrayList<ApplicationAttemptId>();
when(lq.getTotalPendingResourcesConsideringUserLimit(isA(Resource.class), isA(String.class), eq(false))).thenReturn(pending[i]);
when(lq.getTotalPendingResourcesConsideringUserLimit(isA(Resource.class), isA(String.class), eq(true))).thenReturn(Resources.componentwiseMax(Resources.subtract(pending[i], reserved[i] == null ? Resources.none() : reserved[i]), Resources.none()));
// need to set pending resource in resource usage as well
ResourceUsage ru = new ResourceUsage();
ru.setPending(pending[i]);
ru.setUsed(used[i]);
ru.setReserved(reserved[i]);
when(lq.getQueueResourceUsage()).thenReturn(ru);
// consider moving where CapacityScheduler::comparator accessible
final NavigableSet<FiCaSchedulerApp> qApps = new TreeSet<FiCaSchedulerApp>(new Comparator<FiCaSchedulerApp>() {
@Override
public int compare(FiCaSchedulerApp a1, FiCaSchedulerApp a2) {
return a1.getApplicationAttemptId().compareTo(a2.getApplicationAttemptId());
}
});
// applications are added in global L->R order in queues
if (apps[i] != 0) {
Resource aUsed = Resources.divideAndCeil(rc, used[i], apps[i]);
Resource aPending = Resources.divideAndCeil(rc, pending[i], apps[i]);
Resource aReserve = Resources.divideAndCeil(rc, reserved[i], apps[i]);
for (int a = 0; a < apps[i]; ++a) {
FiCaSchedulerApp mockFiCaApp = mockApp(i, appAlloc, aUsed, aPending, aReserve, gran[i]);
qApps.add(mockFiCaApp);
++appAlloc;
appAttemptIdList.add(mockFiCaApp.getApplicationAttemptId());
}
when(mCS.getAppsInQueue("queue" + (char) ('A' + i - 1))).thenReturn(appAttemptIdList);
}
when(lq.getApplications()).thenReturn(qApps);
@SuppressWarnings("unchecked") OrderingPolicy<FiCaSchedulerApp> so = mock(OrderingPolicy.class);
when(so.getPreemptionIterator()).thenAnswer(new Answer() {
public Object answer(InvocationOnMock invocation) {
return qApps.descendingIterator();
}
});
when(lq.getOrderingPolicy()).thenReturn(so);
if (setAMResourcePercent != 0.0f) {
when(lq.getMaxAMResourcePerQueuePercent()).thenReturn(setAMResourcePercent);
}
ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
when(lq.getReadLock()).thenReturn(lock.readLock());
when(lq.getPriority()).thenReturn(Priority.newInstance(0));
p.getChildQueues().add(lq);
return lq;
}
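A note on the ordering above: the mocked OrderingPolicy returns qApps.descendingIterator(), so preemption walks the TreeSet from the highest application attempt id down, meaning the last-added app is the first preemption candidate. Below is a minimal, self-contained sketch of that behavior, using plain strings in place of FiCaSchedulerApp; the class and variable names are illustrative, not Hadoop API.

import java.util.Iterator;
import java.util.NavigableSet;
import java.util.TreeSet;

public class PreemptionOrderSketch {
  public static void main(String[] args) {
    // Apps sorted ascending by id, mirroring the comparator in mockLeafQueue.
    NavigableSet<String> qApps = new TreeSet<>();
    qApps.add("appattempt_0001");
    qApps.add("appattempt_0002");
    qApps.add("appattempt_0003");
    // The preemption iterator walks the set in reverse, so the youngest
    // (last-submitted) attempt is the first preemption candidate.
    Iterator<String> preemptionOrder = qApps.descendingIterator();
    while (preemptionOrder.hasNext()) {
      // Prints appattempt_0003, appattempt_0002, appattempt_0001.
      System.out.println(preemptionOrder.next());
    }
  }
}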
use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
the class TestProportionalCapacityPreemptionPolicy method mockApp.
FiCaSchedulerApp mockApp(int qid, int id, Resource used, Resource pending,
    Resource reserved, Resource gran) {
  FiCaSchedulerApp app = mock(FiCaSchedulerApp.class);
  ResourceCalculator rc = mCS.getResourceCalculator();
  ApplicationId appId = ApplicationId.newInstance(TS, id);
  ApplicationAttemptId appAttId = ApplicationAttemptId.newInstance(appId, 0);
  when(app.getApplicationId()).thenReturn(appId);
  when(app.getApplicationAttemptId()).thenReturn(appAttId);
  int cAlloc = 0;
  Resource unit = gran;
  List<RMContainer> cReserved = new ArrayList<RMContainer>();
  Resource resIter = Resource.newInstance(0, 0);
  for (; Resources.lessThan(rc, clusterResources, resIter, reserved);
      Resources.addTo(resIter, gran)) {
    cReserved.add(
        mockContainer(appAttId, cAlloc, unit, priority.CONTAINER.getValue()));
    ++cAlloc;
  }
  when(app.getReservedContainers()).thenReturn(cReserved);
  List<RMContainer> cLive = new ArrayList<RMContainer>();
  Resource usedIter = Resource.newInstance(0, 0);
  int i = 0;
  for (; Resources.lessThan(rc, clusterResources, usedIter, used);
      Resources.addTo(usedIter, gran)) {
    if (setAMContainer && i == 0) {
      cLive.add(mockContainer(appAttId, cAlloc, unit,
          priority.AMCONTAINER.getValue()));
    } else if (setLabeledContainer && i == 1) {
      cLive.add(mockContainer(appAttId, cAlloc, unit,
          priority.LABELEDCONTAINER.getValue()));
      Resources.addTo(used, Resource.newInstance(1, 1));
    } else {
      cLive.add(mockContainer(appAttId, cAlloc, unit,
          priority.CONTAINER.getValue()));
    }
    ++cAlloc;
    ++i;
  }
  when(app.getLiveContainers()).thenReturn(cLive);
  return app;
}
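mockApp mints one mock container per granularity unit until the cumulative resource reaches the target, and mockLeafQueue splits queue totals across apps with Resources.divideAndCeil. A dependency-free sketch of that arithmetic follows; the helper name and the 10G/3-apps numbers are illustrative assumptions, not taken from the test.

public class ContainerCountSketch {
  // Ceiling division, mirroring what Resources.divideAndCeil does per
  // resource dimension when mockLeafQueue splits queue totals across apps.
  static int divideAndCeil(int total, int parts) {
    return (total + parts - 1) / parts;
  }

  public static void main(String[] args) {
    int queueUsedGb = 10;   // hypothetical queue-level used resource
    int apps = 3;           // hypothetical number of apps in the queue
    int granularityGb = 1;  // per-container size (the "gran" argument)

    int perAppUsedGb = divideAndCeil(queueUsedGb, apps); // 4G each
    // mockApp's loop then adds one container per granularity step.
    int containersPerApp = divideAndCeil(perAppUsedGb, granularityGb);
    System.out.println(perAppUsedGb + "G used per app -> "
        + containersPerApp + " mock containers each");
  }
}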
use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
the class TestCapacitySchedulerSurgicalPreemption method testPriorityPreemptionWhenAllQueuesAreBelowGuaranteedCapacities.
@Test(timeout = 60000)
public void testPriorityPreemptionWhenAllQueuesAreBelowGuaranteedCapacities()
    throws Exception {
  /**
   * Test case: Submit two applications (app1/app2) to different queues, queue
   * structure:
   *
   * <pre>
   *           Root
   *          /  |  \
   *         a   b   c
   *        10  20  70
   * </pre>
   *
   * 1) Two nodes (n1/n2) in the cluster, each of them has 20G.
   *
   * 2) app1 submits to queue-b first and asks for 6 * 1G containers.
   *    We will allocate 4 on n1 (including the AM) and 3 on n2.
   *
   * 3) app2 submits to queue-c and asks for one 18G container (for its AM).
   *
   * After preemption, we should expect:
   * 2 containers preempted from app1 and the AM of app2 successfully
   * allocated.
   */
  conf.setPUOrderingPolicyUnderUtilizedPreemptionEnabled(true);
  conf.setPUOrderingPolicyUnderUtilizedPreemptionDelay(1000);
  conf.setQueueOrderingPolicy(CapacitySchedulerConfiguration.ROOT,
      CapacitySchedulerConfiguration.QUEUE_PRIORITY_UTILIZATION_ORDERING_POLICY);
  // Queue c has higher priority than a/b
  conf.setQueuePriority(CapacitySchedulerConfiguration.ROOT + ".c", 1);
  MockRM rm1 = new MockRM(conf);
  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 20 * GB);
  MockNM nm2 = rm1.registerNode("h2:1234", 20 * GB);
  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
  RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
  // Launch an app in queue-b; its AM container should be launched on nm1.
  RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "b");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
  am1.allocate("*", 1 * GB, 6, new ArrayList<>());
  // Do allocation for node1/node2
  for (int i = 0; i < 3; i++) {
    cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
  }
  // App1 should have 7 containers now, so the absolute used capacity of b is
  // 7 / 40 = 17.5% < 20% (guaranteed)
  FiCaSchedulerApp schedulerApp1 =
      cs.getApplicationAttempt(am1.getApplicationAttemptId());
  Assert.assertEquals(7, schedulerApp1.getLiveContainers().size());
  // 4 from n1 and 3 from n2
  waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNode1.getNodeID()),
      am1.getApplicationAttemptId(), 4);
  waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNode2.getNodeID()),
      am1.getApplicationAttemptId(), 3);
  // Submit app2 to queue-c and ask for an 18G container for its AM.
  RMApp app2 = rm1.submitApp(18 * GB, "app", "user", null, "c");
  FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
      ApplicationAttemptId.newInstance(app2.getApplicationId(), 1));
  while (cs.getNode(rmNode1.getNodeID()).getReservedContainer() == null) {
    cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    Thread.sleep(10);
  }
  // Call editSchedule immediately: no containers are selected yet, because
  // the under-utilized-preemption delay has not elapsed.
  ProportionalCapacityPreemptionPolicy editPolicy =
      (ProportionalCapacityPreemptionPolicy) getSchedulingEditPolicy(rm1);
  editPolicy.editSchedule();
  Assert.assertEquals(0, editPolicy.getToPreemptContainers().size());
  // Sleep through the delay interval; containers should now be selected.
  Thread.sleep(1000);
  editPolicy.editSchedule();
  Assert.assertEquals(2, editPolicy.getToPreemptContainers().size());
  // Call editSchedule again: selected containers are killed, and the new AM
  // container is launched.
  editPolicy.editSchedule();
  // Do allocation until the reserved container is allocated.
  while (cs.getNode(rmNode1.getNodeID()).getReservedContainer() != null) {
    cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    Thread.sleep(10);
  }
  waitNumberOfLiveContainersFromApp(schedulerApp2, 1);
  rm1.close();
}
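Why the test expects exactly 2 entries in getToPreemptContainers(): n1 is a 20G node holding 4 of app1's 1G containers, and app2's 18G AM must fit on a single node, so 16G free plus 2 preempted gigabytes covers the demand. A standalone sanity check of that arithmetic (plain Java, not Hadoop code):

public class PreemptDemandSketch {
  public static void main(String[] args) {
    int nodeGb = 20;            // n1 capacity
    int app1ContainersOnN1 = 4; // 1G each, including app1's AM
    int amDemandGb = 18;        // app2's single-container AM request

    int freeGb = nodeGb - app1ContainersOnN1; // 16G free on n1
    // Each preempted 1G container frees one more gigabyte on n1.
    int toPreempt = Math.max(0, amDemandGb - freeGb);
    System.out.println("containers to preempt: " + toPreempt); // 2
  }
}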
use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
the class TestCapacitySchedulerSurgicalPreemption method testSurgicalPreemptionWithAvailableResource.
@Test(timeout = 60000)
public void testSurgicalPreemptionWithAvailableResource() throws Exception {
  /**
   * Test case: Submit two applications (app1/app2) to different queues, queue
   * structure:
   *
   * <pre>
   *           Root
   *          /  |  \
   *         a   b   c
   *        10  20  70
   * </pre>
   *
   * 1) Two nodes (n1/n2) in the cluster, each of them has 20G.
   *
   * 2) app1 submits to queue-a and asks for 38 * 1G containers.
   *    We will allocate 20 on n1 and 19 on n2.
   *
   * 3) app2 submits to queue-c and asks for one 4G container (for its AM).
   *
   * After preemption, we should expect:
   * 3 containers preempted from app1 and the AM of app2 successfully
   * allocated.
   */
  MockRM rm1 = new MockRM(conf);
  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 20 * GB);
  MockNM nm2 = rm1.registerNode("h2:1234", 20 * GB);
  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
  RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
  // Launch an app in queue-a; its AM container should be launched on nm1.
  RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "a");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
  am1.allocate("*", 1 * GB, 38, new ArrayList<ContainerId>());
  // Do allocation for node1/node2
  for (int i = 0; i < 38; i++) {
    cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
  }
  // App1 should have 39 containers now
  FiCaSchedulerApp schedulerApp1 =
      cs.getApplicationAttempt(am1.getApplicationAttemptId());
  Assert.assertEquals(39, schedulerApp1.getLiveContainers().size());
  // 20 from n1 and 19 from n2
  waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNode1.getNodeID()),
      am1.getApplicationAttemptId(), 20);
  waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNode2.getNodeID()),
      am1.getApplicationAttemptId(), 19);
  // Submit app2 to queue-c and ask for a 4G container for its AM.
  RMApp app2 = rm1.submitApp(4 * GB, "app", "user", null, "c");
  FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
      ApplicationAttemptId.newInstance(app2.getApplicationId(), 1));
  // Call editSchedule: containers are selected as preemption candidates.
  ProportionalCapacityPreemptionPolicy editPolicy =
      (ProportionalCapacityPreemptionPolicy) getSchedulingEditPolicy(rm1);
  editPolicy.editSchedule();
  Assert.assertEquals(3, editPolicy.getToPreemptContainers().size());
  // Call editSchedule again: selected containers are killed.
  editPolicy.editSchedule();
  waitNumberOfLiveContainersFromApp(schedulerApp1, 36);
  // Do allocation; app2's container gets reserved.
  cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
  cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
  waitNumberOfReservedContainersFromApp(schedulerApp2, 1);
  // Call editSchedule twice, then keep allocating until the container is
  // allocated.
  editPolicy.editSchedule();
  editPolicy.editSchedule();
  int tick = 0;
  while (schedulerApp2.getLiveContainers().size() != 1 && tick < 10) {
    cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    tick++;
    Thread.sleep(100);
  }
  waitNumberOfReservedContainersFromApp(schedulerApp2, 0);
  rm1.close();
}
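The "available resource" in this test's name is the 1G the cluster still has free after app1's allocations (40G total, 39G held by app1). The policy only needs to preempt the shortfall, which is why the assertion is 3 containers rather than 4. A standalone check of that arithmetic:

public class AvailableResourceSketch {
  public static void main(String[] args) {
    int clusterGb = 2 * 20; // two 20G nodes
    int app1UsedGb = 39;    // 38 task containers + 1 AM, 1G each
    int app2DemandGb = 4;   // app2's AM container

    int availableGb = clusterGb - app1UsedGb; // 1G already free
    // Preemption tops up the free space only as far as the demand requires.
    int toPreempt = app2DemandGb - availableGb;
    System.out.println("containers to preempt: " + toPreempt); // 3
  }
}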
use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
the class TestChildQueueOrder method testSortedQueues.
@Test
@SuppressWarnings("unchecked")
public void testSortedQueues() throws Exception {
  // Set up queue configs
  setupSortedQueues(csConf);
  Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
  CSQueue root = CapacitySchedulerQueueManager.parseQueue(csContext, csConf,
      null, CapacitySchedulerConfiguration.ROOT, queues, queues,
      TestUtils.spyHook);
  // Set up some nodes
  final int memoryPerNode = 10;
  final int coresPerNode = 16;
  final int numNodes = 1;
  FiCaSchedulerNode node_0 =
      TestUtils.getMockNode("host_0", DEFAULT_RACK, 0, memoryPerNode * GB);
  doNothing().when(node_0)
      .releaseContainer(any(ContainerId.class), anyBoolean());
  final Resource clusterResource = Resources.createResource(
      numNodes * (memoryPerNode * GB), numNodes * coresPerNode);
  when(csContext.getNumClusterNodes()).thenReturn(numNodes);
  // Start testing
  CSQueue a = queues.get(A);
  CSQueue b = queues.get(B);
  CSQueue c = queues.get(C);
  CSQueue d = queues.get(D);
  // Give root and a/b/c/d >0 pending resources so that allocation continues.
  queues.get(CapacitySchedulerConfiguration.ROOT).getQueueResourceUsage()
      .incPending(Resources.createResource(1 * GB));
  a.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
  b.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
  c.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
  d.getQueueResourceUsage().incPending(Resources.createResource(1 * GB));
  final String user_0 = "user_0";
  // Stub an app and its containerCompleted
  FiCaSchedulerApp app_0 = getMockApplication(0, user_0);
  doReturn(true).when(app_0).containerCompleted(any(RMContainer.class),
      any(ContainerStatus.class), any(RMContainerEventType.class),
      any(String.class));
  Priority priority = TestUtils.createMockPriority(1);
  ContainerAllocationExpirer expirer = mock(ContainerAllocationExpirer.class);
  DrainDispatcher drainDispatcher = new DrainDispatcher();
  RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
  SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class);
  RMContext rmContext = mock(RMContext.class);
  when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
  when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
  when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
  when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
  when(rmContext.getYarnConfiguration()).thenReturn(new YarnConfiguration());
  ApplicationAttemptId appAttemptId =
      BuilderUtils.newApplicationAttemptId(app_0.getApplicationId(), 1);
  ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
  Container container = TestUtils.getMockContainer(containerId,
      node_0.getNodeID(), Resources.createResource(1 * GB), priority);
  RMContainer rmContainer = new RMContainerImpl(container,
      SchedulerRequestKey.extractFrom(container), appAttemptId,
      node_0.getNodeID(), "user", rmContext);
  // Assign 1, 2, 3, and 4 1GB containers to queues a, b, c, and d
  // respectively.
  stubQueueAllocation(a, clusterResource, node_0, 1 * GB);
  stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
  stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
  stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
  root.assignContainers(clusterResource, node_0,
      new ResourceLimits(clusterResource),
      SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
  for (int i = 0; i < 2; i++) {
    stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 1 * GB);
    stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
    root.assignContainers(clusterResource, node_0,
        new ResourceLimits(clusterResource),
        SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
  }
  for (int i = 0; i < 3; i++) {
    stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(c, clusterResource, node_0, 1 * GB);
    stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
    root.assignContainers(clusterResource, node_0,
        new ResourceLimits(clusterResource),
        SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
  }
  for (int i = 0; i < 4; i++) {
    stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(d, clusterResource, node_0, 1 * GB);
    root.assignContainers(clusterResource, node_0,
        new ResourceLimits(clusterResource),
        SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
  }
  verifyQueueMetrics(a, 1 * GB, clusterResource);
  verifyQueueMetrics(b, 2 * GB, clusterResource);
  verifyQueueMetrics(c, 3 * GB, clusterResource);
  verifyQueueMetrics(d, 4 * GB, clusterResource);
  LOG.info("status child-queues: "
      + ((ParentQueue) root).getChildQueuesToPrint());
  // Release 3 x 1GB containers from d
  for (int i = 0; i < 3; i++) {
    d.completedContainer(clusterResource, app_0, node_0, rmContainer, null,
        RMContainerEventType.KILL, null, true);
  }
  verifyQueueMetrics(a, 1 * GB, clusterResource);
  verifyQueueMetrics(b, 2 * GB, clusterResource);
  verifyQueueMetrics(c, 3 * GB, clusterResource);
  verifyQueueMetrics(d, 1 * GB, clusterResource);
  // Manually reset the available resources on the node
  node_0 = TestUtils.getMockNode("host_0", DEFAULT_RACK, 0,
      (memoryPerNode - 1 - 2 - 3 - 1) * GB);
  LOG.info("status child-queues: "
      + ((ParentQueue) root).getChildQueuesToPrint());
  // Assign 2 x 1GB containers to a
  for (int i = 0; i < 2; i++) {
    stubQueueAllocation(a, clusterResource, node_0, 1 * GB);
    stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
    stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
    root.assignContainers(clusterResource, node_0,
        new ResourceLimits(clusterResource),
        SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
  }
  verifyQueueMetrics(a, 3 * GB, clusterResource);
  verifyQueueMetrics(b, 2 * GB, clusterResource);
  verifyQueueMetrics(c, 3 * GB, clusterResource);
  verifyQueueMetrics(d, 1 * GB, clusterResource);
  LOG.info("status child-queues: "
      + ((ParentQueue) root).getChildQueuesToPrint());
  // Release a 1GB container from a
  a.completedContainer(clusterResource, app_0, node_0, rmContainer, null,
      RMContainerEventType.KILL, null, true);
  verifyQueueMetrics(a, 2 * GB, clusterResource);
  verifyQueueMetrics(b, 2 * GB, clusterResource);
  verifyQueueMetrics(c, 3 * GB, clusterResource);
  verifyQueueMetrics(d, 1 * GB, clusterResource);
  // Manually reset the available resources on the node
  node_0 = TestUtils.getMockNode("host_0", DEFAULT_RACK, 0,
      (memoryPerNode - 2 - 2 - 3 - 1) * GB);
  LOG.info("status child-queues: "
      + ((ParentQueue) root).getChildQueuesToPrint());
  // Assign a 1GB container to b
  stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
  stubQueueAllocation(b, clusterResource, node_0, 1 * GB);
  stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
  stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
  root.assignContainers(clusterResource, node_0,
      new ResourceLimits(clusterResource),
      SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
  verifyQueueMetrics(a, 2 * GB, clusterResource);
  verifyQueueMetrics(b, 3 * GB, clusterResource);
  verifyQueueMetrics(c, 3 * GB, clusterResource);
  verifyQueueMetrics(d, 1 * GB, clusterResource);
  LOG.info("status child-queues: "
      + ((ParentQueue) root).getChildQueuesToPrint());
  // Release a 1GB container from b
  b.completedContainer(clusterResource, app_0, node_0, rmContainer, null,
      RMContainerEventType.KILL, null, true);
  verifyQueueMetrics(a, 2 * GB, clusterResource);
  verifyQueueMetrics(b, 2 * GB, clusterResource);
  verifyQueueMetrics(c, 3 * GB, clusterResource);
  verifyQueueMetrics(d, 1 * GB, clusterResource);
  // Manually reset the available resources on the node
  node_0 = TestUtils.getMockNode("host_0", DEFAULT_RACK, 0,
      (memoryPerNode - 2 - 2 - 3 - 1) * GB);
  LOG.info("status child-queues: "
      + ((ParentQueue) root).getChildQueuesToPrint());
  // Assign a 1GB container to a
  stubQueueAllocation(a, clusterResource, node_0, 1 * GB);
  stubQueueAllocation(b, clusterResource, node_0, 0 * GB);
  stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
  stubQueueAllocation(d, clusterResource, node_0, 0 * GB);
  root.assignContainers(clusterResource, node_0,
      new ResourceLimits(clusterResource),
      SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
  verifyQueueMetrics(a, 3 * GB, clusterResource);
  verifyQueueMetrics(b, 2 * GB, clusterResource);
  verifyQueueMetrics(c, 3 * GB, clusterResource);
  verifyQueueMetrics(d, 1 * GB, clusterResource);
  LOG.info("status child-queues: "
      + ((ParentQueue) root).getChildQueuesToPrint());
  // Now do the real test: b and d each request a 1GB container.
  // If the child-queue order is correct, d should get the next container.
  stubQueueAllocation(a, clusterResource, node_0, 0 * GB);
  stubQueueAllocation(b, clusterResource, node_0, 1 * GB);
  stubQueueAllocation(c, clusterResource, node_0, 0 * GB);
  stubQueueAllocation(d, clusterResource, node_0, 1 * GB);
  root.assignContainers(clusterResource, node_0,
      new ResourceLimits(clusterResource),
      SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
  InOrder allocationOrder = inOrder(d, b);
  allocationOrder.verify(d).assignContainers(eq(clusterResource),
      any(PlacementSet.class), any(ResourceLimits.class),
      any(SchedulingMode.class));
  allocationOrder.verify(b).assignContainers(eq(clusterResource),
      any(PlacementSet.class), any(ResourceLimits.class),
      any(SchedulingMode.class));
  verifyQueueMetrics(a, 3 * GB, clusterResource);
  verifyQueueMetrics(b, 2 * GB, clusterResource);
  verifyQueueMetrics(c, 3 * GB, clusterResource);
  // d got the container
  verifyQueueMetrics(d, 2 * GB, clusterResource);
  LOG.info("status child-queues: "
      + ((ParentQueue) root).getChildQueuesToPrint());
}
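The final assertion relies on Mockito's InOrder API: it does not just check that each queue's assignContainers was called, but that d was offered the node before b (d holds 1GB against b's 2GB, so the sorted child-queue order places d first). A minimal, self-contained illustration of the InOrder verification pattern, with Runnable mocks standing in for the child queues:

import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
import org.mockito.InOrder;

public class InOrderSketch {
  public static void main(String[] args) {
    // Two mocked collaborators standing in for the d and b child queues.
    Runnable d = mock(Runnable.class);
    Runnable b = mock(Runnable.class);
    // The code under test touches d first, then b.
    d.run();
    b.run();
    // inOrder verifies the relative call sequence, not just that each call
    // happened -- the same mechanism the test uses to prove that
    // assignContainers reached d before b.
    InOrder order = inOrder(d, b);
    order.verify(d).run();
    order.verify(b).run();
  }
}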