Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
In class TestIncreaseAllocationExpirer, method testContainerIncreaseAllocationExpiration.
@Test
public void testContainerIncreaseAllocationExpiration() throws Exception {
/**
* 1. Allocate 1 container: containerId2 (1G)
* 2. Increase resource of containerId2: 1G -> 3G
* 3. AM acquires the token
* 4. AM does not use the token
* 5. Verify containerId2's resource usage falls back to
* 1G after the increase token expires
*/
// Set the allocation expiration to 5 seconds
conf.setLong(YarnConfiguration.RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS, 5000);
MockRM rm1 = new MockRM(conf);
rm1.start();
MockNM nm1 = rm1.registerNode("127.0.0.1:1234", 20 * GB);
RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "default");
MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
nm1.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(), 1, ContainerState.RUNNING);
am1.allocate("127.0.0.1", 1 * GB, 1, new ArrayList<ContainerId>());
ContainerId containerId2 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 2);
rm1.waitForState(nm1, containerId2, RMContainerState.ALLOCATED);
List<Container> containers = am1.allocate(null, null).getAllocatedContainers();
Assert.assertEquals(containerId2, containers.get(0).getId());
Assert.assertNotNull(containers.get(0).getContainerToken());
checkUsedResource(rm1, "default", 2 * GB, null);
FiCaSchedulerApp app = TestUtils.getFiCaSchedulerApp(rm1, app1.getApplicationId());
Assert.assertEquals(2 * GB, app.getAppAttemptResourceUsage().getUsed().getMemorySize());
verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 18 * GB);
nm1.nodeHeartbeat(app1.getCurrentAppAttempt().getAppAttemptId(), 2, ContainerState.RUNNING);
rm1.waitForState(nm1, containerId2, RMContainerState.RUNNING);
// am1 asks to increase containerId2 from 1GB to 3GB
am1.sendContainerResizingRequest(Collections.singletonList(UpdateContainerRequest.newInstance(0, containerId2, ContainerUpdateType.INCREASE_RESOURCE, Resources.createResource(3 * GB), null)));
// Kick off scheduling and wait for 1 second
nm1.nodeHeartbeat(true);
Thread.sleep(1000);
// Start container increase allocation expirer
am1.allocate(null, null);
// Verify resource usage
checkUsedResource(rm1, "default", 4 * GB, null);
Assert.assertEquals(4 * GB, app.getAppAttemptResourceUsage().getUsed().getMemorySize());
verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 16 * GB);
// Wait long enough for the increase token to expire and for the
// roll-back action to complete
Thread.sleep(10000);
// Verify container size is 1G
am1.allocate(null, null);
Assert.assertEquals(1 * GB, rm1.getResourceScheduler().getRMContainer(containerId2).getAllocatedResource().getMemorySize());
// Verify total resource usage is 2G
checkUsedResource(rm1, "default", 2 * GB, null);
Assert.assertEquals(2 * GB, app.getAppAttemptResourceUsage().getUsed().getMemorySize());
// Verify available resource is rolled back to 18GB
verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 18 * GB);
rm1.stop();
}
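The two fixed sleeps above make this test timing-sensitive: 1 second for the scheduler to act, then 10 seconds to cover the 5-second expiry plus the roll-back. A minimal sketch of a polling alternative for the second wait, assuming Hadoop's GenericTestUtils.waitFor is available to the test (its Supplier parameter type differs between Hadoop releases):

import org.apache.hadoop.test.GenericTestUtils;

// Poll until the RM has rolled containerId2 back to 1 GB instead of sleeping
// a fixed 10 seconds; fails the test after 20 seconds if the roll-back never lands.
GenericTestUtils.waitFor(
    () -> rm1.getResourceScheduler().getRMContainer(containerId2)
        .getAllocatedResource().getMemorySize() == 1 * GB,
    500,      // re-check every 500 ms
    20000);   // time out after 20 s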
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
In class TestLeafQueue, method testSingleQueueOneUserMetrics.
@Test
public void testSingleQueueOneUserMetrics() throws Exception {
// Manipulate queue 'a' (stubbed from the configured queue B)
LeafQueue a = stubLeafQueue((LeafQueue) queues.get(B));
// Users
final String user_0 = "user_0";
// Submit applications
final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
a.submitApplicationAttempt(app_0, user_0);
final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
// same user
a.submitApplicationAttempt(app_1, user_0);
// Setup some nodes
String host_0 = "127.0.0.1";
FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0);
final int numNodes = 1;
Resource clusterResource = Resources.createResource(numNodes * (8 * GB), numNodes * 16);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
// Setup resource-requests
Priority priority = TestUtils.createMockPriority(1);
app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 3, true, priority, recordFactory)));
// Start testing...
// Only 1 container
applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
assertEquals((int) (node_0.getTotalResource().getMemorySize() * a.getCapacity()) - (1 * GB), a.getMetrics().getAvailableMB());
}
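The closing assertion checks queue metrics rather than node state: available MB is the queue's configured share of the node minus the 1 GB just assigned. A sketch of that arithmetic with an assumed capacity (the real value comes from the CapacityScheduler configuration built in the test's setup):

long nodeMB = 8 * 1024;          // node_0 registers with 8 GB
float queueCapacity = 1.0f;      // assumed for illustration; read from the CS config in practice
long allocatedMB = 1 * 1024;     // the single 1 GB container assigned above
long expectedAvailableMB = (long) (nodeMB * queueCapacity) - allocatedMB; // 7168 with these numbers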
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
In class TestLeafQueue, method testDRFUsageRatioRounding.
@Test
public void testDRFUsageRatioRounding() throws Exception {
CSAssignment assign;
setUpWithDominantResourceCalculator();
// Mock the queue
LeafQueue b = stubLeafQueue((LeafQueue) queues.get(E));
// Users
final String user0 = "user_0";
// Submit applications
final ApplicationAttemptId appAttemptId0 = TestUtils.getMockApplicationAttemptId(0, 0);
FiCaSchedulerApp app0 = new FiCaSchedulerApp(appAttemptId0, user0, b, b.getAbstractUsersManager(), spyRMContext);
b.submitApplicationAttempt(app0, user0);
// Setup some nodes
String host0 = "127.0.0.1";
FiCaSchedulerNode node0 = TestUtils.getMockNode(host0, DEFAULT_RACK, 0, 80 * GB, 100);
// Make cluster relatively large so usageRatios are small
int numNodes = 1000;
Resource clusterResource = Resources.createResource(numNodes * (80 * GB), numNodes * 100);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
// Set user-limit. Need a small queue within a large cluster.
b.setUserLimit(50);
b.setUserLimitFactor(1000000);
b.setMaxCapacity(1.0f);
b.setAbsoluteCapacity(0.00001f);
// The first allocation is larger than the second but is still vcore-dominant,
// so the usage ratio will be based on vcores. If consumedRatio doesn't round
// in our favor, the new limit calculation will be less than what is currently
// consumed and the allocation will fail
Priority priority = TestUtils.createMockPriority(1);
app0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 20 * GB, 29, 1, true, priority, recordFactory, RMNodeLabelsManager.NO_LABEL)));
assign = b.assignContainers(clusterResource, node0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
app0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 10 * GB, 29, 2, true, priority, recordFactory, RMNodeLabelsManager.NO_LABEL)));
assign = b.assignContainers(clusterResource, node0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
assertTrue("Still within limits, should assign", assign.getResource().getMemorySize() > 0);
}
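The rounding concern in the comments is about the DRF dominant share: a user's usage ratio is the maximum, over resource types, of usage divided by cluster capacity,

$$ s_u = \max_{r \in \{\text{memory},\ \text{vcores}\}} \frac{\text{used}_{u,r}}{\text{cluster}_r} $$

With 1000 nodes of 80 GB and 100 vcores each, a single container contributes a share on the order of 10^-5, so the direction in which this tiny ratio is rounded can decide whether the recomputed user limit lands above or below current consumption, which is exactly the failure mode the final assertion guards against.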
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
In class TestLeafQueue, method testLocalityScheduling.
@Test
public void testLocalityScheduling() throws Exception {
// Manipulate queue 'b' (note: held in a local variable named 'a')
LeafQueue a = stubLeafQueue((LeafQueue) queues.get(B));
// User
String user_0 = "user_0";
// Submit applications
final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
a.submitApplicationAttempt(app_0, user_0);
// Setup some nodes and racks
String host_0 = "127.0.0.1";
String rack_0 = "rack_0";
FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, rack_0, 0, 8 * GB);
String host_1 = "127.0.0.2";
String rack_1 = "rack_1";
FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, rack_1, 0, 8 * GB);
String host_2 = "127.0.0.3";
String rack_2 = "rack_2";
FiCaSchedulerNode node_2 = TestUtils.getMockNode(host_2, rack_2, 0, 8 * GB);
// on rack_1
String host_3 = "127.0.0.4";
FiCaSchedulerNode node_3 = TestUtils.getMockNode(host_3, rack_1, 0, 8 * GB);
Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0);
Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1, node_2.getNodeID(), node_2, node_3.getNodeID(), node_3);
final int numNodes = 3;
Resource clusterResource = Resources.createResource(numNodes * (8 * GB), numNodes * 16);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
// Setup resource-requests and submit
Priority priority = TestUtils.createMockPriority(1);
List<ResourceRequest> app_0_requests_0 = new ArrayList<ResourceRequest>();
app_0_requests_0.add(TestUtils.createResourceRequest(host_0, 1 * GB, 1, true, priority, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_0, 1 * GB, 1, true, priority, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(host_1, 1 * GB, 1, true, priority, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_1, 1 * GB, 1, true, priority, recordFactory));
// one extra
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 3, true, priority, recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
// Start testing...
CSAssignment assignment = null;
SchedulerRequestKey schedulerKey = toSchedulerKey(priority);
// Start with off switch, shouldn't allocate due to delay scheduling
assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyNoContainerAllocated(assignment);
assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
// None->NODE_LOCAL
assertEquals(NodeType.NODE_LOCAL, assignment.getType());
// Another off switch, shouldn't allocate due to delay scheduling
assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyNoContainerAllocated(assignment);
assertEquals(2, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
// None->NODE_LOCAL
assertEquals(NodeType.NODE_LOCAL, assignment.getType());
// Another off switch, shouldn't allocate due to delay scheduling
assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyNoContainerAllocated(assignment);
assertEquals(3, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
// None->NODE_LOCAL
assertEquals(NodeType.NODE_LOCAL, assignment.getType());
// Another off switch, now we should allocate
// since missedOpportunities=3 and reqdContainers=3
assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.OFF_SWITCH);
// should NOT reset
assertEquals(4, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(2, app_0.getOutstandingAsksCount(schedulerKey));
// NODE_LOCAL - node_0
assignment = a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
// should reset
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey));
// NODE_LOCAL - node_1
assignment = a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
// should reset
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey));
assertEquals(NodeType.NODE_LOCAL, assignment.getType());
// Add 1 more request to check for RACK_LOCAL
app_0_requests_0.clear();
app_0_requests_0.add(TestUtils.createResourceRequest(host_1, 1 * GB, 1, true, priority, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_1, 1 * GB, 3, true, priority, recordFactory));
// one extra
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 4, true, priority, recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
assertEquals(4, app_0.getOutstandingAsksCount(schedulerKey));
// Rack-delay
doReturn(true).when(a).getRackLocalityFullReset();
doReturn(1).when(a).getNodeLocalityDelay();
// Shouldn't assign RACK_LOCAL yet
assignment = a.assignContainers(clusterResource, node_3, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(4, app_0.getOutstandingAsksCount(schedulerKey));
// Should assign RACK_LOCAL now
assignment = a.assignContainers(clusterResource, node_3, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.RACK_LOCAL);
// should reset
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
// Shouldn't assign RACK_LOCAL because schedulingOpportunities should have gotten reset.
assignment = a.assignContainers(clusterResource, node_3, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
// Next time we schedule RACK_LOCAL, don't reset
doReturn(false).when(a).getRackLocalityFullReset();
// Should assign RACK_LOCAL now
assignment = a.assignContainers(clusterResource, node_3, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.RACK_LOCAL);
// should NOT reset
assertEquals(2, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(2, app_0.getOutstandingAsksCount(schedulerKey));
// Another RACK_LOCAL since schedulingOpportunities not reset
assignment = a.assignContainers(clusterResource, node_3, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.RACK_LOCAL);
// should NOT reset
assertEquals(3, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey));
// Add a request larger than cluster size to verify
// OFF_SWITCH delay is capped by cluster size
app_0.resetSchedulingOpportunities(schedulerKey);
app_0_requests_0.clear();
app_0_requests_0.add(TestUtils.createResourceRequest(host_0, 1 * GB, 100, true, priority, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_0, 1 * GB, 100, true, priority, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 100, true, priority, recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
// Start with off-switch. 3 nodes in the cluster, so the first 3 attempts shouldn't allocate
for (int i = 0; i < numNodes; i++) {
assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyNoContainerAllocated(assignment);
assertEquals(i + 1, app_0.getSchedulingOpportunities(schedulerKey));
}
// delay should be capped at numNodes so next one should allocate
assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.OFF_SWITCH);
assertEquals(numNodes + 1, app_0.getSchedulingOpportunities(schedulerKey));
}
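A condensed sketch of the off-switch rule these assertions trace, simplified from the CapacityScheduler's locality-delay logic (the method below is hypothetical, not the scheduler's actual code):

// Allocate off-switch only after missing roughly one scheduling opportunity per
// outstanding container, capped at the cluster size. Both sequences above hit a
// threshold of 3: min(3 asks, 3 nodes) first, then min(100 asks, 3 nodes).
static boolean canAssignOffSwitch(long missedOpportunities, int outstandingAsks, int numClusterNodes) {
  long threshold = Math.min(outstandingAsks, numClusterNodes);
  return missedOpportunities >= threshold;
}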
Use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp in project hadoop by apache.
In class TestLeafQueue, method testApplicationPriorityScheduling.
@Test
public void testApplicationPriorityScheduling() throws Exception {
// Manipulate queue 'a'
LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
// User
String user_0 = "user_0";
// Submit applications
final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
a.submitApplicationAttempt(app_0, user_0);
// Setup some nodes and racks
String host_0 = "127.0.0.1";
String rack_0 = "rack_0";
FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, rack_0, 0, 8 * GB);
String host_1 = "127.0.0.2";
String rack_1 = "rack_1";
FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, rack_1, 0, 8 * GB);
String host_2 = "127.0.0.3";
String rack_2 = "rack_2";
FiCaSchedulerNode node_2 = TestUtils.getMockNode(host_2, rack_2, 0, 8 * GB);
Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0);
Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1, node_2.getNodeID(), node_2);
final int numNodes = 3;
Resource clusterResource = Resources.createResource(numNodes * (8 * GB), 1);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
// Setup resource-requests and submit
List<ResourceRequest> app_0_requests_0 = new ArrayList<ResourceRequest>();
// P1
Priority priority_1 = TestUtils.createMockPriority(1);
SchedulerRequestKey schedulerKey1 = toSchedulerKey(priority_1);
app_0_requests_0.add(TestUtils.createResourceRequest(host_0, 1 * GB, 1, true, priority_1, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_0, 1 * GB, 1, true, priority_1, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(host_1, 1 * GB, 1, true, priority_1, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_1, 1 * GB, 1, true, priority_1, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
// P2
Priority priority_2 = TestUtils.createMockPriority(2);
SchedulerRequestKey schedulerKey2 = toSchedulerKey(priority_2);
app_0_requests_0.add(TestUtils.createResourceRequest(host_2, 2 * GB, 1, true, priority_2, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_2, 2 * GB, 1, true, priority_2, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, priority_2, recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
// Start testing...
// Start with off switch, shouldn't allocate P1 due to delay scheduling
// thus, no P2 either!
CSAssignment assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyNoContainerAllocated(assignment);
assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey1));
assertEquals(2, app_0.getOutstandingAsksCount(schedulerKey1));
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2));
assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey2));
// Another off-switch, shouldn't allocate P1 due to delay scheduling
// thus, no P2 either!
assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyNoContainerAllocated(assignment);
assertEquals(2, app_0.getSchedulingOpportunities(schedulerKey1));
assertEquals(2, app_0.getOutstandingAsksCount(schedulerKey1));
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2));
assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey2));
// Another off-switch; this time P1 should allocate OFF_SWITCH
assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.OFF_SWITCH);
assertEquals(3, app_0.getSchedulingOpportunities(schedulerKey1));
assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey1));
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2));
assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey2));
// Now, NODE_LOCAL for P1
assignment = a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey1));
assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey1));
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2));
assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey2));
// Now, OFF_SWITCH for P2
assignment = a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.OFF_SWITCH);
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey1));
assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey1));
assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey2));
assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey2));
}
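The P1/P2 interleaving falls out of the scheduler walking an application's request keys in ascending priority order, so nothing is offered at priority 2 while a priority-1 ask is still blocked on delay scheduling. An illustrative sketch only (hasOutstandingAsks and tryAssign are hypothetical helpers, not scheduler methods):

for (SchedulerRequestKey key : app_0.getSchedulerKeys()) { // assumed sorted, lowest priority value first
  if (hasOutstandingAsks(key)) {
    tryAssign(key);  // may allocate, or assign nothing while locality-delayed
    break;           // either way, lower-priority asks wait their turn
  }
}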