Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache: class TestLeafQueue, method testSchedulingConstraints.
@Test
public void testSchedulingConstraints() throws Exception {
// Manipulate queue 'a'
LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
// User
String user_0 = "user_0";
// Submit applications
final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
a.submitApplicationAttempt(app_0, user_0);
// Setup some nodes and racks
String host_0_0 = "127.0.0.1";
String rack_0 = "rack_0";
FiCaSchedulerNode node_0_0 = TestUtils.getMockNode(host_0_0, rack_0, 0, 8 * GB);
String host_0_1 = "127.0.0.2";
FiCaSchedulerNode node_0_1 = TestUtils.getMockNode(host_0_1, rack_0, 0, 8 * GB);
String host_1_0 = "127.0.0.3";
String rack_1 = "rack_1";
FiCaSchedulerNode node_1_0 = TestUtils.getMockNode(host_1_0, rack_1, 0, 8 * GB);
Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0);
Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0_0.getNodeID(), node_0_0, node_0_1.getNodeID(), node_0_1, node_1_0.getNodeID(), node_1_0);
final int numNodes = 3;
Resource clusterResource = Resources.createResource(numNodes * (8 * GB), numNodes * 16);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
// Setup resource-requests and submit
Priority priority = TestUtils.createMockPriority(1);
SchedulerRequestKey schedulerKey = toSchedulerKey(priority);
List<ResourceRequest> app_0_requests_0 = new ArrayList<ResourceRequest>();
app_0_requests_0.add(TestUtils.createResourceRequest(host_0_0, 1 * GB, 1, true, priority, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(host_0_1, 1 * GB, 1, true, priority, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_0, 1 * GB, 1, true, priority, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(host_1_0, 1 * GB, 1, true, priority, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_1, 1 * GB, 1, true, priority, recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
// Start testing...
// Add one request
app_0_requests_0.clear();
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory)); // only 1
app_0.updateResourceRequests(app_0_requests_0);
// NODE_LOCAL - node_0_0
CSAssignment assignment = a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
// should reset
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey));
// No allocation on node_1_0 even though it's node/rack local since
// required(ANY) == 0
assignment = a.assignContainers(clusterResource, node_1_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyNoContainerAllocated(assignment);
// Still zero since #req == 0
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey));
// Add one request
app_0_requests_0.clear();
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 1, true, priority, recordFactory)); // only one
app_0.updateResourceRequests(app_0_requests_0);
// No allocation on node_0_1 even though it's node/rack local since
// required(rack_0) == 0
assignment = a.assignContainers(clusterResource, node_0_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyNoContainerAllocated(assignment);
assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey));
// NODE_LOCAL - node_1_0
assignment = a.assignContainers(clusterResource, node_1_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
// should reset
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey));
}
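The rule this test exercises can be isolated: the scheduler only considers handing out a container while the aggregate ResourceRequest.ANY ask is still positive; node- and rack-level asks alone never trigger an allocation. Below is a minimal, self-contained sketch of that gating logic. The class and method names (OutstandingAsks, canAllocateOn) are hypothetical illustrations, not CapacityScheduler API.

import java.util.HashMap;
import java.util.Map;

// Hypothetical illustration of the gating rule exercised above:
// node- and rack-level asks never matter once the ANY ask hits zero.
public class OutstandingAsks {
    private final Map<String, Integer> asks = new HashMap<>();

    public void setAsk(String resourceName, int count) {
        asks.put(resourceName, count);
    }

    // "*" plays the role of ResourceRequest.ANY.
    public boolean canAllocateOn(String host, String rack) {
        if (asks.getOrDefault("*", 0) <= 0) {
            return false; // required(ANY) == 0: no allocation anywhere
        }
        return asks.getOrDefault(host, 0) > 0
            || asks.getOrDefault(rack, 0) > 0;
    }

    public static void main(String[] args) {
        OutstandingAsks app = new OutstandingAsks();
        app.setAsk("127.0.0.3", 1); // node-local ask on host_1_0
        app.setAsk("rack_1", 1);    // rack-local ask
        app.setAsk("*", 0);         // but the ANY ask is exhausted
        // Prints false: mirrors "No allocation on node_1_0 ... required(ANY) == 0"
        System.out.println(app.canAllocateOn("127.0.0.3", "rack_1"));
    }
}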
Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache: class TestLeafQueue, method testSingleQueueOneUserMetrics.
@Test
public void testSingleQueueOneUserMetrics() throws Exception {
// Manipulate queue 'b' (held in the local variable 'a')
LeafQueue a = stubLeafQueue((LeafQueue) queues.get(B));
// Users
final String user_0 = "user_0";
// Submit applications
final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
a.submitApplicationAttempt(app_0, user_0);
final ApplicationAttemptId appAttemptId_1 = TestUtils.getMockApplicationAttemptId(1, 0);
FiCaSchedulerApp app_1 = new FiCaSchedulerApp(appAttemptId_1, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
// same user
a.submitApplicationAttempt(app_1, user_0);
// Setup some nodes
String host_0 = "127.0.0.1";
FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, DEFAULT_RACK, 0, 8 * GB);
Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0, app_1.getApplicationAttemptId(), app_1);
Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0);
final int numNodes = 1;
Resource clusterResource = Resources.createResource(numNodes * (8 * GB), numNodes * 16);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
// Setup resource-requests
Priority priority = TestUtils.createMockPriority(1);
app_0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 3, true, priority, recordFactory)));
// Start testing...
// Only 1 container
applyCSAssignment(clusterResource, a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY), a, nodes, apps);
assertEquals((int) (node_0.getTotalResource().getMemorySize() * a.getCapacity()) - (1 * GB), a.getMetrics().getAvailableMB());
}
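The final assertion is plain arithmetic: the queue's available-memory metric is its capacity share of the node's memory minus what has been allocated. A worked sketch follows; the 0.5 capacity value is an assumption for illustration, since the real figure comes from the test's queue configuration.

// Hypothetical numbers mirroring the assertion's formula:
// available = floor(nodeMemoryMB * queueCapacity) - allocatedMB
public class AvailableMemoryDemo {
    public static void main(String[] args) {
        long nodeMemoryMB = 8 * 1024;   // the single 8 GB node
        float queueCapacity = 0.5f;     // assumed capacity share for the queue
        long allocatedMB = 1 * 1024;    // the one 1 GB container assigned

        long availableMB = (int) (nodeMemoryMB * queueCapacity) - allocatedMB;
        System.out.println("availableMB = " + availableMB); // 3072 under these assumptions
    }
}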
Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache: class TestLeafQueue, method testDRFUsageRatioRounding.
@Test
public void testDRFUsageRatioRounding() throws Exception {
CSAssignment assign;
setUpWithDominantResourceCalculator();
// Mock the queue
LeafQueue b = stubLeafQueue((LeafQueue) queues.get(E));
// Users
final String user0 = "user_0";
// Submit applications
final ApplicationAttemptId appAttemptId0 = TestUtils.getMockApplicationAttemptId(0, 0);
FiCaSchedulerApp app0 = new FiCaSchedulerApp(appAttemptId0, user0, b, b.getAbstractUsersManager(), spyRMContext);
b.submitApplicationAttempt(app0, user0);
// Setup some nodes
String host0 = "127.0.0.1";
FiCaSchedulerNode node0 = TestUtils.getMockNode(host0, DEFAULT_RACK, 0, 80 * GB, 100);
// Make cluster relatively large so usageRatios are small
int numNodes = 1000;
Resource clusterResource = Resources.createResource(numNodes * (80 * GB), numNodes * 100);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
// Set user-limit. Need a small queue within a large cluster.
b.setUserLimit(50);
b.setUserLimitFactor(1000000);
b.setMaxCapacity(1.0f);
b.setAbsoluteCapacity(0.00001f);
// The first allocation is larger than the second but is still vcore-dominant,
// so the usage ratio will be based on vcores. If consumedRatio doesn't round
// in our favor, the new limit calculation will actually be less than what is
// currently consumed and we will fail to allocate.
Priority priority = TestUtils.createMockPriority(1);
app0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 20 * GB, 29, 1, true, priority, recordFactory, RMNodeLabelsManager.NO_LABEL)));
assign = b.assignContainers(clusterResource, node0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
app0.updateResourceRequests(Collections.singletonList(TestUtils.createResourceRequest(ResourceRequest.ANY, 10 * GB, 29, 2, true, priority, recordFactory, RMNodeLabelsManager.NO_LABEL)));
assign = b.assignContainers(clusterResource, node0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
assertTrue("Still within limits, should assign", assign.getResource().getMemorySize() > 0);
}
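The comment about rounding is the crux: under the DominantResourceCalculator, a user's usage ratio is the dominant share, i.e. the larger of the memory and vcore fractions of the cluster resource. Because that ratio is a float, the headroom derived from it can round below what is already consumed, which would starve the second request. A simplified sketch of the dominant-share computation follows; it only illustrates the idea, while the real calculator lives in DominantResourceCalculator and handles far more cases.

// Simplified dominant-share computation in the spirit of DRF.
public class DominantShareDemo {
    static float dominantShare(long usedMemMB, long usedVcores,
                               long clusterMemMB, long clusterVcores) {
        float memShare = (float) usedMemMB / clusterMemMB;
        float vcoreShare = (float) usedVcores / clusterVcores;
        return Math.max(memShare, vcoreShare);
    }

    public static void main(String[] args) {
        long clusterMemMB = 1000L * 80 * 1024; // 1000 nodes x 80 GB
        long clusterVcores = 1000L * 100;      // 1000 nodes x 100 vcores
        // One 20 GB / 29-vcore container: the vcore share edges out the
        // memory share, so vcores are the dominant resource.
        float share = dominantShare(20L * 1024, 29, clusterMemMB, clusterVcores);
        // Tiny fractions like this are where float rounding can bite.
        System.out.println("dominant share = " + share);
    }
}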
Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache: class TestLeafQueue, method testLocalityScheduling.
@Test
public void testLocalityScheduling() throws Exception {
// Manipulate queue 'b'
LeafQueue a = stubLeafQueue((LeafQueue) queues.get(B));
// User
String user_0 = "user_0";
// Submit applications
final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
a.submitApplicationAttempt(app_0, user_0);
// Setup some nodes and racks
String host_0 = "127.0.0.1";
String rack_0 = "rack_0";
FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, rack_0, 0, 8 * GB);
String host_1 = "127.0.0.2";
String rack_1 = "rack_1";
FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, rack_1, 0, 8 * GB);
String host_2 = "127.0.0.3";
String rack_2 = "rack_2";
FiCaSchedulerNode node_2 = TestUtils.getMockNode(host_2, rack_2, 0, 8 * GB);
// on rack_1
String host_3 = "127.0.0.4";
FiCaSchedulerNode node_3 = TestUtils.getMockNode(host_3, rack_1, 0, 8 * GB);
Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0);
Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1, node_2.getNodeID(), node_2, node_3.getNodeID(), node_3);
final int numNodes = 3;
Resource clusterResource = Resources.createResource(numNodes * (8 * GB), numNodes * 16);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
// Setup resource-requests and submit
Priority priority = TestUtils.createMockPriority(1);
List<ResourceRequest> app_0_requests_0 = new ArrayList<ResourceRequest>();
app_0_requests_0.add(TestUtils.createResourceRequest(host_0, 1 * GB, 1, true, priority, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_0, 1 * GB, 1, true, priority, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(host_1, 1 * GB, 1, true, priority, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_1, 1 * GB, 1, true, priority, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 3, true, priority, recordFactory)); // one extra
app_0.updateResourceRequests(app_0_requests_0);
// Start testing...
CSAssignment assignment = null;
SchedulerRequestKey schedulerKey = toSchedulerKey(priority);
// Start with off switch, shouldn't allocate due to delay scheduling
assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyNoContainerAllocated(assignment);
assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
// None->NODE_LOCAL
assertEquals(NodeType.NODE_LOCAL, assignment.getType());
// Another off switch, shouldn't allocate due to delay scheduling
assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyNoContainerAllocated(assignment);
assertEquals(2, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
// None->NODE_LOCAL
assertEquals(NodeType.NODE_LOCAL, assignment.getType());
// Another off switch, shouldn't allocate due to delay scheduling
assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyNoContainerAllocated(assignment);
assertEquals(3, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
// None->NODE_LOCAL
assertEquals(NodeType.NODE_LOCAL, assignment.getType());
// Another off switch, now we should allocate
// since missedOpportunities=3 and reqdContainers=3
assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.OFF_SWITCH);
// should NOT reset
assertEquals(4, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(2, app_0.getOutstandingAsksCount(schedulerKey));
// NODE_LOCAL - node_0
assignment = a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
// should reset
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey));
// NODE_LOCAL - node_1
assignment = a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
// should reset
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey));
assertEquals(NodeType.NODE_LOCAL, assignment.getType());
// Add 1 more request to check for RACK_LOCAL
app_0_requests_0.clear();
app_0_requests_0.add(TestUtils.createResourceRequest(host_1, 1 * GB, 1, true, priority, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_1, 1 * GB, 3, true, priority, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 4, true, priority, recordFactory)); // one extra
app_0.updateResourceRequests(app_0_requests_0);
assertEquals(4, app_0.getOutstandingAsksCount(schedulerKey));
// Rack-delay
doReturn(true).when(a).getRackLocalityFullReset();
doReturn(1).when(a).getNodeLocalityDelay();
// Shouldn't assign RACK_LOCAL yet
assignment = a.assignContainers(clusterResource, node_3, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(4, app_0.getOutstandingAsksCount(schedulerKey));
// Should assign RACK_LOCAL now
assignment = a.assignContainers(clusterResource, node_3, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.RACK_LOCAL);
// should reset
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
// Shouldn't assign RACK_LOCAL because schedulingOpportunities should have gotten reset.
assignment = a.assignContainers(clusterResource, node_3, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(3, app_0.getOutstandingAsksCount(schedulerKey));
// Next time we schedule RACK_LOCAL, don't reset
doReturn(false).when(a).getRackLocalityFullReset();
// Should assign RACK_LOCAL now
assignment = a.assignContainers(clusterResource, node_3, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.RACK_LOCAL);
// should NOT reset
assertEquals(2, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(2, app_0.getOutstandingAsksCount(schedulerKey));
// Another RACK_LOCAL since schedulingOpportunities not reset
assignment = a.assignContainers(clusterResource, node_3, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.RACK_LOCAL);
// should NOT reset
assertEquals(3, app_0.getSchedulingOpportunities(schedulerKey));
assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey));
// Add a request larger than cluster size to verify
// OFF_SWITCH delay is capped by cluster size
app_0.resetSchedulingOpportunities(schedulerKey);
app_0_requests_0.clear();
app_0_requests_0.add(TestUtils.createResourceRequest(host_0, 1 * GB, 100, true, priority, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_0, 1 * GB, 100, true, priority, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 100, true, priority, recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
// Start with off-switch. 3 nodes in the cluster, so the first 3 attempts shouldn't allocate
for (int i = 0; i < numNodes; i++) {
assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyNoContainerAllocated(assignment);
assertEquals(i + 1, app_0.getSchedulingOpportunities(schedulerKey));
}
// delay should be capped at numNodes so next one should allocate
assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.OFF_SWITCH);
assertEquals(numNodes + 1, app_0.getSchedulingOpportunities(schedulerKey));
}
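What the last loop demonstrates is the cap on the off-switch delay: the number of scheduling opportunities an application must accumulate before accepting an off-switch container grows with the outstanding ask but is bounded by the number of nodes in the cluster. The sketch below models that decision; the threshold formula is an assumption distilled from this test's observed behavior, not the exact CapacityScheduler code (which also involves a locality wait factor).

// Simplified model of the off-switch delay rule the loop above exercises.
public class OffSwitchDelayDemo {
    // Assumed rule: accept an off-switch allocation only after more than
    // min(outstandingAsks, numClusterNodes) missed opportunities.
    static boolean canAssignOffSwitch(int missedOpportunities,
                                      int outstandingAsks, int numClusterNodes) {
        int threshold = Math.min(outstandingAsks, numClusterNodes);
        return missedOpportunities > threshold;
    }

    public static void main(String[] args) {
        int numNodes = 3;
        int asks = 100; // far larger than the cluster
        for (int missed = 1; missed <= numNodes + 1; missed++) {
            System.out.printf("missed=%d -> assign=%b%n",
                missed, canAssignOffSwitch(missed, asks, numNodes));
        }
        // Only missed=4 assigns, matching the test: the first 3
        // attempts miss and the 4th allocates OFF_SWITCH.
    }
}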
Use of org.apache.hadoop.yarn.api.records.Resource in project hadoop by apache: class TestLeafQueue, method testApplicationPriorityScheduling.
@Test
public void testApplicationPriorityScheduling() throws Exception {
// Manipulate queue 'a'
LeafQueue a = stubLeafQueue((LeafQueue) queues.get(A));
// User
String user_0 = "user_0";
// Submit applications
final ApplicationAttemptId appAttemptId_0 = TestUtils.getMockApplicationAttemptId(0, 0);
FiCaSchedulerApp app_0 = new FiCaSchedulerApp(appAttemptId_0, user_0, a, mock(ActiveUsersManager.class), spyRMContext);
a.submitApplicationAttempt(app_0, user_0);
// Setup some nodes and racks
String host_0 = "127.0.0.1";
String rack_0 = "rack_0";
FiCaSchedulerNode node_0 = TestUtils.getMockNode(host_0, rack_0, 0, 8 * GB);
String host_1 = "127.0.0.2";
String rack_1 = "rack_1";
FiCaSchedulerNode node_1 = TestUtils.getMockNode(host_1, rack_1, 0, 8 * GB);
String host_2 = "127.0.0.3";
String rack_2 = "rack_2";
FiCaSchedulerNode node_2 = TestUtils.getMockNode(host_2, rack_2, 0, 8 * GB);
Map<ApplicationAttemptId, FiCaSchedulerApp> apps = ImmutableMap.of(app_0.getApplicationAttemptId(), app_0);
Map<NodeId, FiCaSchedulerNode> nodes = ImmutableMap.of(node_0.getNodeID(), node_0, node_1.getNodeID(), node_1, node_2.getNodeID(), node_2);
final int numNodes = 3;
Resource clusterResource = Resources.createResource(numNodes * (8 * GB), 1);
when(csContext.getNumClusterNodes()).thenReturn(numNodes);
// Setup resource-requests and submit
List<ResourceRequest> app_0_requests_0 = new ArrayList<ResourceRequest>();
// P1
Priority priority_1 = TestUtils.createMockPriority(1);
SchedulerRequestKey schedulerKey1 = toSchedulerKey(priority_1);
app_0_requests_0.add(TestUtils.createResourceRequest(host_0, 1 * GB, 1, true, priority_1, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_0, 1 * GB, 1, true, priority_1, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(host_1, 1 * GB, 1, true, priority_1, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_1, 1 * GB, 1, true, priority_1, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 1 * GB, 2, true, priority_1, recordFactory));
// P2
Priority priority_2 = TestUtils.createMockPriority(2);
SchedulerRequestKey schedulerKey2 = toSchedulerKey(priority_2);
app_0_requests_0.add(TestUtils.createResourceRequest(host_2, 2 * GB, 1, true, priority_2, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(rack_2, 2 * GB, 1, true, priority_2, recordFactory));
app_0_requests_0.add(TestUtils.createResourceRequest(ResourceRequest.ANY, 2 * GB, 1, true, priority_2, recordFactory));
app_0.updateResourceRequests(app_0_requests_0);
// Start testing...
// Start with off switch, shouldn't allocate P1 due to delay scheduling
// thus, no P2 either!
CSAssignment assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyNoContainerAllocated(assignment);
assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey1));
assertEquals(2, app_0.getOutstandingAsksCount(schedulerKey1));
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2));
assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey2));
// Another off-switch, shouldn't allocate P1 due to delay scheduling
// thus, no P2 either!
assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyNoContainerAllocated(assignment);
assertEquals(2, app_0.getSchedulingOpportunities(schedulerKey1));
assertEquals(2, app_0.getOutstandingAsksCount(schedulerKey1));
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2));
assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey2));
// Another off-switch, now we should allocate OFF_SWITCH P1
assignment = a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.OFF_SWITCH);
assertEquals(3, app_0.getSchedulingOpportunities(schedulerKey1));
assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey1));
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2));
assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey2));
// Now, NODE_LOCAL for P1
assignment = a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.NODE_LOCAL);
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey1));
assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey1));
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey2));
assertEquals(1, app_0.getOutstandingAsksCount(schedulerKey2));
// Now, OFF_SWITCH for P2
assignment = a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY);
applyCSAssignment(clusterResource, assignment, a, nodes, apps);
verifyContainerAllocated(assignment, NodeType.OFF_SWITCH);
assertEquals(0, app_0.getSchedulingOpportunities(schedulerKey1));
assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey1));
assertEquals(1, app_0.getSchedulingOpportunities(schedulerKey2));
assertEquals(0, app_0.getOutstandingAsksCount(schedulerKey2));
}
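The test's structure rests on one invariant: requests are served in strict priority order per application, so a pending P1 (lower priority value, higher urgency) blocks P2 from being considered at all, which is why the delay-scheduling misses on P1 also stall P2. A minimal sketch of that ordering follows; YARN's SchedulerRequestKey compares more fields, and this models only the priority component.

import java.util.PriorityQueue;

// Minimal model: per-app requests served lowest priority value first,
// and a still-pending higher-urgency request blocks everything behind it.
public class PriorityOrderingDemo {
    record PendingAsk(int priority, int outstanding) {}

    public static void main(String[] args) {
        PriorityQueue<PendingAsk> asks = new PriorityQueue<>(
            (x, y) -> Integer.compare(x.priority(), y.priority()));
        asks.add(new PendingAsk(1, 2)); // P1: two containers outstanding
        asks.add(new PendingAsk(2, 1)); // P2: one container outstanding

        PendingAsk head = asks.peek();
        // While P1 is delayed (no allocation), P2 is never even examined.
        System.out.println("scheduler only considers priority " + head.priority());
    }
}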