use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager in project hadoop by apache.
the class TestCapacitySchedulerLazyPreemption method testPreemptionConsidersHardNodeLocality.
@Test(timeout = 60000)
public void testPreemptionConsidersHardNodeLocality() throws Exception {
  /**
   * Test case: same as testSimplePreemption steps 1-3.
   *
   * Step 4: app2 asks for a 1G container with hard locality, and the
   * requested host does not exist in the cluster.
   * Confirm the system doesn't preempt any container.
   */
  MockRM rm1 = new MockRM(conf);
  rm1.getRMContext().setNodeLabelManager(mgr);
  rm1.start();
  MockNM nm1 = rm1.registerNode("h1:1234", 4 * GB);
  MockNM nm2 = rm1.registerNode("h2:1234", 4 * GB);
  CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
  RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
  RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
  // Launch an app to queue "a"; the AM container should be launched on nm1
  RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "a");
  MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
  am1.allocate("*", 1 * GB, 6, new ArrayList<ContainerId>());
  // Do allocation 3 times for node1/node2
  for (int i = 0; i < 3; i++) {
    cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
  }
  for (int i = 0; i < 3; i++) {
    cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
  }
  // App1 should have 7 containers now, leaving no available resources in the cluster
  FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(am1.getApplicationAttemptId());
  Assert.assertEquals(7, schedulerApp1.getLiveContainers().size());
  // Submit app2 to queue "c" and ask for a 1G container for its AM
  RMApp app2 = rm1.submitApp(1 * GB, "app", "user", null, "c");
  MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm2);
  // NM1/NM2 each have 0 GB of available resource
  Assert.assertEquals(0 * GB, cs.getNode(nm1.getNodeId()).getUnallocatedResource().getMemorySize());
  Assert.assertEquals(0 * GB, cs.getNode(nm2.getNodeId()).getUnallocatedResource().getMemorySize());
  // The AM asks for a 1 GB container on h3 with hard locality;
  // h3 doesn't exist in the cluster
  am2.allocate(Arrays.asList(
      ResourceRequest.newInstance(Priority.newInstance(1), ResourceRequest.ANY,
          Resources.createResource(1 * GB), 1, true),
      ResourceRequest.newInstance(Priority.newInstance(1), "h3",
          Resources.createResource(1 * GB), 1, false),
      ResourceRequest.newInstance(Priority.newInstance(1), "/default-rack",
          Resources.createResource(1 * GB), 1, false)), null);
  // Get the edit policy and do one update
  SchedulingEditPolicy editPolicy = getSchedulingEditPolicy(rm1);
  // Call editSchedule twice and check whether one container from app1 is
  // marked as "killable"
  editPolicy.editSchedule();
  editPolicy.editSchedule();
  PreemptionManager pm = cs.getPreemptionManager();
  Map<ContainerId, RMContainer> killableContainers =
      waitKillableContainersSize(pm, "a", RMNodeLabelsManager.NO_LABEL, 1);
  Assert.assertEquals(killableContainers.entrySet().iterator().next().getKey()
      .getApplicationAttemptId(), am1.getApplicationAttemptId());
  // Call CS.handle once to see whether any container gets preempted
  cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
  FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(am2.getApplicationAttemptId());
  // App1 has 7 containers and app2 has 1 container (no container preempted)
  Assert.assertEquals(7, schedulerApp1.getLiveContainers().size());
  Assert.assertEquals(1, schedulerApp2.getLiveContainers().size());
  // Do allocation again; nothing will be preempted
  cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
  // App1 still has 7 containers and app2 has 1 container (no container allocated)
  Assert.assertEquals(7, schedulerApp1.getLiveContainers().size());
  Assert.assertEquals(1, schedulerApp2.getLiveContainers().size());
  rm1.close();
}
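The helper waitKillableContainersSize used above is defined elsewhere in the test hierarchy rather than in this snippet. A minimal sketch of such a polling helper, assuming PreemptionManager exposes a getKillableContainersMap(queue, partition) accessor as the call above implies, and assuming the same imports as the test class:

  // Sketch (an assumption about its shape, not the verbatim Hadoop implementation):
  // poll the PreemptionManager until the expected number of killable containers
  // shows up for the given queue/partition, or time out after ~5 seconds.
  private Map<ContainerId, RMContainer> waitKillableContainersSize(
      PreemptionManager pm, String queueName, String partition,
      int expectedSize) throws Exception {
    Map<ContainerId, RMContainer> killableContainers =
        pm.getKillableContainersMap(queueName, partition);
    int wait = 0;
    // Wait for at most 500 * 10 ms.
    while (expectedSize != killableContainers.size() && wait < 500) {
      killableContainers = pm.getKillableContainersMap(queueName, partition);
      Thread.sleep(10);
      wait++;
    }
    Assert.assertEquals(expectedSize, killableContainers.size());
    return killableContainers;
  }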
use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager in project hadoop by apache.
the class TestLeafQueue method mockCSContext.
private CapacitySchedulerContext mockCSContext(CapacitySchedulerConfiguration csConf, Resource clusterResource) {
  CapacitySchedulerContext csContext = mock(CapacitySchedulerContext.class);
  when(csContext.getConfiguration()).thenReturn(csConf);
  when(csContext.getConf()).thenReturn(new YarnConfiguration());
  when(csContext.getResourceCalculator()).thenReturn(resourceCalculator);
  when(csContext.getClusterResource()).thenReturn(clusterResource);
  when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB, 1));
  when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(2 * GB, 2));
  when(csContext.getPreemptionManager()).thenReturn(new PreemptionManager());
  return csContext;
}
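As a quick usage sketch (assuming it runs inside TestLeafQueue, where GB and the usual Mockito/JUnit imports are in scope), the mocked context simply echoes back whatever was stubbed, including the fresh PreemptionManager:

  // Hypothetical usage of the helper above, not taken from the Hadoop source.
  CapacitySchedulerConfiguration csConfig = new CapacitySchedulerConfiguration();
  Resource cluster = Resources.createResource(10 * GB, 10);
  CapacitySchedulerContext ctx = mockCSContext(csConfig, cluster);
  Assert.assertEquals(cluster, ctx.getClusterResource());
  Assert.assertNotNull(ctx.getPreemptionManager());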
use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager in project hadoop by apache.
the class TestLeafQueue method setUpInternal.
private void setUpInternal(ResourceCalculator rC) throws Exception {
  CapacityScheduler spyCs = new CapacityScheduler();
  queues = new HashMap<String, CSQueue>();
  cs = spy(spyCs);
  rmContext = TestUtils.getMockRMContext();
  spyRMContext = spy(rmContext);
  ConcurrentMap<ApplicationId, RMApp> spyApps = spy(new ConcurrentHashMap<ApplicationId, RMApp>());
  RMApp rmApp = mock(RMApp.class);
  when(rmApp.getRMAppAttempt((ApplicationAttemptId) Matchers.any())).thenReturn(null);
  amResourceRequest = mock(ResourceRequest.class);
  when(amResourceRequest.getCapability()).thenReturn(Resources.createResource(0, 0));
  when(rmApp.getAMResourceRequest()).thenReturn(amResourceRequest);
  Mockito.doReturn(rmApp).when(spyApps).get((ApplicationId) Matchers.any());
  when(spyRMContext.getRMApps()).thenReturn(spyApps);
  csConf = new CapacitySchedulerConfiguration();
  csConf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS, true);
  csConf.setBoolean(CapacitySchedulerConfiguration.RESERVE_CONT_LOOK_ALL_NODES, false);
  final String newRoot = "root" + System.currentTimeMillis();
  setupQueueConfiguration(csConf, newRoot);
  YarnConfiguration conf = new YarnConfiguration();
  cs.setConf(conf);
  csContext = mock(CapacitySchedulerContext.class);
  when(csContext.getConfiguration()).thenReturn(csConf);
  when(csContext.getConf()).thenReturn(conf);
  when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB, 1));
  when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16 * GB, 32));
  when(csContext.getClusterResource()).thenReturn(Resources.createResource(100 * 16 * GB, 100 * 32));
  when(csContext.getResourceCalculator()).thenReturn(resourceCalculator);
  when(csContext.getPreemptionManager()).thenReturn(new PreemptionManager());
  when(csContext.getResourceCalculator()).thenReturn(rC);
  when(csContext.getRMContext()).thenReturn(rmContext);
  RMContainerTokenSecretManager containerTokenSecretManager = new RMContainerTokenSecretManager(conf);
  containerTokenSecretManager.rollMasterKey();
  when(csContext.getContainerTokenSecretManager()).thenReturn(containerTokenSecretManager);
  root = CapacitySchedulerQueueManager.parseQueue(csContext, csConf, null, CapacitySchedulerConfiguration.ROOT, queues, queues, TestUtils.spyHook);
  ResourceUsage queueResUsage = root.getQueueResourceUsage();
  when(csContext.getClusterResourceUsage()).thenReturn(queueResUsage);
  cs.setRMContext(spyRMContext);
  cs.init(csConf);
  cs.setResourceCalculator(rC);
  cs.start();
  when(spyRMContext.getScheduler()).thenReturn(cs);
  when(spyRMContext.getYarnConfiguration()).thenReturn(new YarnConfiguration());
  when(cs.getNumClusterNodes()).thenReturn(3);
}
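Note that getResourceCalculator() is stubbed twice above, first with resourceCalculator and then with the rC argument; with Mockito the most recent stubbing wins, so callers of the mocked context see rC. A tiny standalone illustration of that rule (hypothetical names, not Hadoop code, assuming the usual Mockito/JUnit static imports):

  // Mockito returns the value from the most recent matching stubbing.
  List<String> stubbedList = mock(List.class);
  when(stubbedList.get(0)).thenReturn("first");
  when(stubbedList.get(0)).thenReturn("second");
  Assert.assertEquals("second", stubbedList.get(0)); // the later stubbing wins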
use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager in project hadoop by apache.
the class TestChildQueueOrder method setUp.
@Before
public void setUp() throws Exception {
  rmContext = TestUtils.getMockRMContext();
  conf = new YarnConfiguration();
  csConf = new CapacitySchedulerConfiguration();
  csContext = mock(CapacitySchedulerContext.class);
  when(csContext.getConf()).thenReturn(conf);
  when(csContext.getConfiguration()).thenReturn(csConf);
  when(csContext.getMinimumResourceCapability()).thenReturn(Resources.createResource(GB, 1));
  when(csContext.getMaximumResourceCapability()).thenReturn(Resources.createResource(16 * GB, 32));
  when(csContext.getClusterResource()).thenReturn(Resources.createResource(100 * 16 * GB, 100 * 32));
  when(csContext.getResourceCalculator()).thenReturn(resourceComparator);
  when(csContext.getRMContext()).thenReturn(rmContext);
  when(csContext.getPreemptionManager()).thenReturn(new PreemptionManager());
}
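Tests in TestChildQueueOrder then build a queue hierarchy on top of this mocked context once the desired queues have been configured on csConf. A rough sketch, mirroring the parseQueue call shown in TestLeafQueue.setUpInternal above (the exact setup in the real test class may differ):

  // Sketch: constructing the root queue and queue map from the mocked csContext,
  // following the same pattern as the parseQueue call in setUpInternal above.
  Map<String, CSQueue> queues = new HashMap<String, CSQueue>();
  CSQueue root = CapacitySchedulerQueueManager.parseQueue(csContext, csConf,
      null, CapacitySchedulerConfiguration.ROOT, queues, queues,
      TestUtils.spyHook);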
use of org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager in project hadoop by apache.
the class TestProportionalCapacityPreemptionPolicy method setup.
@Before
@SuppressWarnings("unchecked")
public void setup() {
  conf = new CapacitySchedulerConfiguration(new Configuration(false));
  conf.setLong(CapacitySchedulerConfiguration.PREEMPTION_WAIT_TIME_BEFORE_KILL, 10000);
  conf.setLong(CapacitySchedulerConfiguration.PREEMPTION_MONITORING_INTERVAL, 3000);
  // report "ideal" preempt
  conf.setFloat(CapacitySchedulerConfiguration.TOTAL_PREEMPTION_PER_ROUND, 1.0f);
  conf.setFloat(CapacitySchedulerConfiguration.PREEMPTION_NATURAL_TERMINATION_FACTOR, 1.0f);
  conf.set(YarnConfiguration.RM_SCHEDULER_MONITOR_POLICIES, ProportionalCapacityPreemptionPolicy.class.getCanonicalName());
  conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
  // FairScheduler doesn't support this test; set CapacityScheduler as the
  // scheduler for this test.
  conf.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getName());
  mClock = mock(Clock.class);
  mCS = mock(CapacityScheduler.class);
  when(mCS.getResourceCalculator()).thenReturn(rc);
  lm = mock(RMNodeLabelsManager.class);
  try {
    when(lm.isExclusiveNodeLabel(anyString())).thenReturn(true);
  } catch (IOException e) {
    // do nothing
  }
  when(mCS.getConfiguration()).thenReturn(conf);
  rmContext = mock(RMContext.class);
  when(mCS.getRMContext()).thenReturn(rmContext);
  when(mCS.getPreemptionManager()).thenReturn(new PreemptionManager());
  when(rmContext.getNodeLabelManager()).thenReturn(lm);
  mDisp = mock(EventHandler.class);
  Dispatcher disp = mock(Dispatcher.class);
  when(rmContext.getDispatcher()).thenReturn(disp);
  when(disp.getEventHandler()).thenReturn(mDisp);
  rand = new Random();
  long seed = rand.nextLong();
  System.out.println(name.getMethodName() + " SEED: " + seed);
  rand.setSeed(seed);
  appAlloc = 0;
}
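With these mocks in place, the tests in this class construct the preemption policy directly and drive it by hand, roughly as follows (a sketch; the exact test-only constructor may vary across Hadoop versions):

  // Sketch (assumption about the test-only constructor in this Hadoop version):
  // build the policy from the mocked RMContext, CapacityScheduler and Clock,
  // then run a single monitoring pass against the mocked scheduler state.
  ProportionalCapacityPreemptionPolicy policy =
      new ProportionalCapacityPreemptionPolicy(rmContext, mCS, mClock);
  policy.editSchedule();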