Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.
From the class TestFifoScheduler, method testAppAttemptMetrics:
@Test(timeout = 5000)
public void testAppAttemptMetrics() throws Exception {
  AsyncDispatcher dispatcher = new InlineDispatcher();
  FifoScheduler scheduler = new FifoScheduler();
  RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
  RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null,
      null, null, null, null, scheduler);
  ((RMContextImpl) rmContext).setSystemMetricsPublisher(
      mock(SystemMetricsPublisher.class));
  Configuration conf = new Configuration();
  ((RMContextImpl) rmContext).setScheduler(scheduler);
  scheduler.setRMContext(rmContext);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, rmContext);
  QueueMetrics metrics = scheduler.getRootQueueMetrics();
  int beforeAppsSubmitted = metrics.getAppsSubmitted();
  ApplicationId appId = BuilderUtils.newApplicationId(200, 1);
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
  SchedulerEvent appEvent = new AppAddedSchedulerEvent(appId, "queue", "user");
  scheduler.handle(appEvent);
  SchedulerEvent attemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId, false);
  scheduler.handle(attemptEvent);
  // A second attempt of the same application must not be counted again.
  appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 2);
  SchedulerEvent attemptEvent2 = new AppAttemptAddedSchedulerEvent(appAttemptId, false);
  scheduler.handle(attemptEvent2);
  int afterAppsSubmitted = metrics.getAppsSubmitted();
  // Exactly one application was submitted, regardless of the number of attempts.
  Assert.assertEquals(1, afterAppsSubmitted - beforeAppsSubmitted);
  scheduler.stop();
}
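The RMContextImpl wiring in this test recurs almost verbatim in the next one, so it could be factored into a helper. The sketch below is hypothetical, not part of the Hadoop test class: the helper name is made up, and it assumes the same ten-argument RMContextImpl constructor used above.

// Hypothetical helper consolidating the context/scheduler wiring above;
// assumes the same RMContextImpl constructor this test uses.
private RMContext createAndWireRMContext(FifoScheduler scheduler,
    Configuration conf) throws IOException {
  RMContextImpl rmContext = new RMContextImpl(new InlineDispatcher(), null, null,
      null, null, null, null, null, null, scheduler);
  rmContext.setSystemMetricsPublisher(mock(SystemMetricsPublisher.class));
  rmContext.setScheduler(scheduler);
  scheduler.setRMContext(rmContext);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, rmContext);
  return rmContext;
}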
Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.
From the class TestFifoScheduler, method testUpdateResourceOnNode:
@Test(timeout = 2000)
public void testUpdateResourceOnNode() throws Exception {
  AsyncDispatcher dispatcher = new InlineDispatcher();
  Configuration conf = new Configuration();
  RMContainerTokenSecretManager containerTokenSecretManager =
      new RMContainerTokenSecretManager(conf);
  containerTokenSecretManager.rollMasterKey();
  NMTokenSecretManagerInRM nmTokenSecretManager = new NMTokenSecretManagerInRM(conf);
  nmTokenSecretManager.rollMasterKey();
  RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
  FifoScheduler scheduler = new FifoScheduler();
  RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null,
      null, containerTokenSecretManager, nmTokenSecretManager, null, scheduler);
  rmContext.setSystemMetricsPublisher(mock(SystemMetricsPublisher.class));
  rmContext.setRMApplicationHistoryWriter(mock(RMApplicationHistoryWriter.class));
  ((RMContextImpl) rmContext).setYarnConfiguration(new YarnConfiguration());
  NullRMNodeLabelsManager nlm = new NullRMNodeLabelsManager();
  nlm.init(new Configuration());
  rmContext.setNodeLabelManager(nlm);
  scheduler.setRMContext(rmContext);
  ((RMContextImpl) rmContext).setScheduler(scheduler);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(new Configuration(), rmContext);
  RMNode node0 = MockNodes.newNodeInfo(1, Resources.createResource(2048, 4), 1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node0);
  scheduler.handle(nodeEvent1);
  assertEquals(1, scheduler.getNumClusterNodes());
  // Shrink the node from 2048 MB to 1024 MB.
  Resource newResource = Resources.createResource(1024, 4);
  NodeResourceUpdateSchedulerEvent node0ResourceUpdate =
      new NodeResourceUpdateSchedulerEvent(node0, ResourceOption.newInstance(
          newResource, ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT));
  scheduler.handle(node0ResourceUpdate);
  // The SchedulerNode's total resource and available resource have changed.
  assertEquals(1024, scheduler.getNodeTracker().getNode(node0.getNodeID())
      .getTotalResource().getMemorySize());
  assertEquals(1024, scheduler.getNodeTracker().getNode(node0.getNodeID())
      .getUnallocatedResource().getMemorySize());
  QueueInfo queueInfo = scheduler.getQueueInfo(null, false, false);
  Assert.assertEquals(0.0f, queueInfo.getCurrentCapacity(), 0.0f);
  int _appId = 1;
  int _appAttemptId = 1;
  ApplicationAttemptId appAttemptId = createAppAttemptId(_appId, _appAttemptId);
  createMockRMApp(appAttemptId, rmContext);
  AppAddedSchedulerEvent appEvent = new AppAddedSchedulerEvent(
      appAttemptId.getApplicationId(), "queue1", "user1");
  scheduler.handle(appEvent);
  AppAttemptAddedSchedulerEvent attemptEvent =
      new AppAttemptAddedSchedulerEvent(appAttemptId, false);
  scheduler.handle(attemptEvent);
  int memory = 1024;
  int priority = 1;
  List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
  ResourceRequest nodeLocal = createResourceRequest(memory, node0.getHostName(), priority, 1);
  ResourceRequest rackLocal = createResourceRequest(memory, node0.getRackName(), priority, 1);
  ResourceRequest any = createResourceRequest(memory, ResourceRequest.ANY, priority, 1);
  ask.add(nodeLocal);
  ask.add(rackLocal);
  ask.add(any);
  scheduler.allocate(appAttemptId, ask, new ArrayList<ContainerId>(), null, null,
      NULL_UPDATE_REQUESTS);
  // Before the node update event there is one outstanding node-local request.
  Assert.assertEquals(1, nodeLocal.getNumContainers());
  NodeUpdateSchedulerEvent node0Update = new NodeUpdateSchedulerEvent(node0);
  // Now schedule.
  scheduler.handle(node0Update);
  // After the node update event the node-local request has been satisfied...
  Assert.assertEquals(0, nodeLocal.getNumContainers());
  // ...and exactly one container was scheduled.
  SchedulerAppReport info = scheduler.getSchedulerAppInfo(appAttemptId);
  Assert.assertEquals(1, info.getLiveContainers().size());
  // The default queue is now full: the single 1024 MB container consumes the
  // node's entire (reduced) capacity.
  queueInfo = scheduler.getQueueInfo(null, false, false);
  Assert.assertEquals(1.0f, queueInfo.getCurrentCapacity(), 0.0f);
}
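createResourceRequest is a private helper of TestFifoScheduler that this excerpt does not include. A plausible equivalent built from YARN's public factory methods, offered only as a sketch:

// Sketch of the createResourceRequest helper (not shown in the excerpt);
// builds a request for numContainers containers of `memory` MB at the given
// locality (hostname, rack name, or ResourceRequest.ANY).
private ResourceRequest createResourceRequest(int memory, String resourceName,
    int priority, int numContainers) {
  return ResourceRequest.newInstance(Priority.newInstance(priority),
      resourceName, Resources.createResource(memory), numContainers);
}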
Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.
From the class TestSimpleCapacityReplanner, method testReplanningPlanCapacityLoss:
@Test
public void testReplanningPlanCapacityLoss() throws PlanningException {
  Resource clusterCapacity = Resource.newInstance(100 * 1024, 100);
  Resource minAlloc = Resource.newInstance(1024, 1);
  Resource maxAlloc = Resource.newInstance(1024 * 8, 8);
  ResourceCalculator res = new DefaultResourceCalculator();
  long step = 1L;
  Clock clock = mock(Clock.class);
  ReservationAgent agent = mock(ReservationAgent.class);
  SharingPolicy policy = new NoOverCommitPolicy();
  policy.init("root.dedicated", null);
  QueueMetrics queueMetrics = mock(QueueMetrics.class);
  when(clock.getTime()).thenReturn(0L);
  SimpleCapacityReplanner enf = new SimpleCapacityReplanner(clock);
  RMContext context = ReservationSystemTestUtil.createMockRMContext();
  ReservationSchedulerConfiguration conf = mock(ReservationSchedulerConfiguration.class);
  when(conf.getEnforcementWindow(any(String.class))).thenReturn(6L);
  enf.init("blah", conf);
  // Initialize the plan with the full (pre-loss) cluster capacity.
  InMemoryPlan plan = new InMemoryPlan(queueMetrics, policy, agent, clusterCapacity,
      step, res, minAlloc, maxAlloc, "dedicated", enf, true, context, clock);
  // Add reservations that fill the plan, advancing the mocked clock between
  // them so each reservation is accepted strictly after the previous one.
  long ts = System.currentTimeMillis();
  ReservationId r1 = ReservationId.newInstance(ts, 1);
  int[] f5 = { 20, 20, 20, 20, 20 };
  ReservationDefinition rDef = ReservationSystemTestUtil.createSimpleReservationDefinition(
      0, 0 + f5.length, f5.length);
  assertTrue(plan.toString(), plan.addReservation(new InMemoryReservationAllocation(
      r1, rDef, "u3", "dedicated", 0, 0 + f5.length, generateAllocation(0, f5),
      res, minAlloc), false));
  when(clock.getTime()).thenReturn(1L);
  ReservationId r2 = ReservationId.newInstance(ts, 2);
  assertTrue(plan.toString(), plan.addReservation(new InMemoryReservationAllocation(
      r2, rDef, "u4", "dedicated", 0, 0 + f5.length, generateAllocation(0, f5),
      res, minAlloc), false));
  when(clock.getTime()).thenReturn(2L);
  ReservationId r3 = ReservationId.newInstance(ts, 3);
  assertTrue(plan.toString(), plan.addReservation(new InMemoryReservationAllocation(
      r3, rDef, "u5", "dedicated", 0, 0 + f5.length, generateAllocation(0, f5),
      res, minAlloc), false));
  when(clock.getTime()).thenReturn(3L);
  ReservationId r4 = ReservationId.newInstance(ts, 4);
  assertTrue(plan.toString(), plan.addReservation(new InMemoryReservationAllocation(
      r4, rDef, "u6", "dedicated", 0, 0 + f5.length, generateAllocation(0, f5),
      res, minAlloc), false));
  when(clock.getTime()).thenReturn(4L);
  ReservationId r5 = ReservationId.newInstance(ts, 5);
  assertTrue(plan.toString(), plan.addReservation(new InMemoryReservationAllocation(
      r5, rDef, "u7", "dedicated", 0, 0 + f5.length, generateAllocation(0, f5),
      res, minAlloc), false));
  int[] f6 = { 50, 50, 50, 50, 50 };
  ReservationId r6 = ReservationId.newInstance(ts, 6);
  assertTrue(plan.toString(), plan.addReservation(new InMemoryReservationAllocation(
      r6, rDef, "u3", "dedicated", 10, 10 + f6.length, generateAllocation(10, f6),
      res, minAlloc), false));
  when(clock.getTime()).thenReturn(6L);
  ReservationId r7 = ReservationId.newInstance(ts, 7);
  assertTrue(plan.toString(), plan.addReservation(new InMemoryReservationAllocation(
      r7, rDef, "u4", "dedicated", 10, 10 + f6.length, generateAllocation(10, f6),
      res, minAlloc), false));
  // Remove some of the resources (requires replanning).
  plan.setTotalCapacity(Resource.newInstance(70 * 1024, 70));
  when(clock.getTime()).thenReturn(0L);
  // Run the replanner.
  enf.plan(plan, null);
  // Check which reservations are still present: r1..r3 (60 of the remaining
  // 70 containers) fit the reduced capacity, while r6 and r7 start at t=10,
  // outside the 6-step enforcement window, so the replanner never considers them.
  assertNotNull(plan.getReservationById(r1));
  assertNotNull(plan.getReservationById(r2));
  assertNotNull(plan.getReservationById(r3));
  assertNotNull(plan.getReservationById(r6));
  assertNotNull(plan.getReservationById(r7));
  // r4 and r5 were accepted last, and SimpleCapacityReplanner evicts the most
  // recently accepted reservations first, so they are the ones removed.
  assertNull(plan.getReservationById(r4));
  assertNull(plan.getReservationById(r5));
  // Check remaining reservations against the reduced capacity. (Note: the
  // loop checks each reservation individually, not their sum; r6 and r7
  // together still exceed capacity over [10,15), but that range lies outside
  // the enforcement window.)
  for (int i = 0; i < 20; i++) {
    long tot = 0;
    for (ReservationAllocation r : plan.getReservationsAtTime(i)) {
      tot = r.getResourcesAtTime(i).getMemorySize();
    }
    assertTrue(tot <= 70 * 1024);
  }
}
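generateAllocation is likewise a private helper that the excerpt omits. A plausible shape is sketched below; it assumes InMemoryReservationAllocation accepts a Map<ReservationInterval, Resource> of per-step allocations (the signature varies across Hadoop versions), with each array entry meaning that many 1 GB containers for one step.

// Hypothetical sketch of generateAllocation: containersPerStep[i] containers
// of 1024 MB / 1 vcore reserved over the unit interval starting at
// startTime + i. Signature assumed, not taken from the Hadoop source.
private Map<ReservationInterval, Resource> generateAllocation(int startTime,
    int[] containersPerStep) {
  Map<ReservationInterval, Resource> alloc =
      new TreeMap<ReservationInterval, Resource>();
  for (int i = 0; i < containersPerStep.length; i++) {
    alloc.put(new ReservationInterval(startTime + i, startTime + i + 1),
        Resource.newInstance(1024 * containersPerStep[i], containersPerStep[i]));
  }
  return alloc;
}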
Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.
From the class TestCapacityOverTimePolicy, method setup:
@Before
public void setup() throws Exception {
  // 24h window
  timeWindow = 86400000L;
  // 1 sec step
  step = 1000L;
  // 25% avg cap on capacity
  avgConstraint = 25;
  // 70% instantaneous cap on capacity
  instConstraint = 70;
  initTime = System.currentTimeMillis();
  minAlloc = Resource.newInstance(1024, 1);
  res = new DefaultResourceCalculator();
  maxAlloc = Resource.newInstance(1024 * 8, 8);
  mAgent = mock(ReservationAgent.class);
  QueueMetrics rootQueueMetrics = mock(QueueMetrics.class);
  String reservationQ = ReservationSystemTestUtil.getFullReservationQueueName();
  Resource clusterResource = ReservationSystemTestUtil.calculateClusterResource(totCont);
  ReservationSchedulerConfiguration conf = ReservationSystemTestUtil.createConf(
      reservationQ, timeWindow, instConstraint, avgConstraint);
  CapacityOverTimePolicy policy = new CapacityOverTimePolicy();
  policy.init(reservationQ, conf);
  RMContext context = ReservationSystemTestUtil.createMockRMContext();
  plan = new InMemoryPlan(rootQueueMetrics, policy, mAgent, clusterResource, step,
      res, minAlloc, maxAlloc, "dedicated", null, true, context);
}
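For concreteness, here is the back-of-the-envelope meaning of the two constraints configured above. The snippet only restates the setup numbers; totCont, timeWindow, step, instConstraint, and avgConstraint are fields of the test class defined outside this excerpt, and the 1 GB-per-container figure assumes calculateClusterResource sizes containers at minAlloc.

// Illustrative arithmetic only; field types may differ in the real class.
double stepsInWindow = timeWindow / step;                     // 86,400 one-second steps
double instLimitMB = totCont * 1024.0 * instConstraint / 100; // 70% cap at any instant
double avgLimitMB = totCont * 1024.0 * avgConstraint / 100;   // 25% cap on the 24h average
// CapacityOverTimePolicy admits a reservation only if it stays at or below
// instLimitMB at every step AND its memory integral over the 24h window stays
// at or below avgLimitMB * stepsInWindow.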
Use of org.apache.hadoop.yarn.server.resourcemanager.RMContext in project hadoop by apache.
From the class TestZKRMStateStorePerf, method initStore:
private void initStore(String hostPort) {
  Optional<String> optHostPort = Optional.fromNullable(hostPort);
  RMContext rmContext = mock(RMContext.class);
  conf = new YarnConfiguration();
  conf.set(YarnConfiguration.RM_ZK_ADDRESS, optHostPort.or(
      (curatorTestingServer == null) ? "" : curatorTestingServer.getConnectString()));
  conf.set(YarnConfiguration.ZK_RM_STATE_STORE_PARENT_PATH, workingZnode);
  store = new ZKRMStateStore();
  store.setResourceManager(new ResourceManager());
  store.init(conf);
  store.start();
  when(rmContext.getStateStore()).thenReturn(store);
  appTokenMgr = new AMRMTokenSecretManager(conf, rmContext);
  appTokenMgr.start();
  clientToAMTokenMgr = new ClientToAMTokenSecretManagerInRM();
}
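The excerpt shows only initialization; a teardown counterpart would undo it in reverse order. The sketch below is hypothetical (the real test's cleanup is not shown here) and assumes the same fields as initStore.

// Hypothetical cleanup mirroring initStore(); not part of the excerpt.
private void shutdownStore() throws Exception {
  if (appTokenMgr != null) {
    appTokenMgr.stop();   // stop the AMRMToken rolling thread
  }
  if (store != null) {
    store.stop();         // stop the service and close the ZK connection
  }
}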