Use of org.apache.hadoop.yarn.event.InlineDispatcher in project hadoop by apache.
From the class TestFifoScheduler, method testAppAttemptMetrics.
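InlineDispatcher is a test-only dispatcher from hadoop-yarn-common that extends AsyncDispatcher but delivers each event synchronously on the calling thread, so the tests below can assert on a handler's side effects immediately after handle() returns. A minimal, self-contained sketch of that idea (the types below are simplified stand-ins, not the real org.apache.hadoop.yarn.event classes):

import java.util.HashMap;
import java.util.Map;
import java.util.function.Consumer;

// Sketch of the synchronous-dispatch behavior the tests rely on; this is
// an illustration, not the Hadoop source.
class InlineDispatcherSketch {
  private final Map<Class<?>, Consumer<Object>> handlers = new HashMap<>();

  <E> void register(Class<E> eventClass, Consumer<E> handler) {
    @SuppressWarnings("unchecked")
    Consumer<Object> h = (Consumer<Object>) handler;
    handlers.put(eventClass, h);
  }

  // Unlike AsyncDispatcher, there is no event queue and no dispatcher
  // thread: the matching handler runs before handle() returns, so a test
  // can assert on its side effects immediately afterwards.
  void handle(Object event) {
    Consumer<Object> h = handlers.get(event.getClass());
    if (h != null) {
      h.accept(event);
    }
  }
}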
@Test(timeout = 5000)
public void testAppAttemptMetrics() throws Exception {
  AsyncDispatcher dispatcher = new InlineDispatcher();
  FifoScheduler scheduler = new FifoScheduler();
  RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
  RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null,
      null, null, null, null, scheduler);
  ((RMContextImpl) rmContext).setSystemMetricsPublisher(
      mock(SystemMetricsPublisher.class));
  Configuration conf = new Configuration();
  ((RMContextImpl) rmContext).setScheduler(scheduler);
  scheduler.setRMContext(rmContext);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(conf, rmContext);
  QueueMetrics metrics = scheduler.getRootQueueMetrics();
  int beforeAppsSubmitted = metrics.getAppsSubmitted();
  ApplicationId appId = BuilderUtils.newApplicationId(200, 1);
  ApplicationAttemptId appAttemptId =
      BuilderUtils.newApplicationAttemptId(appId, 1);
  SchedulerEvent appEvent = new AppAddedSchedulerEvent(appId, "queue", "user");
  scheduler.handle(appEvent);
  SchedulerEvent attemptEvent =
      new AppAttemptAddedSchedulerEvent(appAttemptId, false);
  scheduler.handle(attemptEvent);
  appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 2);
  SchedulerEvent attemptEvent2 =
      new AppAttemptAddedSchedulerEvent(appAttemptId, false);
  scheduler.handle(attemptEvent2);
  int afterAppsSubmitted = metrics.getAppsSubmitted();
  // Two attempts were added for a single application, so appsSubmitted
  // must grow by exactly 1: the metric counts applications, not attempts.
  Assert.assertEquals(1, afterAppsSubmitted - beforeAppsSubmitted);
  scheduler.stop();
}
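Because the dispatcher is inline, every scheduler.handle(...) call above has fully updated QueueMetrics by the time it returns, which is why the before/after comparison needs no waiting. A usage sketch against the InlineDispatcherSketch class above (the String event is purely illustrative):

InlineDispatcherSketch d = new InlineDispatcherSketch();
StringBuilder log = new StringBuilder();
d.register(String.class, (String e) -> log.append(e));
d.handle("app-added");
// The handler already ran; no sleep or polling is needed.
assert log.toString().equals("app-added");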
Use of org.apache.hadoop.yarn.event.InlineDispatcher in project hadoop by apache.
From the class TestFifoScheduler, method testUpdateResourceOnNode.
@Test(timeout = 2000)
public void testUpdateResourceOnNode() throws Exception {
  AsyncDispatcher dispatcher = new InlineDispatcher();
  Configuration conf = new Configuration();
  RMContainerTokenSecretManager containerTokenSecretManager =
      new RMContainerTokenSecretManager(conf);
  containerTokenSecretManager.rollMasterKey();
  NMTokenSecretManagerInRM nmTokenSecretManager =
      new NMTokenSecretManagerInRM(conf);
  nmTokenSecretManager.rollMasterKey();
  RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
  FifoScheduler scheduler = new FifoScheduler();
  RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null,
      null, containerTokenSecretManager, nmTokenSecretManager, null, scheduler);
  rmContext.setSystemMetricsPublisher(mock(SystemMetricsPublisher.class));
  rmContext.setRMApplicationHistoryWriter(
      mock(RMApplicationHistoryWriter.class));
  ((RMContextImpl) rmContext).setYarnConfiguration(new YarnConfiguration());
  NullRMNodeLabelsManager nlm = new NullRMNodeLabelsManager();
  nlm.init(new Configuration());
  rmContext.setNodeLabelManager(nlm);
  scheduler.setRMContext(rmContext);
  ((RMContextImpl) rmContext).setScheduler(scheduler);
  scheduler.init(conf);
  scheduler.start();
  scheduler.reinitialize(new Configuration(), rmContext);
  RMNode node0 = MockNodes.newNodeInfo(1, Resources.createResource(2048, 4),
      1, "127.0.0.1");
  NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node0);
  scheduler.handle(nodeEvent1);
  assertEquals(1, scheduler.getNumClusterNodes());
  // Shrink the node from 2048 MB to 1024 MB.
  Resource newResource = Resources.createResource(1024, 4);
  NodeResourceUpdateSchedulerEvent node0ResourceUpdate =
      new NodeResourceUpdateSchedulerEvent(node0, ResourceOption.newInstance(
          newResource, ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT));
  scheduler.handle(node0ResourceUpdate);
  // The SchedulerNode's total resource and available resource have changed.
  assertEquals(1024, scheduler.getNodeTracker().getNode(node0.getNodeID())
      .getTotalResource().getMemorySize());
  assertEquals(1024, scheduler.getNodeTracker().getNode(node0.getNodeID())
      .getUnallocatedResource().getMemorySize());
  QueueInfo queueInfo = scheduler.getQueueInfo(null, false, false);
  Assert.assertEquals(0.0f, queueInfo.getCurrentCapacity(), 0.0f);
  int _appId = 1;
  int _appAttemptId = 1;
  ApplicationAttemptId appAttemptId = createAppAttemptId(_appId, _appAttemptId);
  createMockRMApp(appAttemptId, rmContext);
  AppAddedSchedulerEvent appEvent = new AppAddedSchedulerEvent(
      appAttemptId.getApplicationId(), "queue1", "user1");
  scheduler.handle(appEvent);
  AppAttemptAddedSchedulerEvent attemptEvent =
      new AppAttemptAddedSchedulerEvent(appAttemptId, false);
  scheduler.handle(attemptEvent);
  int memory = 1024;
  int priority = 1;
  List<ResourceRequest> ask = new ArrayList<ResourceRequest>();
  ResourceRequest nodeLocal = createResourceRequest(memory,
      node0.getHostName(), priority, 1);
  ResourceRequest rackLocal = createResourceRequest(memory,
      node0.getRackName(), priority, 1);
  ResourceRequest any = createResourceRequest(memory, ResourceRequest.ANY,
      priority, 1);
  ask.add(nodeLocal);
  ask.add(rackLocal);
  ask.add(any);
  scheduler.allocate(appAttemptId, ask, new ArrayList<ContainerId>(), null,
      null, NULL_UPDATE_REQUESTS);
  // Before the node update event, there is one outstanding local request.
  Assert.assertEquals(1, nodeLocal.getNumContainers());
  NodeUpdateSchedulerEvent node0Update = new NodeUpdateSchedulerEvent(node0);
  // Now schedule.
  scheduler.handle(node0Update);
  // After the node update event, the local request has been satisfied.
  Assert.assertEquals(0, nodeLocal.getNumContainers());
  // Also check that one container was scheduled.
  SchedulerAppReport info = scheduler.getSchedulerAppInfo(appAttemptId);
  Assert.assertEquals(1, info.getLiveContainers().size());
  // And check that the default queue is now full: the node offers 1024 MB
  // and the single 1024 MB container consumes all of it.
  queueInfo = scheduler.getQueueInfo(null, false, false);
  Assert.assertEquals(1.0f, queueInfo.getCurrentCapacity(), 0.0f);
}
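createResourceRequest, createAppAttemptId, and createMockRMApp are private helpers defined elsewhere in TestFifoScheduler. A plausible sketch of createResourceRequest, assuming the standard YARN records API rather than the exact original:

// Hypothetical reconstruction of the createResourceRequest helper; the
// real method is a private member of TestFifoScheduler.
private ResourceRequest createResourceRequest(int memory, String host,
    int priority, int numContainers) {
  return ResourceRequest.newInstance(
      Priority.newInstance(priority),   // request priority
      host,                             // host name, rack name, or ResourceRequest.ANY
      Resource.newInstance(memory, 1),  // memory in MB, 1 vcore
      numContainers);                   // containers wanted at this locality
}

The test submits the same ask at node, rack, and ANY locality, which is how a YARN client normally expresses a single container request with locality preferences.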
Use of org.apache.hadoop.yarn.event.InlineDispatcher in project hadoop by apache.
From the class TestJobImpl, method testAbortJobCalledAfterKillingTasks.
@Test
public void testAbortJobCalledAfterKillingTasks() throws IOException {
  Configuration conf = new Configuration();
  conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
  conf.set(MRJobConfig.MR_AM_COMMITTER_CANCEL_TIMEOUT_MS, "1000");
  InlineDispatcher dispatcher = new InlineDispatcher();
  dispatcher.init(conf);
  dispatcher.start();
  OutputCommitter committer = Mockito.mock(OutputCommitter.class);
  CommitterEventHandler commitHandler =
      createCommitterEventHandler(dispatcher, committer);
  commitHandler.init(conf);
  commitHandler.start();
  JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null);
  // Fail one task. This should land the JobImpl in the FAIL_WAIT state.
  job.handle(new JobTaskEvent(
      MRBuilderUtils.newTaskId(job.getID(), 1, TaskType.MAP),
      TaskState.FAILED));
  // Verify abortJob has not been called yet.
  Mockito.verify(committer, Mockito.never())
      .abortJob((JobContext) Mockito.any(), (State) Mockito.any());
  assertJobState(job, JobStateInternal.FAIL_WAIT);
  // In FAIL_WAIT the job kills its remaining tasks; once they report back
  // (or the wait times out), it moves through FAIL_ABORT, which invokes
  // abortJob. Mockito.timeout polls for up to 2 seconds for that call.
  Mockito.verify(committer, Mockito.timeout(2000).times(1))
      .abortJob((JobContext) Mockito.any(), (State) Mockito.any());
  assertJobState(job, JobStateInternal.FAILED);
  dispatcher.stop();
}
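createCommitterEventHandler and createRunningStubbedJob are private helpers elsewhere in TestJobImpl. A plausible sketch of the former, with the caveat that the exact wiring here is an assumption and not the verbatim source:

// Hypothetical reconstruction: hand the committer to a
// CommitterEventHandler and register it on the test dispatcher so that
// CommitterEventType events (including JOB_ABORT) reach the mock.
private static CommitterEventHandler createCommitterEventHandler(
    Dispatcher dispatcher, OutputCommitter committer) {
  AppContext context = Mockito.mock(AppContext.class);
  Mockito.when(context.getEventHandler())
      .thenReturn(dispatcher.getEventHandler());
  Mockito.when(context.getClock()).thenReturn(SystemClock.getInstance());
  CommitterEventHandler handler = new CommitterEventHandler(
      context, committer, Mockito.mock(RMHeartbeatHandler.class));
  dispatcher.register(CommitterEventType.class, handler);
  return handler;
}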
Use of org.apache.hadoop.yarn.event.InlineDispatcher in project hadoop by apache.
From the class TestNonAggregatingLogHandler, method testHandlingApplicationFinishedEvent.
@Test
public void testHandlingApplicationFinishedEvent() throws IOException {
  DeletionService delService = new DeletionService(null);
  NonAggregatingLogHandler aggregatingLogHandler =
      new NonAggregatingLogHandler(new InlineDispatcher(), delService,
          dirsHandler, new NMNullStateStoreService());
  dirsHandler.init(conf);
  dirsHandler.start();
  delService.init(conf);
  delService.start();
  aggregatingLogHandler.init(conf);
  aggregatingLogHandler.start();
  // It should NOT throw RejectedExecutionException.
  aggregatingLogHandler.handle(new LogHandlerAppFinishedEvent(appId));
  aggregatingLogHandler.stop();
  // It should NOT throw RejectedExecutionException after the handler
  // service has been stopped, either.
  aggregatingLogHandler.handle(new LogHandlerAppFinishedEvent(appId));
  aggregatingLogHandler.close();
}
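The property under test: NonAggregatingLogHandler schedules log deletion on an internal scheduled executor, and a ScheduledExecutorService that has been shut down throws RejectedExecutionException on schedule(). The handler therefore has to tolerate events that arrive after stop(). A generic sketch of that guard (names here are illustrative, not from the Hadoop class):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Illustrative guard pattern: never submit to an executor that may
// already be shut down.
class GuardedScheduler {
  private final ScheduledExecutorService sched =
      Executors.newScheduledThreadPool(1);

  void scheduleDeletion(Runnable deletionTask, long delaySeconds) {
    if (!sched.isShutdown()) {  // avoids RejectedExecutionException
      sched.schedule(deletionTask, delaySeconds, TimeUnit.SECONDS);
    }
  }

  void stop() {
    sched.shutdown();
  }
}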