Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent in project hadoop by Apache.
The class TestRMAppTransitions, method sendAttemptUpdateSavedEvent:
private void sendAttemptUpdateSavedEvent(RMApp application) {
  application.getCurrentAppAttempt().handle(
      new RMAppAttemptEvent(application.getCurrentAppAttempt().getAppAttemptId(),
          RMAppAttemptEventType.ATTEMPT_UPDATE_SAVED));
  rmDispatcher.await();
}
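The helper above delivers the event straight to the attempt; the same event type can also be routed through a dispatcher. A minimal, hypothetical sketch (not taken from TestRMAppTransitions) using the DrainDispatcher and mocked-handler pattern that appears later in these examples; the handler and attempt id are illustrative assumptions:

// Hypothetical sketch: route an ATTEMPT_UPDATE_SAVED event through a DrainDispatcher.
DrainDispatcher dispatcher = new DrainDispatcher();
EventHandler<RMAppAttemptEvent> handler = mock(EventHandler.class);
dispatcher.register(RMAppAttemptEventType.class, handler);
dispatcher.init(new YarnConfiguration());
dispatcher.start();
ApplicationAttemptId attemptId =
    BuilderUtils.newApplicationAttemptId(BuilderUtils.newApplicationId(1, 1), 1);
dispatcher.getEventHandler().handle(
    new RMAppAttemptEvent(attemptId, RMAppAttemptEventType.ATTEMPT_UPDATE_SAVED));
dispatcher.await();
// The registered handler should have received the attempt event.
verify(handler).handle(any(RMAppAttemptEvent.class));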
Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent in project hadoop by Apache.
The class CapacityScheduler, method addApplicationAttempt:
private void addApplicationAttempt(ApplicationAttemptId applicationAttemptId,
    boolean transferStateFromPreviousAttempt, boolean isAttemptRecovering) {
  try {
    writeLock.lock();
    SchedulerApplication<FiCaSchedulerApp> application =
        applications.get(applicationAttemptId.getApplicationId());
    if (application == null) {
      LOG.warn("Application " + applicationAttemptId.getApplicationId()
          + " cannot be found in scheduler.");
      return;
    }
    CSQueue queue = (CSQueue) application.getQueue();
    FiCaSchedulerApp attempt = new FiCaSchedulerApp(applicationAttemptId,
        application.getUser(), queue, queue.getAbstractUsersManager(),
        rmContext, application.getPriority(), isAttemptRecovering,
        activitiesManager);
    if (transferStateFromPreviousAttempt) {
      attempt.transferStateFromPreviousAttempt(application.getCurrentAppAttempt());
    }
    application.setCurrentAppAttempt(attempt);
    // Update the attempt priority to the latest to avoid a race condition,
    // i.e. the SchedulerApplicationAttempt is created with the old priority
    // but has not yet been set via SchedulerApplication#setCurrentAppAttempt.
    // The scenario would be:
    // 1. SchedulerApplicationAttempt is created with the old priority.
    // 2. updateApplicationPriority() updates the SchedulerApplication. Since
    //    currentAttempt is null, it just returns.
    // 3. The SchedulerApplicationAttempt is set in
    //    SchedulerApplication#setCurrentAppAttempt.
    attempt.setPriority(application.getPriority());
    queue.submitApplicationAttempt(attempt, application.getUser());
    LOG.info("Added Application Attempt " + applicationAttemptId
        + " to scheduler from user " + application.getUser() + " in queue "
        + queue.getQueueName());
    if (isAttemptRecovering) {
      if (LOG.isDebugEnabled()) {
        LOG.debug(applicationAttemptId
            + " is recovering. Skipping notifying ATTEMPT_ADDED");
      }
    } else {
      rmContext.getDispatcher().getEventHandler().handle(
          new RMAppAttemptEvent(applicationAttemptId,
              RMAppAttemptEventType.ATTEMPT_ADDED));
    }
  } finally {
    writeLock.unlock();
  }
}
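addApplicationAttempt is private; it is reached through the scheduler's event handler. A minimal sketch of how a test typically drives it, assuming an already initialized and started CapacityScheduler named cs, a queue named default, and ids built with BuilderUtils as in the other examples (the setup itself is omitted):

// Assumed: cs is an initialized and started CapacityScheduler.
ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
// First register the application with the scheduler ...
cs.handle(new AppAddedSchedulerEvent(appId, "default", "user"));
// ... then add the attempt; this ends up in addApplicationAttempt(), which
// dispatches RMAppAttemptEventType.ATTEMPT_ADDED when not recovering.
cs.handle(new AppAttemptAddedSchedulerEvent(appAttemptId, false));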
Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent in project hadoop by Apache.
The class TestRMContainerImpl, method testExpireWhileRunning:
@Test
public void testExpireWhileRunning() {
  DrainDispatcher drainDispatcher = new DrainDispatcher();
  EventHandler<RMAppAttemptEvent> appAttemptEventHandler = mock(EventHandler.class);
  EventHandler generic = mock(EventHandler.class);
  drainDispatcher.register(RMAppAttemptEventType.class, appAttemptEventHandler);
  drainDispatcher.register(RMNodeEventType.class, generic);
  drainDispatcher.init(new YarnConfiguration());
  drainDispatcher.start();
  NodeId nodeId = BuilderUtils.newNodeId("host", 3425);
  ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
  ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId(appId, 1);
  ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
  ContainerAllocationExpirer expirer = mock(ContainerAllocationExpirer.class);
  Resource resource = BuilderUtils.newResource(512, 1);
  Priority priority = BuilderUtils.newPriority(5);
  Container container = BuilderUtils.newContainer(containerId, nodeId,
      "host:3465", resource, priority, null);
  ConcurrentMap<ApplicationId, RMApp> appMap = new ConcurrentHashMap<>();
  RMApp rmApp = mock(RMApp.class);
  appMap.putIfAbsent(appId, rmApp);
  RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class);
  SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class);
  RMContext rmContext = mock(RMContext.class);
  when(rmContext.getDispatcher()).thenReturn(drainDispatcher);
  when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer);
  when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer);
  when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher);
  YarnConfiguration conf = new YarnConfiguration();
  conf.setBoolean(
      YarnConfiguration.APPLICATION_HISTORY_SAVE_NON_AM_CONTAINER_META_INFO, true);
  when(rmContext.getYarnConfiguration()).thenReturn(conf);
  when(rmContext.getRMApps()).thenReturn(appMap);
  RMContainer rmContainer = new RMContainerImpl(container,
      SchedulerRequestKey.extractFrom(container), appAttemptId, nodeId, "user",
      rmContext);
  assertEquals(RMContainerState.NEW, rmContainer.getState());
  assertEquals(resource, rmContainer.getAllocatedResource());
  assertEquals(nodeId, rmContainer.getAllocatedNode());
  assertEquals(priority, rmContainer.getAllocatedSchedulerKey().getPriority());
  verify(writer).containerStarted(any(RMContainer.class));
  verify(publisher).containerCreated(any(RMContainer.class), anyLong());
  rmContainer.handle(new RMContainerEvent(containerId, RMContainerEventType.START));
  drainDispatcher.await();
  assertEquals(RMContainerState.ALLOCATED, rmContainer.getState());
  rmContainer.handle(new RMContainerEvent(containerId, RMContainerEventType.ACQUIRED));
  drainDispatcher.await();
  assertEquals(RMContainerState.ACQUIRED, rmContainer.getState());
  rmContainer.handle(new RMContainerEvent(containerId, RMContainerEventType.LAUNCHED));
  drainDispatcher.await();
  assertEquals(RMContainerState.RUNNING, rmContainer.getState());
  assertEquals(
      "http://host:3465/node/containerlogs/container_1_0001_01_000001/user",
      rmContainer.getLogURL());
  // In RUNNING state. Verify EXPIRE and associated actions.
  reset(appAttemptEventHandler);
  ContainerStatus containerStatus = SchedulerUtils.createAbnormalContainerStatus(
      containerId, SchedulerUtils.EXPIRED_CONTAINER);
  rmContainer.handle(new RMContainerFinishedEvent(containerId, containerStatus,
      RMContainerEventType.EXPIRE));
  drainDispatcher.await();
  assertEquals(RMContainerState.RUNNING, rmContainer.getState());
  verify(writer, never()).containerFinished(any(RMContainer.class));
  verify(publisher, never()).containerFinished(any(RMContainer.class), anyLong());
}
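The point of the final block is that an EXPIRE received while the container is already RUNNING is ignored: the state stays RUNNING and no containerFinished callbacks are published. A further check one might add, as an assumption on my part rather than part of the original test, is that no attempt-level event is dispatched either:

// Hypothetical extra assertion: the ignored EXPIRE should not have produced
// any RMAppAttemptEvent for the handler that was reset() above.
drainDispatcher.await();
verify(appAttemptEventHandler, never()).handle(any(RMAppAttemptEvent.class));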
Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent in project hadoop by Apache.
The class MockRM, method sendAMLaunched:
/**
 * It is recommended to use launchAM; alternatively, use sendAMLaunched as follows:
 * 1. wait for the RMAppAttempt to be scheduled
 * 2. send a node heartbeat
 * 3. call sendAMLaunched
 */
public MockAM sendAMLaunched(ApplicationAttemptId appAttemptId) throws Exception {
  MockAM am = new MockAM(getRMContext(), masterService, appAttemptId);
  waitForState(appAttemptId, RMAppAttemptState.ALLOCATED);
  // create and set the AMRMToken
  Token<AMRMTokenIdentifier> amrmToken = this.rmContext.getAMRMTokenSecretManager()
      .createAndGetAMRMToken(appAttemptId);
  ((RMAppAttemptImpl) this.rmContext.getRMApps()
      .get(appAttemptId.getApplicationId()).getRMAppAttempt(appAttemptId))
      .setAMRMToken(amrmToken);
  getRMContext().getDispatcher().getEventHandler().handle(
      new RMAppAttemptEvent(appAttemptId, RMAppAttemptEventType.LAUNCHED));
  drainEventsImplicitly();
  return am;
}
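Put together, a minimal sketch of the three steps the Javadoc recommends might look like the following; the memory sizes and node address are illustrative assumptions, and launchAM wraps essentially the same sequence:

// Minimal sketch, assuming a freshly started MockRM and one registered MockNM.
MockRM rm = new MockRM(new YarnConfiguration());
rm.start();
MockNM nm = rm.registerNode("127.0.0.1:1234", 8 * 1024);
RMApp app = rm.submitApp(1024);
ApplicationAttemptId attemptId = app.getCurrentAppAttempt().getAppAttemptId();
// 1. wait until the RMAppAttempt has been scheduled
rm.waitForState(attemptId, RMAppAttemptState.SCHEDULED);
// 2. send a node heartbeat so the scheduler can allocate the AM container
nm.nodeHeartbeat(true);
// 3. mark the AM as launched and register it
MockAM am = rm.sendAMLaunched(attemptId);
am.registerAppAttempt();
rm.stop();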
Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent in project hadoop by Apache.
The class MockRM, method sendAMLaunchFailed:
public void sendAMLaunchFailed(ApplicationAttemptId appAttemptId) throws Exception {
  MockAM am = new MockAM(getRMContext(), masterService, appAttemptId);
  waitForState(am.getApplicationAttemptId(), RMAppAttemptState.ALLOCATED);
  getRMContext().getDispatcher().getEventHandler().handle(
      new RMAppAttemptEvent(appAttemptId, RMAppAttemptEventType.LAUNCH_FAILED,
          "Failed"));
  drainEventsImplicitly();
}
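A hedged usage sketch, assuming the same kind of MockRM/MockNM setup as in the previous example; the wait for FAILED is my assumption about the resulting attempt state, since LAUNCH_FAILED normally moves the attempt toward FAILED:

// Assumed setup: rm is a started MockRM, nm a registered MockNM, app submitted.
ApplicationAttemptId attemptId = app.getCurrentAppAttempt().getAppAttemptId();
nm.nodeHeartbeat(true);  // let the scheduler allocate the AM container
rm.sendAMLaunchFailed(attemptId);
// Assumption: after LAUNCH_FAILED the attempt is expected to end up FAILED.
rm.waitForState(attemptId, RMAppAttemptState.FAILED);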