Example 6 with RMAppEvent

Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent in the Apache Hadoop project.

From the class TestRM, method testApplicationKillAtAcceptedState.

/**
   * Validate killing an application when it is at accepted state.
   * @throws Exception exception
   */
@Test(timeout = 60000)
public void testApplicationKillAtAcceptedState() throws Exception {
    final Dispatcher dispatcher = new DrainDispatcher() {

        @Override
        public EventHandler<Event> getEventHandler() {
            class EventArgMatcher extends ArgumentMatcher<AbstractEvent> {

                @Override
                public boolean matches(Object argument) {
                    if (argument instanceof RMAppAttemptEvent) {
                        if (((RMAppAttemptEvent) argument).getType().equals(RMAppAttemptEventType.KILL)) {
                            return true;
                        }
                    }
                    return false;
                }
            }
            EventHandler handler = spy(super.getEventHandler());
            doNothing().when(handler).handle(argThat(new EventArgMatcher()));
            return handler;
        }
    };
    MockRM rm = new MockRM(conf) {

        @Override
        protected Dispatcher createDispatcher() {
            return dispatcher;
        }
    };
    // test metrics
    QueueMetrics metrics = rm.getResourceScheduler().getRootQueueMetrics();
    int appsKilled = metrics.getAppsKilled();
    int appsSubmitted = metrics.getAppsSubmitted();
    rm.start();
    MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm.getResourceTrackerService());
    nm1.registerNode();
    // Submit an app that will be killed while it is in the ACCEPTED state
    RMApp application = rm.submitApp(200);
    MockAM am = MockRM.launchAM(application, rm, nm1);
    rm.waitForState(am.getApplicationAttemptId(), RMAppAttemptState.LAUNCHED);
    nm1.nodeHeartbeat(am.getApplicationAttemptId(), 1, ContainerState.RUNNING);
    rm.waitForState(application.getApplicationId(), RMAppState.ACCEPTED);
    // Now kill the application before the new attempt is launched; the app
    // report returns the invalid AM host and port.
    KillApplicationRequest request = KillApplicationRequest.newInstance(application.getApplicationId());
    rm.getClientRMService().forceKillApplication(request);
    // Specific test for YARN-1689 follows
    // Now suppose a race causes the AM to register at this point. This should not crash the RM.
    am.registerAppAttempt(false);
    // We explicitly intercepted the kill-event to RMAppAttempt, so app should
    // still be in KILLING state.
    rm.waitForState(application.getApplicationId(), RMAppState.KILLING);
    // The AM attempt should now be in the RUNNING state
    rm.waitForState(am.getApplicationAttemptId(), RMAppAttemptState.RUNNING);
    // Simulate that appAttempt is killed.
    rm.getRMContext().getDispatcher().getEventHandler().handle(new RMAppEvent(application.getApplicationId(), RMAppEventType.ATTEMPT_KILLED));
    rm.waitForState(application.getApplicationId(), RMAppState.KILLED);
    // test metrics
    metrics = rm.getResourceScheduler().getRootQueueMetrics();
    Assert.assertEquals(appsKilled + 1, metrics.getAppsKilled());
    Assert.assertEquals(appsSubmitted + 1, metrics.getAppsSubmitted());
}
Also used: DrainDispatcher (org.apache.hadoop.yarn.event.DrainDispatcher), RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), EventHandler (org.apache.hadoop.yarn.event.EventHandler), KillApplicationRequest (org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest), Dispatcher (org.apache.hadoop.yarn.event.Dispatcher), RMAppEvent (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent), QueueMetrics (org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics), ArgumentMatcher (org.mockito.ArgumentMatcher), AbstractEvent (org.apache.hadoop.yarn.event.AbstractEvent), Event (org.apache.hadoop.yarn.event.Event), RMAppAttemptEvent (org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent), Test (org.junit.Test)
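
The core pattern this example exercises, distilled: construct an RMAppEvent and hand it to the ResourceManager dispatcher, exactly as the test does above to simulate ATTEMPT_KILLED. A minimal sketch, assuming an RMContext named rmContext is already in scope (the KILL event type here is only illustrative):

    ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
    RMAppEvent killEvent = new RMAppEvent(appId, RMAppEventType.KILL);
    rmContext.getDispatcher().getEventHandler().handle(killEvent);

In the test above, the same call is made through rm.getRMContext() with RMAppEventType.ATTEMPT_KILLED to push the application from KILLING to KILLED.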

Example 7 with RMAppEvent

Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent in the Apache Hadoop project.

From the class TestFairScheduler, method testNotAllowSubmitApplication.

@SuppressWarnings("unchecked")
@Test
public void testNotAllowSubmitApplication() throws Exception {
    // Set ACLs in the allocation file
    conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
    PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
    out.println("<?xml version=\"1.0\"?>");
    out.println("<allocations>");
    out.println("<queue name=\"root\">");
    out.println("  <aclSubmitApps> </aclSubmitApps>");
    out.println("  <aclAdministerApps> </aclAdministerApps>");
    out.println("  <queue name=\"queue1\">");
    out.println("    <aclSubmitApps>userallow</aclSubmitApps>");
    out.println("    <aclAdministerApps>userallow</aclAdministerApps>");
    out.println("  </queue>");
    out.println("</queue>");
    out.println("</allocations>");
    out.close();
    scheduler.init(conf);
    scheduler.start();
    scheduler.reinitialize(conf, resourceManager.getRMContext());
    int appId = this.APP_ID++;
    String user = "usernotallow";
    String queue = "queue1";
    ApplicationId applicationId = MockApps.newAppID(appId);
    String name = MockApps.newAppName();
    ApplicationMasterService masterService = new ApplicationMasterService(resourceManager.getRMContext(), scheduler);
    ApplicationSubmissionContext submissionContext = new ApplicationSubmissionContextPBImpl();
    ContainerLaunchContext clc = BuilderUtils.newContainerLaunchContext(null, null, null, null, null, null);
    submissionContext.setApplicationId(applicationId);
    submissionContext.setAMContainerSpec(clc);
    RMApp application = new RMAppImpl(applicationId, resourceManager.getRMContext(), conf, name, user, queue, submissionContext, scheduler, masterService, System.currentTimeMillis(), "YARN", null, null);
    resourceManager.getRMContext().getRMApps().putIfAbsent(applicationId, application);
    application.handle(new RMAppEvent(applicationId, RMAppEventType.START));
    final int MAX_TRIES = 20;
    int numTries = 0;
    while (!application.getState().equals(RMAppState.SUBMITTED) && numTries < MAX_TRIES) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException ex) {
            ex.printStackTrace();
        }
        numTries++;
    }
    assertEquals("The application doesn't reach SUBMITTED.", RMAppState.SUBMITTED, application.getState());
    ApplicationAttemptId attId = ApplicationAttemptId.newInstance(applicationId, this.ATTEMPT_ID++);
    scheduler.addApplication(attId.getApplicationId(), queue, user, false);
    numTries = 0;
    while (application.getFinishTime() == 0 && numTries < MAX_TRIES) {
        try {
            Thread.sleep(100);
        } catch (InterruptedException ex) {
            ex.printStackTrace();
        }
        numTries++;
    }
    assertEquals(FinalApplicationStatus.FAILED, application.getFinalApplicationStatus());
}
Also used: RMAppImpl (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl), MockRMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.MockRMApp), RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), ApplicationMasterService (org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService), FileWriter (java.io.FileWriter), ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext), ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId), RMAppEvent (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent), ApplicationSubmissionContextPBImpl (org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl), ApplicationSubmissionContext (org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), PrintWriter (java.io.PrintWriter), Test (org.junit.Test)
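
The hand-rolled Thread.sleep loops above can be factored into a small polling helper. The sketch below is not part of the Hadoop test base; the helper name, timeout, and interval are illustrative:

    private static boolean waitForCondition(java.util.function.BooleanSupplier condition,
            long timeoutMs, long intervalMs) throws InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!condition.getAsBoolean()) {
            if (System.currentTimeMillis() >= deadline) {
                return false;
            }
            Thread.sleep(intervalMs);
        }
        return true;
    }

    // Hypothetical usage, equivalent to the MAX_TRIES loop in the test:
    // waitForCondition(() -> application.getState() == RMAppState.SUBMITTED, 2000, 100);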

Example 8 with RMAppEvent

Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent in the Apache Hadoop project.

From the class TestRMAppLogAggregationStatus, method testGetLogAggregationStatusForAppReport.

@Test(timeout = 10000)
public void testGetLogAggregationStatusForAppReport() {
    YarnConfiguration conf = new YarnConfiguration();
    // Disable the log aggregation
    conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, false);
    RMAppImpl rmApp = (RMAppImpl) createRMApp(conf);
    // The log aggregation status should be DISABLED.
    Assert.assertEquals(LogAggregationStatus.DISABLED, rmApp.getLogAggregationStatusForAppReport());
    // Enable the log aggregation
    conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
    rmApp = (RMAppImpl) createRMApp(conf);
    // If we do not know of any NodeManagers for this application and
    // log aggregation is enabled, the log aggregation status will
    // be NOT_START
    Assert.assertEquals(LogAggregationStatus.NOT_START, rmApp.getLogAggregationStatusForAppReport());
    NodeId nodeId1 = NodeId.newInstance("localhost", 1111);
    NodeId nodeId2 = NodeId.newInstance("localhost", 2222);
    NodeId nodeId3 = NodeId.newInstance("localhost", 3333);
    NodeId nodeId4 = NodeId.newInstance("localhost", 4444);
    // If the log aggregation status is NOT_START for all NMs,
    // the log aggregation status for this app will be NOT_START
    rmApp.aggregateLogReport(nodeId1, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId2, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId3, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId4, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    Assert.assertEquals(LogAggregationStatus.NOT_START, rmApp.getLogAggregationStatusForAppReport());
    rmApp.aggregateLogReport(nodeId1, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId2, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.RUNNING, ""));
    rmApp.aggregateLogReport(nodeId3, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    rmApp.aggregateLogReport(nodeId4, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    Assert.assertEquals(LogAggregationStatus.RUNNING, rmApp.getLogAggregationStatusForAppReport());
    rmApp.handle(new RMAppEvent(rmApp.getApplicationId(), RMAppEventType.KILL));
    Assert.assertTrue(RMAppImpl.isAppInFinalState(rmApp));
    // If the log aggregation status is TIME_OUT for at least one NM and
    // SUCCEEDED for the others, the log aggregation status for this app
    // will be TIME_OUT
    rmApp.aggregateLogReport(nodeId1, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    rmApp.aggregateLogReport(nodeId2, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.TIME_OUT, ""));
    rmApp.aggregateLogReport(nodeId3, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    rmApp.aggregateLogReport(nodeId4, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    Assert.assertEquals(LogAggregationStatus.TIME_OUT, rmApp.getLogAggregationStatusForAppReport());
    // If the log aggregation status is SUCCEEDED for all NMs and the
    // application is in a final state, the log aggregation status for
    // this app will be SUCCEEDED
    rmApp.aggregateLogReport(nodeId1, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    rmApp.aggregateLogReport(nodeId2, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    rmApp.aggregateLogReport(nodeId3, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    rmApp.aggregateLogReport(nodeId4, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    Assert.assertEquals(LogAggregationStatus.SUCCEEDED, rmApp.getLogAggregationStatusForAppReport());
    rmApp = (RMAppImpl) createRMApp(conf);
    // If the log aggregation status is RUNNING for at least one NM,
    // the log aggregation status for this app will be RUNNING
    rmApp.aggregateLogReport(nodeId1, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId2, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.RUNNING, ""));
    rmApp.aggregateLogReport(nodeId3, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId4, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    Assert.assertEquals(LogAggregationStatus.RUNNING, rmApp.getLogAggregationStatusForAppReport());
    // If the log aggregation status is RUNNING_WITH_FAILURE for at
    // least one NM, the log aggregation status for this app will be
    // RUNNING_WITH_FAILURE
    rmApp.aggregateLogReport(nodeId1, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId2, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.RUNNING, ""));
    rmApp.aggregateLogReport(nodeId3, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId4, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.RUNNING_WITH_FAILURE, ""));
    Assert.assertEquals(LogAggregationStatus.RUNNING_WITH_FAILURE, rmApp.getLogAggregationStatusForAppReport());
    // For node4, the previous log aggregation status was RUNNING_WITH_FAILURE;
    // it will not be changed even if it receives a new log aggregation
    // status of RUNNING
    rmApp.aggregateLogReport(nodeId1, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId2, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.RUNNING, ""));
    rmApp.aggregateLogReport(nodeId3, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.NOT_START, ""));
    rmApp.aggregateLogReport(nodeId4, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.RUNNING, ""));
    Assert.assertEquals(LogAggregationStatus.RUNNING_WITH_FAILURE, rmApp.getLogAggregationStatusForAppReport());
    rmApp.handle(new RMAppEvent(rmApp.getApplicationId(), RMAppEventType.KILL));
    Assert.assertTrue(RMAppImpl.isAppInFinalState(rmApp));
    // If the log aggregation status is FAILED for at least one NM, the
    // others are either SUCCEEDED or TIME_OUT, and this application is
    // in a final state, the log aggregation status for this app will
    // be FAILED
    rmApp.aggregateLogReport(nodeId1, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.SUCCEEDED, ""));
    rmApp.aggregateLogReport(nodeId2, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.TIME_OUT, ""));
    rmApp.aggregateLogReport(nodeId3, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.FAILED, ""));
    rmApp.aggregateLogReport(nodeId4, LogAggregationReport.newInstance(rmApp.getApplicationId(), LogAggregationStatus.FAILED, ""));
    Assert.assertEquals(LogAggregationStatus.FAILED, rmApp.getLogAggregationStatusForAppReport());
}
Also used: RMAppImpl (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl), RMAppEvent (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), NodeId (org.apache.hadoop.yarn.api.records.NodeId), Test (org.junit.Test)
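
For readability, the per-app rules that the assertions above verify can be summarized as a hypothetical resolver. This is only a restatement of the asserted cases, not the actual RMAppImpl implementation; note also that, as the test shows, a node that once reported RUNNING_WITH_FAILURE keeps that per-node status even if it later reports RUNNING:

    // Requires java.util.Collection and
    // org.apache.hadoop.yarn.api.records.LogAggregationStatus.
    static LogAggregationStatus resolveForAppReport(
            Collection<LogAggregationStatus> perNodeStatuses, boolean appInFinalState) {
        if (perNodeStatuses.contains(LogAggregationStatus.RUNNING_WITH_FAILURE)) {
            return LogAggregationStatus.RUNNING_WITH_FAILURE;
        }
        if (appInFinalState) {
            if (perNodeStatuses.contains(LogAggregationStatus.FAILED)) {
                return LogAggregationStatus.FAILED;
            }
            if (perNodeStatuses.contains(LogAggregationStatus.TIME_OUT)) {
                return LogAggregationStatus.TIME_OUT;
            }
            if (perNodeStatuses.stream()
                    .allMatch(s -> s == LogAggregationStatus.SUCCEEDED)) {
                return LogAggregationStatus.SUCCEEDED;
            }
        }
        if (perNodeStatuses.contains(LogAggregationStatus.RUNNING)
                || perNodeStatuses.contains(LogAggregationStatus.SUCCEEDED)) {
            return LogAggregationStatus.RUNNING;
        }
        return LogAggregationStatus.NOT_START;
    }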

Example 9 with RMAppEvent

Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent in the Apache Hadoop project.

From the class TestClientRMService, method testConcurrentAppSubmit.

@Test(timeout = 4000)
public void testConcurrentAppSubmit() throws IOException, InterruptedException, BrokenBarrierException, YarnException {
    YarnScheduler yarnScheduler = mockYarnScheduler();
    RMContext rmContext = mock(RMContext.class);
    mockRMContext(yarnScheduler, rmContext);
    RMStateStore stateStore = mock(RMStateStore.class);
    when(rmContext.getStateStore()).thenReturn(stateStore);
    RMAppManager appManager = new RMAppManager(rmContext, yarnScheduler, null, mock(ApplicationACLsManager.class), new Configuration());
    final ApplicationId appId1 = getApplicationId(100);
    final ApplicationId appId2 = getApplicationId(101);
    final SubmitApplicationRequest submitRequest1 = mockSubmitAppRequest(appId1, null, null);
    final SubmitApplicationRequest submitRequest2 = mockSubmitAppRequest(appId2, null, null);
    final CyclicBarrier startBarrier = new CyclicBarrier(2);
    final CyclicBarrier endBarrier = new CyclicBarrier(2);
    EventHandler<Event> eventHandler = new EventHandler<Event>() {

        @Override
        public void handle(Event rawEvent) {
            if (rawEvent instanceof RMAppEvent) {
                RMAppEvent event = (RMAppEvent) rawEvent;
                if (event.getApplicationId().equals(appId1)) {
                    try {
                        startBarrier.await();
                        endBarrier.await();
                    } catch (BrokenBarrierException e) {
                        LOG.warn("Broken Barrier", e);
                    } catch (InterruptedException e) {
                        LOG.warn("Interrupted while awaiting barriers", e);
                    }
                }
            }
        }
    };
    when(rmContext.getDispatcher().getEventHandler()).thenReturn(eventHandler);
    doReturn(mock(RMTimelineCollectorManager.class)).when(rmContext).getRMTimelineCollectorManager();
    final ClientRMService rmService = new ClientRMService(rmContext, yarnScheduler, appManager, null, null, null);
    rmService.init(new Configuration());
    // submit an app and wait for it to block while in app submission
    Thread t = new Thread() {

        @Override
        public void run() {
            try {
                rmService.submitApplication(submitRequest1);
            } catch (YarnException | IOException e) {
                // Ignored: this test only verifies that a second submission
                // can proceed while this one is blocked.
            }
        }
    };
    t.start();
    // Submit another app; it should go through while the first app is blocked
    startBarrier.await();
    rmService.submitApplication(submitRequest2);
    endBarrier.await();
    t.join();
}
Also used: RMStateStore (org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore), BrokenBarrierException (java.util.concurrent.BrokenBarrierException), CapacitySchedulerConfiguration (org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration), Configuration (org.apache.hadoop.conf.Configuration), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), EventHandler (org.apache.hadoop.yarn.event.EventHandler), IOException (java.io.IOException), SubmitApplicationRequest (org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest), YarnException (org.apache.hadoop.yarn.exceptions.YarnException), CyclicBarrier (java.util.concurrent.CyclicBarrier), RMAppEvent (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent), ApplicationACLsManager (org.apache.hadoop.yarn.server.security.ApplicationACLsManager), RMTimelineCollectorManager (org.apache.hadoop.yarn.server.resourcemanager.timelineservice.RMTimelineCollectorManager), YarnScheduler (org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler), Event (org.apache.hadoop.yarn.event.Event), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), Test (org.junit.Test)
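
A self-contained sketch of the two-barrier handshake used above, with plain threads standing in for the RM internals (class and variable names are illustrative):

    import java.util.concurrent.CyclicBarrier;

    public class BarrierHandshakeDemo {
        public static void main(String[] args) throws Exception {
            final CyclicBarrier startBarrier = new CyclicBarrier(2);
            final CyclicBarrier endBarrier = new CyclicBarrier(2);
            Thread blockedSubmitter = new Thread(() -> {
                try {
                    // Signal that this thread is now "blocked inside submission".
                    startBarrier.await();
                    // Stay blocked until the main thread releases it.
                    endBarrier.await();
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
            });
            blockedSubmitter.start();
            // Wait until the first submission is blocked ...
            startBarrier.await();
            // ... a second, concurrent submission would run here ...
            // ... then release the first one and wait for it to finish.
            endBarrier.await();
            blockedSubmitter.join();
        }
    }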

Example 10 with RMAppEvent

Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent in the Apache Hadoop project.

From the class FairSchedulerTestBase, method createApplicationWithAMResource.

protected void createApplicationWithAMResource(ApplicationAttemptId attId, String queue, String user, Resource amResource) {
    RMContext rmContext = resourceManager.getRMContext();
    ApplicationId appId = attId.getApplicationId();
    RMApp rmApp = new RMAppImpl(appId, rmContext, conf, null, user, null, ApplicationSubmissionContext.newInstance(appId, null, queue, null, mock(ContainerLaunchContext.class), false, false, 0, amResource, null), scheduler, null, 0, null, null, null);
    rmContext.getRMApps().put(appId, rmApp);
    RMAppEvent event = new RMAppEvent(appId, RMAppEventType.START);
    resourceManager.getRMContext().getRMApps().get(appId).handle(event);
    event = new RMAppEvent(appId, RMAppEventType.APP_NEW_SAVED);
    resourceManager.getRMContext().getRMApps().get(appId).handle(event);
    event = new RMAppEvent(appId, RMAppEventType.APP_ACCEPTED);
    resourceManager.getRMContext().getRMApps().get(appId).handle(event);
    AppAddedSchedulerEvent appAddedEvent = new AppAddedSchedulerEvent(appId, queue, user);
    scheduler.handle(appAddedEvent);
    AppAttemptAddedSchedulerEvent attemptAddedEvent = new AppAttemptAddedSchedulerEvent(attId, false);
    scheduler.handle(attemptAddedEvent);
}
Also used: RMAppImpl (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl), RMAppEvent (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent), RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext), RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp), AppAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent), AppAttemptAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)
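
A hypothetical call to the helper above from a FairScheduler test; the attempt id, queue, user, and resource values are illustrative only:

    ApplicationAttemptId attId =
        ApplicationAttemptId.newInstance(ApplicationId.newInstance(0L, 1), 1);
    createApplicationWithAMResource(attId, "root.queue1", "user1",
        Resource.newInstance(1024, 1));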

Aggregations

RMAppEvent (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent): 21 usages
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 8 usages
Test (org.junit.Test): 7 usages
RMApp (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp): 6 usages
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 5 usages
RMAppImpl (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl): 5 usages
Event (org.apache.hadoop.yarn.event.Event): 4 usages
SchedulerApplication (org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication): 4 usages
Configuration (org.apache.hadoop.conf.Configuration): 3 usages
AccessControlException (org.apache.hadoop.security.AccessControlException): 3 usages
ApplicationSubmissionContext (org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext): 3 usages
YarnException (org.apache.hadoop.yarn.exceptions.YarnException): 3 usages
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 2 usages
IOException (java.io.IOException): 2 usages
Credentials (org.apache.hadoop.security.Credentials): 2 usages
SubmitApplicationRequest (org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest): 2 usages
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 2 usages
ContainerLaunchContext (org.apache.hadoop.yarn.api.records.ContainerLaunchContext): 2 usages
NodeId (org.apache.hadoop.yarn.api.records.NodeId): 2 usages
EventHandler (org.apache.hadoop.yarn.event.EventHandler): 2 usages