
Example 21 with RMAppImpl

Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl in the Apache Hadoop project.

From the class TestHadoopArchiveLogs, method testFilterAppsByAggregatedStatus. The test starts a MiniYARNCluster, registers applications with every log aggregation status in the ResourceManager's application table (deliberately leaving one out), and verifies that HadoopArchiveLogs.filterAppsByAggregatedStatus keeps only the applications whose aggregation has finished (SUCCEEDED or TIME_OUT) or that the RM no longer knows about.

@Test(timeout = 30000)
public void testFilterAppsByAggregatedStatus() throws Exception {
    try (MiniYARNCluster yarnCluster = new MiniYARNCluster(TestHadoopArchiveLogs.class.getSimpleName(), 1, 1, 1, 1)) {
        Configuration conf = new Configuration();
        conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
        yarnCluster.init(conf);
        yarnCluster.start();
        conf = yarnCluster.getConfig();
        RMContext rmContext = yarnCluster.getResourceManager().getRMContext();
        // Create one application per LogAggregationStatus; appImpl8 is also SUCCEEDED
        // but will deliberately be left out of the RM's application table below.
        RMAppImpl appImpl1 = (RMAppImpl) createRMApp(1, conf, rmContext, LogAggregationStatus.DISABLED);
        RMAppImpl appImpl2 = (RMAppImpl) createRMApp(2, conf, rmContext, LogAggregationStatus.FAILED);
        RMAppImpl appImpl3 = (RMAppImpl) createRMApp(3, conf, rmContext, LogAggregationStatus.NOT_START);
        RMAppImpl appImpl4 = (RMAppImpl) createRMApp(4, conf, rmContext, LogAggregationStatus.SUCCEEDED);
        RMAppImpl appImpl5 = (RMAppImpl) createRMApp(5, conf, rmContext, LogAggregationStatus.RUNNING);
        RMAppImpl appImpl6 = (RMAppImpl) createRMApp(6, conf, rmContext, LogAggregationStatus.RUNNING_WITH_FAILURE);
        RMAppImpl appImpl7 = (RMAppImpl) createRMApp(7, conf, rmContext, LogAggregationStatus.TIME_OUT);
        RMAppImpl appImpl8 = (RMAppImpl) createRMApp(8, conf, rmContext, LogAggregationStatus.SUCCEEDED);
        rmContext.getRMApps().put(appImpl1.getApplicationId(), appImpl1);
        rmContext.getRMApps().put(appImpl2.getApplicationId(), appImpl2);
        rmContext.getRMApps().put(appImpl3.getApplicationId(), appImpl3);
        rmContext.getRMApps().put(appImpl4.getApplicationId(), appImpl4);
        rmContext.getRMApps().put(appImpl5.getApplicationId(), appImpl5);
        rmContext.getRMApps().put(appImpl6.getApplicationId(), appImpl6);
        rmContext.getRMApps().put(appImpl7.getApplicationId(), appImpl7);
        // appImpl8 is not in the RM
        HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
        Assert.assertEquals(0, hal.eligibleApplications.size());
        hal.eligibleApplications.add(new HadoopArchiveLogs.AppInfo(appImpl1.getApplicationId().toString(), USER));
        hal.eligibleApplications.add(new HadoopArchiveLogs.AppInfo(appImpl2.getApplicationId().toString(), USER));
        hal.eligibleApplications.add(new HadoopArchiveLogs.AppInfo(appImpl3.getApplicationId().toString(), USER));
        HadoopArchiveLogs.AppInfo app4 = new HadoopArchiveLogs.AppInfo(appImpl4.getApplicationId().toString(), USER);
        hal.eligibleApplications.add(app4);
        hal.eligibleApplications.add(new HadoopArchiveLogs.AppInfo(appImpl5.getApplicationId().toString(), USER));
        hal.eligibleApplications.add(new HadoopArchiveLogs.AppInfo(appImpl6.getApplicationId().toString(), USER));
        HadoopArchiveLogs.AppInfo app7 = new HadoopArchiveLogs.AppInfo(appImpl7.getApplicationId().toString(), USER);
        hal.eligibleApplications.add(app7);
        HadoopArchiveLogs.AppInfo app8 = new HadoopArchiveLogs.AppInfo(appImpl8.getApplicationId().toString(), USER);
        hal.eligibleApplications.add(app8);
        Assert.assertEquals(8, hal.eligibleApplications.size());
        hal.filterAppsByAggregatedStatus();
        // Only apps whose aggregation finished (SUCCEEDED app4, TIME_OUT app7) or
        // that the RM does not know about (app8) should remain eligible.
        Assert.assertEquals(3, hal.eligibleApplications.size());
        Assert.assertTrue(hal.eligibleApplications.contains(app4));
        Assert.assertTrue(hal.eligibleApplications.contains(app7));
        Assert.assertTrue(hal.eligibleApplications.contains(app8));
    }
}
Also used: RMAppImpl (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl), RMContext (org.apache.hadoop.yarn.server.resourcemanager.RMContext), YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration), Configuration (org.apache.hadoop.conf.Configuration), MiniYARNCluster (org.apache.hadoop.yarn.server.MiniYARNCluster), Test (org.junit.Test)
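
To make the expected behaviour easier to see in isolation, here is a minimal, self-contained sketch of the same filtering rule using simplified stand-in types (AggStatus, filterByAggregatedStatus, and the string app IDs are hypothetical, not the actual HadoopArchiveLogs API): an app stays eligible only if the RM reports its aggregation as SUCCEEDED or TIME_OUT, or if the RM no longer knows about it at all.

import java.util.*;

// Illustrative stand-in for the behaviour the test verifies, not Hadoop code.
public class FilterSketch {

    enum AggStatus { DISABLED, FAILED, NOT_START, RUNNING, RUNNING_WITH_FAILURE, SUCCEEDED, TIME_OUT }

    static void filterByAggregatedStatus(Set<String> eligibleAppIds, Map<String, AggStatus> rmApps) {
        eligibleAppIds.removeIf(appId -> {
            AggStatus status = rmApps.get(appId);
            // Unknown to the RM: keep the app, its status cannot be verified.
            if (status == null) {
                return false;
            }
            // Keep only apps whose aggregation has completed one way or another.
            return status != AggStatus.SUCCEEDED && status != AggStatus.TIME_OUT;
        });
    }

    public static void main(String[] args) {
        Map<String, AggStatus> rmApps = new HashMap<>();
        rmApps.put("app_1", AggStatus.DISABLED);
        rmApps.put("app_4", AggStatus.SUCCEEDED);
        rmApps.put("app_7", AggStatus.TIME_OUT);

        Set<String> eligible = new HashSet<>(Arrays.asList("app_1", "app_4", "app_7", "app_8"));
        filterByAggregatedStatus(eligible, rmApps);
        // Prints app_4, app_7 and app_8 (in some order): app_1 is dropped because its
        // aggregation is DISABLED, app_8 is kept because the RM does not know it.
        System.out.println(eligible);
    }
}

This mirrors the assertions in the test above: starting from eight eligible applications, only the SUCCEEDED, TIME_OUT, and unknown-to-RM entries survive the filter.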

Example 22 with RMAppImpl

Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl in the Apache Hadoop project.

From the class RMAppManager, method submitApplication. After creating and populating the RMAppImpl, the method either hands the application's credentials to the delegation token renewer (when security is enabled) or enqueues a START event for the dispatcher; if credential parsing fails, the application is rejected while still in the NEW state and the exception is rethrown to the client.

@SuppressWarnings("unchecked")
protected void submitApplication(ApplicationSubmissionContext submissionContext, long submitTime, String user) throws YarnException {
    ApplicationId applicationId = submissionContext.getApplicationId();
    // Passing start time as -1. It will be eventually set in RMAppImpl
    // constructor.
    RMAppImpl application = createAndPopulateNewRMApp(submissionContext, submitTime, user, false, -1);
    try {
        if (UserGroupInformation.isSecurityEnabled()) {
            this.rmContext.getDelegationTokenRenewer().addApplicationAsync(applicationId, BuilderUtils.parseCredentials(submissionContext), submissionContext.getCancelTokensWhenComplete(), application.getUser(), BuilderUtils.parseTokensConf(submissionContext));
        } else {
            // Dispatcher is not yet started at this time, so these START events
            // enqueued should be guaranteed to be first processed when dispatcher
            // gets started.
            this.rmContext.getDispatcher().getEventHandler().handle(new RMAppEvent(applicationId, RMAppEventType.START));
        }
    } catch (Exception e) {
        LOG.warn("Unable to parse credentials for " + applicationId, e);
        // Sending APP_REJECTED is fine, since we assume that the
        // RMApp is in NEW state and thus we haven't yet informed the
        // scheduler about the existence of the application
        this.rmContext.getDispatcher().getEventHandler().handle(new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED, e.getMessage()));
        throw RPCUtil.getRemoteException(e);
    }
}
Also used: RMAppImpl (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl), RMAppEvent (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent), ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId), InvalidResourceRequestException (org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException), YarnException (org.apache.hadoop.yarn.exceptions.YarnException), InvalidLabelResourceRequestException (org.apache.hadoop.yarn.exceptions.InvalidLabelResourceRequestException), AccessControlException (org.apache.hadoop.security.AccessControlException)
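
The control flow above boils down to: create the RMAppImpl, then either register its credentials for asynchronous renewal (secure mode) or enqueue a START event for the not-yet-started dispatcher, and reject the application if credential parsing fails. The following is a minimal, self-contained sketch of that enqueue-or-reject pattern under simplified assumptions; AppEvent, AppEventType, and submit are hypothetical stand-ins, not YARN's Dispatcher or RMAppEvent API.

import java.util.ArrayDeque;
import java.util.Queue;

// Illustrative stand-ins only; not YARN's event or dispatcher types.
public class SubmitEventSketch {

    enum AppEventType { START, APP_REJECTED }

    static class AppEvent {
        final String appId;
        final AppEventType type;
        final String diagnostics;

        AppEvent(String appId, AppEventType type, String diagnostics) {
            this.appId = appId;
            this.type = type;
            this.diagnostics = diagnostics;
        }

        @Override
        public String toString() {
            return appId + " -> " + type + (diagnostics == null ? "" : " (" + diagnostics + ")");
        }
    }

    // Events are only enqueued here; a real dispatcher would drain the queue on its
    // own thread once started, so submission never blocks on state transitions.
    static final Queue<AppEvent> eventQueue = new ArrayDeque<>();

    static void submit(String appId, boolean credentialsParseable) throws Exception {
        try {
            if (!credentialsParseable) {
                throw new IllegalArgumentException("could not parse credentials");
            }
            eventQueue.add(new AppEvent(appId, AppEventType.START, null));
        } catch (Exception e) {
            // Rejecting is safe here: the app is still in its initial state, so the
            // scheduler has not been told about it yet.
            eventQueue.add(new AppEvent(appId, AppEventType.APP_REJECTED, e.getMessage()));
            throw e;
        }
    }

    public static void main(String[] args) {
        try { submit("application_1", true); } catch (Exception ignored) { }
        try { submit("application_2", false); } catch (Exception ignored) { }
        eventQueue.forEach(System.out::println);
    }
}

Because the queue is only drained after the dispatcher starts, START events enqueued during submission are processed first once it does, which is the property the comment in the real method relies on.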

Aggregations

RMAppImpl (org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl): 22 usages
ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId): 15 usages
ApplicationSubmissionContext (org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext): 12 usages
Test (org.junit.Test): 12 usages
YarnConfiguration (org.apache.hadoop.yarn.conf.YarnConfiguration): 11 usages
ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId): 10 usages
RMAppAttemptImpl (org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptImpl): 10 usages
Container (org.apache.hadoop.yarn.api.records.Container): 9 usages
MockRM (org.apache.hadoop.yarn.server.resourcemanager.MockRM): 9 usages
RMAppAttemptMetrics (org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptMetrics): 9 usages
AppAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent): 9 usages
AppAttemptAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent): 9 usages
RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer): 8 usages
AppAttemptRemovedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent): 8 usages
NodeAddedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent): 8 usages
SchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent): 8 usages
ContainerExpiredSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.ContainerExpiredSchedulerEvent): 7 usages
NodeRemovedSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent): 7 usages
NodeUpdateSchedulerEvent (org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent): 7 usages
Configuration (org.apache.hadoop.conf.Configuration): 6 usages