Use of org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl in project hadoop by apache.
From the class TestHadoopArchiveLogs, method testFilterAppsByAggregatedStatus:
@Test(timeout = 30000)
public void testFilterAppsByAggregatedStatus() throws Exception {
  try (MiniYARNCluster yarnCluster = new MiniYARNCluster(
      TestHadoopArchiveLogs.class.getSimpleName(), 1, 1, 1, 1)) {
    Configuration conf = new Configuration();
    conf.setBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, true);
    yarnCluster.init(conf);
    yarnCluster.start();
    conf = yarnCluster.getConfig();
    RMContext rmContext = yarnCluster.getResourceManager().getRMContext();
    RMAppImpl appImpl1 = (RMAppImpl) createRMApp(1, conf, rmContext,
        LogAggregationStatus.DISABLED);
    RMAppImpl appImpl2 = (RMAppImpl) createRMApp(2, conf, rmContext,
        LogAggregationStatus.FAILED);
    RMAppImpl appImpl3 = (RMAppImpl) createRMApp(3, conf, rmContext,
        LogAggregationStatus.NOT_START);
    RMAppImpl appImpl4 = (RMAppImpl) createRMApp(4, conf, rmContext,
        LogAggregationStatus.SUCCEEDED);
    RMAppImpl appImpl5 = (RMAppImpl) createRMApp(5, conf, rmContext,
        LogAggregationStatus.RUNNING);
    RMAppImpl appImpl6 = (RMAppImpl) createRMApp(6, conf, rmContext,
        LogAggregationStatus.RUNNING_WITH_FAILURE);
    RMAppImpl appImpl7 = (RMAppImpl) createRMApp(7, conf, rmContext,
        LogAggregationStatus.TIME_OUT);
    RMAppImpl appImpl8 = (RMAppImpl) createRMApp(8, conf, rmContext,
        LogAggregationStatus.SUCCEEDED);
    rmContext.getRMApps().put(appImpl1.getApplicationId(), appImpl1);
    rmContext.getRMApps().put(appImpl2.getApplicationId(), appImpl2);
    rmContext.getRMApps().put(appImpl3.getApplicationId(), appImpl3);
    rmContext.getRMApps().put(appImpl4.getApplicationId(), appImpl4);
    rmContext.getRMApps().put(appImpl5.getApplicationId(), appImpl5);
    rmContext.getRMApps().put(appImpl6.getApplicationId(), appImpl6);
    rmContext.getRMApps().put(appImpl7.getApplicationId(), appImpl7);
    // appImpl8 is not in the RM
    HadoopArchiveLogs hal = new HadoopArchiveLogs(conf);
    Assert.assertEquals(0, hal.eligibleApplications.size());
    hal.eligibleApplications.add(new HadoopArchiveLogs.AppInfo(
        appImpl1.getApplicationId().toString(), USER));
    hal.eligibleApplications.add(new HadoopArchiveLogs.AppInfo(
        appImpl2.getApplicationId().toString(), USER));
    hal.eligibleApplications.add(new HadoopArchiveLogs.AppInfo(
        appImpl3.getApplicationId().toString(), USER));
    HadoopArchiveLogs.AppInfo app4 = new HadoopArchiveLogs.AppInfo(
        appImpl4.getApplicationId().toString(), USER);
    hal.eligibleApplications.add(app4);
    hal.eligibleApplications.add(new HadoopArchiveLogs.AppInfo(
        appImpl5.getApplicationId().toString(), USER));
    hal.eligibleApplications.add(new HadoopArchiveLogs.AppInfo(
        appImpl6.getApplicationId().toString(), USER));
    HadoopArchiveLogs.AppInfo app7 = new HadoopArchiveLogs.AppInfo(
        appImpl7.getApplicationId().toString(), USER);
    hal.eligibleApplications.add(app7);
    HadoopArchiveLogs.AppInfo app8 = new HadoopArchiveLogs.AppInfo(
        appImpl8.getApplicationId().toString(), USER);
    hal.eligibleApplications.add(app8);
    Assert.assertEquals(8, hal.eligibleApplications.size());
    hal.filterAppsByAggregatedStatus();
    Assert.assertEquals(3, hal.eligibleApplications.size());
    Assert.assertTrue(hal.eligibleApplications.contains(app4));
    Assert.assertTrue(hal.eligibleApplications.contains(app7));
    Assert.assertTrue(hal.eligibleApplications.contains(app8));
  }
}
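
The assertions above pin down the behavior under test: only app 4 (SUCCEEDED), app 7 (TIME_OUT), and app 8 (unknown to the RM) stay eligible. As a rough illustration of that rule, and not the actual HadoopArchiveLogs implementation, a per-application check could look like the sketch below; the class and method names (AggregationFilterSketch, shouldKeep) are invented for this example, and reading the status via RMApp#getLogAggregationStatusForAppReport is just one way to get it.

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.LogAggregationStatus;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;

// Illustrative sketch only; not taken from HadoopArchiveLogs.
class AggregationFilterSketch {
  /**
   * Keep an application if the RM no longer tracks it, or if its log
   * aggregation has finished (SUCCEEDED or TIME_OUT), matching the three
   * survivors asserted in the test above.
   */
  static boolean shouldKeep(RMContext rmContext, ApplicationId appId) {
    RMApp app = rmContext.getRMApps().get(appId);
    if (app == null) {
      return true; // like appImpl8: the RM has no record of it
    }
    LogAggregationStatus status = app.getLogAggregationStatusForAppReport();
    return status == LogAggregationStatus.SUCCEEDED   // like appImpl4
        || status == LogAggregationStatus.TIME_OUT;   // like appImpl7
  }
}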
From the class RMAppManager, method submitApplication:
@SuppressWarnings("unchecked")
protected void submitApplication(
    ApplicationSubmissionContext submissionContext, long submitTime,
    String user) throws YarnException {
  ApplicationId applicationId = submissionContext.getApplicationId();
  // Passing start time as -1. It will be eventually set in RMAppImpl
  // constructor.
  RMAppImpl application = createAndPopulateNewRMApp(
      submissionContext, submitTime, user, false, -1);
  try {
    if (UserGroupInformation.isSecurityEnabled()) {
      this.rmContext.getDelegationTokenRenewer().addApplicationAsync(
          applicationId, BuilderUtils.parseCredentials(submissionContext),
          submissionContext.getCancelTokensWhenComplete(),
          application.getUser(),
          BuilderUtils.parseTokensConf(submissionContext));
    } else {
      // Dispatcher is not yet started at this time, so these START events
      // enqueued should be guaranteed to be first processed when dispatcher
      // gets started.
      this.rmContext.getDispatcher().getEventHandler().handle(
          new RMAppEvent(applicationId, RMAppEventType.START));
    }
  } catch (Exception e) {
    LOG.warn("Unable to parse credentials for " + applicationId, e);
    // Sending APP_REJECTED is fine, since we assume that the
    // RMApp is in NEW state and thus we haven't yet informed the
    // scheduler about the existence of the application
    this.rmContext.getDispatcher().getEventHandler().handle(
        new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED,
            e.getMessage()));
    throw RPCUtil.getRemoteException(e);
  }
}
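
For context on how a submission normally reaches this method: ClientRMService receives the client RPC and forwards the ApplicationSubmissionContext to RMAppManager#submitApplication. Below is a hedged client-side sketch (not taken from the Hadoop sources) that builds such a context with YarnClient; the class name SubmitSketch, the application name, and the placeholder AM container spec are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.client.api.YarnClient;
import org.apache.hadoop.yarn.client.api.YarnClientApplication;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

// Illustrative sketch only; not taken from the Hadoop sources.
public class SubmitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new YarnConfiguration();
    YarnClient yarnClient = YarnClient.createYarnClient();
    yarnClient.init(conf);
    yarnClient.start();
    try {
      YarnClientApplication app = yarnClient.createApplication();
      ApplicationSubmissionContext context =
          app.getApplicationSubmissionContext();
      context.setApplicationName("submit-sketch");
      // Placeholder AM spec and resource; a real application supplies the
      // AM launch command, local resources, and environment here.
      context.setAMContainerSpec(ContainerLaunchContext.newInstance(
          null, null, null, null, null, null));
      context.setResource(Resource.newInstance(512, 1));
      // This RPC is what ultimately lands in RMAppManager#submitApplication.
      yarnClient.submitApplication(context);
    } finally {
      yarnClient.stop();
    }
  }
}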