Use of org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application in project hadoop by apache.
The class TestNodeStatusUpdater, method testCompletedContainersIsRecentlyStopped:
@Test(timeout = 10000)
public void testCompletedContainersIsRecentlyStopped() throws Exception {
  NodeManager nm = new NodeManager();
  nm.init(conf);
  NodeStatusUpdaterImpl nodeStatusUpdater =
      (NodeStatusUpdaterImpl) nm.getNodeStatusUpdater();
  ApplicationId appId = ApplicationId.newInstance(0, 0);
  Application completedApp = mock(Application.class);
  when(completedApp.getApplicationState())
      .thenReturn(ApplicationState.FINISHED);
  ApplicationAttemptId appAttemptId =
      ApplicationAttemptId.newInstance(appId, 0);
  ContainerId containerId = ContainerId.newContainerId(appAttemptId, 1);
  Token containerToken = BuilderUtils.newContainerToken(containerId, 0,
      "host", 1234, "user", BuilderUtils.newResource(1024, 1), 0, 123,
      "password".getBytes(), 0);
  Container completedContainer = new ContainerImpl(conf, null, null, null,
      null, BuilderUtils.newContainerTokenIdentifier(containerToken),
      nm.getNMContext()) {
    @Override
    public ContainerState getCurrentState() {
      return ContainerState.COMPLETE;
    }
  };
  nm.getNMContext().getApplications().putIfAbsent(appId, completedApp);
  nm.getNMContext().getContainers().put(containerId, completedContainer);
  Assert.assertEquals(1, nodeStatusUpdater.getContainerStatuses().size());
  Assert.assertTrue(nodeStatusUpdater.isContainerRecentlyStopped(containerId));
}
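The assertions above rely on NodeStatusUpdaterImpl tracking completed containers in its "recently stopped" bookkeeping. As a minimal sketch of that same bookkeeping exercised directly (illustrative only, not part of the Hadoop test; it assumes the same initialized NodeManager nm as above), one could record a completed container explicitly and check the tracking flag:

// Illustrative sketch: exercise the "recently stopped" bookkeeping directly.
// Assumes an initialized NodeManager `nm` as in the test above.
ApplicationId appId = ApplicationId.newInstance(0, 1);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
ContainerId containerId = ContainerId.newContainerId(attemptId, 2);

NodeStatusUpdaterImpl updater =
    (NodeStatusUpdaterImpl) nm.getNodeStatusUpdater();
// Record the container as completed; it should then be reported as
// recently stopped until the tracking window expires.
updater.addCompletedContainer(containerId);
Assert.assertTrue(updater.isContainerRecentlyStopped(containerId));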
Use of org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application in project hadoop by apache.
The class ContainerManagerImpl, method recoverContainer:
private void recoverContainer(RecoveredContainerState rcs)
    throws IOException {
  StartContainerRequest req = rcs.getStartRequest();
  ContainerLaunchContext launchContext = req.getContainerLaunchContext();
  ContainerTokenIdentifier token =
      BuilderUtils.newContainerTokenIdentifier(req.getContainerToken());
  ContainerId containerId = token.getContainerID();
  ApplicationId appId =
      containerId.getApplicationAttemptId().getApplicationId();
  LOG.info("Recovering " + containerId + " in state " + rcs.getStatus()
      + " with exit code " + rcs.getExitCode());
  Application app = context.getApplications().get(appId);
  if (app != null) {
    recoverActiveContainer(app, launchContext, token, rcs);
    if (rcs.getRecoveryType() == RecoveredContainerType.KILL) {
      dispatcher.getEventHandler().handle(
          new ContainerKillEvent(containerId, ContainerExitStatus.ABORTED,
              "Due to invalid StateStore info container was killed"
                  + " during recovery"));
    }
  } else {
    if (rcs.getStatus() != RecoveredContainerStatus.COMPLETED) {
      LOG.warn(containerId + " has no corresponding application!");
    }
    LOG.info("Adding " + containerId + " to recently stopped containers");
    nodeStatusUpdater.addCompletedContainer(containerId);
  }
}
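For context, recoverContainer is driven from the NodeManager's state-store recovery path on restart. The sketch below is a simplified, illustrative caller only: it assumes NMStateStoreService.loadContainersState() returning RecoveredContainerState entries, and the real recovery in ContainerManagerImpl also restores applications, localized resources, and tokens.

// Simplified sketch of the caller side: replay every recovered container
// through recoverContainer(...). The actual ContainerManagerImpl recovery
// does considerably more than this.
private void recoverContainers(Context context) throws IOException {
  NMStateStoreService stateStore = context.getNMStateStore();
  if (!stateStore.canRecover()) {
    return;
  }
  for (RecoveredContainerState rcs : stateStore.loadContainersState()) {
    recoverContainer(rcs);
  }
}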
Use of org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application in project hadoop by apache.
The class ContainerManagerImpl, method cleanUpApplicationsOnNMShutDown:
public void cleanUpApplicationsOnNMShutDown() {
  Map<ApplicationId, Application> applications =
      this.context.getApplications();
  if (applications.isEmpty()) {
    return;
  }
  LOG.info("Applications still running : " + applications.keySet());
  if (this.context.getNMStateStore().canRecover()
      && !this.context.getDecommissioned()) {
    if (getConfig().getBoolean(YarnConfiguration.NM_RECOVERY_SUPERVISED,
        YarnConfiguration.DEFAULT_NM_RECOVERY_SUPERVISED)) {
      // do not cleanup apps as they can be recovered on restart
      return;
    }
  }
  List<ApplicationId> appIds =
      new ArrayList<ApplicationId>(applications.keySet());
  this.handle(new CMgrCompletedAppsEvent(appIds,
      CMgrCompletedAppsEvent.Reason.ON_SHUTDOWN));
  LOG.info("Waiting for Applications to be Finished");
  long waitStartTime = System.currentTimeMillis();
  while (!applications.isEmpty()
      && System.currentTimeMillis() - waitStartTime
          < waitForContainersOnShutdownMillis) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException ex) {
      LOG.warn("Interrupted while sleeping on applications finish on shutdown",
          ex);
    }
  }
  // All applications finished
  if (applications.isEmpty()) {
    LOG.info("All applications in FINISHED state");
  } else {
    LOG.info("Done waiting for Applications to be Finished. Still alive: "
        + applications.keySet());
  }
}
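The wait loop above is a plain poll-with-timeout: sleep in one-second steps until the application map drains or waitForContainersOnShutdownMillis elapses. A hypothetical, generalized form of the same idiom is sketched below; the name waitForEmpty is illustrative and does not exist in Hadoop.

// Hypothetical utility illustrating the poll-with-timeout idiom used above:
// poll the map once a second until it drains or the deadline passes.
// Returns whether the map was empty when the wait ended.
private static <K, V> boolean waitForEmpty(Map<K, V> map, long timeoutMillis) {
  long start = System.currentTimeMillis();
  while (!map.isEmpty()
      && System.currentTimeMillis() - start < timeoutMillis) {
    try {
      Thread.sleep(1000);
    } catch (InterruptedException ex) {
      Thread.currentThread().interrupt();
      break;
    }
  }
  return map.isEmpty();
}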
Use of org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application in project hadoop by apache.
The class ContainersLauncher, method handle:
@Override
public void handle(ContainersLauncherEvent event) {
  // TODO: ContainersLauncher launches containers one by one!!
  Container container = event.getContainer();
  ContainerId containerId = container.getContainerId();
  switch (event.getType()) {
    case LAUNCH_CONTAINER:
      Application app = context.getApplications().get(
          containerId.getApplicationAttemptId().getApplicationId());
      ContainerLaunch launch = new ContainerLaunch(context, getConfig(),
          dispatcher, exec, app, event.getContainer(), dirsHandler,
          containerManager);
      containerLauncher.submit(launch);
      running.put(containerId, launch);
      break;
    case RELAUNCH_CONTAINER:
      app = context.getApplications().get(
          containerId.getApplicationAttemptId().getApplicationId());
      ContainerRelaunch relaunch = new ContainerRelaunch(context, getConfig(),
          dispatcher, exec, app, event.getContainer(), dirsHandler,
          containerManager);
      containerLauncher.submit(relaunch);
      running.put(containerId, relaunch);
      break;
    case RECOVER_CONTAINER:
      app = context.getApplications().get(
          containerId.getApplicationAttemptId().getApplicationId());
      launch = new RecoveredContainerLaunch(context, getConfig(), dispatcher,
          exec, app, event.getContainer(), dirsHandler, containerManager);
      containerLauncher.submit(launch);
      running.put(containerId, launch);
      break;
    case CLEANUP_CONTAINER:
    case CLEANUP_CONTAINER_FOR_REINIT:
      ContainerLaunch launcher = running.remove(containerId);
      if (launcher == null) {
        // Container not launched. So nothing needs to be done.
        return;
      }
      // no sub-processes are alive.
      try {
        launcher.cleanupContainer();
      } catch (IOException e) {
        LOG.warn("Got exception while cleaning container " + containerId
            + ". Ignoring.");
      }
      break;
    case SIGNAL_CONTAINER:
      SignalContainersLauncherEvent signalEvent =
          (SignalContainersLauncherEvent) event;
      ContainerLaunch runningContainer = running.get(containerId);
      if (runningContainer == null) {
        // Container not launched. So nothing needs to be done.
        LOG.info("Container " + containerId
            + " not running, nothing to signal.");
        return;
      }
      try {
        runningContainer.signalContainer(signalEvent.getCommand());
      } catch (IOException e) {
        LOG.warn("Got exception while signaling container " + containerId
            + " with command " + signalEvent.getCommand());
      }
      break;
  }
}
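This handle method is normally not called directly; events arrive through the NodeManager's dispatcher. The sketch below is illustrative only and assumes the two-argument ContainersLauncherEvent(Container, ContainersLauncherEventType) constructor used elsewhere in the NM code.

// Illustrative sketch: queueing launcher events through the dispatcher,
// assuming `dispatcher` and `container` are available as in the NM code.
dispatcher.getEventHandler().handle(
    new ContainersLauncherEvent(container,
        ContainersLauncherEventType.LAUNCH_CONTAINER));

// Cleanup for the same container is requested the same way once it is done.
dispatcher.getEventHandler().handle(
    new ContainersLauncherEvent(container,
        ContainersLauncherEventType.CLEANUP_CONTAINER));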
Use of org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application in project hadoop by apache.
The class NMWebAppFilter, method containerLogPageRedirectPath:
private String containerLogPageRedirectPath(HttpServletRequest request) {
  String uri = HtmlQuoting.quoteHtmlChars(request.getRequestURI());
  String redirectPath = null;
  if (!uri.contains("/ws/v1/node") && uri.contains("/containerlogs")) {
    String[] parts = uri.split("/");
    String containerIdStr = parts[3];
    String appOwner = parts[4];
    String logType = null;
    if (parts.length > 5) {
      logType = parts[5];
    }
    if (containerIdStr != null && !containerIdStr.isEmpty()) {
      ContainerId containerId = null;
      try {
        containerId = ContainerId.fromString(containerIdStr);
      } catch (IllegalArgumentException ex) {
        return redirectPath;
      }
      ApplicationId appId =
          containerId.getApplicationAttemptId().getApplicationId();
      Application app = nmContext.getApplications().get(appId);
      Configuration nmConf = nmContext.getLocalDirsHandler().getConfig();
      if (app == null
          && nmConf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED,
              YarnConfiguration.DEFAULT_LOG_AGGREGATION_ENABLED)) {
        String logServerUrl = nmConf.get(YarnConfiguration.YARN_LOG_SERVER_URL);
        if (logServerUrl != null && !logServerUrl.isEmpty()) {
          StringBuilder sb = new StringBuilder();
          sb.append(logServerUrl);
          sb.append("/");
          sb.append(nmContext.getNodeId().toString());
          sb.append("/");
          sb.append(containerIdStr);
          sb.append("/");
          sb.append(containerIdStr);
          sb.append("/");
          sb.append(appOwner);
          if (logType != null && !logType.isEmpty()) {
            sb.append("/");
            sb.append(logType);
          }
          redirectPath = WebAppUtils.appendQueryParams(request, sb.toString());
        } else {
          injector.getInstance(RequestContext.class)
              .set(ContainerLogsPage.REDIRECT_URL, "false");
        }
      }
    }
  }
  return redirectPath;
}
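The StringBuilder above always produces a redirect of the shape <log-server-url>/<node-id>/<container-id>/<container-id>/<app-owner>[/<log-type>]. The sketch below only illustrates that concatenation with made-up example values; the host names, IDs, and owner are not from the Hadoop source.

// Illustrative only: example values showing the redirect URL shape built above.
String logServerUrl = "http://historyserver:19888/jobhistory/logs"; // example
String nodeId = "worker-1:45454";                                   // example
String containerIdStr = "container_1500000000000_0001_01_000002";   // example
String appOwner = "alice";                                           // example
String logType = "stderr";                                           // example
String redirect = logServerUrl + "/" + nodeId + "/" + containerIdStr
    + "/" + containerIdStr + "/" + appOwner + "/" + logType;
// -> http://historyserver:19888/jobhistory/logs/worker-1:45454/
//    container_1500000000000_0001_01_000002/container_1500000000000_0001_01_000002/alice/stderr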