Use of org.apache.hadoop.yarn.api.records.LogAggregationContext in project hadoop by apache.
In the class ContainerManagerImpl, the method startContainerInternal:
@SuppressWarnings("unchecked")
protected void startContainerInternal(ContainerTokenIdentifier containerTokenIdentifier, StartContainerRequest request) throws YarnException, IOException {
ContainerId containerId = containerTokenIdentifier.getContainerID();
String containerIdStr = containerId.toString();
String user = containerTokenIdentifier.getApplicationSubmitter();
LOG.info("Start request for " + containerIdStr + " by user " + user);
ContainerLaunchContext launchContext = request.getContainerLaunchContext();
Credentials credentials = YarnServerSecurityUtils.parseCredentials(launchContext);
Container container = new ContainerImpl(getConfig(), this.dispatcher, launchContext, credentials, metrics, containerTokenIdentifier, context);
ApplicationId applicationID = containerId.getApplicationAttemptId().getApplicationId();
if (context.getContainers().putIfAbsent(containerId, container) != null) {
NMAuditLogger.logFailure(user, AuditConstants.START_CONTAINER, "ContainerManagerImpl", "Container already running on this node!", applicationID, containerId);
throw RPCUtil.getRemoteException("Container " + containerIdStr + " already is running on this node!!");
}
this.readLock.lock();
try {
if (!isServiceStopped()) {
// Create the application
// populate the flow context from the launch context if the timeline
// service v.2 is enabled
FlowContext flowContext = null;
if (YarnConfiguration.timelineServiceV2Enabled(getConfig())) {
String flowName = launchContext.getEnvironment().get(TimelineUtils.FLOW_NAME_TAG_PREFIX);
String flowVersion = launchContext.getEnvironment().get(TimelineUtils.FLOW_VERSION_TAG_PREFIX);
String flowRunIdStr = launchContext.getEnvironment().get(TimelineUtils.FLOW_RUN_ID_TAG_PREFIX);
long flowRunId = 0L;
if (flowRunIdStr != null && !flowRunIdStr.isEmpty()) {
flowRunId = Long.parseLong(flowRunIdStr);
}
flowContext = new FlowContext(flowName, flowVersion, flowRunId);
}
if (!context.getApplications().containsKey(applicationID)) {
Application application = new ApplicationImpl(dispatcher, user, flowContext, applicationID, credentials, context);
if (context.getApplications().putIfAbsent(applicationID, application) == null) {
LOG.info("Creating a new application reference for app " + applicationID);
LogAggregationContext logAggregationContext = containerTokenIdentifier.getLogAggregationContext();
Map<ApplicationAccessType, String> appAcls = container.getLaunchContext().getApplicationACLs();
context.getNMStateStore().storeApplication(applicationID, buildAppProto(applicationID, user, credentials, appAcls, logAggregationContext));
dispatcher.getEventHandler().handle(new ApplicationInitEvent(applicationID, appAcls, logAggregationContext));
}
}
this.context.getNMStateStore().storeContainer(containerId, containerTokenIdentifier.getVersion(), request);
dispatcher.getEventHandler().handle(new ApplicationContainerInitEvent(container));
this.context.getContainerTokenSecretManager().startContainerSuccessful(containerTokenIdentifier);
NMAuditLogger.logSuccess(user, AuditConstants.START_CONTAINER, "ContainerManageImpl", applicationID, containerId);
// TODO launchedContainer misplaced -> doesn't necessarily mean a container
// launch. A finished Application will not launch containers.
metrics.launchedContainer();
metrics.allocateContainer(containerTokenIdentifier.getResource());
} else {
throw new YarnException("Container start failed as the NodeManager is " + "in the process of shutting down");
}
} finally {
this.readLock.unlock();
}
}
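The LogAggregationContext read here from the container token originates on the client side of the submission path. Below is a minimal sketch, assuming the standard YARN submission API (Records.newRecord together with the setLogAggregationContext setter on ApplicationSubmissionContext), of how a client might attach include/exclude patterns; the helper class and pattern values are illustrative, not taken from the Hadoop sources on this page.

import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.LogAggregationContext;
import org.apache.hadoop.yarn.util.Records;

// Hypothetical helper: builds a LogAggregationContext and attaches it to the
// ApplicationSubmissionContext before the application is submitted to the RM.
public class LogAggregationContextSketch {
  public static void attachLogAggregation(ApplicationSubmissionContext submission) {
    LogAggregationContext logCtx = Records.newRecord(LogAggregationContext.class);
    // Aggregate only stdout and syslog from each container's log directory.
    logCtx.setIncludePattern("stdout|syslog");
    // Never aggregate temporary files (illustrative pattern).
    logCtx.setExcludePattern(".*\\.tmp");
    submission.setLogAggregationContext(logCtx);
  }
}

From there the record travels with the container token, which is why startContainerInternal above can read it back through containerTokenIdentifier.getLogAggregationContext().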
Use of org.apache.hadoop.yarn.api.records.LogAggregationContext in project hadoop by apache.
In the class TestLogAggregationService, the method testVerifyAndCreateRemoteDirsFailure:
@Test
public void testVerifyAndCreateRemoteDirsFailure() throws Exception {
  this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
  this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, this.remoteRootLogDir.getAbsolutePath());
  LogAggregationService logAggregationService = spy(new LogAggregationService(dispatcher, this.context, this.delSrvc, super.dirsHandler));
  logAggregationService.init(this.conf);
  YarnRuntimeException e = new YarnRuntimeException("KABOOM!");
  doThrow(e).when(logAggregationService).verifyAndCreateRemoteLogDir(any(Configuration.class));
  logAggregationService.start();
  // Now try to start an application
  ApplicationId appId = BuilderUtils.newApplicationId(System.currentTimeMillis(), (int) (Math.random() * 1000));
  LogAggregationContext contextWithAMAndFailed = Records.newRecord(LogAggregationContext.class);
  contextWithAMAndFailed.setLogAggregationPolicyClassName(AMOrFailedContainerLogAggregationPolicy.class.getName());
  logAggregationService.handle(new LogHandlerAppStartedEvent(appId, this.user, null, this.acls, contextWithAMAndFailed));
  dispatcher.await();
  // Verify that it failed
  ApplicationEvent[] expectedEvents = new ApplicationEvent[] { new ApplicationEvent(appId, ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED) };
  checkEvents(appEventHandler, expectedEvents, false, "getType", "getApplicationID", "getDiagnostic");
  Mockito.reset(logAggregationService);
  // Now try to start another one
  ApplicationId appId2 = BuilderUtils.newApplicationId(System.currentTimeMillis(), (int) (Math.random() * 1000));
  File appLogDir = new File(localLogDir, appId2.toString());
  appLogDir.mkdir();
  logAggregationService.handle(new LogHandlerAppStartedEvent(appId2, this.user, null, this.acls, contextWithAMAndFailed));
  dispatcher.await();
  // Verify that it worked
  expectedEvents = new ApplicationEvent[] {
      // original failure
      new ApplicationEvent(appId, ApplicationEventType.APPLICATION_LOG_HANDLING_FAILED),
      // success
      new ApplicationEvent(appId2, ApplicationEventType.APPLICATION_LOG_HANDLING_INITED) };
  checkEvents(appEventHandler, expectedEvents, false, "getType", "getApplicationID", "getDiagnostic");
  logAggregationService.stop();
}
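Note that the only policy information carried by the LogHandlerAppStartedEvent is the class name stored in the LogAggregationContext; the NodeManager instantiates the named policy class when aggregation starts. A minimal sketch of that pattern, using only the setters already exercised by these tests (the helper class itself is hypothetical):

import org.apache.hadoop.yarn.api.records.LogAggregationContext;
import org.apache.hadoop.yarn.util.Records;

// Hypothetical helper mirroring what the tests on this page do inline:
// the per-application policy is selected purely by its fully qualified
// class name stored in the context.
public class PolicyContextSketch {
  public static LogAggregationContext contextFor(String policyClassName) {
    LogAggregationContext ctx = Records.newRecord(LogAggregationContext.class);
    ctx.setLogAggregationPolicyClassName(policyClassName);
    return ctx;
  }
}

For example, contextFor(AMOrFailedContainerLogAggregationPolicy.class.getName()) would reproduce the context built at the top of the test above.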
Use of org.apache.hadoop.yarn.api.records.LogAggregationContext in project hadoop by apache.
In the class TestLogAggregationService, the method testStopAfterError:
@Test(timeout = 20000)
public void testStopAfterError() throws Exception {
  DeletionService delSrvc = mock(DeletionService.class);
  // get the AppLogAggregationImpl thread to crash
  LocalDirsHandlerService mockedDirSvc = mock(LocalDirsHandlerService.class);
  when(mockedDirSvc.getLogDirs()).thenThrow(new RuntimeException());
  LogAggregationService logAggregationService = new LogAggregationService(dispatcher, this.context, delSrvc, mockedDirSvc);
  logAggregationService.init(this.conf);
  logAggregationService.start();
  ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
  LogAggregationContext contextWithAllContainers = Records.newRecord(LogAggregationContext.class);
  contextWithAllContainers.setLogAggregationPolicyClassName(AllContainerLogAggregationPolicy.class.getName());
  logAggregationService.handle(new LogHandlerAppStartedEvent(application1, this.user, null, this.acls, contextWithAllContainers));
  logAggregationService.stop();
  assertEquals(0, logAggregationService.getNumAggregators());
  logAggregationService.close();
}
Use of org.apache.hadoop.yarn.api.records.LogAggregationContext in project hadoop by apache.
In the class TestLogAggregationService, the method testLogAggregationServiceWithPatterns:
@Test(timeout = 50000)
@SuppressWarnings("unchecked")
public void testLogAggregationServiceWithPatterns() throws Exception {
  LogAggregationContext logAggregationContextWithIncludePatterns = Records.newRecord(LogAggregationContext.class);
  String includePattern = "stdout|syslog";
  logAggregationContextWithIncludePatterns.setIncludePattern(includePattern);
  LogAggregationContext LogAggregationContextWithExcludePatterns = Records.newRecord(LogAggregationContext.class);
  String excludePattern = "stdout|syslog";
  LogAggregationContextWithExcludePatterns.setExcludePattern(excludePattern);
  this.conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath());
  this.conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, this.remoteRootLogDir.getAbsolutePath());
  ApplicationId application1 = BuilderUtils.newApplicationId(1234, 1);
  ApplicationId application2 = BuilderUtils.newApplicationId(1234, 2);
  ApplicationId application3 = BuilderUtils.newApplicationId(1234, 3);
  ApplicationId application4 = BuilderUtils.newApplicationId(1234, 4);
  Application mockApp = mock(Application.class);
  when(mockApp.getContainers()).thenReturn(new HashMap<ContainerId, Container>());
  this.context.getApplications().put(application1, mockApp);
  this.context.getApplications().put(application2, mockApp);
  this.context.getApplications().put(application3, mockApp);
  this.context.getApplications().put(application4, mockApp);
  LogAggregationService logAggregationService = new LogAggregationService(dispatcher, this.context, this.delSrvc, super.dirsHandler);
  logAggregationService.init(this.conf);
  logAggregationService.start();
  // The LogAggregationContext for application1 has an includePattern that
  // matches stdout and syslog. After log aggregation finishes, we expect the
  // aggregated logs for application1 to contain only stdout and syslog.
  // The app log dir should be created.
  File appLogDir1 = new File(localLogDir, application1.toString());
  appLogDir1.mkdir();
  logAggregationService.handle(new LogHandlerAppStartedEvent(application1, this.user, null, this.acls, logAggregationContextWithIncludePatterns));
  ApplicationAttemptId appAttemptId1 = BuilderUtils.newApplicationAttemptId(application1, 1);
  ContainerId container1 = createContainer(appAttemptId1, 1, ContainerType.APPLICATION_MASTER);
  // Simulate log-file creation
  writeContainerLogs(appLogDir1, container1, new String[] { "stdout", "stderr", "syslog" });
  logAggregationService.handle(new LogHandlerContainerFinishedEvent(container1, 0));
  // The LogAggregationContext for application2 has an excludePattern that
  // matches stdout and syslog. After log aggregation finishes, we expect the
  // aggregated logs for application2 to contain only stderr.
  ApplicationAttemptId appAttemptId2 = BuilderUtils.newApplicationAttemptId(application2, 1);
  File app2LogDir = new File(localLogDir, application2.toString());
  app2LogDir.mkdir();
  LogAggregationContextWithExcludePatterns.setLogAggregationPolicyClassName(AMOnlyLogAggregationPolicy.class.getName());
  logAggregationService.handle(new LogHandlerAppStartedEvent(application2, this.user, null, this.acls, LogAggregationContextWithExcludePatterns));
  ContainerId container2 = createContainer(appAttemptId2, 1, ContainerType.APPLICATION_MASTER);
  writeContainerLogs(app2LogDir, container2, new String[] { "stdout", "stderr", "syslog" });
  logAggregationService.handle(new LogHandlerContainerFinishedEvent(container2, 0));
  // The LogAggregationContext for application3 has an includePattern of .*.log
  // and an excludePattern that matches sys.log and std.log. After log
  // aggregation finishes, we expect the logs for application3 to contain all
  // files ending in .log except sys.log and std.log.
  LogAggregationContext context1 = Records.newRecord(LogAggregationContext.class);
  context1.setIncludePattern(".*.log");
  context1.setExcludePattern("sys.log|std.log");
  ApplicationAttemptId appAttemptId3 = BuilderUtils.newApplicationAttemptId(application3, 1);
  File app3LogDir = new File(localLogDir, application3.toString());
  app3LogDir.mkdir();
  context1.setLogAggregationPolicyClassName(AMOnlyLogAggregationPolicy.class.getName());
  logAggregationService.handle(new LogHandlerAppStartedEvent(application3, this.user, null, this.acls, context1));
  ContainerId container3 = createContainer(appAttemptId3, 1, ContainerType.APPLICATION_MASTER);
  writeContainerLogs(app3LogDir, container3, new String[] { "stdout", "sys.log", "std.log", "out.log", "err.log", "log" });
  logAggregationService.handle(new LogHandlerContainerFinishedEvent(container3, 0));
  // The LogAggregationContext for application4 has an includePattern that
  // matches std.log and sys.log and an excludePattern that matches std.log.
  // After log aggregation finishes, we expect the logs for application4 to
  // contain only sys.log.
  LogAggregationContext context2 = Records.newRecord(LogAggregationContext.class);
  context2.setIncludePattern("sys.log|std.log");
  context2.setExcludePattern("std.log");
  ApplicationAttemptId appAttemptId4 = BuilderUtils.newApplicationAttemptId(application4, 1);
  File app4LogDir = new File(localLogDir, application4.toString());
  app4LogDir.mkdir();
  context2.setLogAggregationPolicyClassName(AMOnlyLogAggregationPolicy.class.getName());
  logAggregationService.handle(new LogHandlerAppStartedEvent(application4, this.user, null, this.acls, context2));
  ContainerId container4 = createContainer(appAttemptId4, 1, ContainerType.APPLICATION_MASTER);
  writeContainerLogs(app4LogDir, container4, new String[] { "stdout", "sys.log", "std.log", "out.log", "err.log", "log" });
  logAggregationService.handle(new LogHandlerContainerFinishedEvent(container4, 0));
  dispatcher.await();
  ApplicationEvent[] expectedInitEvents = new ApplicationEvent[] {
      new ApplicationEvent(application1, ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
      new ApplicationEvent(application2, ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
      new ApplicationEvent(application3, ApplicationEventType.APPLICATION_LOG_HANDLING_INITED),
      new ApplicationEvent(application4, ApplicationEventType.APPLICATION_LOG_HANDLING_INITED) };
  checkEvents(appEventHandler, expectedInitEvents, false, "getType", "getApplicationID");
  reset(appEventHandler);
  logAggregationService.handle(new LogHandlerAppFinishedEvent(application1));
  logAggregationService.handle(new LogHandlerAppFinishedEvent(application2));
  logAggregationService.handle(new LogHandlerAppFinishedEvent(application3));
  logAggregationService.handle(new LogHandlerAppFinishedEvent(application4));
  logAggregationService.stop();
  assertEquals(0, logAggregationService.getNumAggregators());
  String[] logFiles = new String[] { "stdout", "syslog" };
  verifyContainerLogs(logAggregationService, application1, new ContainerId[] { container1 }, logFiles, 2, false);
  logFiles = new String[] { "stderr" };
  verifyContainerLogs(logAggregationService, application2, new ContainerId[] { container2 }, logFiles, 1, false);
  logFiles = new String[] { "out.log", "err.log" };
  verifyContainerLogs(logAggregationService, application3, new ContainerId[] { container3 }, logFiles, 2, false);
  logFiles = new String[] { "sys.log" };
  verifyContainerLogs(logAggregationService, application4, new ContainerId[] { container4 }, logFiles, 1, false);
  dispatcher.await();
  ApplicationEvent[] expectedFinishedEvents = new ApplicationEvent[] {
      new ApplicationEvent(application1, ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED),
      new ApplicationEvent(application2, ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED),
      new ApplicationEvent(application3, ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED),
      new ApplicationEvent(application4, ApplicationEventType.APPLICATION_LOG_HANDLING_FINISHED) };
  checkEvents(appEventHandler, expectedFinishedEvents, false, "getType", "getApplicationID");
}
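The expectations asserted above follow a simple rule: a container log file is uploaded when its name matches the include pattern and does not match the exclude pattern. The standalone snippet below only illustrates that regex logic against the file names written for application3; the real filtering happens inside the NodeManager's log aggregator, and the shouldAggregate helper (plus its use of full-name matching) is an assumption made for the illustration. With include ".*.log" and exclude "sys.log|std.log" it keeps exactly out.log and err.log, matching the verifyContainerLogs call for application3.

import java.util.regex.Pattern;

// Illustration only: approximates include/exclude filtering with full-name
// regex matches, which reproduces the expectations asserted in this test.
public class LogPatternIllustration {
  static boolean shouldAggregate(String fileName, String include, String exclude) {
    boolean included = include == null || Pattern.matches(include, fileName);
    boolean excluded = exclude != null && Pattern.matches(exclude, fileName);
    return included && !excluded;
  }

  public static void main(String[] args) {
    String[] files = { "stdout", "sys.log", "std.log", "out.log", "err.log", "log" };
    for (String f : files) {
      // application3: include ".*.log", exclude "sys.log|std.log"
      System.out.println(f + " -> " + shouldAggregate(f, ".*.log", "sys.log|std.log"));
    }
  }
}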
Use of org.apache.hadoop.yarn.api.records.LogAggregationContext in project hadoop by apache.
In the class TestLogAggregationService, the method createLogAggregationService:
private LogAggregationService createLogAggregationService(ApplicationId appId, String className, String parameters, boolean createLogAggContext) {
  ConcurrentHashMap<ContainerId, Container> containers = new ConcurrentHashMap<ContainerId, Container>();
  LogAggregationService logAggregationService = new LogAggregationService(dispatcher, this.context, this.delSrvc, super.dirsHandler);
  logAggregationService.init(this.conf);
  logAggregationService.start();
  LogAggregationContext logAggContext = null;
  if (createLogAggContext) {
    logAggContext = Records.newRecord(LogAggregationContext.class);
    logAggContext.setLogAggregationPolicyClassName(className);
    if (parameters != null) {
      logAggContext.setLogAggregationPolicyParameters(parameters);
    }
  }
  logAggregationService.handle(new LogHandlerAppStartedEvent(appId, this.user, null, this.acls, logAggContext));
  return logAggregationService;
}