Use of org.apache.hadoop.yarn.server.api.ContainerLogContext in project hadoop by apache.
From the class LogAggregationService, the method stopContainer:
private void stopContainer(ContainerId containerId, int exitCode) {
  // A container is complete. Put this container's logs up for aggregation if
  // this container's logs are needed.
  AppLogAggregator aggregator = this.appLogAggregators.get(
      containerId.getApplicationAttemptId().getApplicationId());
  if (aggregator == null) {
    LOG.warn("Log aggregation is not initialized for " + containerId
        + ", did it fail to start?");
    return;
  }
  Container container = context.getContainers().get(containerId);
  if (null == container) {
    LOG.warn("Log aggregation cannot be started for " + containerId
        + ", as its an absent container");
    return;
  }
  ContainerType containerType =
      container.getContainerTokenIdentifier().getContainerType();
  aggregator.startContainerLogAggregation(
      new ContainerLogContext(containerId, containerType, exitCode));
}
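The ContainerLogContext built here carries the container id, the container type, and the exit code, which is the information a ContainerLogAggregationPolicy inspects when deciding whether a container's logs should be aggregated. The following is a minimal, hypothetical policy sketch (the class name AmOrFailedLogAggregationPolicy is invented for illustration); it assumes the NodeManager's ContainerLogAggregationPolicy interface with parseParameters and shouldDoLogAggregation, and the getContainerType/getExitCode accessors on ContainerLogContext:

import org.apache.hadoop.yarn.server.api.ContainerLogContext;
import org.apache.hadoop.yarn.server.api.ContainerType;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.ContainerLogAggregationPolicy;

// Hypothetical policy: aggregate logs for the AM container and for any
// container that exited with a non-zero code. Running containers are probed
// with an exit code of 0 (see uploadLogsForContainers below), so they are
// skipped here unless they are the AM.
public class AmOrFailedLogAggregationPolicy
    implements ContainerLogAggregationPolicy {

  @Override
  public void parseParameters(String parameters) {
    // No tunable parameters in this sketch.
  }

  @Override
  public boolean shouldDoLogAggregation(ContainerLogContext logContext) {
    return logContext.getContainerType() == ContainerType.APPLICATION_MASTER
        || logContext.getExitCode() != 0;
  }
}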
Use of org.apache.hadoop.yarn.server.api.ContainerLogContext in project hadoop by apache.
From the class AppLogAggregatorImpl, the method uploadLogsForContainers:
private void uploadLogsForContainers(boolean appFinished) {
  if (this.logAggregationDisabled) {
    return;
  }
  if (UserGroupInformation.isSecurityEnabled()) {
    Credentials systemCredentials =
        context.getSystemCredentialsForApps().get(appId);
    if (systemCredentials != null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Adding new framework-token for " + appId
            + " for log-aggregation: " + systemCredentials.getAllTokens()
            + "; userUgi=" + userUgi);
      }
      // this will replace old token
      userUgi.addCredentials(systemCredentials);
    }
  }
  // Create a set of Containers whose logs will be uploaded in this cycle.
  // It includes:
  // a) all containers in pendingContainers: those containers are finished
  //    and satisfy the ContainerLogAggregationPolicy.
  // b) some set of running containers: For all the Running containers,
  //    we use exitCode of 0 to find those which satisfy the
  //    ContainerLogAggregationPolicy.
  Set<ContainerId> pendingContainerInThisCycle = new HashSet<ContainerId>();
  this.pendingContainers.drainTo(pendingContainerInThisCycle);
  Set<ContainerId> finishedContainers =
      new HashSet<ContainerId>(pendingContainerInThisCycle);
  if (this.context.getApplications().get(this.appId) != null) {
    for (Container container : this.context.getApplications()
        .get(this.appId).getContainers().values()) {
      ContainerType containerType =
          container.getContainerTokenIdentifier().getContainerType();
      if (shouldUploadLogs(new ContainerLogContext(
          container.getContainerId(), containerType, 0))) {
        pendingContainerInThisCycle.add(container.getContainerId());
      }
    }
  }
  LogWriter writer = null;
  String diagnosticMessage = "";
  boolean logAggregationSucceedInThisCycle = true;
  try {
    if (pendingContainerInThisCycle.isEmpty()) {
      return;
    }
    logAggregationTimes++;
    try {
      writer = createLogWriter();
      // Write ACLs once when the writer is created.
      writer.writeApplicationACLs(appAcls);
      writer.writeApplicationOwner(this.userUgi.getShortUserName());
    } catch (IOException e1) {
      logAggregationSucceedInThisCycle = false;
      LOG.error("Cannot create writer for app " + this.applicationId
          + ". Skip log upload this time. ", e1);
      return;
    }
    boolean uploadedLogsInThisCycle = false;
    for (ContainerId container : pendingContainerInThisCycle) {
      ContainerLogAggregator aggregator = null;
      if (containerLogAggregators.containsKey(container)) {
        aggregator = containerLogAggregators.get(container);
      } else {
        aggregator = new ContainerLogAggregator(container);
        containerLogAggregators.put(container, aggregator);
      }
      Set<Path> uploadedFilePathsInThisCycle =
          aggregator.doContainerLogAggregation(writer, appFinished,
              finishedContainers.contains(container));
      if (uploadedFilePathsInThisCycle.size() > 0) {
        uploadedLogsInThisCycle = true;
        this.delService.delete(this.userUgi.getShortUserName(), null,
            uploadedFilePathsInThisCycle
                .toArray(new Path[uploadedFilePathsInThisCycle.size()]));
      }
      // This container is finished, and all its logs have been uploaded;
      // remove it from containerLogAggregators.
      if (finishedContainers.contains(container)) {
        containerLogAggregators.remove(container);
      }
    }
    // Before uploading more logs, make sure the number of existing logs
    // is smaller than the configured NM log aggregation retention size.
    if (uploadedLogsInThisCycle && logAggregationInRolling) {
      cleanOldLogs();
      cleanupOldLogTimes++;
    }
    if (writer != null) {
      writer.close();
      writer = null;
    }
    long currentTime = System.currentTimeMillis();
    final Path renamedPath = this.rollingMonitorInterval <= 0
        ? remoteNodeLogFileForApp
        : new Path(remoteNodeLogFileForApp.getParent(),
            remoteNodeLogFileForApp.getName() + "_" + currentTime);
    final boolean rename = uploadedLogsInThisCycle;
    try {
      userUgi.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          FileSystem remoteFS = remoteNodeLogFileForApp.getFileSystem(conf);
          if (rename) {
            remoteFS.rename(remoteNodeTmpLogFileForApp, renamedPath);
          } else {
            remoteFS.delete(remoteNodeTmpLogFileForApp, false);
          }
          return null;
        }
      });
      diagnosticMessage = "Log uploaded successfully for Application: " + appId
          + " in NodeManager: " + LogAggregationUtils.getNodeString(nodeId)
          + " at " + Times.format(currentTime) + "\n";
    } catch (Exception e) {
      LOG.error("Failed to move temporary log file to final location: ["
          + remoteNodeTmpLogFileForApp + "] to [" + renamedPath + "]", e);
      diagnosticMessage = "Log uploaded failed for Application: " + appId
          + " in NodeManager: " + LogAggregationUtils.getNodeString(nodeId)
          + " at " + Times.format(currentTime) + "\n";
      renameTemporaryLogFileFailed = true;
      logAggregationSucceedInThisCycle = false;
    }
  } finally {
    LogAggregationStatus logAggregationStatus =
        logAggregationSucceedInThisCycle
            ? LogAggregationStatus.RUNNING
            : LogAggregationStatus.RUNNING_WITH_FAILURE;
    sendLogAggregationReport(logAggregationStatus, diagnosticMessage);
    if (appFinished) {
      // If the app is finished, one extra final report with log aggregation
      // status SUCCEEDED/FAILED will be sent to RM to inform the RM
      // that the log aggregation in this NM is completed.
      LogAggregationStatus finalLogAggregationStatus =
          renameTemporaryLogFileFailed || !logAggregationSucceedInThisCycle
              ? LogAggregationStatus.FAILED
              : LogAggregationStatus.SUCCEEDED;
      sendLogAggregationReport(finalLogAggregationStatus, "");
    }
    if (writer != null) {
      writer.close();
    }
  }
}
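The policy consulted by shouldUploadLogs is selected per application rather than hard-coded in the NodeManager: the submission-time LogAggregationContext names the policy class and its parameters, while the rolling behaviour above is driven by the NodeManager's roll-monitoring interval. A hedged sketch of that setup follows; the property name yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds and the FailedContainerLogAggregationPolicy class name are cited from memory and should be checked against the Hadoop version in use:

import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.LogAggregationContext;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.Records;

// NOTE: the property name and the policy class below are assumptions;
// verify them against your Hadoop release before relying on this sketch.

// NodeManager side (yarn-site.xml equivalent): upload logs of running
// containers roughly every hour instead of only at application end.
YarnConfiguration conf = new YarnConfiguration();
conf.setLong(
    "yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds", 3600);

// Application side: ask the NodeManager to aggregate logs only for failed
// containers and, for rolling aggregation, only files matching "*.log".
LogAggregationContext logAggregationContext =
    Records.newRecord(LogAggregationContext.class);
logAggregationContext.setLogAggregationPolicyClassName(
    "org.apache.hadoop.yarn.server.nodemanager.containermanager"
        + ".logaggregation.FailedContainerLogAggregationPolicy");
logAggregationContext.setRolledLogsIncludePattern(".*\\.log");

ApplicationSubmissionContext submissionContext =
    Records.newRecord(ApplicationSubmissionContext.class);
submissionContext.setLogAggregationContext(logAggregationContext);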
Use of org.apache.hadoop.yarn.server.api.ContainerLogContext in project hadoop by apache.
From the class TestAppLogAggregatorImpl, the method verifyLogAggregationWithExpectedFiles2DeleteAndUpload:
/**
 * Verify if the application log aggregator, configured with the given log
 * retention period and the recovered log initialization time of
 * the application, uploads and deletes the set of log files as expected.
 * @param appId application id
 * @param containerId container id
 * @param logRetentionSecs log retention period
 * @param recoveredLogInitedTimeMillis recovered log initialization time
 * @param expectedFilesToDelete the set of files expected to be deleted
 * @param expectedFilesToUpload the set of files expected to be uploaded
 */
public void verifyLogAggregationWithExpectedFiles2DeleteAndUpload(
    ApplicationId appId, ContainerId containerId, long logRetentionSecs,
    long recoveredLogInitedTimeMillis, Set<File> expectedFilesToDelete,
    Set<File> expectedFilesToUpload) throws IOException {
  final Set<String> filesExpected2Delete = new HashSet<>();
  for (File file : expectedFilesToDelete) {
    filesExpected2Delete.add(file.getAbsolutePath());
  }
  final Set<String> filesExpected2Upload = new HashSet<>();
  for (File file : expectedFilesToUpload) {
    filesExpected2Upload.add(file.getAbsolutePath());
  }
  // deletion service with verification to check files to delete
  DeletionService deletionServiceWithExpectedFiles =
      createDeletionServiceWithExpectedFile2Delete(filesExpected2Delete);
  final YarnConfiguration config = new YarnConfiguration();
  config.setLong(YarnConfiguration.LOG_AGGREGATION_RETAIN_SECONDS,
      logRetentionSecs);
  final AppLogAggregatorInTest appLogAggregator = createAppLogAggregator(
      appId, LOCAL_LOG_DIR.getAbsolutePath(), config,
      recoveredLogInitedTimeMillis, deletionServiceWithExpectedFiles);
  appLogAggregator.startContainerLogAggregation(
      new ContainerLogContext(containerId, ContainerType.TASK, 0));
  // set app finished flag first
  appLogAggregator.finishLogAggregation();
  appLogAggregator.run();
  // verify uploaded files
  ArgumentCaptor<LogValue> logValCaptor =
      ArgumentCaptor.forClass(LogValue.class);
  verify(appLogAggregator.logWriter).append(any(LogKey.class),
      logValCaptor.capture());
  Set<String> filesUploaded = new HashSet<>();
  LogValue logValue = logValCaptor.getValue();
  for (File file : logValue.getPendingLogFilesToUploadForThisContainer()) {
    filesUploaded.add(file.getAbsolutePath());
  }
  verifyFilesUploaded(filesUploaded, filesExpected2Upload);
}
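A hypothetical invocation of this helper, written as if it lived inside TestAppLogAggregatorImpl (so LOCAL_LOG_DIR is in scope), might look as follows; the file names, the one-hour retention, and the use of -1 as the recovered-log-init time are illustrative assumptions rather than values taken from the actual test class:

import java.io.File;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;

// Illustrative ids; a real test would also create the log files on disk
// under LOCAL_LOG_DIR before running the aggregator.
ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
ContainerId containerId = ContainerId.newContainerId(attemptId, 1L);

// Both files are expected to be uploaded and then handed to the
// DeletionService, so the same set is passed for delete and upload.
File containerLogDir = new File(LOCAL_LOG_DIR,
    appId.toString() + File.separator + containerId.toString());
Set<File> logFiles = new HashSet<>(Arrays.asList(
    new File(containerLogDir, "stdout"),
    new File(containerLogDir, "stderr")));

// 3600s retention; -1 is assumed here to mean "no recovered init time".
verifyLogAggregationWithExpectedFiles2DeleteAndUpload(
    appId, containerId, 3600, -1, logFiles, logFiles);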