Use of org.apache.hadoop.yarn.server.api.ContainerType in project hadoop by apache: the class LogAggregationService, method stopContainer.
private void stopContainer(ContainerId containerId, int exitCode) {
  // A container is complete. Put this container's logs up for aggregation if
  // this container's logs are needed.
  AppLogAggregator aggregator = this.appLogAggregators.get(
      containerId.getApplicationAttemptId().getApplicationId());
  if (aggregator == null) {
    LOG.warn("Log aggregation is not initialized for " + containerId
        + ", did it fail to start?");
    return;
  }
  Container container = context.getContainers().get(containerId);
  if (null == container) {
    LOG.warn("Log aggregation cannot be started for " + containerId
        + ", as its an absent container");
    return;
  }
  ContainerType containerType =
      container.getContainerTokenIdentifier().getContainerType();
  aggregator.startContainerLogAggregation(
      new ContainerLogContext(containerId, containerType, exitCode));
}
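
For orientation, ContainerType boils down to two values, which is exactly the distinction the NodeManager and ResourceManager code in these examples relies on. A minimal recap of the enum (a sketch, not the full annotated source):

package org.apache.hadoop.yarn.server.api;

// Sketch of the two-valued enum used throughout these examples: the
// application master container versus an ordinary task container.
public enum ContainerType {
  APPLICATION_MASTER,
  TASK
}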
Use of org.apache.hadoop.yarn.server.api.ContainerType in project hadoop by apache: the class AppLogAggregatorImpl, method uploadLogsForContainers.
private void uploadLogsForContainers(boolean appFinished) {
  if (this.logAggregationDisabled) {
    return;
  }
  if (UserGroupInformation.isSecurityEnabled()) {
    Credentials systemCredentials =
        context.getSystemCredentialsForApps().get(appId);
    if (systemCredentials != null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Adding new framework-token for " + appId
            + " for log-aggregation: " + systemCredentials.getAllTokens()
            + "; userUgi=" + userUgi);
      }
      // this will replace the old token
      userUgi.addCredentials(systemCredentials);
    }
  }
  // Create a set of Containers whose logs will be uploaded in this cycle.
  // It includes:
  // a) all containers in pendingContainers: those containers are finished
  //    and satisfy the ContainerLogAggregationPolicy.
  // b) some set of running containers: for all the running containers,
  //    we use an exitCode of 0 to find those which satisfy the
  //    ContainerLogAggregationPolicy.
  Set<ContainerId> pendingContainerInThisCycle = new HashSet<ContainerId>();
  this.pendingContainers.drainTo(pendingContainerInThisCycle);
  Set<ContainerId> finishedContainers =
      new HashSet<ContainerId>(pendingContainerInThisCycle);
  if (this.context.getApplications().get(this.appId) != null) {
    for (Container container : this.context.getApplications()
        .get(this.appId).getContainers().values()) {
      ContainerType containerType =
          container.getContainerTokenIdentifier().getContainerType();
      if (shouldUploadLogs(new ContainerLogContext(
          container.getContainerId(), containerType, 0))) {
        pendingContainerInThisCycle.add(container.getContainerId());
      }
    }
  }
  LogWriter writer = null;
  String diagnosticMessage = "";
  boolean logAggregationSucceedInThisCycle = true;
  try {
    if (pendingContainerInThisCycle.isEmpty()) {
      return;
    }
    logAggregationTimes++;
    try {
      writer = createLogWriter();
      // Write ACLs once when the writer is created.
      writer.writeApplicationACLs(appAcls);
      writer.writeApplicationOwner(this.userUgi.getShortUserName());
    } catch (IOException e1) {
      logAggregationSucceedInThisCycle = false;
      LOG.error("Cannot create writer for app " + this.applicationId
          + ". Skip log upload this time. ", e1);
      return;
    }
    boolean uploadedLogsInThisCycle = false;
    for (ContainerId container : pendingContainerInThisCycle) {
      ContainerLogAggregator aggregator = null;
      if (containerLogAggregators.containsKey(container)) {
        aggregator = containerLogAggregators.get(container);
      } else {
        aggregator = new ContainerLogAggregator(container);
        containerLogAggregators.put(container, aggregator);
      }
      Set<Path> uploadedFilePathsInThisCycle =
          aggregator.doContainerLogAggregation(writer, appFinished,
              finishedContainers.contains(container));
      if (uploadedFilePathsInThisCycle.size() > 0) {
        uploadedLogsInThisCycle = true;
        this.delService.delete(this.userUgi.getShortUserName(), null,
            uploadedFilePathsInThisCycle
                .toArray(new Path[uploadedFilePathsInThisCycle.size()]));
      }
      // The container is finished and its logs have been uploaded, so
      // remove it from containerLogAggregators.
      if (finishedContainers.contains(container)) {
        containerLogAggregators.remove(container);
      }
    }
    // In the rolling case, make sure the number of existing rolled-up logs
    // is smaller than the configured NM log aggregation retention size.
    if (uploadedLogsInThisCycle && logAggregationInRolling) {
      cleanOldLogs();
      cleanupOldLogTimes++;
    }
    if (writer != null) {
      writer.close();
      writer = null;
    }
    long currentTime = System.currentTimeMillis();
    final Path renamedPath = this.rollingMonitorInterval <= 0
        ? remoteNodeLogFileForApp
        : new Path(remoteNodeLogFileForApp.getParent(),
            remoteNodeLogFileForApp.getName() + "_" + currentTime);
    final boolean rename = uploadedLogsInThisCycle;
    try {
      userUgi.doAs(new PrivilegedExceptionAction<Object>() {
        @Override
        public Object run() throws Exception {
          FileSystem remoteFS = remoteNodeLogFileForApp.getFileSystem(conf);
          if (rename) {
            remoteFS.rename(remoteNodeTmpLogFileForApp, renamedPath);
          } else {
            remoteFS.delete(remoteNodeTmpLogFileForApp, false);
          }
          return null;
        }
      });
      diagnosticMessage = "Log uploaded successfully for Application: " + appId
          + " in NodeManager: " + LogAggregationUtils.getNodeString(nodeId)
          + " at " + Times.format(currentTime) + "\n";
    } catch (Exception e) {
      LOG.error("Failed to move temporary log file to final location: ["
          + remoteNodeTmpLogFileForApp + "] to [" + renamedPath + "]", e);
      diagnosticMessage = "Log uploaded failed for Application: " + appId
          + " in NodeManager: " + LogAggregationUtils.getNodeString(nodeId)
          + " at " + Times.format(currentTime) + "\n";
      renameTemporaryLogFileFailed = true;
      logAggregationSucceedInThisCycle = false;
    }
  } finally {
    LogAggregationStatus logAggregationStatus = logAggregationSucceedInThisCycle
        ? LogAggregationStatus.RUNNING
        : LogAggregationStatus.RUNNING_WITH_FAILURE;
    sendLogAggregationReport(logAggregationStatus, diagnosticMessage);
    if (appFinished) {
      // If the app is finished, one extra final report with log aggregation
      // status SUCCEEDED/FAILED will be sent to the RM to inform it that
      // log aggregation on this NM is complete.
      LogAggregationStatus finalLogAggregationStatus =
          renameTemporaryLogFileFailed || !logAggregationSucceedInThisCycle
              ? LogAggregationStatus.FAILED
              : LogAggregationStatus.SUCCEEDED;
      sendLogAggregationReport(finalLogAggregationStatus, "");
    }
    if (writer != null) {
      writer.close();
    }
  }
}
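
The shouldUploadLogs call above hands a ContainerLogContext to the configured ContainerLogAggregationPolicy. Below is a minimal sketch of such a policy check, assuming the ContainerLogContext getters implied by the constructor used above (getContainerType, getExitCode); the class and method names are illustrative, not the policy classes shipped with the NodeManager:

import org.apache.hadoop.yarn.server.api.ContainerLogContext;
import org.apache.hadoop.yarn.server.api.ContainerType;

// Illustrative policy check: always aggregate AM logs, and aggregate task
// logs only when the container exited with a non-zero code. Running
// containers are probed with exit code 0 (see the loop above), so they are
// skipped until they actually finish with a failure.
public class AmOrFailedContainerPolicySketch {

  public boolean shouldDoLogAggregation(ContainerLogContext logContext) {
    if (logContext.getContainerType() == ContainerType.APPLICATION_MASTER) {
      return true;
    }
    return logContext.getExitCode() != 0;
  }
}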
Use of org.apache.hadoop.yarn.server.api.ContainerType in project hadoop by apache: the class SchedulerApplicationAttempt, method updateContainerAndNMToken.
private Container updateContainerAndNMToken(RMContainer rmContainer,
    ContainerUpdateType updateType) {
  Container container = rmContainer.getContainer();
  ContainerType containerType = ContainerType.TASK;
  if (updateType != null) {
    container.setVersion(container.getVersion() + 1);
  }
  // While the attempt is still waiting for its AM container, the container
  // being handed out is itself the master container.
  if (isWaitingForAMContainer()) {
    containerType = ContainerType.APPLICATION_MASTER;
  }
  try {
    // Create the container token and NMToken together.
    container.setContainerToken(rmContext.getContainerTokenSecretManager()
        .createContainerToken(container.getId(), container.getVersion(),
            container.getNodeId(), getUser(), container.getResource(),
            container.getPriority(), rmContainer.getCreationTime(),
            this.logAggregationContext, rmContainer.getNodeLabelExpression(),
            containerType));
    updateNMToken(container);
  } catch (IllegalArgumentException e) {
    // DNS might be down, skip returning this container.
    LOG.error("Error trying to assign container token and NM token to"
        + " an updated container " + container.getId(), e);
    return null;
  }
  if (updateType == null
      || ContainerUpdateType.PROMOTE_EXECUTION_TYPE == updateType
      || ContainerUpdateType.DEMOTE_EXECUTION_TYPE == updateType) {
    rmContainer.handle(new RMContainerEvent(rmContainer.getContainerId(),
        RMContainerEventType.ACQUIRED));
  } else {
    rmContainer.handle(new RMContainerUpdatesAcquiredEvent(
        rmContainer.getContainerId(),
        ContainerUpdateType.INCREASE_RESOURCE == updateType));
    if (ContainerUpdateType.DECREASE_RESOURCE == updateType) {
      this.rmContext.getDispatcher().getEventHandler().handle(
          new RMNodeDecreaseContainerEvent(rmContainer.getNodeId(),
              Collections.singletonList(rmContainer.getContainer())));
    }
  }
  return container;
}
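
The ContainerType chosen here is baked into the container token, which is how the NodeManager-side code in the first two examples recovers it via getContainerTokenIdentifier().getContainerType(). A small hypothetical helper showing that read-back on the NM side (ContainerTypeUtils and isAMContainer are made-up names for illustration):

import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.api.ContainerType;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;

// Hypothetical helper: decide whether an NM-side container is the AM by
// reading the ContainerType carried in its container token.
final class ContainerTypeUtils {

  private ContainerTypeUtils() {
  }

  static boolean isAMContainer(Container container) {
    ContainerTokenIdentifier tokenId = container.getContainerTokenIdentifier();
    return tokenId != null
        && tokenId.getContainerType() == ContainerType.APPLICATION_MASTER;
  }
}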