Search in sources :

Example 1 with ContainerType

Use of org.apache.hadoop.yarn.server.api.ContainerType in project hadoop.

From the class LogAggregationService, method stopContainer.

/**
 * Queues a finished container's logs for aggregation.
 *
 * The logs are handed to the per-application {@code AppLogAggregator};
 * nothing happens unless aggregation was initialized for the owning
 * application and the container is still known to this NodeManager.
 */
private void stopContainer(ContainerId containerId, int exitCode) {
    // Aggregators are keyed per application; resolve the owning app first.
    AppLogAggregator logAggregator = this.appLogAggregators.get(
        containerId.getApplicationAttemptId().getApplicationId());
    if (logAggregator == null) {
        LOG.warn("Log aggregation is not initialized for " + containerId + ", did it fail to start?");
        return;
    }
    Container nmContainer = context.getContainers().get(containerId);
    if (nmContainer == null) {
        LOG.warn("Log aggregation cannot be started for " + containerId + ", as its an absent container");
        return;
    }
    // The container type (AM vs. task) travels inside the container token.
    ContainerType type = nmContainer.getContainerTokenIdentifier().getContainerType();
    logAggregator.startContainerLogAggregation(
        new ContainerLogContext(containerId, type, exitCode));
}
Also used : Container(org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container) ContainerType(org.apache.hadoop.yarn.server.api.ContainerType) ContainerLogContext(org.apache.hadoop.yarn.server.api.ContainerLogContext)

Example 2 with ContainerType

Use of org.apache.hadoop.yarn.server.api.ContainerType in project hadoop.

From the class AppLogAggregatorImpl, method uploadLogsForContainers.

/**
 * Runs one log-upload cycle for this application.
 *
 * Collects the containers whose logs should be uploaded in this cycle
 * (finished containers drained from {@code pendingContainers}, plus any
 * running containers that pass the log-aggregation policy), writes their
 * logs through a single {@code LogWriter}, deletes the local files that
 * were uploaded, then renames (or, if nothing was uploaded, deletes) the
 * temporary remote file as the application user. A log-aggregation status
 * report is ALWAYS sent from the finally block, even on early return.
 *
 * @param appFinished whether the application has finished; when true an
 *        extra final SUCCEEDED/FAILED report is sent to the RM.
 */
private void uploadLogsForContainers(boolean appFinished) {
    if (this.logAggregationDisabled) {
        return;
    }
    if (UserGroupInformation.isSecurityEnabled()) {
        // Pick up any refreshed delegation tokens the RM pushed for this app,
        // so the upload below runs with current credentials.
        Credentials systemCredentials = context.getSystemCredentialsForApps().get(appId);
        if (systemCredentials != null) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("Adding new framework-token for " + appId + " for log-aggregation: " + systemCredentials.getAllTokens() + "; userUgi=" + userUgi);
            }
            // this will replace old token
            userUgi.addCredentials(systemCredentials);
        }
    }
    // Create a set of Containers whose logs will be uploaded in this cycle.
    // It includes:
    // a) all containers in pendingContainers: those containers are finished
    //    and satisfy the ContainerLogAggregationPolicy.
    // b) some set of running containers: For all the Running containers,
    //    we use exitCode of 0 to find those which satisfy the
    //    ContainerLogAggregationPolicy.
    Set<ContainerId> pendingContainerInThisCycle = new HashSet<ContainerId>();
    this.pendingContainers.drainTo(pendingContainerInThisCycle);
    // Snapshot taken BEFORE running containers are added: only these are
    // treated as finished further down (aggregator cleanup, final upload).
    Set<ContainerId> finishedContainers = new HashSet<ContainerId>(pendingContainerInThisCycle);
    if (this.context.getApplications().get(this.appId) != null) {
        for (Container container : this.context.getApplications().get(this.appId).getContainers().values()) {
            ContainerType containerType = container.getContainerTokenIdentifier().getContainerType();
            // exitCode 0 is a placeholder: these containers are still running.
            if (shouldUploadLogs(new ContainerLogContext(container.getContainerId(), containerType, 0))) {
                pendingContainerInThisCycle.add(container.getContainerId());
            }
        }
    }
    LogWriter writer = null;
    String diagnosticMessage = "";
    boolean logAggregationSucceedInThisCycle = true;
    try {
        if (pendingContainerInThisCycle.isEmpty()) {
            // Nothing to upload; NOTE the finally block still sends a
            // RUNNING status report on this path.
            return;
        }
        logAggregationTimes++;
        try {
            writer = createLogWriter();
            // Write ACLs once when the writer is created.
            writer.writeApplicationACLs(appAcls);
            writer.writeApplicationOwner(this.userUgi.getShortUserName());
        } catch (IOException e1) {
            // Could not open the remote file; skip this cycle but report
            // the failure via the finally block.
            logAggregationSucceedInThisCycle = false;
            LOG.error("Cannot create writer for app " + this.applicationId + ". Skip log upload this time. ", e1);
            return;
        }
        boolean uploadedLogsInThisCycle = false;
        for (ContainerId container : pendingContainerInThisCycle) {
            // Reuse the per-container aggregator across rolling cycles so it
            // can track which files were already uploaded.
            ContainerLogAggregator aggregator = null;
            if (containerLogAggregators.containsKey(container)) {
                aggregator = containerLogAggregators.get(container);
            } else {
                aggregator = new ContainerLogAggregator(container);
                containerLogAggregators.put(container, aggregator);
            }
            Set<Path> uploadedFilePathsInThisCycle = aggregator.doContainerLogAggregation(writer, appFinished, finishedContainers.contains(container));
            if (uploadedFilePathsInThisCycle.size() > 0) {
                uploadedLogsInThisCycle = true;
                // Schedule local deletion of the files that were uploaded.
                this.delService.delete(this.userUgi.getShortUserName(), null, uploadedFilePathsInThisCycle.toArray(new Path[uploadedFilePathsInThisCycle.size()]));
            }
            // The container is finished, so no more logs will arrive for it;
            // remove it from containerLogAggregators.
            if (finishedContainers.contains(container)) {
                containerLogAggregators.remove(container);
            }
        }
        // In rolling mode, prune old uploaded logs so the retained total
        // is smaller than the configured NM log aggregation retention size.
        if (uploadedLogsInThisCycle && logAggregationInRolling) {
            cleanOldLogs();
            cleanupOldLogTimes++;
        }
        // Close before the rename below; null it out so the finally block
        // does not close it a second time.
        if (writer != null) {
            writer.close();
            writer = null;
        }
        long currentTime = System.currentTimeMillis();
        // Rolling aggregation gets a timestamp-suffixed file per cycle;
        // otherwise the single final app log file is used.
        final Path renamedPath = this.rollingMonitorInterval <= 0 ? remoteNodeLogFileForApp : new Path(remoteNodeLogFileForApp.getParent(), remoteNodeLogFileForApp.getName() + "_" + currentTime);
        final boolean rename = uploadedLogsInThisCycle;
        try {
            // Perform the remote rename/delete as the application user.
            userUgi.doAs(new PrivilegedExceptionAction<Object>() {

                @Override
                public Object run() throws Exception {
                    FileSystem remoteFS = remoteNodeLogFileForApp.getFileSystem(conf);
                    if (rename) {
                        remoteFS.rename(remoteNodeTmpLogFileForApp, renamedPath);
                    } else {
                        // Nothing was uploaded; discard the empty tmp file.
                        remoteFS.delete(remoteNodeTmpLogFileForApp, false);
                    }
                    return null;
                }
            });
            diagnosticMessage = "Log uploaded successfully for Application: " + appId + " in NodeManager: " + LogAggregationUtils.getNodeString(nodeId) + " at " + Times.format(currentTime) + "\n";
        } catch (Exception e) {
            LOG.error("Failed to move temporary log file to final location: [" + remoteNodeTmpLogFileForApp + "] to [" + renamedPath + "]", e);
            diagnosticMessage = "Log uploaded failed for Application: " + appId + " in NodeManager: " + LogAggregationUtils.getNodeString(nodeId) + " at " + Times.format(currentTime) + "\n";
            renameTemporaryLogFileFailed = true;
            logAggregationSucceedInThisCycle = false;
        }
    } finally {
        // Always report the per-cycle status to the RM, including on the
        // early-return paths above.
        LogAggregationStatus logAggregationStatus = logAggregationSucceedInThisCycle ? LogAggregationStatus.RUNNING : LogAggregationStatus.RUNNING_WITH_FAILURE;
        sendLogAggregationReport(logAggregationStatus, diagnosticMessage);
        if (appFinished) {
            // If the app is finished, one extra final report with log aggregation
            // status SUCCEEDED/FAILED will be sent to RM to inform the RM
            // that the log aggregation in this NM is completed.
            LogAggregationStatus finalLogAggregationStatus = renameTemporaryLogFileFailed || !logAggregationSucceedInThisCycle ? LogAggregationStatus.FAILED : LogAggregationStatus.SUCCEEDED;
            sendLogAggregationReport(finalLogAggregationStatus, "");
        }
        // Only non-null here if an exception skipped the normal close above.
        if (writer != null) {
            writer.close();
        }
    }
}
Also used : Path(org.apache.hadoop.fs.Path) ContainerType(org.apache.hadoop.yarn.server.api.ContainerType) ContainerLogContext(org.apache.hadoop.yarn.server.api.ContainerLogContext) IOException(java.io.IOException) IOException(java.io.IOException) UnsupportedFileSystemException(org.apache.hadoop.fs.UnsupportedFileSystemException) Container(org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container) ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) LogWriter(org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter) FileSystem(org.apache.hadoop.fs.FileSystem) LogAggregationStatus(org.apache.hadoop.yarn.api.records.LogAggregationStatus) Credentials(org.apache.hadoop.security.Credentials) HashSet(java.util.HashSet)

Example 3 with ContainerType

Use of org.apache.hadoop.yarn.server.api.ContainerType in project hadoop.

From the class SchedulerApplicationAttempt, method updateContainerAndNMToken.

/**
 * Refreshes the container token and NM token on the given RMContainer's
 * underlying container, then fires the event that marks it acquired.
 *
 * @param rmContainer the scheduler-side container being handed out/updated
 * @param updateType  null for a freshly allocated container; otherwise the
 *                    kind of in-place update being applied
 * @return the updated container, or {@code null} if token creation failed
 *         (e.g. DNS is down) and the container should be skipped
 */
private Container updateContainerAndNMToken(RMContainer rmContainer, ContainerUpdateType updateType) {
    Container container = rmContainer.getContainer();
    // Any in-place update bumps the container's version.
    if (updateType != null) {
        container.setVersion(container.getVersion() + 1);
    }
    // While the app is still waiting for its AM, the container being issued
    // itself is the master container.
    ContainerType containerType = isWaitingForAMContainer()
        ? ContainerType.APPLICATION_MASTER
        : ContainerType.TASK;
    try {
        // create container token and NMToken altogether.
        container.setContainerToken(rmContext.getContainerTokenSecretManager().createContainerToken(container.getId(), container.getVersion(), container.getNodeId(), getUser(), container.getResource(), container.getPriority(), rmContainer.getCreationTime(), this.logAggregationContext, rmContainer.getNodeLabelExpression(), containerType));
        updateNMToken(container);
    } catch (IllegalArgumentException e) {
        // DNS might be down, skip returning this container.
        LOG.error("Error trying to assign container token and NM token to" + " an updated container " + container.getId(), e);
        return null;
    }
    // New allocations and execution-type changes are acquired via the plain
    // ACQUIRED event; resource-size updates use the updates-acquired path.
    boolean plainAcquire = updateType == null
        || updateType == ContainerUpdateType.PROMOTE_EXECUTION_TYPE
        || updateType == ContainerUpdateType.DEMOTE_EXECUTION_TYPE;
    if (plainAcquire) {
        rmContainer.handle(new RMContainerEvent(rmContainer.getContainerId(), RMContainerEventType.ACQUIRED));
    } else {
        rmContainer.handle(new RMContainerUpdatesAcquiredEvent(rmContainer.getContainerId(), updateType == ContainerUpdateType.INCREASE_RESOURCE));
        if (updateType == ContainerUpdateType.DECREASE_RESOURCE) {
            // The node must also be told to shrink the container.
            this.rmContext.getDispatcher().getEventHandler().handle(new RMNodeDecreaseContainerEvent(rmContainer.getNodeId(), Collections.singletonList(rmContainer.getContainer())));
        }
    }
    return container;
}
Also used : RMContainer(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer) Container(org.apache.hadoop.yarn.api.records.Container) RMContainerEvent(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent) ContainerType(org.apache.hadoop.yarn.server.api.ContainerType) RMNodeDecreaseContainerEvent(org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeDecreaseContainerEvent) RMContainerUpdatesAcquiredEvent(org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerUpdatesAcquiredEvent)

Aggregations

ContainerType (org.apache.hadoop.yarn.server.api.ContainerType)3 ContainerLogContext (org.apache.hadoop.yarn.server.api.ContainerLogContext)2 Container (org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container)2 IOException (java.io.IOException)1 HashSet (java.util.HashSet)1 FileSystem (org.apache.hadoop.fs.FileSystem)1 Path (org.apache.hadoop.fs.Path)1 UnsupportedFileSystemException (org.apache.hadoop.fs.UnsupportedFileSystemException)1 Credentials (org.apache.hadoop.security.Credentials)1 Container (org.apache.hadoop.yarn.api.records.Container)1 ContainerId (org.apache.hadoop.yarn.api.records.ContainerId)1 LogAggregationStatus (org.apache.hadoop.yarn.api.records.LogAggregationStatus)1 LogWriter (org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogWriter)1 RMContainer (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer)1 RMContainerEvent (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEvent)1 RMContainerUpdatesAcquiredEvent (org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerUpdatesAcquiredEvent)1 RMNodeDecreaseContainerEvent (org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeDecreaseContainerEvent)1