Search in sources :

Example 41 with Private

use of org.apache.hadoop.classification.InterfaceAudience.Private in project hadoop by apache.

In the class LogCLIHelpers: the method dumpAContainerLogsForLogType.

/**
 * Dumps the aggregated logs of a single container to a local output file,
 * optionally restricted to the log types named in {@code options}.
 *
 * @param options request carrying app id, owner, node id, container id,
 *          output directory, log types and byte limits
 * @param outputFailure whether to print a "container log not found"
 *          message when nothing was dumped
 * @return 0 if at least one container log was dumped, -1 otherwise
 * @throws IOException on errors reading the aggregated log files
 */
@Private
@VisibleForTesting
public int dumpAContainerLogsForLogType(ContainerLogsRequest options, boolean outputFailure) throws IOException {
    ApplicationId applicationId = options.getAppId();
    String jobOwner = options.getAppOwner();
    String nodeId = options.getNodeId();
    String containerId = options.getContainerId();
    String localDir = options.getOutputLocalDir();
    // logType is a freshly-built ArrayList, so it can never be null below;
    // only emptiness needs to be checked.
    List<String> logType = new ArrayList<String>(options.getLogTypes());
    RemoteIterator<FileStatus> nodeFiles = getRemoteNodeFileDir(applicationId, jobOwner);
    if (nodeFiles == null) {
        return -1;
    }
    boolean foundContainerLogs = false;
    while (nodeFiles.hasNext()) {
        FileStatus thisNodeFile = nodeFiles.next();
        String fileName = thisNodeFile.getPath().getName();
        // Aggregated logs may have been archived into a Hadoop archive;
        // switch the iteration to the contents of the .har file.
        if (fileName.equals(applicationId + ".har")) {
            Path p = new Path("har:///" + thisNodeFile.getPath().toUri().getRawPath());
            nodeFiles = HarFs.get(p.toUri(), conf).listStatusIterator(p);
            continue;
        }
        // Only consider completed aggregation files for the requested node.
        if (fileName.contains(LogAggregationUtils.getNodeString(nodeId)) && !fileName.endsWith(LogAggregationUtils.TMP_FILE_SUFFIX)) {
            AggregatedLogFormat.LogReader reader = null;
            PrintStream out = createPrintStream(localDir, fileName, containerId);
            try {
                reader = new AggregatedLogFormat.LogReader(getConf(), thisNodeFile.getPath());
                if (getContainerLogsStream(containerId, reader) == null) {
                    // finally still runs on continue, closing reader and out.
                    continue;
                }
                String containerString = String.format(CONTAINER_ON_NODE_PATTERN, containerId, thisNodeFile.getPath().getName());
                out.println(containerString);
                out.println("LogAggregationType: AGGREGATED");
                out.println(StringUtils.repeat("=", containerString.length()));
                // We have to re-create the reader to reset the stream index
                // after calling getContainerLogsStream, which moved the stream
                // to the end of the log file. Close the probe reader first:
                // reassigning the reference would otherwise leak it, since the
                // finally block only closes the final reader.
                reader.close();
                reader = new AggregatedLogFormat.LogReader(getConf(), thisNodeFile.getPath());
                if (logType.isEmpty()) {
                    // No filter requested: dump every log type for the container.
                    if (dumpAContainerLogs(containerId, reader, out, thisNodeFile.getModificationTime(), options.getBytes()) > -1) {
                        foundContainerLogs = true;
                    }
                } else {
                    // Dump only the requested log types.
                    if (dumpAContainerLogsForALogType(containerId, reader, out, thisNodeFile.getModificationTime(), logType, options.getBytes()) > -1) {
                        foundContainerLogs = true;
                    }
                }
            } finally {
                if (reader != null) {
                    reader.close();
                }
                closePrintStream(out);
            }
        }
    }
    if (!foundContainerLogs) {
        if (outputFailure) {
            containerLogNotFound(containerId);
        }
        return -1;
    }
    return 0;
}
Also used : Path(org.apache.hadoop.fs.Path) LogReader(org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader) PrintStream(java.io.PrintStream) FileStatus(org.apache.hadoop.fs.FileStatus) ArrayList(java.util.ArrayList) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Private(org.apache.hadoop.classification.InterfaceAudience.Private)

Example 42 with Private

use of org.apache.hadoop.classification.InterfaceAudience.Private in project hadoop by apache.

In the class LogCLIHelpers: the method listContainerLogs.

/**
 * Lists the names of the log files (log types) available in the
 * aggregated logs for the container named in {@code options}, or for all
 * containers of the application when no container id is given.
 *
 * @param options request carrying app id, owner, and optional node id and
 *          container id filters
 * @return the set of log file names found (empty when nothing matches or
 *         the remote log directory is unavailable)
 * @throws IOException on errors reading the aggregated log files
 */
@Private
public Set<String> listContainerLogs(ContainerLogsRequest options) throws IOException {
    Set<String> foundLogTypes = new HashSet<String>();
    ApplicationId appId = options.getAppId();
    String appOwner = options.getAppOwner();
    String nodeId = options.getNodeId();
    String containerIdStr = options.getContainerId();
    boolean matchAllContainers = (containerIdStr == null);
    String nodeIdStr = (nodeId == null) ? null : LogAggregationUtils.getNodeString(nodeId);
    RemoteIterator<FileStatus> nodeFiles = getRemoteNodeFileDir(appId, appOwner);
    if (nodeFiles == null) {
        return foundLogTypes;
    }
    while (nodeFiles.hasNext()) {
        FileStatus aggregatedFile = nodeFiles.next();
        String fileName = aggregatedFile.getPath().getName();
        // Restrict to the requested node, if one was specified.
        if (nodeIdStr != null && !fileName.contains(nodeIdStr)) {
            continue;
        }
        // Skip files still being written by the log aggregation service.
        if (fileName.endsWith(LogAggregationUtils.TMP_FILE_SUFFIX)) {
            continue;
        }
        AggregatedLogFormat.LogReader reader = new AggregatedLogFormat.LogReader(getConf(), aggregatedFile.getPath());
        try {
            LogKey logKey = new LogKey();
            DataInputStream containerStream = reader.next(logKey);
            // Each key/stream pair is one container's section of the file.
            while (containerStream != null) {
                if (matchAllContainers || logKey.toString().equals(containerIdStr)) {
                    // Collect every log file name in this container's
                    // section; EOF marks the end of the section.
                    while (true) {
                        try {
                            foundLogTypes.add(LogReader.readContainerMetaDataAndSkipData(containerStream).getFirst());
                        } catch (EOFException eof) {
                            break;
                        }
                    }
                    if (!matchAllContainers) {
                        // The single requested container was found; no need
                        // to scan the rest of this file.
                        break;
                    }
                }
                // Advance to the next container's section.
                logKey = new LogKey();
                containerStream = reader.next(logKey);
            }
        } finally {
            reader.close();
        }
    }
    return foundLogTypes;
}
Also used : LogReader(org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader) FileStatus(org.apache.hadoop.fs.FileStatus) LogKey(org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey) DataInputStream(java.io.DataInputStream) LogReader(org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader) EOFException(java.io.EOFException) ApplicationId(org.apache.hadoop.yarn.api.records.ApplicationId) HashSet(java.util.HashSet) Private(org.apache.hadoop.classification.InterfaceAudience.Private)

Example 43 with Private

use of org.apache.hadoop.classification.InterfaceAudience.Private in project hadoop by apache.

In the class RMProxy: the method createRMProxy.

/**
 * Creates a proxy for the specified protocol; currently used by the
 * Client and the AM only. For non-HA deployments this is a direct
 * connection to the ResourceManager address; when HA is enabled the
 * returned proxy transparently handles failover between ResourceManagers.
 *
 * @param configuration configuration to read RM addresses and retry
 *          settings from
 * @param protocol the protocol interface the proxy must implement
 * @param instance RMProxy instance used to build the underlying proxy
 * @return a retrying, possibly failover-aware proxy for {@code protocol}
 * @throws IOException if the proxy cannot be created
 */
@Private
protected static <T> T createRMProxy(final Configuration configuration, final Class<T> protocol, RMProxy instance) throws IOException {
    // Reuse the configuration when it already is a YarnConfiguration;
    // otherwise wrap it so YARN defaults are loaded.
    final YarnConfiguration conf;
    if (configuration instanceof YarnConfiguration) {
        conf = (YarnConfiguration) configuration;
    } else {
        conf = new YarnConfiguration(configuration);
    }
    RetryPolicy retryPolicy = createRetryPolicy(conf, HAUtil.isHAEnabled(conf));
    return newProxyInstance(conf, protocol, instance, retryPolicy);
}
Also used : YarnConfiguration(org.apache.hadoop.yarn.conf.YarnConfiguration) RetryPolicy(org.apache.hadoop.io.retry.RetryPolicy) Private(org.apache.hadoop.classification.InterfaceAudience.Private)

Example 44 with Private

use of org.apache.hadoop.classification.InterfaceAudience.Private in project hadoop by apache.

In the class TimelineACLsManager: the method setAdminACLsManager.

/**
 * Replaces the admin ACLs manager used by this instance.
 *
 * @param adminAclsManager the manager to install
 * @return the previously installed manager, so tests can restore it
 */
@Private
@VisibleForTesting
public AdminACLsManager setAdminACLsManager(AdminACLsManager adminAclsManager) {
    final AdminACLsManager previous = this.adminAclsManager;
    this.adminAclsManager = adminAclsManager;
    return previous;
}
Also used : AdminACLsManager(org.apache.hadoop.yarn.security.AdminACLsManager) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Private(org.apache.hadoop.classification.InterfaceAudience.Private)

Example 45 with Private

use of org.apache.hadoop.classification.InterfaceAudience.Private in project hadoop by apache.

In the class ContainerManagerImpl: the method authorizeStartAndResourceIncreaseRequest.

/**
   * Authorizes a container start or resource-increase request, rejecting
   * it with a remote exception when any check fails.
   *
   * @param nmTokenIdentifier identity from the caller's NMToken; must be
   *          non-null and belong to the same application as the container
   * @param containerTokenIdentifier
   *          of the container whose resource is to be started or increased
   * @param startRequest true for a start-container request, false for a
   *          resource-increase request (affects checks and messages)
   * @throws YarnException if the request is unauthorized, the token is
   *           expired, a start is a relaunch of the same container, or the
   *           token was issued by a previous (unknown) RM
   */
@Private
@VisibleForTesting
protected void authorizeStartAndResourceIncreaseRequest(NMTokenIdentifier nmTokenIdentifier, ContainerTokenIdentifier containerTokenIdentifier, boolean startRequest) throws YarnException {
    // Both tokens must be present before any further validation.
    if (nmTokenIdentifier == null) {
        throw RPCUtil.getRemoteException(INVALID_NMTOKEN_MSG);
    }
    if (containerTokenIdentifier == null) {
        throw RPCUtil.getRemoteException(INVALID_CONTAINERTOKEN_MSG);
    }
    /*
     * Check the following:
     * 1. The request comes from the same application attempt
     * 2. The request possess a container token that has not expired
     * 3. The request possess a container token that is granted by a known RM
     */
    ContainerId containerId = containerTokenIdentifier.getContainerID();
    String containerIDStr = containerId.toString();
    boolean unauthorized = false;
    StringBuilder messageBuilder = new StringBuilder("Unauthorized request to " + (startRequest ? "start container." : "increase container resource."));
    // Check 1: the NMToken and the container token must refer to the same
    // application (attempt ids may legitimately differ across AM restarts).
    if (!nmTokenIdentifier.getApplicationAttemptId().getApplicationId().equals(containerId.getApplicationAttemptId().getApplicationId())) {
        unauthorized = true;
        messageBuilder.append("\nNMToken for application attempt : ").append(nmTokenIdentifier.getApplicationAttemptId()).append(" was used for " + (startRequest ? "starting " : "increasing resource of ") + "container with container token").append(" issued for application attempt : ").append(containerId.getApplicationAttemptId());
    } else if (startRequest && !this.context.getContainerTokenSecretManager().isValidStartContainerRequest(containerTokenIdentifier)) {
        // Is the container being relaunched? Or RPC layer let startCall with
        // tokens generated off old-secret through?
        unauthorized = true;
        messageBuilder.append("\n Attempt to relaunch the same ").append("container with id ").append(containerIDStr).append(".");
    } else if (containerTokenIdentifier.getExpiryTimeStamp() < System.currentTimeMillis()) {
        // Ensure the token is not expired.
        unauthorized = true;
        messageBuilder.append("\nThis token is expired. current time is ").append(System.currentTimeMillis()).append(" found ").append(containerTokenIdentifier.getExpiryTimeStamp());
        messageBuilder.append("\nNote: System times on machines may be out of sync.").append(" Check system time and time zones.");
    }
    if (unauthorized) {
        String msg = messageBuilder.toString();
        LOG.error(msg);
        throw RPCUtil.getRemoteException(msg);
    }
    // Check 3 (done last, with its own message): reject tokens issued by a
    // previous RM incarnation.
    if (containerTokenIdentifier.getRMIdentifier() != nodeStatusUpdater.getRMIdentifier()) {
        // Is the container coming from unknown RM
        StringBuilder sb = new StringBuilder("\nContainer ");
        sb.append(containerTokenIdentifier.getContainerID().toString()).append(" rejected as it is allocated by a previous RM");
        throw new InvalidContainerException(sb.toString());
    }
}
Also used : ContainerId(org.apache.hadoop.yarn.api.records.ContainerId) InvalidContainerException(org.apache.hadoop.yarn.exceptions.InvalidContainerException) ByteString(com.google.protobuf.ByteString) VisibleForTesting(com.google.common.annotations.VisibleForTesting) Private(org.apache.hadoop.classification.InterfaceAudience.Private)

Aggregations

Private (org.apache.hadoop.classification.InterfaceAudience.Private)52 VisibleForTesting (com.google.common.annotations.VisibleForTesting)15 ApplicationId (org.apache.hadoop.yarn.api.records.ApplicationId)12 IOException (java.io.IOException)9 FileStatus (org.apache.hadoop.fs.FileStatus)8 ArrayList (java.util.ArrayList)6 Path (org.apache.hadoop.fs.Path)6 DataInputStream (java.io.DataInputStream)5 EOFException (java.io.EOFException)5 PrintStream (java.io.PrintStream)5 LogReader (org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader)5 ApplicationAttemptId (org.apache.hadoop.yarn.api.records.ApplicationAttemptId)4 ContainerId (org.apache.hadoop.yarn.api.records.ContainerId)4 Resource (org.apache.hadoop.yarn.api.records.Resource)4 YarnRuntimeException (org.apache.hadoop.yarn.exceptions.YarnRuntimeException)4 ByteString (com.google.protobuf.ByteString)2 FileNotFoundException (java.io.FileNotFoundException)2 AccessDeniedException (java.nio.file.AccessDeniedException)2 HashSet (java.util.HashSet)2 FileSystem (org.apache.hadoop.fs.FileSystem)2