Use of org.apache.hadoop.classification.InterfaceAudience.Private in project hadoop by apache.
From the class LogCLIHelpers, method dumpAContainerLogsForALogType:
@Private
public int dumpAContainerLogsForALogType(String containerIdStr,
    AggregatedLogFormat.LogReader reader, PrintStream out,
    long logUploadedTime, List<String> logType, long bytes)
    throws IOException {
  DataInputStream valueStream = getContainerLogsStream(containerIdStr, reader);
  if (valueStream == null) {
    return -1;
  }
  boolean foundContainerLogs = false;
  // The value stream holds one entry per log file; keep reading until the
  // reader signals the end of this container's logs with an EOFException.
  while (true) {
    try {
      int result = LogReader.readContainerLogsForALogType(valueStream, out,
          logUploadedTime, logType, bytes);
      if (result == 0) {
        foundContainerLogs = true;
      }
    } catch (EOFException eof) {
      break;
    }
  }
  if (foundContainerLogs) {
    return 0;
  }
  return -1;
}
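For context, a minimal sketch of how a caller might drive this method. The helpers variable (an existing LogCLIHelpers instance), the remote log file path, and the container id are hypothetical placeholders, and the usual org.apache.hadoop.yarn.logaggregation imports are assumed; only the AggregatedLogFormat.LogReader construction itself mirrors the source above.

// Hypothetical caller; "helpers" and the path/container id values are
// placeholders, not part of the snippet above.
Configuration conf = new Configuration();
Path remoteLogFile = new Path("/app-logs/hadoop/logs/application_x/host_45454");
AggregatedLogFormat.LogReader reader =
    new AggregatedLogFormat.LogReader(conf, remoteLogFile);
try {
  int rc = helpers.dumpAContainerLogsForALogType(
      "container_1460000000000_0001_01_000002",  // container to dump
      reader, System.out,
      System.currentTimeMillis(),                // logUploadedTime
      Arrays.asList("stdout", "stderr"),         // log types to include
      Long.MAX_VALUE);                           // byte limit per file
  if (rc != 0) {
    System.err.println("no matching logs found");
  }
} finally {
  reader.close();
}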
Use of org.apache.hadoop.classification.InterfaceAudience.Private in project hadoop by apache.
From the class LogCLIHelpers, method dumpAContainerLogsForLogType:
@Private
@VisibleForTesting
public int dumpAContainerLogsForLogType(ContainerLogsRequest options,
    boolean outputFailure) throws IOException {
  ApplicationId applicationId = options.getAppId();
  String jobOwner = options.getAppOwner();
  String nodeId = options.getNodeId();
  String containerId = options.getContainerId();
  String localDir = options.getOutputLocalDir();
  List<String> logType = new ArrayList<String>(options.getLogTypes());
  RemoteIterator<FileStatus> nodeFiles =
      getRemoteNodeFileDir(applicationId, jobOwner);
  if (nodeFiles == null) {
    return -1;
  }
  boolean foundContainerLogs = false;
  while (nodeFiles.hasNext()) {
    FileStatus thisNodeFile = nodeFiles.next();
    String fileName = thisNodeFile.getPath().getName();
    // If the logs were archived into a Hadoop archive, switch the iterator
    // to the contents of the .har file and keep scanning.
    if (fileName.equals(applicationId + ".har")) {
      Path p = new Path("har:///"
          + thisNodeFile.getPath().toUri().getRawPath());
      nodeFiles = HarFs.get(p.toUri(), conf).listStatusIterator(p);
      continue;
    }
    if (fileName.contains(LogAggregationUtils.getNodeString(nodeId))
        && !fileName.endsWith(LogAggregationUtils.TMP_FILE_SUFFIX)) {
      AggregatedLogFormat.LogReader reader = null;
      PrintStream out = createPrintStream(localDir, fileName, containerId);
      try {
        reader = new AggregatedLogFormat.LogReader(getConf(),
            thisNodeFile.getPath());
        if (getContainerLogsStream(containerId, reader) == null) {
          continue;
        }
        String containerString = String.format(CONTAINER_ON_NODE_PATTERN,
            containerId, thisNodeFile.getPath().getName());
        out.println(containerString);
        out.println("LogAggregationType: AGGREGATED");
        out.println(StringUtils.repeat("=", containerString.length()));
        // We have to re-create reader object to reset the stream index
        // after calling getContainerLogsStream which would move the stream
        // index to the end of the log file.
        reader = new AggregatedLogFormat.LogReader(getConf(),
            thisNodeFile.getPath());
        if (logType == null || logType.isEmpty()) {
          if (dumpAContainerLogs(containerId, reader, out,
              thisNodeFile.getModificationTime(), options.getBytes()) > -1) {
            foundContainerLogs = true;
          }
        } else {
          if (dumpAContainerLogsForALogType(containerId, reader, out,
              thisNodeFile.getModificationTime(), logType,
              options.getBytes()) > -1) {
            foundContainerLogs = true;
          }
        }
      } finally {
        if (reader != null) {
          reader.close();
        }
        closePrintStream(out);
      }
    }
  }
  if (!foundContainerLogs) {
    if (outputFailure) {
      containerLogNotFound(containerId);
    }
    return -1;
  }
  return 0;
}
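A hedged usage sketch follows. The ContainerLogsRequest setter names are assumed to mirror the getters the method reads (the setters are not shown in this snippet), helpers is a configured LogCLIHelpers instance, and all values are placeholders.

// Hypothetical invocation; setter names and values are assumptions.
ContainerLogsRequest options = new ContainerLogsRequest();
options.setAppId(ApplicationId.fromString("application_1460000000000_0001"));
options.setAppOwner("hadoop");
options.setNodeId("node1.example.com:45454");
options.setContainerId("container_1460000000000_0001_01_000002");
options.setOutputLocalDir(null);  // null: write to stdout instead of a file
options.setLogTypes(new HashSet<>(Arrays.asList("syslog")));
options.setBytes(Long.MAX_VALUE);
// Returns 0 if any logs were dumped, -1 otherwise (printing a not-found
// message because outputFailure is true).
int rc = helpers.dumpAContainerLogsForLogType(options, true);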
Use of org.apache.hadoop.classification.InterfaceAudience.Private in project hadoop by apache.
From the class LogCLIHelpers, method listContainerLogs:
@Private
public Set<String> listContainerLogs(ContainerLogsRequest options)
    throws IOException {
  Set<String> logTypes = new HashSet<String>();
  ApplicationId appId = options.getAppId();
  String appOwner = options.getAppOwner();
  String nodeId = options.getNodeId();
  String containerIdStr = options.getContainerId();
  // A null container id means: collect log types for every container.
  boolean getAllContainers = (containerIdStr == null);
  String nodeIdStr = (nodeId == null) ? null
      : LogAggregationUtils.getNodeString(nodeId);
  RemoteIterator<FileStatus> nodeFiles = getRemoteNodeFileDir(appId, appOwner);
  if (nodeFiles == null) {
    return logTypes;
  }
  while (nodeFiles.hasNext()) {
    FileStatus thisNodeFile = nodeFiles.next();
    if (nodeIdStr != null) {
      if (!thisNodeFile.getPath().getName().contains(nodeIdStr)) {
        continue;
      }
    }
    if (!thisNodeFile.getPath().getName()
        .endsWith(LogAggregationUtils.TMP_FILE_SUFFIX)) {
      AggregatedLogFormat.LogReader reader =
          new AggregatedLogFormat.LogReader(getConf(), thisNodeFile.getPath());
      try {
        DataInputStream valueStream;
        LogKey key = new LogKey();
        valueStream = reader.next(key);
        while (valueStream != null) {
          if (getAllContainers || (key.toString().equals(containerIdStr))) {
            // Read only the per-file metadata (the log type names) and skip
            // the contents; EOFException marks the end of this container.
            while (true) {
              try {
                String logFile = LogReader
                    .readContainerMetaDataAndSkipData(valueStream).getFirst();
                logTypes.add(logFile);
              } catch (EOFException eof) {
                break;
              }
            }
            if (!getAllContainers) {
              break;
            }
          }
          // Next container
          key = new LogKey();
          valueStream = reader.next(key);
        }
      } finally {
        reader.close();
      }
    }
  }
  return logTypes;
}
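A short sketch of listing the available log types; the options construction follows the previous example, and helpers is again a hypothetical LogCLIHelpers instance.

// Hypothetical caller; prints the names of the aggregated log files,
// e.g. [stdout, stderr, syslog].
Set<String> types = helpers.listContainerLogs(options);
System.out.println("Aggregated log files for " + options.getContainerId()
    + ": " + types);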
Use of org.apache.hadoop.classification.InterfaceAudience.Private in project hadoop by apache.
From the class RMProxy, method createRMProxy:
/**
 * Currently used by Client and AM only.
 * Create a proxy for the specified protocol. For non-HA,
 * this is a direct connection to the ResourceManager address. When HA is
 * enabled, the proxy handles the failover between the ResourceManagers as
 * well.
 */
@Private
protected static <T> T createRMProxy(final Configuration configuration,
    final Class<T> protocol, RMProxy instance) throws IOException {
  // Ensure we work with a YarnConfiguration so YARN-specific defaults apply.
  YarnConfiguration conf = (configuration instanceof YarnConfiguration)
      ? (YarnConfiguration) configuration
      : new YarnConfiguration(configuration);
  RetryPolicy retryPolicy = createRetryPolicy(conf, HAUtil.isHAEnabled(conf));
  return newProxyInstance(conf, protocol, instance, retryPolicy);
}
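Since createRMProxy is protected, client code normally reaches it through a public subclass such as ClientRMProxy. A minimal sketch; the request/response handling is illustrative:

// ClientRMProxy delegates to the protected helper above, so HA failover
// (when enabled) is handled transparently by the returned proxy.
YarnConfiguration conf = new YarnConfiguration();
ApplicationClientProtocol rmClient =
    ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class);
GetNewApplicationResponse response =
    rmClient.getNewApplication(GetNewApplicationRequest.newInstance());
System.out.println("New application id: " + response.getApplicationId());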
Use of org.apache.hadoop.classification.InterfaceAudience.Private in project tez by apache.
From the class YARNRunner, method configureMRInputWithLegacySplitsGenerated:
@Private
private static DataSourceDescriptor configureMRInputWithLegacySplitsGenerated(
    Configuration conf, boolean useLegacyInput) {
  InputDescriptor inputDescriptor;
  try {
    // Pick MRInputLegacy for the classic MR path, plain MRInput otherwise,
    // and embed the serialized MR configuration as the input's user payload.
    inputDescriptor = InputDescriptor.create(useLegacyInput
        ? MRInputLegacy.class.getName() : MRInput.class.getName())
        .setUserPayload(MRInputHelpersInternal.createMRInputPayload(conf, null));
  } catch (IOException e) {
    throw new TezUncheckedException(e);
  }
  DataSourceDescriptor dsd = DataSourceDescriptor.create(inputDescriptor,
      null, null);
  if (conf.getBoolean(
      TezRuntimeConfiguration.TEZ_RUNTIME_CONVERT_USER_PAYLOAD_TO_HISTORY_TEXT,
      TezRuntimeConfiguration.TEZ_RUNTIME_CONVERT_USER_PAYLOAD_TO_HISTORY_TEXT_DEFAULT)) {
    dsd.getInputDescriptor().setHistoryText(TezUtils.convertToHistoryText(conf));
  }
  return dsd;
}
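For context, a sketch of how the returned DataSourceDescriptor is typically wired into a Tez DAG via the public Vertex.addDataSource API; the vertex name, processor descriptor, and task count below are hypothetical placeholders:

// Hypothetical continuation; jobConf, mapProcessor, and numMapTasks are
// placeholders supplied by the surrounding DAG-construction code.
DataSourceDescriptor dsd =
    configureMRInputWithLegacySplitsGenerated(jobConf, true);
Vertex mapVertex = Vertex.create("initialmap", mapProcessor, numMapTasks);
mapVertex.addDataSource("MRInput", dsd);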