Example usage of org.apache.hadoop.classification.InterfaceAudience.Private in the Apache Hadoop project: class LogCLIHelpers, method dumpAllContainersLogs.
/**
 * Dumps the aggregated logs of every container of the given application to
 * per-container files under the requested local output directory.
 *
 * @param options carries the application id, app owner, local output dir,
 *                the requested log types and the byte limit per log
 * @return 0 if at least one log was found and dumped, -1 otherwise
 * @throws IOException on errors reading the remote aggregated log files
 */
@Private
public int dumpAllContainersLogs(ContainerLogsRequest options) throws IOException {
ApplicationId appId = options.getAppId();
String appOwner = options.getAppOwner();
String localDir = options.getOutputLocalDir();
// Local copy of the requested types; note logTypes is therefore never
// null below, only possibly empty (empty selects all log types).
List<String> logTypes = new ArrayList<String>(options.getLogTypes());
RemoteIterator<FileStatus> nodeFiles = getRemoteNodeFileDir(appId, appOwner);
if (nodeFiles == null) {
// No remote app-log directory exists for this app/owner.
return -1;
}
boolean foundAnyLogs = false;
while (nodeFiles.hasNext()) {
FileStatus thisNodeFile = nodeFiles.next();
// If the app's logs were archived into a har file, switch to iterating
// the archive's contents from this point on.
// NOTE(review): this replaces the current iterator, so entries remaining
// in the original listing are skipped — presumably the har contains
// everything that matters; confirm.
if (thisNodeFile.getPath().getName().equals(appId + ".har")) {
Path p = new Path("har:///" + thisNodeFile.getPath().toUri().getRawPath());
nodeFiles = HarFs.get(p.toUri(), conf).listStatusIterator(p);
continue;
}
// Skip aggregation files that are still being written (.tmp suffix).
if (!thisNodeFile.getPath().getName().endsWith(LogAggregationUtils.TMP_FILE_SUFFIX)) {
AggregatedLogFormat.LogReader reader = new AggregatedLogFormat.LogReader(getConf(), thisNodeFile.getPath());
try {
DataInputStream valueStream;
LogKey key = new LogKey();
valueStream = reader.next(key);
// One iteration per container stored in this node's aggregated file.
while (valueStream != null) {
PrintStream out = createPrintStream(localDir, thisNodeFile.getPath().getName(), key.toString());
try {
String containerString = String.format(CONTAINER_ON_NODE_PATTERN, key, thisNodeFile.getPath().getName());
out.println(containerString);
out.println("LogAggregationType: AGGREGATED");
out.println(StringUtils.repeat("=", containerString.length()));
// Drain every log type of this container; an EOFException marks the
// end of the container's data in the stream.
while (true) {
try {
if (logTypes == null || logTypes.isEmpty()) {
// No filter: dump all log types for this container.
LogReader.readAContainerLogsForALogType(valueStream, out, thisNodeFile.getModificationTime(), options.getBytes());
foundAnyLogs = true;
} else {
// Filtered: a return value of 0 means a requested type was dumped.
int result = LogReader.readContainerLogsForALogType(valueStream, out, thisNodeFile.getModificationTime(), logTypes, options.getBytes());
if (result == 0) {
foundAnyLogs = true;
}
}
} catch (EOFException eof) {
break;
}
}
} finally {
closePrintStream(out);
}
// Next container
key = new LogKey();
valueStream = reader.next(key);
}
} finally {
reader.close();
}
}
}
if (!foundAnyLogs) {
// Report the empty remote log dir in the user-facing message.
emptyLogDir(LogAggregationUtils.getRemoteAppLogDir(conf, appId, appOwner).toString());
return -1;
}
return 0;
}
Example usage of org.apache.hadoop.classification.InterfaceAudience.Private in the Apache Hadoop project: class LogCLIHelpers, method printContainersList.
/**
 * Prints one line per container for which aggregated logs exist for the
 * given application, optionally restricted to a single node.
 *
 * @param options carries the application id, app owner and optional node id
 * @param out stream that receives the container listing
 * @param err stream that receives the "nothing found" message
 * @throws IOException on errors reading the remote aggregated log files
 */
@Private
public void printContainersList(ContainerLogsRequest options, PrintStream out, PrintStream err) throws IOException {
  ApplicationId appId = options.getAppId();
  String appOwner = options.getAppOwner();
  String nodeId = options.getNodeId();
  String nodeIdStr = (nodeId == null) ? null : LogAggregationUtils.getNodeString(nodeId);
  RemoteIterator<FileStatus> nodeFiles = getRemoteNodeFileDir(appId, appOwner);
  if (nodeFiles == null) {
    return;
  }
  boolean listedAnyContainers = false;
  while (nodeFiles.hasNext()) {
    FileStatus nodeFile = nodeFiles.next();
    String fileName = nodeFile.getPath().getName();
    // Restrict the listing to the requested node, if one was given.
    if (nodeIdStr != null && !fileName.contains(nodeIdStr)) {
      continue;
    }
    // Skip aggregation files that are still being written.
    if (fileName.endsWith(LogAggregationUtils.TMP_FILE_SUFFIX)) {
      continue;
    }
    AggregatedLogFormat.LogReader reader = new AggregatedLogFormat.LogReader(getConf(), nodeFile.getPath());
    try {
      // Each key in the aggregated file names one container.
      LogKey key = new LogKey();
      DataInputStream valueStream = reader.next(key);
      while (valueStream != null) {
        out.println(String.format(CONTAINER_ON_NODE_PATTERN, key, fileName));
        listedAnyContainers = true;
        key = new LogKey();
        valueStream = reader.next(key);
      }
    } finally {
      reader.close();
    }
  }
  if (listedAnyContainers) {
    return;
  }
  if (nodeId != null) {
    err.println("Can not find information for any containers on " + nodeId);
  } else {
    err.println("Can not find any container information for " + "the application: " + appId);
  }
}
Example usage of org.apache.hadoop.classification.InterfaceAudience.Private in the Apache Hadoop project: class LogCLIHelpers, method printNodesList.
/**
 * Prints the names of all node aggregation files found for the given
 * application — i.e. the nodes that aggregated logs for it.
 *
 * @param options carries the application id and app owner
 * @param out stream that receives the node listing
 * @param err stream that receives the "nothing found" message
 * @throws IOException on errors listing the remote app-log directory
 */
@Private
public void printNodesList(ContainerLogsRequest options, PrintStream out, PrintStream err) throws IOException {
  ApplicationId appId = options.getAppId();
  String appOwner = options.getAppOwner();
  RemoteIterator<FileStatus> nodeFiles = getRemoteNodeFileDir(appId, appOwner);
  if (nodeFiles == null) {
    return;
  }
  boolean foundNode = false;
  StringBuilder sb = new StringBuilder();
  while (nodeFiles.hasNext()) {
    FileStatus thisNodeFile = nodeFiles.next();
    // Chain the appends instead of concatenating inside append(), which
    // built a throwaway String per node.
    sb.append(thisNodeFile.getPath().getName()).append("\n");
    foundNode = true;
  }
  if (!foundNode) {
    err.println("No nodes found that aggregated logs for " + "the application: " + appId);
  } else {
    out.println(sb.toString());
  }
}
Example usage of org.apache.hadoop.classification.InterfaceAudience.Private in the Apache Hadoop project: class LogCLIHelpers, method dumpAContainerLogsForLogTypeWithoutNodeId.
/**
 * Dumps the aggregated logs of a single container, searching every node's
 * aggregation file for it because the node id is not known.
 *
 * @param options carries the application id, app owner, container id,
 *                local output dir, requested log types and byte limit
 * @return 0 if the container's logs were found and dumped, -1 otherwise
 * @throws IOException on errors reading the remote aggregated log files
 */
@Private
public int dumpAContainerLogsForLogTypeWithoutNodeId(ContainerLogsRequest options) throws IOException {
  ApplicationId applicationId = options.getAppId();
  String jobOwner = options.getAppOwner();
  String containerId = options.getContainerId();
  String localDir = options.getOutputLocalDir();
  // Local copy; never null below, only possibly empty (empty = all types).
  List<String> logType = new ArrayList<String>(options.getLogTypes());
  RemoteIterator<FileStatus> nodeFiles = getRemoteNodeFileDir(applicationId, jobOwner);
  if (nodeFiles == null) {
    return -1;
  }
  boolean foundContainerLogs = false;
  while (nodeFiles.hasNext()) {
    FileStatus thisNodeFile = nodeFiles.next();
    // Skip aggregation files that are still being written.
    if (thisNodeFile.getPath().getName().endsWith(LogAggregationUtils.TMP_FILE_SUFFIX)) {
      continue;
    }
    AggregatedLogFormat.LogReader reader = null;
    PrintStream out = System.out;
    try {
      // Probe whether this node's file contains the container at all.
      reader = new AggregatedLogFormat.LogReader(getConf(), thisNodeFile.getPath());
      if (getContainerLogsStream(containerId, reader) == null) {
        continue;
      }
      // We have to re-create the reader object to reset the stream index
      // after calling getContainerLogsStream, which moved the stream index
      // to the end of the log file. Close the probe reader first — the
      // original code reassigned the variable without closing it, leaking
      // the underlying stream.
      reader.close();
      reader = new AggregatedLogFormat.LogReader(getConf(), thisNodeFile.getPath());
      out = createPrintStream(localDir, thisNodeFile.getPath().getName(), containerId);
      String containerString = String.format(CONTAINER_ON_NODE_PATTERN, containerId, thisNodeFile.getPath().getName());
      out.println(containerString);
      out.println("LogAggregationType: AGGREGATED");
      out.println(StringUtils.repeat("=", containerString.length()));
      if (logType == null || logType.isEmpty()) {
        // No filter: dump every log type of the container.
        if (dumpAContainerLogs(containerId, reader, out, thisNodeFile.getModificationTime(), options.getBytes()) > -1) {
          foundContainerLogs = true;
        }
      } else {
        // Dump only the requested log types.
        if (dumpAContainerLogsForALogType(containerId, reader, out, thisNodeFile.getModificationTime(), logType, options.getBytes()) > -1) {
          foundContainerLogs = true;
        }
      }
    } finally {
      if (reader != null) {
        reader.close();
      }
      // NOTE(review): closePrintStream presumably guards against closing
      // System.out (the initial value of out) — confirm.
      closePrintStream(out);
    }
  }
  if (!foundContainerLogs) {
    containerLogNotFound(containerId);
    return -1;
  }
  return 0;
}
Example usage of org.apache.hadoop.classification.InterfaceAudience.Private in the Apache Hadoop project: class TimelineV2ClientImpl, method putObjects.
/**
 * Publishes the given object to the timeline service REST endpoint,
 * retrying because the timeline service address may be stale.
 *
 * @param path resource path relative to the v2 timeline resource URI
 * @param params query parameters for the put request
 * @param obj entity to publish
 * @throws IOException if the put keeps failing after the retry budget
 * @throws YarnException if the REST endpoint cannot be verified
 */
@Private
protected void putObjects(String path, MultivaluedMap<String, String> params, Object obj) throws IOException, YarnException {
  int remainingRetries = verifyRestEndPointAvailable();
  // Re-resolve the (possibly updated) timelineServiceAddress on each
  // attempt; checkRetryWithSleep throws once the budget is exhausted.
  while (true) {
    try {
      URI base = TimelineConnector.constructResURI(getConfig(), timelineServiceAddress, RESOURCE_URI_STR_V2);
      putObjects(base, path, params, obj);
      return;
    } catch (IOException e) {
      checkRetryWithSleep(remainingRetries, e);
      remainingRetries--;
    }
  }
}
Aggregations