Use of com.cloudbees.jenkins.support.api.FileContent in project support-core-plugin by jenkinsci.
The class GCLogs, method handleRotatedLogs.
/**
 * Two cases:
 * <ul>
 * <li>The file name contains <code>%t</code> or <code>%p</code> somewhere in the middle:
 * those tokens are replaced by <code>.*</code> and the associated logs are matched by regex.
 * This matches GC logs from the current JVM execution, and possibly from previous ones.</li>
 * <li>If that feature is not used, files are simply matched by "starts with".</li>
 * </ul>
 * A standalone sketch of the substitution follows the method below.
 *
 * @param gcLogFileLocation the value specified after <code>-Xloggc:</code>
 * @param result the container to which any logs found are added.
 * @see <a href="https://bugs.openjdk.java.net/browse/JDK-7164841">JDK-7164841</a>
 */
private void handleRotatedLogs(@Nonnull final String gcLogFileLocation, Container result) {
    File gcLogFile = new File(gcLogFileLocation);
    // always add .* in the end because this is where the numbering is going to happen
    String regex = gcLogFile.getName().replaceAll("%[pt]", ".*") + ".*";
    final Pattern gcLogFilesPattern = Pattern.compile(regex);
    File parentDirectory = gcLogFile.getParentFile();
    if (parentDirectory == null || !parentDirectory.exists()) {
        LOGGER.warning("[Support Bundle] " + parentDirectory + " does not exist, cannot collect gc logging files.");
        return;
    }
    File[] gcLogs = parentDirectory.listFiles(new FilenameFilter() {
        @Override
        public boolean accept(File dir, String name) {
            return gcLogFilesPattern.matcher(name).matches();
        }
    });
    if (gcLogs == null || gcLogs.length == 0) {
        LOGGER.warning("No GC logging files found, although the VM argument was found. This is probably a bug.");
        return;
    }
    LOGGER.finest("Found " + gcLogs.length + " matching files in " + parentDirectory.getAbsolutePath());
    for (File gcLog : gcLogs) {
        LOGGER.finest("Adding '" + gcLog.getName() + "' file");
        result.add(new FileContent(GCLOGS_BUNDLE_ROOT + gcLog.getName(), gcLog));
    }
}
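As a quick illustration of the substitution above, here is a minimal, standalone sketch (not part of the plugin); the -Xloggc: value and the sample file names are hypothetical.

import java.io.File;
import java.util.regex.Pattern;

public class GcLogPatternDemo {
    public static void main(String[] args) {
        // hypothetical value passed after -Xloggc:
        String gcLogFileLocation = "/var/log/jenkins/gc-%t.log";
        String fileName = new File(gcLogFileLocation).getName();
        // same transformation as handleRotatedLogs: %t/%p become ".*",
        // and a trailing ".*" accounts for the rotation numbering suffix
        Pattern pattern = Pattern.compile(fileName.replaceAll("%[pt]", ".*") + ".*");
        System.out.println(pattern.matcher("gc-2024-01-01_10-00-00.log.0").matches()); // true
        System.out.println(pattern.matcher("catalina.out").matches()); // false
    }
}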
Use of com.cloudbees.jenkins.support.api.FileContent in project support-core-plugin by jenkinsci.
The class JenkinsLogs, method addLogRecorders.
/**
 * Dumps the contents of {@link LogRecorder}s, i.e. the groups of loggers configured
 * by the user. The contents are also kept in a ring buffer that only remembers the most
 * recent 256 or so entries.
 */
private void addLogRecorders(Container result) {
    for (Map.Entry<String, LogRecorder> entry : logRecorders.entrySet()) {
        String name = entry.getKey();
        String entryName = "nodes/master/logs/custom/" + name + ".log";
        File storedFile = new File(customLogs, name + ".log");
        if (storedFile.isFile()) {
            result.add(new FileContent(entryName, storedFile));
        } else {
            // Was not stored for some reason; fine, just load the memory buffer.
            final LogRecorder recorder = entry.getValue();
            result.add(new LogRecordContent(entryName) {
                @Override
                public Iterable<LogRecord> getLogRecords() {
                    return recorder.getLogRecords();
                }
            });
        }
    }
}
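The "256 or so entries" mentioned above refers to the ring-buffer behaviour of the in-memory recorder. The following is only a rough, self-contained illustration of that behaviour, not the Jenkins implementation; the class name and capacity are assumptions.

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.logging.Level;
import java.util.logging.LogRecord;

public class RingBufferSketch {
    private static final int CAPACITY = 256; // assumed capacity, for illustration only
    private final Deque<LogRecord> buffer = new ArrayDeque<>(CAPACITY);

    public synchronized void add(LogRecord record) {
        if (buffer.size() == CAPACITY) {
            buffer.removeFirst(); // drop the oldest entry once the buffer is full
        }
        buffer.addLast(record);
    }

    public synchronized Iterable<LogRecord> getLogRecords() {
        return new ArrayDeque<>(buffer); // defensive copy, oldest first
    }

    public static void main(String[] args) {
        RingBufferSketch sketch = new RingBufferSketch();
        for (int i = 0; i < 1000; i++) {
            sketch.add(new LogRecord(Level.INFO, "message " + i));
        }
        // only the most recent 256 records remain
        int count = 0;
        for (LogRecord ignored : sketch.getLogRecords()) {
            count++;
        }
        System.out.println(count); // 256
    }
}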
Use of com.cloudbees.jenkins.support.api.FileContent in project support-core-plugin by jenkinsci.
The class JenkinsLogs, method addMasterJulLogRecords.
/**
 * Adds the j.u.l logging output that the support-core plugin captures.
 *
 * <p>
 * Compared to {@link #addMasterJulRingBuffer(Container)}, this one uses disk files,
 * so it remembers a larger number of entries.
 */
private void addMasterJulLogRecords(Container result) {
    // this file captures the most recent of those that are still kept around in memory.
    // this overlaps with Jenkins.logRecords, and also overlaps with what's written in files,
    // but added nonetheless just in case.
    //
    // should be ignorable.
    result.add(new LogRecordContent("nodes/master/logs/all_memory_buffer.log") {
        @Override
        public Iterable<LogRecord> getLogRecords() {
            return SupportPlugin.getInstance().getAllLogRecords();
        }
    });
    final File[] julLogFiles = SupportPlugin.getRootDirectory().listFiles(new LogFilenameFilter());
    if (julLogFiles == null) {
        LOGGER.log(Level.WARNING, "Cannot add master java.util.logging logs to the bundle. Cannot access log files");
        return;
    }
    // log records written to the disk
    for (File file : julLogFiles) {
        result.add(new FileContent("nodes/master/logs/" + file.getName(), file));
    }
}
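LogFilenameFilter is the plugin's own filter and its exact rules are not shown here. As an illustrative stand-in only, a FilenameFilter of the same shape could look like the sketch below, accepting "*.log" files and rotated variants such as "jenkins.log.1"; the directory path is hypothetical.

import java.io.File;
import java.io.FilenameFilter;

public class SimpleLogFilenameFilter implements FilenameFilter {
    @Override
    public boolean accept(File dir, String name) {
        // plain ".log" files plus rotated copies ending in ".log.<number>"
        return name.endsWith(".log") || name.matches(".*\\.log\\.\\d+");
    }

    public static void main(String[] args) {
        File[] logs = new File("/var/lib/jenkins/logs").listFiles(new SimpleLogFilenameFilter()); // hypothetical path
        if (logs != null) {
            for (File log : logs) {
                System.out.println(log.getName());
            }
        }
    }
}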
Use of com.cloudbees.jenkins.support.api.FileContent in project support-core-plugin by jenkinsci.
The class SlaveLogs, method addContents.
@Override
public void addContents(@NonNull Container container) {
    // expensive remote computations are pooled together and executed later, concurrently across all the agents
    List<java.util.concurrent.Callable<List<FileContent>>> tasks = Lists.newArrayList();
    // id is awkward because of backward compatibility
    SmartLogFetcher logFetcher = new SmartLogFetcher("cache", new LogFilenameFilter());
    SmartLogFetcher winswLogFetcher = new SmartLogFetcher("winsw", new WinswLogfileFilter());
    final boolean needHack = SlaveLogFetcher.isRequired();
    for (final Node node : Jenkins.getInstance().getNodes()) {
        if (node.toComputer() instanceof SlaveComputer) {
            container.add(new PrintedContent("nodes/slave/" + node.getNodeName() + "/jenkins.log") {
                @Override
                protected void printTo(PrintWriter out) throws IOException {
                    Computer computer = node.toComputer();
                    if (computer == null) {
                        out.println("N/A");
                    } else {
                        try {
                            List<LogRecord> records = null;
                            if (needHack) {
                                VirtualChannel channel = computer.getChannel();
                                if (channel != null) {
                                    hudson.remoting.Future<List<LogRecord>> future = SlaveLogFetcher.getLogRecords(channel);
                                    records = future.get(REMOTE_OPERATION_TIMEOUT_MS, TimeUnit.MILLISECONDS);
                                }
                            }
                            if (records == null) {
                                records = computer.getLogRecords();
                            }
                            for (ListIterator<LogRecord> iterator = records.listIterator(records.size()); iterator.hasPrevious(); ) {
                                LogRecord logRecord = iterator.previous();
                                out.print(LOG_FORMATTER.format(logRecord));
                            }
                        } catch (Throwable e) {
                            out.println();
                            SupportLogFormatter.printStackTrace(e, out);
                        }
                    }
                    out.flush();
                }
            });
        }
        addSlaveJulLogRecords(container, tasks, node, logFetcher);
        addWinsStdoutStderrLog(tasks, node, winswLogFetcher);
    }
    // execute all the expensive computations in parallel to speed things up
    if (!tasks.isEmpty()) {
        ExecutorService service = Executors.newFixedThreadPool(Math.max(1, Math.min(Runtime.getRuntime().availableProcessors() * 2, tasks.size())), new ExceptionCatchingThreadFactory(new DaemonThreadFactory()));
        try {
            long expiresNanoTime = System.nanoTime() + TimeUnit.SECONDS.toNanos(SupportPlugin.REMOTE_OPERATION_CACHE_TIMEOUT_SEC);
            for (java.util.concurrent.Future<List<FileContent>> r : service.invokeAll(tasks, SupportPlugin.REMOTE_OPERATION_CACHE_TIMEOUT_SEC, TimeUnit.SECONDS)) {
                try {
                    for (FileContent c : r.get(Math.max(1, expiresNanoTime - System.nanoTime()), TimeUnit.NANOSECONDS)) {
                        container.add(c);
                    }
                } catch (ExecutionException e) {
                    LOGGER.log(Level.WARNING, "Could not retrieve some of the remote node extra logs", e);
                } catch (TimeoutException e) {
                    LOGGER.log(Level.WARNING, "Could not retrieve some of the remote node extra logs", e);
                    r.cancel(false);
                }
            }
        } catch (InterruptedException e) {
            LOGGER.log(Level.WARNING, "Could not retrieve some of the remote node extra logs", e);
        } finally {
            service.shutdown();
        }
    }
}
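The timeout arithmetic above gives every future a share of one overall deadline: invokeAll enforces the budget as a whole, and each subsequent get() only waits for whatever remains of it. A self-contained sketch of that pattern, with placeholder tasks and a placeholder 30-second budget, might look like this.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class SharedDeadlineSketch {
    public static void main(String[] args) throws InterruptedException {
        List<Callable<String>> tasks = new ArrayList<>();
        for (int i = 0; i < 4; i++) {
            final int id = i;
            tasks.add(() -> {
                Thread.sleep(100L * id); // simulate a slow remote call
                return "result " + id;
            });
        }
        ExecutorService service = Executors.newFixedThreadPool(2);
        try {
            long timeoutSec = 30L; // placeholder overall budget
            long expiresNanoTime = System.nanoTime() + TimeUnit.SECONDS.toNanos(timeoutSec);
            for (Future<String> f : service.invokeAll(tasks, timeoutSec, TimeUnit.SECONDS)) {
                try {
                    // wait only for the part of the budget that is still left
                    System.out.println(f.get(Math.max(1, expiresNanoTime - System.nanoTime()), TimeUnit.NANOSECONDS));
                } catch (ExecutionException | TimeoutException | CancellationException e) {
                    System.err.println("skipping one result: " + e);
                }
            }
        } finally {
            service.shutdown();
        }
    }
}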
Use of com.cloudbees.jenkins.support.api.FileContent in project support-core-plugin by jenkinsci.
The class SlaveLogs, method addSlaveJulLogRecords.
/**
 * Captures a "recent" (but still fairly large) number of j.u.l entries written on this agent.
 *
 * @see JenkinsLogs#addMasterJulLogRecords(Container)
 */
private void addSlaveJulLogRecords(Container result, List<java.util.concurrent.Callable<List<FileContent>>> tasks, final Node node, final SmartLogFetcher logFetcher) {
    final FilePath rootPath = node.getRootPath();
    if (rootPath != null) {
        // rotated log files stored on the disk
        tasks.add(new java.util.concurrent.Callable<List<FileContent>>() {
            public List<FileContent> call() throws Exception {
                List<FileContent> result = new ArrayList<FileContent>();
                FilePath supportPath = rootPath.child(SUPPORT_DIRECTORY_NAME);
                if (supportPath.isDirectory()) {
                    final Map<String, File> logFiles = logFetcher.forNode(node).getLogFiles(supportPath);
                    for (Map.Entry<String, File> entry : logFiles.entrySet()) {
                        result.add(new FileContent("nodes/slave/" + node.getNodeName() + "/logs/" + entry.getKey(), entry.getValue()));
                    }
                }
                return result;
            }
        });
    }
    // this file captures the most recent of those that are still kept around in memory.
    // this overlaps with Jenkins.logRecords, and also overlaps with what's written in files,
    // but added nonetheless just in case.
    //
    // should be ignorable.
    result.add(new LogRecordContent("nodes/slave/" + node.getNodeName() + "/logs/all_memory_buffer.log") {
        @Override
        public Iterable<LogRecord> getLogRecords() throws IOException {
            try {
                return SupportPlugin.getInstance().getAllLogRecords(node);
            } catch (InterruptedException e) {
                throw (IOException) new InterruptedIOException().initCause(e);
            }
        }
    });
}
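The catch block above uses a common idiom for surfacing an InterruptedException through an API that only declares IOException. A small sketch of that idiom, with a hypothetical blockingCall() standing in for the remote fetch, is shown below; the re-interrupt line is an addition for illustration, not part of the plugin code.

import java.io.IOException;
import java.io.InterruptedIOException;

public class InterruptWrappingSketch {
    // hypothetical stand-in for a call that can be interrupted
    static String blockingCall() throws InterruptedException {
        Thread.sleep(10);
        return "records";
    }

    public static String fetch() throws IOException {
        try {
            return blockingCall();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag for callers
            throw (IOException) new InterruptedIOException().initCause(e);
        }
    }

    public static void main(String[] args) throws IOException {
        System.out.println(fetch());
    }
}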