Use of com.cloudbees.jenkins.support.api.PrintedContent in project support-core-plugin by jenkinsci.
The example below is from the class SlaveLogs, method addContents.
@Override
public void addContents(@NonNull Container container) {
    // expensive remote computations are pooled together and executed later, concurrently across all the agents
    List<java.util.concurrent.Callable<List<FileContent>>> tasks = Lists.newArrayList();
    // id is awkward because of backward compatibility
    SmartLogFetcher logFetcher = new SmartLogFetcher("cache", new LogFilenameFilter());
    SmartLogFetcher winswLogFetcher = new SmartLogFetcher("winsw", new WinswLogfileFilter());
    final boolean needHack = SlaveLogFetcher.isRequired();
    for (final Node node : Jenkins.getInstance().getNodes()) {
        if (node.toComputer() instanceof SlaveComputer) {
            container.add(new PrintedContent("nodes/slave/" + node.getNodeName() + "/jenkins.log") {
                @Override
                protected void printTo(PrintWriter out) throws IOException {
                    Computer computer = node.toComputer();
                    if (computer == null) {
                        out.println("N/A");
                    } else {
                        try {
                            List<LogRecord> records = null;
                            if (needHack) {
                                VirtualChannel channel = computer.getChannel();
                                if (channel != null) {
                                    hudson.remoting.Future<List<LogRecord>> future = SlaveLogFetcher.getLogRecords(channel);
                                    records = future.get(REMOTE_OPERATION_TIMEOUT_MS, TimeUnit.MILLISECONDS);
                                }
                            }
                            if (records == null) {
                                records = computer.getLogRecords();
                            }
                            // iterate the records in reverse order
                            for (ListIterator<LogRecord> iterator = records.listIterator(records.size()); iterator.hasPrevious(); ) {
                                LogRecord logRecord = iterator.previous();
                                out.print(LOG_FORMATTER.format(logRecord));
                            }
                        } catch (Throwable e) {
                            out.println();
                            SupportLogFormatter.printStackTrace(e, out);
                        }
                    }
                    out.flush();
                }
            });
        }
        addSlaveJulLogRecords(container, tasks, node, logFetcher);
        addWinsStdoutStderrLog(tasks, node, winswLogFetcher);
    }
    // execute all the expensive computations in parallel to speed things up
    if (!tasks.isEmpty()) {
        ExecutorService service = Executors.newFixedThreadPool(
                Math.max(1, Math.min(Runtime.getRuntime().availableProcessors() * 2, tasks.size())),
                new ExceptionCatchingThreadFactory(new DaemonThreadFactory()));
        try {
            long expiresNanoTime = System.nanoTime() + TimeUnit.SECONDS.toNanos(SupportPlugin.REMOTE_OPERATION_CACHE_TIMEOUT_SEC);
            for (java.util.concurrent.Future<List<FileContent>> r :
                    service.invokeAll(tasks, SupportPlugin.REMOTE_OPERATION_CACHE_TIMEOUT_SEC, TimeUnit.SECONDS)) {
                try {
                    // collect the FileContents produced by each task, honoring the overall deadline
                    for (FileContent c : r.get(Math.max(1, expiresNanoTime - System.nanoTime()), TimeUnit.NANOSECONDS)) {
                        container.add(c);
                    }
                } catch (ExecutionException e) {
                    LOGGER.log(Level.WARNING, "Could not retrieve some of the remote node extra logs", e);
                } catch (TimeoutException e) {
                    LOGGER.log(Level.WARNING, "Could not retrieve some of the remote node extra logs", e);
                    r.cancel(false);
                }
            }
        } catch (InterruptedException e) {
            LOGGER.log(Level.WARNING, "Could not retrieve some of the remote node extra logs", e);
        } finally {
            service.shutdown();
        }
    }
}
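
The core PrintedContent pattern used above is to subclass it anonymously, override printTo, and add the instance to the Container so the text lands in the support bundle at the given path. A minimal sketch of that pattern, stripped of the remoting details, is shown below; the class name ExamplePrintedContent, its method, and the path "example/hello.txt" are made up for illustration and are not part of the plugin.

import com.cloudbees.jenkins.support.api.Container;
import com.cloudbees.jenkins.support.api.PrintedContent;
import edu.umd.cs.findbugs.annotations.NonNull;

import java.io.IOException;
import java.io.PrintWriter;

// Hypothetical helper, not part of support-core-plugin: shows only the
// minimal PrintedContent usage demonstrated in SlaveLogs.addContents.
public class ExamplePrintedContent {

    public void addContents(@NonNull Container container) {
        // PrintedContent writes plain text into the bundle at the given path.
        container.add(new PrintedContent("example/hello.txt") {
            @Override
            protected void printTo(PrintWriter out) throws IOException {
                out.println("Hello from a support bundle");
                out.flush();
            }
        });
    }
}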