Example use of org.asciidoctor.Asciidoctor in the Gerrit project (GerritCodeReview):
the renderFiles method of the AsciiDoctor class.
/**
 * Renders each input AsciiDoc file to the configured output format and, when
 * {@code zip} is non-null, adds the rendered result to the archive.
 *
 * <p>Under bazel the mapped output name is used as-is; otherwise output is
 * written beneath {@code tmpdir} and parent directories are created first.
 *
 * @param inputFiles paths of the AsciiDoc source files to render
 * @param zip archive collecting the rendered files; may be {@code null}
 * @throws IOException if rendering or writing the zip entry fails
 */
private void renderFiles(List<String> inputFiles, ZipOutputStream zip) throws IOException {
    Asciidoctor asciidoctor = JRubyAsciidoctor.create();
    try {
        for (String inputFile : inputFiles) {
            String outName = mapInFileToOutFile(inputFile, inExt, outExt);
            File out = bazel ? new File(outName) : new File(tmpdir, outName);
            if (!bazel) {
                out.getParentFile().mkdirs();
            }
            File input = new File(inputFile);
            // Resolve relative includes against the explicit basedir when set,
            // otherwise against the directory of the source file itself.
            Options options = createOptions(basedir != null ? basedir : input.getParentFile(), out);
            asciidoctor.renderFile(input, options);
            if (zip != null) {
                zipFile(out, outName, zip);
            }
        }
    } finally {
        // Release the embedded JRuby runtime; without this each invocation
        // leaks the interpreter created by JRubyAsciidoctor.create().
        asciidoctor.shutdown();
    }
}
Example use of org.asciidoctor.Asciidoctor in the Apache Camel project:
the executeDocuments method of the PrepareCatalogMojo class.
/**
 * Collects all Camel AsciiDoc documents ({@code .adoc}) from the component and
 * core module trees, copies them into {@code documentsOutDir}, converts each to
 * HTML, writes a sorted {@code docs.properties} index, and finally reports
 * duplicate/missing documents and components lacking documentation.
 *
 * @param components  known component names used for the missing-docs report
 * @param dataformats known data format names used for the missing-docs report
 * @param languages   known language names used for the missing-docs report
 * @param others      other known names used for the missing-docs report
 * @throws MojoExecutionException on general execution failure
 * @throws MojoFailureException if copying a document or writing the index fails
 */
protected void executeDocuments(Set<String> components, Set<String> dataformats, Set<String> languages, Set<String> others) throws MojoExecutionException, MojoFailureException {
    getLog().info("Copying all Camel documents (ascii docs)");
    // lets use sorted set/maps
    Set<File> adocFiles = new TreeSet<>();
    Set<File> missingAdocFiles = new TreeSet<>();
    Set<File> duplicateAdocFiles = new TreeSet<>();
    // find all camel maven modules
    if (componentsDir != null && componentsDir.isDirectory()) {
        File[] componentFiles = componentsDir.listFiles();
        if (componentFiles != null) {
            for (File dir : componentFiles) {
                if (dir.isDirectory() && !"target".equals(dir.getName()) && !dir.getName().startsWith(".") && !excludeDocumentDir(dir.getName())) {
                    File target = new File(dir, "src/main/docs");
                    // special for these as their docs live in a nested *-component sub module
                    if ("camel-salesforce".equals(dir.getName())) {
                        target = new File(dir, "camel-salesforce-component/src/main/docs");
                    } else if ("camel-linkedin".equals(dir.getName())) {
                        target = new File(dir, "camel-linkedin-component/src/main/docs");
                    } else if ("camel-olingo2".equals(dir.getName())) {
                        target = new File(dir, "camel-olingo2-component/src/main/docs");
                    } else if ("camel-box".equals(dir.getName())) {
                        target = new File(dir, "camel-box-component/src/main/docs");
                    }
                    // if the recursive scan added nothing, the module has no docs
                    int before = adocFiles.size();
                    findAsciiDocFilesRecursive(target, adocFiles, new CamelAsciiDocFileFilter());
                    int after = adocFiles.size();
                    if (before == after) {
                        missingAdocFiles.add(dir);
                    }
                }
            }
        }
    }
    if (coreDir != null && coreDir.isDirectory()) {
        File target = new File(coreDir, "src/main/docs");
        findAsciiDocFilesRecursive(target, adocFiles, new CamelAsciiDocFileFilter());
    }
    getLog().info("Found " + adocFiles.size() + " ascii document files");
    // make sure to create out dir
    documentsOutDir.mkdirs();
    // use ascii doctor to convert the adoc files to html so we have documentation in this format as well
    Asciidoctor asciidoctor = Asciidoctor.Factory.create();
    int converted = 0;
    for (File file : adocFiles) {
        File to = new File(documentsOutDir, file.getName());
        if (to.exists()) {
            duplicateAdocFiles.add(to);
            getLog().warn("Duplicate document name detected: " + to);
        }
        try {
            copyFile(file, to);
        } catch (IOException e) {
            throw new MojoFailureException("Cannot copy file from " + file + " -> " + to, e);
        }
        // convert adoc to html as well
        if (file.getName().endsWith(".adoc")) {
            String newName = file.getName().substring(0, file.getName().length() - 5) + ".html";
            File toHtml = new File(documentsOutDir, newName);
            getLog().debug("Converting ascii document to html -> " + toHtml);
            asciidoctor.convertFile(file, OptionsBuilder.options().toFile(toHtml));
            converted++;
            try {
                // now fix the html file because we don't want to include certain lines
                List<String> lines = FileUtils.readLines(toHtml);
                List<String> output = new ArrayList<>();
                for (String line : lines) {
                    // skip leftover raw/endraw template markers
                    if (line.contains("% raw %") || line.contains("% endraw %")) {
                        continue;
                    }
                    output.add(line);
                }
                // only rewrite the file when something was actually stripped
                if (lines.size() != output.size()) {
                    FileUtils.writeLines(toHtml, output, false);
                }
            } catch (IOException e) {
                // ignore: post-processing of the generated HTML is best-effort
            }
        }
    }
    if (converted > 0) {
        getLog().info("Converted " + converted + " ascii documents to HTML");
    }
    Set<String> docs = new LinkedHashSet<>();
    File all = new File(documentsOutDir, "../docs.properties");
    // try-with-resources so the stream is closed even when a write fails
    try (FileOutputStream fos = new FileOutputStream(all, false)) {
        String[] names = documentsOutDir.list();
        List<String> documents = new ArrayList<>();
        // sort the names
        for (String name : names) {
            if (name.endsWith(".adoc")) {
                // strip out .adoc from the name
                String documentName = name.substring(0, name.length() - 5);
                documents.add(documentName);
            }
        }
        Collections.sort(documents);
        for (String name : documents) {
            fos.write(name.getBytes());
            fos.write("\n".getBytes());
            docs.add(name);
        }
    } catch (IOException e) {
        // keep the cause so the underlying I/O error is not lost
        throw new MojoFailureException("Error writing to file " + all, e);
    }
    printDocumentsReport(adocFiles, duplicateAdocFiles, missingAdocFiles);
    // find out if we have documents for each component / dataformat / languages / others
    printMissingDocumentsReport(docs, components, dataformats, languages, others);
}
Example use of org.asciidoctor.Asciidoctor in the Apache Lucene/Solr project:
the main method of the BuildNavAndPDFBody class.
/**
 * Builds the navigation artifacts for the Solr Ref Guide from a directory of
 * AsciiDoc pages: a PDF include body ({@code _data/pdf-main-body.adoc}), a
 * scrollnav prev/next map ({@code _data/scrollnav.json}), and a hierarchical
 * sidebar ({@code _data/sidebar.json}).
 *
 * <p>args[0] is the adoc directory, args[1] the shortname of the main page
 * that roots the hierarchy. Fails if an output file already exists.
 */
public static void main(String[] args) throws Exception {
if (args.length != 2) {
throw new RuntimeException("Wrong # of args: " + args.length);
}
final File adocDir = new File(args[0]);
final String mainPageShortname = args[1];
if (!adocDir.exists()) {
throw new RuntimeException("asciidoc directory does not exist: " + adocDir.toString());
}
// build up a quick mapping of every known page
System.out.println("Building up tree of all known pages");
final Map<String, Page> allPages = new LinkedHashMap<String, Page>();
Asciidoctor doctor = null;
try {
doctor = Factory.create();
final File[] adocFiles = adocDir.listFiles(ADOC_FILE_NAMES);
for (File file : adocFiles) {
// only the document header is parsed here, not the whole page body
Page page = new Page(file, doctor.readDocumentHeader(file));
if (allPages.containsKey(page.shortname)) {
throw new RuntimeException("multiple pages with same shortname: " + page.file.toString() + " and " + allPages.get(page.shortname));
}
allPages.put(page.shortname, page);
}
} finally {
// always release the embedded JRuby runtime, even on failure
if (null != doctor) {
doctor.shutdown();
doctor = null;
}
}
// build up a hierarchical structure rooted at our mainPage
final Page mainPage = allPages.get(mainPageShortname);
if (null == mainPage) {
throw new RuntimeException("no main-page found with shortname: " + mainPageShortname);
}
mainPage.buildKidsRecursive(allPages);
// TODO: use depthFirstWalk to prune allPages to validate that we don't have any loops or orphan pages
// Build up the PDF file,
// while doing this also build up some next/prev maps for use in building the scrollnav
File pdfFile = new File(new File(adocDir, "_data"), "pdf-main-body.adoc");
if (pdfFile.exists()) {
throw new RuntimeException(pdfFile.toString() + " already exists");
}
// shortname -> neighbor maps populated during the walk, consumed by scrollnav below
final Map<String, Page> nextPage = new HashMap<String, Page>();
final Map<String, Page> prevPage = new HashMap<String, Page>();
System.out.println("Creating " + pdfFile.toString());
try (Writer w = new OutputStreamWriter(new FileOutputStream(pdfFile), "UTF-8")) {
// Note: not worrying about headers or anything like that ...
// expecting this file to just be included by the main PDF file.
// track how deep we are so we can adjust headers accordingly
// start with a "negative" depth to treat all "top level" pages as same depth as main-page using Math.max
// (see below)
final AtomicInteger depth = new AtomicInteger(-1);
// the previous page seen in our walk
AtomicReference<Page> previous = new AtomicReference<Page>();
mainPage.depthFirstWalk(new Page.RecursiveAction() {
public boolean act(Page page) {
try {
if (null != previous.get()) {
// add previous as our 'prev' page, and ourselves as the 'next' of previous
prevPage.put(page.shortname, previous.get());
nextPage.put(previous.get().shortname, page);
}
previous.set(page);
// HACK: where this file actually lives will determine what we need here...
w.write("include::../");
w.write(page.file.getName());
// clamp to 0 so top-level pages render at the same level as main-page
w.write("[leveloffset=+" + Math.max(0, depth.intValue()) + "]\n\n");
depth.incrementAndGet();
return true;
} catch (IOException ioe) {
throw new RuntimeException("IOE recursively acting on " + page.shortname, ioe);
}
}
public void postKids(Page page) {
// leaving a page's subtree: undo the depth increment from act()
depth.decrementAndGet();
}
});
}
// Build up the scrollnav file for jekyll's footer
File scrollnavFile = new File(new File(adocDir, "_data"), "scrollnav.json");
if (scrollnavFile.exists()) {
throw new RuntimeException(scrollnavFile.toString() + " already exists");
}
System.out.println("Creating " + scrollnavFile.toString());
try (Writer w = new OutputStreamWriter(new FileOutputStream(scrollnavFile), "UTF-8")) {
JSONObject scrollnav = new JSONObject();
// prev/next entries are optional: first page has no prev, last has no next
for (Page p : allPages.values()) {
JSONObject current = new JSONObject();
Page prev = prevPage.get(p.shortname);
Page next = nextPage.get(p.shortname);
if (null != prev) {
current.put("prev", new JSONObject().put("url", prev.permalink).put("title", prev.title));
}
if (null != next) {
current.put("next", new JSONObject().put("url", next.permalink).put("title", next.title));
}
scrollnav.put(p.shortname, current);
}
// HACK: jekyll doesn't like escaped forward slashes in it's JSON?
w.write(scrollnav.toString(2).replaceAll("\\\\/", "/"));
}
// Build up the sidebar file for jekyll
File sidebarFile = new File(new File(adocDir, "_data"), "sidebar.json");
if (sidebarFile.exists()) {
throw new RuntimeException(sidebarFile.toString() + " already exists");
}
System.out.println("Creating " + sidebarFile.toString());
try (Writer w = new OutputStreamWriter(new FileOutputStream(sidebarFile), "UTF-8")) {
// A stack for tracking what we're working on as we recurse
final Stack<JSONObject> stack = new Stack<JSONObject>();
mainPage.depthFirstWalk(new Page.RecursiveAction() {
public boolean act(Page page) {
final int depth = stack.size();
// the sidebar template only supports 4 levels of nesting
if (4 < depth) {
System.err.println("ERROR: depth==" + depth + " for " + page.permalink);
System.err.println("sidebar.html template can not support pages this deep");
System.exit(-1);
}
try {
final JSONObject current = new JSONObject().put("title", page.title).put("url", page.permalink).put("depth", depth).put("kids", new JSONArray());
if (0 < depth) {
// attach ourselves to our parent's "kids" array
JSONObject parent = stack.peek();
((JSONArray) parent.get("kids")).put(current);
}
stack.push(current);
} catch (JSONException e) {
throw new RuntimeException(e);
}
return true;
}
public void postKids(Page page) {
final JSONObject current = stack.pop();
// the stack only empties when we return to the root: write the whole tree once
if (0 == stack.size()) {
assert page == mainPage;
try {
// HACK: jekyll doesn't like escaped forward slashes in it's JSON?
w.write(current.toString(2).replaceAll("\\\\/", "/"));
} catch (IOException | JSONException e) {
throw new RuntimeException(e);
}
}
}
});
}
}
Example use of org.asciidoctor.Asciidoctor in the Apache Meecrowave project:
the generatePdf method of the PDFify class.
/**
 * Walks the source tree rooted at {@code from} and, for every {@code .adoc}
 * file whose header carries the {@code jbake-meecrowavepdf} attribute (and not
 * {@code jbake-meecrowavepdf-manual}), renders a PDF at the mirrored relative
 * path under {@code targetBase}. Rendering is fanned out on a fixed thread
 * pool and this method waits (up to one hour) for all conversions to finish.
 *
 * @param from       root directory containing the AsciiDoc sources
 * @param targetBase root directory receiving the generated PDFs
 * @throws IOException if walking the source tree fails
 */
public static void generatePdf(final File from, final File targetBase) throws IOException {
    final Path sourceBase = from.toPath();
    final Asciidoctor asciidoctor = Asciidoctor.Factory.create();
    final ExecutorService pool = Executors.newFixedThreadPool(16);
    try {
        Files.walkFileTree(sourceBase, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(final Path file, final BasicFileAttributes attrs) throws IOException {
                final String fileName = file.getFileName().toString();
                if (fileName.endsWith(".adoc")) {
                    pool.submit(() -> {
                        // mirror the source layout, swapping the .adoc suffix for .pdf
                        final String path = sourceBase.relativize(file).toString();
                        final File target = new File(targetBase, path.substring(0, path.length() - "adoc".length()) + "pdf");
                        final File asFile = file.toFile();
                        final Map<String, Object> attributes = asciidoctor.readDocumentHeader(asFile).getAttributes();
                        // if we generate the PDF link we need to create the PDF excepted if it is expected to be manual
                        if (attributes.containsKey("jbake-meecrowavepdf") && !attributes.containsKey("jbake-meecrowavepdf-manual")) {
                            target.getParentFile().mkdirs();
                            asciidoctor.convertFile(asFile, options().safe(UNSAFE).backend("pdf").attributes(AttributesBuilder.attributes().attribute("source-highlighter", "coderay").attribute("context_rootpath", "http://openwebbeans.apache.org/meecrowave")).toFile(target).get());
                            System.out.println("Generated " + target);
                        }
                    });
                }
                return super.visitFile(file, attrs);
            }
        });
        pool.shutdown();
        try {
            pool.awaitTermination(1, TimeUnit.HOURS);
        } catch (final InterruptedException e) {
            // restore the interrupt status for callers; Thread.interrupted()
            // would have cleared it and silently swallowed the interruption
            Thread.currentThread().interrupt();
        }
    } finally {
        // release the embedded JRuby runtime once all conversions are done
        asciidoctor.shutdown();
    }
}
Aggregations