Use of us.parr.bookish.model.Document in the bookish project by parrt.
From the class Tool, method legacy_translate:
/**
 * Legacy single-document translation: parse one markdown chapter, translate it
 * into a Document model, and render it through the target templates.
 *
 * @param trans         translator configured for the desired target
 * @param inputDir      directory containing the chapter file
 * @param inputFilename chapter filename within inputDir
 * @return the translated Document paired with its rendered output text
 * @throws IOException if the chapter file cannot be read
 */
public Pair<Document, String> legacy_translate(Translator trans, String inputDir, String inputFilename) throws IOException {
	Pair<BookishParser.DocumentContext, BookishParser> parsed = parseChapter(inputDir, inputFilename, 0);
	// expose the entities discovered during parsing to the translator
	trans.entities = parsed.b.entities;
	// translate the single chapter's parse tree into the output model
	Document doc = (Document) trans.visit(parsed.a);
	doc.chapter.connectContainerTree();
	// convert the model tree to a StringTemplate and render it
	ST rendered = new ModelConverter(trans.templates).walk(doc);
	return new Pair<>(doc, rendered.render());
}
Use of us.parr.bookish.model.Document in the bookish project by parrt.
From the class Tool, method process:
/**
 * Main entry point after argument parsing: translates either a single legacy
 * markdown file or a whole book described by a JSON metadata file.
 *
 * <p>Legacy mode (metadata filename ends in ".md"): translate that one file and
 * return. Book mode: read title/author/chapter list from the JSON metadata,
 * parse every chapter first (to collect entity definitions and code snippets),
 * execute the code snippets, then translate and render each chapter plus the
 * top-level book template, and finally copy images and css to the output dir.
 *
 * @param args command-line arguments; parsed via handleArgs
 * @throws Exception on I/O or translation failure
 */
public void process(String[] args) throws Exception {
	options = handleArgs(args);
	String metadataFilename = option("metadataFilename");
	inputDir = new File(metadataFilename).getParent();
	outputDir = option("o");
	String outFilename;
	Translator trans;
	Target target = (Target) optionO("target");
	ParrtIO.mkdir(outputDir + "/images");
	String snippetsDir = getBuildDir(metadataFilename) + "/snippets";
	ParrtIO.mkdir(snippetsDir);

	if (metadataFilename.endsWith(".md")) {
		// Legacy mode: a single markdown file, no book metadata.
		String inputFilename = metadataFilename;
		Book book = new Book(this, "", "");
		book.entities = new HashMap<>();
		trans = new Translator(book, book.entities, target, outputDir);
		if (target == Target.HTML) {
			outFilename = "index.html";
		}
		else {
			outFilename = stripFileExtension(basename(inputFilename)) + ".tex";
		}
		Pair<Document, String> results = legacy_translate(trans, inputDir, basename(inputFilename));
		String output = results.b;
		ParrtIO.save(outputDir + "/" + outFilename, output);
		copyImages(book, inputDir, outputDir);
		return;
	}

	// Book mode: read and use the JSON metadata.
	// FIX: close the JsonReader (and its underlying FileReader) — previously leaked.
	// NOTE(review): FileReader uses the platform default charset; presumably the
	// metadata file is UTF-8 — confirm and pass an explicit charset if so.
	JsonObject metadata;
	try (JsonReader jsonReader = Json.createReader(new FileReader(metadataFilename))) {
		metadata = jsonReader.readObject();
	}
	String title = metadata.getString("title");
	Book book = new Book(this, title, null);
	String author = metadata.getString("author");
	dataDir = metadata.getString("data");
	// Rule "paragraph" needs a blank line on the front
	author = "\n\n" + author;
	trans = new Translator(book, null, target, outputDir);
	book.author = translateString(trans, author, "paragraph");

	String mainOutFilename;
	if (target == Target.HTML) {
		mainOutFilename = "index.html";
	}
	else {
		mainOutFilename = "book.tex";
	}

	// Parse all documents first so every chapter's entity defs are known
	// before any chapter is translated.
	List<BookishParser.DocumentContext> trees = new ArrayList<>();
	List<Map<String, EntityDef>> entities = new ArrayList<>();
	List<List<ExecutableCodeDef>> codeBlocks = new ArrayList<>();
	JsonArray markdownFilenames = metadata.getJsonArray("chapters");
	for (JsonValue f : markdownFilenames) {
		String fname = stripQuotes(f.toString());
		book.filenames.add(fname);
		Pair<BookishParser.DocumentContext, BookishParser> results = parseChapter(inputDir, fname, book.chapCounter);
		book.chapCounter++;
		trees.add(results.a);
		entities.add(results.b.entities);
		codeBlocks.add(results.b.codeBlocks);
	}

	executeCodeSnippets(book, getBuildDir(metadataFilename), codeBlocks);

	// Now walk all parse trees and translate each chapter.
	List<Document> documents = new ArrayList<>();
	for (int i = 0; i < book.filenames.size(); i++) {
		String fname = book.filenames.get(i);
		BookishParser.DocumentContext tree = trees.get(i);
		Map<String, EntityDef> thisDocsEntities = entities.get(i);
		trans = new Translator(book, thisDocsEntities, target, outputDir);
		// Translate the single chapter's parse tree to a Document model.
		Document doc = (Document) trans.visit(tree);
		book.addChapterDocument(doc);
		doc.chapter.connectContainerTree();
		ModelConverter converter = new ModelConverter(trans.templates);
		ST outputST = converter.walk(doc);
		// Walk all OutputModelObjects created as labeled entities to convert
		// those entities; unlabeled entities are done in-line.
		// Iterate over a snapshot of the keys because globally-visible defs
		// are removed from thisDocsEntities during the loop.
		ArrayList<String> labels = new ArrayList<>(thisDocsEntities.keySet());
		for (String label : labels) {
			EntityDef def = thisDocsEntities.get(label);
			def.template = converter.walk(def.model);
			if (def.isGloballyVisible()) {
				// move to global space
				book.entities.put(label, def);
				thisDocsEntities.remove(label);
			}
		}
		String output = outputST.render();
		doc.markdownFilename = fname;
		documents.add(doc);
		if (target == Target.HTML) {
			outFilename = stripFileExtension(fname) + ".html";
		}
		else {
			outFilename = stripFileExtension(fname) + ".tex";
		}
		ParrtIO.save(outputDir + "/" + outFilename, output);
		doc.generatedFilename = outFilename;
	}

	// NOTE: trans here is the translator of the LAST chapter; its templates
	// are reused to render the top-level book template.
	ST bookTemplate = trans.templates.getInstanceOf("Book");
	bookTemplate.add("model", book);
	ParrtIO.save(outputDir + "/" + mainOutFilename, bookTemplate.render());
	copyImages(book, inputDir, outputDir);
	execCommandLine(String.format("cp -r %s/css %s", inputDir, outputDir));
}
Use of us.parr.bookish.model.Document in the bookish project by parrt.
From the class Translator, method visitDocument:
/**
 * Build the Document model for a parsed chapter.
 * this.document is assigned before visiting the chapter subtree so that any
 * nested visit reading this.document sees the new instance — preserve this order.
 */
@Override
public OutputModelObject visitDocument(BookishParser.DocumentContext ctx) {
	Document doc = new Document(ctx);
	this.document = doc;           // publish before visiting children
	doc.book = this.book;
	doc.entities = this.entities;
	doc.chapter = (Chapter) visit(ctx.chapter());
	return doc;
}
Aggregations