Usage example of us.parr.bookish.parse.BookishParser in project bookish (by parrt): class Translator, method visitParagraph_content.
/**
 * Translate a paragraph's content to an output model object.
 *
 * <p>Visits every child of the paragraph, collecting the non-null output
 * model objects, then scans the subtree for REF tokens so that any entity
 * (figure, sidenote, etc.) referenced for the first time is rendered with
 * this paragraph and recorded as shown in both document and book scope.
 *
 * @param ctx the paragraph_content parse-tree node
 * @return a {@link Paragraph} holding the translated elements plus the
 *         entities whose first reference appears in this paragraph
 */
@Override
public OutputModelObject visitParagraph_content(BookishParser.Paragraph_contentContext ctx) {
	List<OutputModelObject> elements = new ArrayList<>();
	for (ParseTree el : ctx.children) {
		OutputModelObject c = visit(el);
		if (c != null) {
			elements.add(c);
		}
	}
	// find all REFs within paragraph
	Collection<ParseTree> refNodes = XPath.findAll(ctx, "//REF", new BookishParser(null));
	List<EntityDef> entitiesRefd = new ArrayList<>();
	for (ParseTree t : refNodes) {
		String label = stripQuotes(t.getText());
		EntityDef def = document.getEntity(label);
		if (def == null) {
			System.err.printf("line %d: Unknown label '%s'\n", ctx.start.getLine(), label);
			continue;
		}
		if (!book.entitiesRendered.contains(def) && !document.entitiesRendered.contains(def)) {
			// Nobody has shown it yet: render with this paragraph.
			entitiesRefd.add(def);
			// BUG FIX: the original guarded these adds with `contains(def)`
			// (instead of `!contains(def)`), which is always false inside this
			// branch, so entities were never marked rendered and would be
			// re-rendered at every subsequent reference. Mark unconditionally.
			document.entitiesRendered.add(def);
			book.entitiesRendered.add(def);
		}
	}
	return new Paragraph(elements, entitiesRefd);
}
Usage example of us.parr.bookish.parse.BookishParser in project bookish (by parrt): class Tool, method legacy_translate.
// legacy single-doc translation
/**
 * Legacy single-document translation path: parse one markdown file as
 * chapter 0, translate it, and render it to the target output text.
 *
 * @param trans         translator configured for the desired target
 * @param inputDir      directory holding the markdown file
 * @param inputFilename the markdown file to translate
 * @return the translated {@link Document} paired with its rendered text
 * @throws IOException if the input file cannot be read
 */
public Pair<Document, String> legacy_translate(Translator trans, String inputDir, String inputFilename) throws IOException {
	// Parse the lone chapter and hand its entity table to the translator.
	Pair<BookishParser.DocumentContext, BookishParser> parsed = parseChapter(inputDir, inputFilename, 0);
	trans.entities = parsed.b.entities;
	// Build the output model for the chapter and wire up its container tree.
	Document doc = (Document) trans.visit(parsed.a);
	doc.chapter.connectContainerTree();
	// Convert the model into a StringTemplate tree and render it.
	ST rendered = new ModelConverter(trans.templates).walk(doc);
	return new Pair<>(doc, rendered.render());
}
Usage example of us.parr.bookish.parse.BookishParser in project bookish (by parrt): class Tool, method parseChapter.
/**
 * Lex and parse one markdown chapter file.
 *
 * <p>Returns both the parse tree and the parser instance, because callers
 * read per-chapter state (entity definitions, code blocks) off the parser
 * after parsing.
 *
 * @param inputDir      directory containing the chapter file
 * @param inputFilename chapter filename within {@code inputDir}
 * @param chapNumber    chapter index recorded in the parser
 * @return the document parse tree paired with the parser that produced it
 * @throws IOException if the chapter file cannot be read
 */
public Pair<BookishParser.DocumentContext, BookishParser> parseChapter(String inputDir, String inputFilename, int chapNumber) throws IOException {
	CharStream chars = CharStreams.fromFileName(inputDir + "/" + inputFilename);
	CommonTokenStream tokenStream = new CommonTokenStream(new BookishLexer(chars));
	BookishParser chapterParser = new BookishParser(tokenStream, inputFilename, chapNumber);
	return new Pair<>(chapterParser.document(), chapterParser);
}
Usage example of us.parr.bookish.parse.BookishParser in project bookish (by parrt): class Tool, method process.
/**
 * Main driver: parse command-line args, then translate either a single
 * markdown file (legacy mode, when the metadata path ends in ".md") or a
 * whole book described by a JSON metadata file, writing HTML or LaTeX
 * output plus copied images/CSS into the output directory.
 *
 * @param args command-line arguments, handled by {@code handleArgs}
 * @throws Exception on I/O failure, parse failure, or template errors
 */
public void process(String[] args) throws Exception {
options = handleArgs(args);
String metadataFilename = option("metadataFilename");
inputDir = new File(metadataFilename).getParent();
outputDir = option("o");
String outFilename;
Translator trans;
Target target = (Target) optionO("target");
ParrtIO.mkdir(outputDir + "/images");
String snippetsDir = getBuildDir(metadataFilename) + "/snippets";
ParrtIO.mkdir(snippetsDir);
if (metadataFilename.endsWith(".md")) {
// just one file (legacy stuff): translate it directly with no book metadata
String inputFilename = metadataFilename;
Book book = new Book(this, "", "");
book.entities = new HashMap<>();
trans = new Translator(book, book.entities, target, outputDir);
if (target == Target.HTML) {
outFilename = "index.html";
} else {
outFilename = stripFileExtension(basename(inputFilename)) + ".tex";
}
Pair<Document, String> results = legacy_translate(trans, inputDir, basename(inputFilename));
String output = results.b;
ParrtIO.save(outputDir + "/" + outFilename, output);
// System.out.println("Wrote "+outputDir+"/"+outFilename);
copyImages(book, inputDir, outputDir);
return;
}
// otherwise, read and use metadata
// NOTE(review): FileReader is never closed and uses the platform default
// charset — consider try-with-resources and an explicit charset; confirm
// whether metadata files may contain non-ASCII text.
JsonReader jsonReader = Json.createReader(new FileReader(metadataFilename));
JsonObject metadata = jsonReader.readObject();
// System.out.println(metadata);
String title = metadata.getString("title");
Book book = new Book(this, title, null);
String author = metadata.getString("author");
dataDir = metadata.getString("data");
// Rule paragraph needs blank line on the front
author = "\n\n" + author;
trans = new Translator(book, null, target, outputDir);
book.author = translateString(trans, author, "paragraph");
String mainOutFilename;
if (target == Target.HTML) {
mainOutFilename = "index.html";
} else {
mainOutFilename = "book.tex";
}
// parse all documents first to get entity defs
List<BookishParser.DocumentContext> trees = new ArrayList<>();
List<Map<String, EntityDef>> entities = new ArrayList<>();
List<List<ExecutableCodeDef>> codeBlocks = new ArrayList<>();
JsonArray markdownFilenames = metadata.getJsonArray("chapters");
for (JsonValue f : markdownFilenames) {
// JsonValue.toString() yields a quoted JSON string; strip the quotes
String fname = stripQuotes(f.toString());
book.filenames.add(fname);
Pair<BookishParser.DocumentContext, BookishParser> results = parseChapter(inputDir, fname, book.chapCounter);
book.chapCounter++;
// keep tree, per-chapter entity defs, and code snippets in parallel lists
trees.add(results.a);
entities.add(results.b.entities);
codeBlocks.add(results.b.codeBlocks);
}
// run every chapter's code snippets before translation so output is available
executeCodeSnippets(book, getBuildDir(metadataFilename), codeBlocks);
// now walk all trees and translate
List<Document> documents = new ArrayList<>();
for (int i = 0; i < book.filenames.size(); i++) {
String fname = book.filenames.get(i);
BookishParser.DocumentContext tree = trees.get(i);
Map<String, EntityDef> thisDocsEntities = entities.get(i);
// a fresh translator per chapter, scoped to that chapter's entities
trans = new Translator(book, thisDocsEntities, target, outputDir);
// get doc for single chapter
Document doc = (Document) trans.visit(tree);
book.addChapterDocument(doc);
doc.chapter.connectContainerTree();
ModelConverter converter = new ModelConverter(trans.templates);
ST outputST = converter.walk(doc);
// walk all OutputModelObjects created as labeled entities to convert those entities
// unlabeled entities are done in-line
// (copy the key set first: globally-visible entities are removed from the
// per-document map inside the loop)
ArrayList<String> labels = new ArrayList<>(thisDocsEntities.keySet());
for (String label : labels) {
EntityDef def = thisDocsEntities.get(label);
def.template = converter.walk(def.model);
if (def.isGloballyVisible()) {
// move to global space
book.entities.put(label, def);
thisDocsEntities.remove(label);
}
}
String output = outputST.render();
doc.markdownFilename = fname;
documents.add(doc);
if (target == Target.HTML) {
outFilename = stripFileExtension(fname) + ".html";
} else {
outFilename = stripFileExtension(fname) + ".tex";
}
ParrtIO.save(outputDir + "/" + outFilename, output);
doc.generatedFilename = outFilename;
// System.out.println("Wrote "+outputDir+"/"+outFilename);
}
// render the book-level wrapper (index.html or book.tex) from the Book model
ST bookTemplate = trans.templates.getInstanceOf("Book");
bookTemplate.add("model", book);
ParrtIO.save(outputDir + "/" + mainOutFilename, bookTemplate.render());
// System.out.println("Wrote "+outputDir+"/"+mainOutFilename);
copyImages(book, inputDir, outputDir);
execCommandLine(String.format("cp -r %s/css %s", inputDir, outputDir));
// copyImages(BUILD_DIR, outputDir);
}
Usage example of us.parr.bookish.parse.BookishParser in project bookish (by parrt): class Tool, method translateString.
/**
 * Translate an in-memory markdown fragment, starting the parse at a named
 * grammar rule, and render the result to target text.
 *
 * @param trans     translator to apply to the parse tree
 * @param markdown  the markdown text to translate
 * @param startRule name of the parser rule method to invoke reflectively
 *                  (e.g. {@code "paragraph"})
 * @return the rendered output text
 * @throws Exception if reflection or parsing fails
 */
public String translateString(Translator trans, String markdown, String startRule) throws Exception {
	BookishLexer lexer = new BookishLexer(CharStreams.fromString(markdown));
	BookishParser parser = new BookishParser(new CommonTokenStream(lexer), null, 0);
	// The start rule is chosen at runtime, so look it up by name on the
	// generated parser and invoke it with no arguments.
	Method start = BookishParser.class.getMethod(startRule, (Class[]) null);
	ParseTree tree = (ParseTree) start.invoke(parser, (Object[]) null);
	// Translate the tree into an output model, then render via StringTemplate.
	OutputModelObject model = trans.visit(tree);
	ST st = new ModelConverter(trans.templates).walk(model);
	return st.render();
}
Aggregations