Use of edu.stanford.nlp.pipeline.Annotation in project CoreNLP by stanfordnlp.
The class XMLToAnnotation, method processCoreNLPIfDoesNotExist.
public static void processCoreNLPIfDoesNotExist(File processedFile, Properties coreNLPProps, String text) {
    if (!processedFile.exists()) {
        try {
            StanfordCoreNLP coreNLP = new StanfordCoreNLP(coreNLPProps);
            // this document holds the split for paragraphs.
            Annotation processedAnnotation = coreNLP.process(text);
            ProtobufAnnotationSerializer pas = new ProtobufAnnotationSerializer(true);
            // try-with-resources ensures the buffered stream is flushed and closed
            try (OutputStream fos = new BufferedOutputStream(new FileOutputStream(processedFile.getAbsolutePath()))) {
                pas.write(processedAnnotation, fos);
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
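Since this method serializes the processed Annotation with ProtobufAnnotationSerializer, the natural counterpart is reading it back later. The following is a minimal sketch, not part of the original class (the helper name readProcessedAnnotation is ours); it uses the serializer's read method, which returns the Annotation paired with the input stream:

// Minimal sketch: deserialize an Annotation written by the method above.
// The helper name readProcessedAnnotation is hypothetical, not from XMLToAnnotation.
public static Annotation readProcessedAnnotation(File processedFile) throws IOException, ClassNotFoundException {
    ProtobufAnnotationSerializer pas = new ProtobufAnnotationSerializer(true);
    try (InputStream is = new BufferedInputStream(new FileInputStream(processedFile))) {
        // read() returns a Pair of the Annotation and the (possibly advanced) stream
        return pas.read(is).first;
    }
}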
Use of edu.stanford.nlp.pipeline.Annotation in project CoreNLP by stanfordnlp.
The class GenerateAlignmentData, method main.
public static void main(String[] args) throws IOException {
    Properties props = new Properties();
    props.put("annotators", "tokenize,ssplit");
    props.put("ssplit.eolonly", "true");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
    String filename = args[0];
    String sentences = args[1];
    String graphs = args[2];
    BufferedReader reader = IOUtils.readerFromString(filename);
    PrintWriter sentencesFile = IOUtils.getPrintWriter(sentences);
    PrintWriter graphsFile = IOUtils.getPrintWriter(graphs);
    for (String line = reader.readLine(); line != null; line = reader.readLine()) {
        SceneGraphImage img = SceneGraphImage.readFromJSON(line);
        if (img == null) {
            continue;
        }
        for (SceneGraphImageRegion region : img.regions) {
            Annotation doc = new Annotation(region.phrase);
            pipeline.annotate(doc);
            CoreMap sentence = doc.get(CoreAnnotations.SentencesAnnotation.class).get(0);
            List<CoreLabel> tokens = sentence.get(CoreAnnotations.TokensAnnotation.class);
            String tokenizedSentence = StringUtils.join(tokens.stream().map(CoreLabel::word), " ");
            // one (sentence, graph) line pair per attribute
            for (SceneGraphImageAttribute attr : region.attributes) {
                sentencesFile.printf("%s%n", tokenizedSentence);
                graphsFile.printf("%s%n", StringUtils.join(attr.text));
            }
            // one (sentence, graph) line pair per relationship
            for (SceneGraphImageRelationship reln : region.relationships) {
                sentencesFile.printf("%s%n", tokenizedSentence);
                graphsFile.printf("%s%n", StringUtils.join(reln.text));
            }
        }
    }
    // close the writers so buffered output is flushed to disk
    sentencesFile.close();
    graphsFile.close();
    reader.close();
}
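A brief note on why the pipeline above sets ssplit.eolonly=true: with that property, sentence splitting happens only at newlines, so each region phrase comes back as exactly one sentence and the .get(0) access on SentencesAnnotation is safe. A standalone sketch of that behavior (the input string is ours, purely illustrative):

// Minimal sketch of eolonly sentence splitting (illustrative input, not original data)
Properties props = new Properties();
props.put("annotators", "tokenize,ssplit");
props.put("ssplit.eolonly", "true");
StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

Annotation doc = new Annotation("a red car parked on the street");
pipeline.annotate(doc);
// a single input line yields a single sentence
System.out.println(doc.get(CoreAnnotations.SentencesAnnotation.class).size()); // 1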
Use of edu.stanford.nlp.pipeline.Annotation in project CoreNLP by stanfordnlp.
The class SimplePronounResolution, method resolvePronouns.
@Override
protected HashMap<Integer, Integer> resolvePronouns(List<CoreLabel> tokens) {
    if (pipeline == null) {
        loadPipeline();
    }
    // wrap the token list in a single-sentence Annotation so the pipeline can run over it
    CoreMap sentence = new CoreLabel();
    sentence.set(CoreAnnotations.TokensAnnotation.class, tokens);
    sentence.set(CoreAnnotations.SentenceIndexAnnotation.class, 1);
    List<CoreMap> sentences = new ArrayList<CoreMap>(1);
    sentences.add(sentence);
    Annotation annotation = new Annotation(sentences);
    pipeline.annotate(annotation);
    CoreMap annotatedSentence = annotation.get(CoreAnnotations.SentencesAnnotation.class).get(0);
    SemanticGraph sg = annotatedSentence.get(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);
    return this.resolvePronouns(sg);
}
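The method expects a pre-tokenized sentence as a List of CoreLabel. A minimal sketch of hand-building such a list (the example words are ours; depending on what loadPipeline() configures, additional fields such as POS tags may also be needed):

// Minimal sketch: hand-built CoreLabel tokens (illustrative words, not from the original code)
List<CoreLabel> tokens = new ArrayList<>();
String[] words = {"John", "saw", "him", "."};
for (int i = 0; i < words.length; i++) {
    CoreLabel token = new CoreLabel();
    token.setWord(words[i]);
    token.setValue(words[i]);
    token.setIndex(i + 1);   // CoreNLP token indices are 1-based
    tokens.add(token);
}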
Use of edu.stanford.nlp.pipeline.Annotation in project CoreNLP by stanfordnlp.
The class SceneGraphImageCleaner, method lemmatize.
public void lemmatize(SceneGraphImage img) {
    StanfordCoreNLP pipeline = getPipeline();
    /* attributes */
    for (SceneGraphImageAttribute attr : img.attributes) {
        String attribute = removeDeterminersAndNumbers(removeFinalPunctuation(attr.attribute));
        // embed the attribute in a carrier sentence so the pipeline can tag and lemmatize it
        String sentence = String.format("She is %s .\n", attribute);
        Annotation doc = new Annotation(sentence);
        pipeline.annotate(doc);
        CoreMap sentenceAnn = doc.get(CoreAnnotations.SentencesAnnotation.class).get(0);
        List<CoreLabel> tokens = sentenceAnn.get(CoreAnnotations.TokensAnnotation.class);
        // drop "She is" at the front and the final period
        attr.attributeGloss = tokens.subList(2, tokens.size() - 1);
        String subject = removeDeterminersAndNumbers(removeFinalPunctuation(attr.text[0]));
        sentence = String.format("The %s is tall .", subject);
        doc = new Annotation(sentence);
        pipeline.annotate(doc);
        sentenceAnn = doc.get(CoreAnnotations.SentencesAnnotation.class).get(0);
        tokens = sentenceAnn.get(CoreAnnotations.TokensAnnotation.class);
        // keep only the subject tokens between "The" and "is tall ."
        attr.subjectGloss = tokens.subList(1, tokens.size() - 3);
        attr.subject.labels.add(attr.subjectGloss);
    }
    /* relations */
    for (SceneGraphImageRelationship reln : img.relationships) {
        String object = removeDeterminersAndNumbers(removeFinalPunctuation(reln.text[2]));
        String sentence = String.format("She is the %s .\n", object);
        Annotation doc = new Annotation(sentence);
        pipeline.annotate(doc);
        CoreMap sentenceAnn = doc.get(CoreAnnotations.SentencesAnnotation.class).get(0);
        List<CoreLabel> tokens = sentenceAnn.get(CoreAnnotations.TokensAnnotation.class);
        // object tokens sit between "She is the" and the final period
        reln.objectGloss = tokens.subList(3, tokens.size() - 1);
        reln.object.labels.add(reln.objectGloss);
        String subject = removeDeterminersAndNumbers(removeFinalPunctuation(reln.text[0]));
        sentence = String.format("The %s is tall .", subject);
        doc = new Annotation(sentence);
        pipeline.annotate(doc);
        sentenceAnn = doc.get(CoreAnnotations.SentencesAnnotation.class).get(0);
        tokens = sentenceAnn.get(CoreAnnotations.TokensAnnotation.class);
        reln.subjectGloss = tokens.subList(1, tokens.size() - 3);
        reln.subject.labels.add(reln.subjectGloss);
        String predicate = removeDeterminersAndNumbers(removeFinalPunctuation(reln.predicate));
        sentence = String.format("A horse %s an apple .", predicate);
        doc = new Annotation(sentence);
        pipeline.annotate(doc);
        sentenceAnn = doc.get(CoreAnnotations.SentencesAnnotation.class).get(0);
        tokens = sentenceAnn.get(CoreAnnotations.TokensAnnotation.class);
        // predicate tokens sit between "A horse" and "an apple ."
        reln.predicateGloss = tokens.subList(2, tokens.size() - 3);
    }
    for (SceneGraphImageObject object : img.objects) {
        if (object.names.size() > object.labels.size()) {
            for (String name : object.names) {
                String x = removeDeterminersAndNumbers(removeFinalPunctuation(name));
                String sentence = String.format("The %s is tall .", x);
                Annotation doc = new Annotation(sentence);
                pipeline.annotate(doc);
                CoreMap sentenceAnn = doc.get(CoreAnnotations.SentencesAnnotation.class).get(0);
                List<CoreLabel> tokens = sentenceAnn.get(CoreAnnotations.TokensAnnotation.class);
                object.labels.add(tokens.subList(1, tokens.size() - 3));
            }
        }
    }
    StanfordCoreNLP tokenizerPipeline = getTokenizerPipeline();
    for (SceneGraphImageRegion region : img.regions) {
        Annotation doc = new Annotation(region.phrase.toLowerCase());
        tokenizerPipeline.annotate(doc);
        CoreMap sentenceAnn = doc.get(CoreAnnotations.SentencesAnnotation.class).get(0);
        region.tokens = sentenceAnn.get(CoreAnnotations.TokensAnnotation.class);
    }
}
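The carrier-sentence trick above only works if the pipeline runs at least tokenize, ssplit, pos, and lemma; the exact annotator list comes from getPipeline(), which is not shown here, so the configuration below is our assumption. A hedged sketch of the idea in isolation:

// Minimal sketch, assuming a pos+lemma pipeline similar to what getPipeline() presumably builds
Properties props = new Properties();
props.put("annotators", "tokenize,ssplit,pos,lemma");
StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

Annotation doc = new Annotation("She is running quickly .");
pipeline.annotate(doc);
List<CoreLabel> tokens = doc.get(CoreAnnotations.SentencesAnnotation.class)
                            .get(0).get(CoreAnnotations.TokensAnnotation.class);
// subList(2, size - 1) isolates the attribute tokens ("running quickly"),
// each of which now carries a lemma
for (CoreLabel token : tokens.subList(2, tokens.size() - 1)) {
    System.out.println(token.word() + " -> " + token.lemma());
}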
Use of edu.stanford.nlp.pipeline.Annotation in project CoreNLP by stanfordnlp.
The class CorefMentionFinder, method parse.
private Tree parse(List<CoreLabel> tokens, List<ParserConstraint> constraints) {
    // build a single-sentence document carrying the tokens and the parser constraints
    CoreMap sent = new Annotation("");
    sent.set(CoreAnnotations.TokensAnnotation.class, tokens);
    sent.set(ParserAnnotations.ConstraintAnnotation.class, constraints);
    Annotation doc = new Annotation("");
    List<CoreMap> sents = new ArrayList<>(1);
    sents.add(sent);
    doc.set(CoreAnnotations.SentencesAnnotation.class, sents);
    getParser().annotate(doc);
    sents = doc.get(CoreAnnotations.SentencesAnnotation.class);
    return sents.get(0).get(TreeCoreAnnotations.TreeAnnotation.class);
}
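Callers of this helper supply ParserConstraint objects that pin a token span to a given label during parsing. A hedged sketch of building one such constraint (the span and label are purely illustrative, and we assume a ParserConstraint(int, int, String) constructor in edu.stanford.nlp.parser.common):

// Minimal sketch (illustrative span/label): ask the parser to treat tokens 0..2 as a noun phrase
List<ParserConstraint> constraints = new ArrayList<>();
constraints.add(new ParserConstraint(0, 2, "NP"));
// the list is then attached to the sentence exactly as in parse() above:
// sent.set(ParserAnnotations.ConstraintAnnotation.class, constraints);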