Use of edu.stanford.nlp.ie.machinereading.structure.RelationMention in project CoreNLP by stanfordnlp:
class ExtractorMerger, method annotate.
@Override
public void annotate(Annotation dataset) {
  // TODO for now, we only merge RelationMentions
  logger.info("Extractor 0 annotating dataset.");
  extractors[0].annotate(dataset);
  // Store the unique RelationMentions for each sentence, in sentence order.
  List<Set<RelationMention>> allRelationMentions = new ArrayList<>();
  for (CoreMap sentence : dataset.get(CoreAnnotations.SentencesAnnotation.class)) {
    List<RelationMention> relationMentions = sentence.get(MachineReadingAnnotations.RelationMentionsAnnotation.class);
    Set<RelationMention> uniqueRelationMentions = new HashSet<>(relationMentions);
    allRelationMentions.add(uniqueRelationMentions);
  }
  // skip first extractor since we did it at the top
  for (int extractorIndex = 1; extractorIndex < extractors.length; extractorIndex++) {
    logger.info("Extractor " + extractorIndex + " annotating dataset.");
    Extractor extractor = extractors[extractorIndex];
    extractor.annotate(dataset);
    // walk through all sentences and merge our RelationMentions with the combined set
    int sentenceIndex = 0;
    for (CoreMap sentence : dataset.get(CoreAnnotations.SentencesAnnotation.class)) {
      List<RelationMention> relationMentions = sentence.get(MachineReadingAnnotations.RelationMentionsAnnotation.class);
      allRelationMentions.get(sentenceIndex).addAll(relationMentions);
      // BUG FIX: sentenceIndex was never incremented, so every sentence's
      // relations were merged into sentence 0's set and the rest were lost.
      sentenceIndex++;
    }
  }
  // put all merged relations back into the dataset
  int sentenceIndex = 0;
  for (CoreMap sentence : dataset.get(CoreAnnotations.SentencesAnnotation.class)) {
    Set<RelationMention> uniqueRelationMentions = allRelationMentions.get(sentenceIndex);
    List<RelationMention> relationMentions = new ArrayList<>(uniqueRelationMentions);
    sentence.set(MachineReadingAnnotations.RelationMentionsAnnotation.class, relationMentions);
    sentenceIndex++;
  }
}
Use of edu.stanford.nlp.ie.machinereading.structure.RelationMention in project CoreNLP by stanfordnlp:
class RelationExtractorResultsPrinter, method printResults.
@Override
public void printResults(PrintWriter pw, List<CoreMap> goldStandard, List<CoreMap> extractorOutput) {
  ResultsPrinter.align(goldStandard, extractorOutput);
  // the mention factory cannot be null here
  assert relationMentionFactory != null : "ERROR: RelationExtractorResultsPrinter.relationMentionFactory cannot be null in printResults!";
  // Confusion counts over (predicted type, gold type) pairs, plus per-label gold frequencies.
  Counter<Pair<String, String>> confusion = new ClassicCounter<>();
  ClassicCounter<String> goldLabelCounts = new ClassicCounter<>();
  // TODO: assumes binary relations
  for (int i = 0; i < goldStandard.size(); i++) {
    CoreMap predictedSentence = extractorOutput.get(i);
    for (RelationMention gold : AnnotationUtils.getAllRelations(relationMentionFactory, goldStandard.get(i), createUnrelatedRelations)) {
      goldLabelCounts.incrementCount(gold.getType());
      // Pair each gold relation with every predicted relation over the same argument pair.
      for (RelationMention predicted : AnnotationUtils.getRelations(relationMentionFactory, predictedSentence, gold.getArg(0), gold.getArg(1))) {
        confusion.incrementCount(new Pair<>(predicted.getType(), gold.getType()));
      }
    }
  }
  printResultsInternal(pw, confusion, goldLabelCounts);
}
Use of edu.stanford.nlp.ie.machinereading.structure.RelationMention in project CoreNLP by stanfordnlp:
class ProtobufAnnotationSerializer, method toProtoBuilder.
/**
 * <p>
 * The method to extend by subclasses of the Protobuf Annotator if custom additions are added to Tokens.
 * In contrast to {@link ProtobufAnnotationSerializer#toProto(edu.stanford.nlp.ling.CoreLabel)}, this function
 * returns a builder that can be extended.
 * </p>
 *
 * @param sentence The sentence to save to a protocol buffer
 * @param keysToSerialize A set tracking which keys have been saved. It's important to remove any keys added to the proto
 * from this set, as the code tracks annotations to ensure lossless serialization.
 * @return A partially populated {@link CoreNLPProtos.Sentence.Builder} which subclasses may extend further.
 */
@SuppressWarnings("deprecation")
protected CoreNLPProtos.Sentence.Builder toProtoBuilder(CoreMap sentence, Set<Class<?>> keysToSerialize) {
  // Error checks
  if (sentence instanceof CoreLabel) {
    throw new IllegalArgumentException("CoreMap is actually a CoreLabel");
  }
  CoreNLPProtos.Sentence.Builder builder = CoreNLPProtos.Sentence.newBuilder();
  // Remove items serialized elsewhere from the required list
  keysToSerialize.remove(TextAnnotation.class);
  keysToSerialize.remove(NumerizedTokensAnnotation.class);
  // Required fields
  builder.setTokenOffsetBegin(getAndRegister(sentence, keysToSerialize, TokenBeginAnnotation.class));
  builder.setTokenOffsetEnd(getAndRegister(sentence, keysToSerialize, TokenEndAnnotation.class));
  // Get key set of CoreMap
  Set<Class<?>> keySet;
  if (sentence instanceof ArrayCoreMap) {
    keySet = ((ArrayCoreMap) sentence).keySetNotNull();
  } else {
    keySet = new IdentityHashSet<>(sentence.keySet());
  }
  // Tokens
  if (sentence.containsKey(TokensAnnotation.class)) {
    for (CoreLabel tok : sentence.get(TokensAnnotation.class)) {
      builder.addToken(toProto(tok));
    }
    keysToSerialize.remove(TokensAnnotation.class);
  }
  // Characters
  if (sentence.containsKey(SegmenterCoreAnnotations.CharactersAnnotation.class)) {
    for (CoreLabel c : sentence.get(SegmenterCoreAnnotations.CharactersAnnotation.class)) {
      builder.addCharacter(toProto(c));
    }
    keysToSerialize.remove(SegmenterCoreAnnotations.CharactersAnnotation.class);
  }
  // Optional fields
  if (keySet.contains(SentenceIndexAnnotation.class)) {
    builder.setSentenceIndex(getAndRegister(sentence, keysToSerialize, SentenceIndexAnnotation.class));
  }
  if (keySet.contains(LineNumberAnnotation.class)) {
    builder.setLineNumber(getAndRegister(sentence, keysToSerialize, LineNumberAnnotation.class));
  }
  if (keySet.contains(CharacterOffsetBeginAnnotation.class)) {
    builder.setCharacterOffsetBegin(getAndRegister(sentence, keysToSerialize, CharacterOffsetBeginAnnotation.class));
  }
  if (keySet.contains(CharacterOffsetEndAnnotation.class)) {
    builder.setCharacterOffsetEnd(getAndRegister(sentence, keysToSerialize, CharacterOffsetEndAnnotation.class));
  }
  if (keySet.contains(TreeAnnotation.class)) {
    builder.setParseTree(toProto(getAndRegister(sentence, keysToSerialize, TreeAnnotation.class)));
  }
  if (keySet.contains(BinarizedTreeAnnotation.class)) {
    builder.setBinarizedParseTree(toProto(getAndRegister(sentence, keysToSerialize, BinarizedTreeAnnotation.class)));
  }
  if (keySet.contains(KBestTreesAnnotation.class)) {
    // BUG FIX: the remove() used to sit inside the loop, so an *empty* k-best
    // list never deregistered the key and broke the lossless-serialization
    // bookkeeping. Register the key once, like every other branch does.
    keysToSerialize.remove(KBestTreesAnnotation.class);
    for (Tree tree : sentence.get(KBestTreesAnnotation.class)) {
      builder.addKBestParseTrees(toProto(tree));
    }
  }
  if (keySet.contains(SentimentCoreAnnotations.SentimentAnnotatedTree.class)) {
    builder.setAnnotatedParseTree(toProto(getAndRegister(sentence, keysToSerialize, SentimentCoreAnnotations.SentimentAnnotatedTree.class)));
  }
  if (keySet.contains(SentimentCoreAnnotations.SentimentClass.class)) {
    builder.setSentiment(getAndRegister(sentence, keysToSerialize, SentimentCoreAnnotations.SentimentClass.class));
  }
  if (keySet.contains(BasicDependenciesAnnotation.class)) {
    builder.setBasicDependencies(toProto(getAndRegister(sentence, keysToSerialize, BasicDependenciesAnnotation.class)));
  }
  if (keySet.contains(CollapsedDependenciesAnnotation.class)) {
    builder.setCollapsedDependencies(toProto(getAndRegister(sentence, keysToSerialize, CollapsedDependenciesAnnotation.class)));
  }
  if (keySet.contains(CollapsedCCProcessedDependenciesAnnotation.class)) {
    builder.setCollapsedCCProcessedDependencies(toProto(getAndRegister(sentence, keysToSerialize, CollapsedCCProcessedDependenciesAnnotation.class)));
  }
  if (keySet.contains(AlternativeDependenciesAnnotation.class)) {
    builder.setAlternativeDependencies(toProto(getAndRegister(sentence, keysToSerialize, AlternativeDependenciesAnnotation.class)));
  }
  if (keySet.contains(EnhancedDependenciesAnnotation.class)) {
    builder.setEnhancedDependencies(toProto(getAndRegister(sentence, keysToSerialize, EnhancedDependenciesAnnotation.class)));
  }
  if (keySet.contains(EnhancedPlusPlusDependenciesAnnotation.class)) {
    builder.setEnhancedPlusPlusDependencies(toProto(getAndRegister(sentence, keysToSerialize, EnhancedPlusPlusDependenciesAnnotation.class)));
  }
  // Paragraph id lives on the first token. Hoisted into a local: this used to
  // call getAndRegister(..., TokensAnnotation.class) three separate times.
  if (keySet.contains(TokensAnnotation.class)) {
    List<CoreLabel> sentenceTokens = getAndRegister(sentence, keysToSerialize, TokensAnnotation.class);
    if (sentenceTokens.size() > 0 && sentenceTokens.get(0).containsKey(ParagraphAnnotation.class)) {
      builder.setParagraph(sentenceTokens.get(0).get(ParagraphAnnotation.class));
    }
  }
  if (keySet.contains(NumerizedTokensAnnotation.class)) {
    builder.setHasNumerizedTokensAnnotation(true);
  } else {
    builder.setHasNumerizedTokensAnnotation(false);
  }
  if (keySet.contains(NaturalLogicAnnotations.EntailedSentencesAnnotation.class)) {
    for (SentenceFragment entailedSentence : getAndRegister(sentence, keysToSerialize, NaturalLogicAnnotations.EntailedSentencesAnnotation.class)) {
      builder.addEntailedSentence(toProto(entailedSentence));
    }
  }
  if (keySet.contains(NaturalLogicAnnotations.EntailedClausesAnnotation.class)) {
    for (SentenceFragment entailedClause : getAndRegister(sentence, keysToSerialize, NaturalLogicAnnotations.EntailedClausesAnnotation.class)) {
      builder.addEntailedClause(toProto(entailedClause));
    }
  }
  if (keySet.contains(NaturalLogicAnnotations.RelationTriplesAnnotation.class)) {
    // mark that this sentence has openie triples, potentially an empty list
    builder.setHasOpenieTriplesAnnotation(true);
    for (RelationTriple triple : getAndRegister(sentence, keysToSerialize, NaturalLogicAnnotations.RelationTriplesAnnotation.class)) {
      builder.addOpenieTriple(toProto(triple));
    }
  }
  if (keySet.contains(KBPTriplesAnnotation.class)) {
    // mark that this sentence has kbp triples, potentially empty list
    builder.setHasKBPTriplesAnnotation(true);
    // store each of the kbp triples
    for (RelationTriple triple : getAndRegister(sentence, keysToSerialize, KBPTriplesAnnotation.class)) {
      builder.addKbpTriple(toProto(triple));
    }
  }
  // Non-default annotators (MachineReading entities and relations)
  if (keySet.contains(EntityMentionsAnnotation.class)) {
    builder.setHasRelationAnnotations(true);
    for (EntityMention entity : getAndRegister(sentence, keysToSerialize, EntityMentionsAnnotation.class)) {
      builder.addEntity(toProto(entity));
    }
  } else {
    builder.setHasRelationAnnotations(false);
  }
  if (keySet.contains(RelationMentionsAnnotation.class)) {
    // Relation mentions reference entity mentions, so they must come together.
    if (!builder.getHasRelationAnnotations()) {
      throw new IllegalStateException("Registered entity mentions without relation mentions");
    }
    for (RelationMention relation : getAndRegister(sentence, keysToSerialize, RelationMentionsAnnotation.class)) {
      builder.addRelation(toProto(relation));
    }
  }
  // add each of the mentions in the List<Mentions> for this sentence
  if (keySet.contains(CorefMentionsAnnotation.class)) {
    builder.setHasCorefMentionsAnnotation(true);
    for (Mention m : sentence.get(CorefMentionsAnnotation.class)) {
      builder.addMentionsForCoref(toProto(m));
    }
    keysToSerialize.remove(CorefMentionsAnnotation.class);
  }
  // Entity mentions
  if (keySet.contains(MentionsAnnotation.class)) {
    for (CoreMap mention : sentence.get(MentionsAnnotation.class)) {
      builder.addMentions(toProtoMention(mention));
    }
    keysToSerialize.remove(MentionsAnnotation.class);
    builder.setHasEntityMentionsAnnotation(true);
  } else {
    builder.setHasEntityMentionsAnnotation(false);
  }
  // add a sentence id if it exists
  if (keySet.contains(SentenceIDAnnotation.class))
    builder.setSentenceID(getAndRegister(sentence, keysToSerialize, SentenceIDAnnotation.class));
  // add section index
  if (keySet.contains(SectionIndexAnnotation.class))
    builder.setSectionIndex(getAndRegister(sentence, keysToSerialize, SectionIndexAnnotation.class));
  // add section date
  if (keySet.contains(SectionDateAnnotation.class))
    builder.setSectionDate(getAndRegister(sentence, keysToSerialize, SectionDateAnnotation.class));
  // add section name
  if (keySet.contains(SectionAnnotation.class))
    builder.setSectionName(getAndRegister(sentence, keysToSerialize, SectionAnnotation.class));
  // add section author
  if (keySet.contains(AuthorAnnotation.class))
    builder.setSectionAuthor(getAndRegister(sentence, keysToSerialize, AuthorAnnotation.class));
  // add doc id
  if (keySet.contains(DocIDAnnotation.class))
    builder.setDocID(getAndRegister(sentence, keysToSerialize, DocIDAnnotation.class));
  // add boolean flag if sentence is quoted
  if (keySet.contains(QuotedAnnotation.class))
    builder.setSectionQuoted(getAndRegister(sentence, keysToSerialize, QuotedAnnotation.class));
  // quote annotator can also add an "enhanced sentence" if multiple sentences are treated as a single sentence
  if (keySet.contains(EnhancedSentenceAnnotation.class)) {
    keysToSerialize.remove(EnhancedSentenceAnnotation.class);
    CoreMap enhanced = sentence.get(EnhancedSentenceAnnotation.class);
    builder.setEnhancedSentence(toProto(enhanced));
  }
  // add chapter index if there is one
  if (keySet.contains(ChapterAnnotator.ChapterAnnotation.class))
    builder.setChapterIndex(getAndRegister(sentence, keysToSerialize, ChapterAnnotator.ChapterAnnotation.class));
  // add paragraph index info
  if (keySet.contains(ParagraphIndexAnnotation.class))
    builder.setParagraphIndex(getAndRegister(sentence, keysToSerialize, ParagraphIndexAnnotation.class));
  // add speaker annotation
  if (keySet.contains(SpeakerAnnotation.class)) {
    builder.setSpeaker(getAndRegister(sentence, keysToSerialize, SpeakerAnnotation.class));
  }
  if (keySet.contains(SpeakerTypeAnnotation.class)) {
    builder.setSpeakerType(getAndRegister(sentence, keysToSerialize, SpeakerTypeAnnotation.class));
  }
  // Return
  return builder;
}
Use of edu.stanford.nlp.ie.machinereading.structure.RelationMention in project CoreNLP by stanfordnlp:
class TextOutputter, method print.
/**
 * The meat of the outputter: writes a human-readable dump of every
 * document-level and sentence-level annotation present on {@code annotation}.
 *
 * @param annotation the annotated document to render
 * @param pw         the destination writer; flushed (but not closed) on return
 * @param options    output options (tree printer, relations beam, singleton printing)
 */
private static void print(Annotation annotation, PrintWriter pw, Options options) {
  double beam = options.relationsBeam;
  List<CoreMap> sentences = annotation.get(CoreAnnotations.SentencesAnnotation.class);
  // Display docid if available
  String docId = annotation.get(CoreAnnotations.DocIDAnnotation.class);
  if (docId != null) {
    List<CoreLabel> tokens = annotation.get(CoreAnnotations.TokensAnnotation.class);
    int nSentences = (sentences != null) ? sentences.size() : 0;
    int nTokens = (tokens != null) ? tokens.size() : 0;
    pw.printf("Document: ID=%s (%d sentences, %d tokens)%n", docId, nSentences, nTokens);
  }
  // Display doctitle if available
  String docTitle = annotation.get(CoreAnnotations.DocTitleAnnotation.class);
  if (docTitle != null) {
    pw.printf("Document Title: %s%n", docTitle);
  }
  // Display docdate if available
  String docDate = annotation.get(CoreAnnotations.DocDateAnnotation.class);
  if (docDate != null) {
    pw.printf("Document Date: %s%n", docDate);
  }
  // Display doctype if available
  String docType = annotation.get(CoreAnnotations.DocTypeAnnotation.class);
  if (docType != null) {
    pw.printf("Document Type: %s%n", docType);
  }
  // Display docsourcetype if available
  String docSourceType = annotation.get(CoreAnnotations.DocSourceTypeAnnotation.class);
  if (docSourceType != null) {
    pw.printf("Document Source Type: %s%n", docSourceType);
  }
  // display each sentence in this annotation
  if (sentences != null) {
    for (int i = 0, sz = sentences.size(); i < sz; i++) {
      pw.println();
      CoreMap sentence = sentences.get(i);
      List<CoreLabel> tokens = sentence.get(CoreAnnotations.TokensAnnotation.class);
      String sentiment = sentence.get(SentimentCoreAnnotations.SentimentClass.class);
      String piece;
      if (sentiment == null) {
        piece = "";
      } else {
        piece = ", sentiment: " + sentiment;
      }
      pw.printf("Sentence #%d (%d tokens%s):%n", (i + 1), tokens.size(), piece);
      String text = sentence.get(CoreAnnotations.TextAnnotation.class);
      pw.println(text);
      // display the token-level annotations
      String[] tokenAnnotations = { "Text", "PartOfSpeech", "Lemma", "Answer", "NamedEntityTag", "CharacterOffsetBegin", "CharacterOffsetEnd", "NormalizedNamedEntityTag", "CodepointOffsetBegin", "CodepointOffsetEnd", "Timex", "TrueCase", "TrueCaseText", "SentimentClass", "WikipediaEntity" };
      pw.println();
      pw.println("Tokens:");
      for (CoreLabel token : tokens) {
        pw.print(token.toShorterString(tokenAnnotations));
        pw.println();
      }
      // display the parse tree for this sentence
      Tree tree = sentence.get(TreeCoreAnnotations.TreeAnnotation.class);
      if (tree != null) {
        pw.println();
        pw.println("Constituency parse: ");
        options.constituencyTreePrinter.printTree(tree, pw);
      }
      // display the binary tree for this sentence
      Tree binaryTree = sentence.get(TreeCoreAnnotations.BinarizedTreeAnnotation.class);
      if (binaryTree != null) {
        pw.println();
        pw.println("Binary Constituency parse: ");
        options.constituencyTreePrinter.printTree(binaryTree, pw);
      }
      // display sentiment tree if they asked for sentiment
      if (!StringUtils.isNullOrEmpty(sentiment)) {
        pw.println();
        pw.println("Sentiment-annotated binary tree:");
        Tree sTree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
        if (sTree != null) {
          sTree.pennPrint(pw, label -> (label.value() == null) ? "" : (RNNCoreAnnotations.getPredictedClass(label) != -1) ? (label.value() + "|sentiment=" + RNNCoreAnnotations.getPredictedClass(label) + "|prob=" + (String.format("%.3f", RNNCoreAnnotations.getPredictedClassProb(label)))) : label.value());
          pw.println();
        }
      }
      // language which doesn't have dependencies, for example.
      if (sentence.get(SemanticGraphCoreAnnotations.EnhancedPlusPlusDependenciesAnnotation.class) != null) {
        pw.println();
        pw.println("Dependency Parse (enhanced plus plus dependencies):");
        pw.print(sentence.get(SemanticGraphCoreAnnotations.EnhancedPlusPlusDependenciesAnnotation.class).toList());
      }
      // display the entity mentions
      List<CoreMap> entityMentions = sentence.get(CoreAnnotations.MentionsAnnotation.class);
      if (entityMentions != null) {
        pw.println();
        pw.println("Extracted the following NER entity mentions:");
        for (CoreMap entityMention : entityMentions) {
          String nerConfidenceEntry;
          // BUG FIX: the probs annotation may be absent; this used to dereference
          // the map unconditionally and NPE. A missing map now falls through to "-".
          Map<String, Double> nerConfidences = entityMention.get(CoreAnnotations.NamedEntityTagProbsAnnotation.class);
          String nerConfidenceKey = (nerConfidences != null && !nerConfidences.isEmpty()) ? nerConfidences.keySet().iterator().next() : "";
          if (!nerConfidenceKey.equals("") && !nerConfidenceKey.equals("O"))
            nerConfidenceEntry = nerConfidenceKey + ":" + nerConfidences.get(nerConfidenceKey);
          else
            nerConfidenceEntry = "-";
          if (entityMention.get(CoreAnnotations.EntityTypeAnnotation.class) != null) {
            pw.println(entityMention.get(CoreAnnotations.TextAnnotation.class) + '\t' + entityMention.get(CoreAnnotations.EntityTypeAnnotation.class) + '\t' + nerConfidenceEntry);
          }
        }
      }
      // display MachineReading entities and relations
      List<EntityMention> entities = sentence.get(MachineReadingAnnotations.EntityMentionsAnnotation.class);
      if (entities != null) {
        pw.println();
        pw.println("Extracted the following MachineReading entity mentions:");
        for (EntityMention e : entities) {
          pw.print('\t');
          pw.println(e);
        }
      }
      List<RelationMention> relations = sentence.get(MachineReadingAnnotations.RelationMentionsAnnotation.class);
      if (relations != null) {
        pw.println();
        pw.println("Extracted the following MachineReading relation mentions:");
        for (RelationMention r : relations) {
          // only print relations that clear the configured beam threshold
          if (r.printableObject(beam)) {
            pw.println(r);
          }
        }
      }
      // display OpenIE triples
      Collection<RelationTriple> openieTriples = sentence.get(NaturalLogicAnnotations.RelationTriplesAnnotation.class);
      if (openieTriples != null && !openieTriples.isEmpty()) {
        pw.println();
        pw.println("Extracted the following Open IE triples:");
        for (RelationTriple triple : openieTriples) {
          pw.println(OpenIE.tripleToString(triple, docId, sentence));
        }
      }
      // display KBP triples
      Collection<RelationTriple> kbpTriples = sentence.get(CoreAnnotations.KBPTriplesAnnotation.class);
      if (kbpTriples != null && !kbpTriples.isEmpty()) {
        pw.println();
        pw.println("Extracted the following KBP triples:");
        for (RelationTriple triple : kbpTriples) {
          pw.println(triple);
        }
      }
    }
  } else {
    // No sentence splitting was run: dump the token stream directly.
    List<CoreLabel> tokens = annotation.get(CoreAnnotations.TokensAnnotation.class);
    pw.println("Tokens:");
    pw.println(annotation.get(CoreAnnotations.TextAnnotation.class));
    for (CoreLabel token : tokens) {
      int tokenCharBegin = token.get(CoreAnnotations.CharacterOffsetBeginAnnotation.class);
      int tokenCharEnd = token.get(CoreAnnotations.CharacterOffsetEndAnnotation.class);
      String extra = "";
      Integer codepoint = token.get(CoreAnnotations.CodepointOffsetBeginAnnotation.class);
      if (codepoint != null) {
        extra = extra + " CodepointOffsetBegin=" + codepoint;
      }
      codepoint = token.get(CoreAnnotations.CodepointOffsetEndAnnotation.class);
      if (codepoint != null) {
        extra = extra + " CodepointOffsetEnd=" + codepoint;
      }
      pw.println("[Text=" + token.word() + " CharacterOffsetBegin=" + tokenCharBegin + " CharacterOffsetEnd=" + tokenCharEnd + extra + ']');
    }
  }
  // display the old-style doc-level coref annotations
  // this is not supported anymore!
  // String corefAnno = annotation.get(CorefPLAnnotation.class);
  // if(corefAnno != null) os.println(corefAnno);
  // display the new-style coreference graph
  Map<Integer, CorefChain> corefChains = annotation.get(CorefCoreAnnotations.CorefChainAnnotation.class);
  if (corefChains != null && sentences != null) {
    for (CorefChain chain : corefChains.values()) {
      CorefChain.CorefMention representative = chain.getRepresentativeMention();
      boolean outputHeading = false;
      for (CorefChain.CorefMention mention : chain.getMentionsInTextualOrder()) {
        // skip the representative itself (unless we are printing singletons)
        if (mention == representative && (!options.printSingletons || chain.getMentionsInTextualOrder().size() > 1))
          continue;
        if (!outputHeading) {
          outputHeading = true;
          pw.println();
          pw.println("Coreference set:");
        }
        // all offsets start at 1!
        pw.printf("\t(%d,%d,[%d,%d]) -> (%d,%d,[%d,%d]), that is: \"%s\" -> \"%s\"%n", mention.sentNum, mention.headIndex, mention.startIndex, mention.endIndex, representative.sentNum, representative.headIndex, representative.startIndex, representative.endIndex, mention.mentionSpan, representative.mentionSpan);
      }
    }
  }
  // display quotes if available
  if (annotation.get(CoreAnnotations.QuotationsAnnotation.class) != null) {
    outputQuotes(annotation, pw);
  }
  pw.flush();
}
Use of edu.stanford.nlp.ie.machinereading.structure.RelationMention in project cogcomp-nlp by CogComp:
class StanfordRelationsHandler, method addView.
@Override
protected void addView(TextAnnotation ta) throws AnnotatorException {
  // Run the Stanford pipeline over the raw text of this TextAnnotation.
  Annotation document = new Annotation(ta.text);
  pipeline.annotate(document);
  SpanLabelView view = new SpanLabelView(viewName, ta);
  // First pass: convert every non-"_NR" relation mention into a scored Relation
  // between two constituents, registering the constituents as needed.
  for (CoreMap sentence : document.get(CoreAnnotations.SentencesAnnotation.class)) {
    for (RelationMention mention : sentence.get(MachineReadingAnnotations.RelationMentionsAnnotation.class)) {
      if (mention.getType().equals("_NR"))
        continue;
      // Copy the per-label type probabilities into a plain score map.
      Map<String, Double> labelScores = new HashMap<>();
      for (String label : mention.getTypeProbabilities().keySet()) {
        labelScores.put(label, mention.getTypeProbabilities().getCount(label));
      }
      Constituent first = createConstituentGivenMention(mention.getEntityMentionArgs().get(0), ta);
      Constituent second = createConstituentGivenMention(mention.getEntityMentionArgs().get(1), ta);
      view.addRelation(new Relation(labelScores, first, second));
      if (!view.containsConstituent(first))
        view.addConstituent(first);
      if (!view.containsConstituent(second))
        view.addConstituent(second);
    }
  }
  // Second pass: add a constituent for every entity mention not already present.
  for (CoreMap sentence : document.get(CoreAnnotations.SentencesAnnotation.class)) {
    for (EntityMention entity : sentence.get(MachineReadingAnnotations.EntityMentionsAnnotation.class)) {
      Constituent c = createConstituentGivenMention(entity, ta);
      if (!view.containsConstituent(c))
        view.addConstituent(c);
    }
  }
  ta.addView(viewName, view);
}
Aggregations