Use of edu.stanford.nlp.ie.machinereading.structure.RelationMention in project CoreNLP by stanfordnlp.
The class RothResultsByRelation, method printResults.
@Override
public void printResults(PrintWriter pw, List<CoreMap> goldStandard, List<CoreMap> extractorOutput) {
featureFactory = MachineReading.makeRelationFeatureFactory(MachineReadingProperties.relationFeatureFactoryClass, MachineReadingProperties.relationFeatures, false);
// generic mentions work well in this domain
mentionFactory = new RelationMentionFactory();
ResultsPrinter.align(goldStandard, extractorOutput);
List<RelationMention> relations = new ArrayList<>();
final Map<RelationMention, String> predictions = new HashMap<>();
for (int i = 0; i < goldStandard.size(); i++) {
List<RelationMention> goldRelations = AnnotationUtils.getAllRelations(mentionFactory, goldStandard.get(i), true);
relations.addAll(goldRelations);
for (RelationMention rel : goldRelations) {
predictions.put(rel, AnnotationUtils.getRelation(mentionFactory, extractorOutput.get(i), rel.getArg(0), rel.getArg(1)).getType());
}
}
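// count how often each (argument-type pair, low-level dependency path) combination occurs among the gold relations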
final Counter<Pair<Pair<String, String>, String>> pathCounts = new ClassicCounter<>();
for (RelationMention rel : relations) {
pathCounts.incrementCount(new Pair<>(new Pair<>(rel.getArg(0).getType(), rel.getArg(1).getType()), featureFactory.getFeature(rel, "dependency_path_lowlevel")));
}
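// tally correct / predicted / actual labels, but only for relations whose (argument-type pair, path) combination occurs exactly once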
Counter<String> singletonCorrect = new ClassicCounter<>();
Counter<String> singletonPredicted = new ClassicCounter<>();
Counter<String> singletonActual = new ClassicCounter<>();
for (RelationMention rel : relations) {
if (pathCounts.getCount(new Pair<>(new Pair<>(rel.getArg(0).getType(), rel.getArg(1).getType()), featureFactory.getFeature(rel, "dependency_path_lowlevel"))) == 1.0) {
String prediction = predictions.get(rel);
if (prediction.equals(rel.getType())) {
singletonCorrect.incrementCount(prediction);
}
singletonPredicted.incrementCount(prediction);
singletonActual.incrementCount(rel.getType());
}
}
class RelComp implements Comparator<RelationMention> {
@Override
public int compare(RelationMention rel1, RelationMention rel2) {
// Group together actual relations of a type with relations that were
// predicted to be that type
String prediction1 = predictions.get(rel1);
String prediction2 = predictions.get(rel2);
// String rel1group = RelationsSentence.isUnrelatedLabel(rel1.getType())
// ? prediction1 : rel1.getType();
// String rel2group = RelationsSentence.isUnrelatedLabel(rel2.getType())
// ? prediction2 : rel2.getType();
int entComp = (rel1.getArg(0).getType() + rel1.getArg(1).getType()).compareTo(rel2.getArg(0).getType() + rel2.getArg(1).getType());
// int groupComp = rel1group.compareTo(rel2group);
int typeComp = rel1.getType().compareTo(rel2.getType());
int predictionComp = prediction1.compareTo(prediction2);
// int pathComp =
// getFeature(rel1,"generalized_dependency_path").compareTo(getFeature(rel2,"generalized_dependency_path"));
double pathCount1 = pathCounts.getCount(new Pair<>(new Pair<>(rel1.getArg(0).getType(), rel1.getArg(1).getType()), featureFactory.getFeature(rel1, "dependency_path_lowlevel")));
double pathCount2 = pathCounts.getCount(new Pair<>(new Pair<>(rel2.getArg(0).getType(), rel2.getArg(1).getType()), featureFactory.getFeature(rel2, "dependency_path_lowlevel")));
if (entComp != 0) {
return entComp;
// } else if (pathComp != 0) {
// return pathComp;
} else if (pathCount1 < pathCount2) {
return -1;
} else if (pathCount1 > pathCount2) {
return 1;
} else if (typeComp != 0) {
return typeComp;
} else if (predictionComp != 0) {
return predictionComp;
} else {
return rel1.getSentence().get(CoreAnnotations.TextAnnotation.class).compareTo(rel2.getSentence().get(CoreAnnotations.TextAnnotation.class));
}
}
}
RelComp relComp = new RelComp();
Collections.sort(relations, relComp);
for (RelationMention rel : relations) {
String prediction = predictions.get(rel);
// if (RelationsSentence.isUnrelatedLabel(prediction) &&
// RelationsSentence.isUnrelatedLabel(rel.getType())) {
// continue;
// }
String type1 = rel.getArg(0).getType();
String type2 = rel.getArg(1).getType();
String path = featureFactory.getFeature(rel, "dependency_path_lowlevel");
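// only print relations between the entity-type pairs of interest, and skip relations with an empty dependency path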
if (!((type1.equals("PEOPLE") && type2.equals("PEOPLE")) || (type1.equals("PEOPLE") && type2.equals("LOCATION")) || (type1.equals("LOCATION") && type2.equals("LOCATION")) || (type1.equals("ORGANIZATION") && type2.equals("LOCATION")) || (type1.equals("PEOPLE") && type2.equals("ORGANIZATION")))) {
continue;
}
if (path.equals("")) {
continue;
}
pw.println("\nLABEL: " + prediction);
pw.println(rel);
pw.println(path);
pw.println(featureFactory.getFeatures(rel, "dependency_path_words"));
pw.println(featureFactory.getFeature(rel, "surface_path_POS"));
}
}
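The bookkeeping above boils down to counting (argument-type pair, low-level dependency path) combinations and then singling out relations whose combination occurs exactly once. A minimal, self-contained sketch of that counting pattern with CoreNLP's ClassicCounter; the key strings below are made-up stand-ins for the real argument types and dependency-path features:

import edu.stanford.nlp.stats.ClassicCounter;
import edu.stanford.nlp.stats.Counter;
import edu.stanford.nlp.util.Pair;
import java.util.Arrays;
import java.util.List;

public class SingletonPathDemo {
  public static void main(String[] args) {
    // each pair stands in for (concatenated argument types, dependency_path_lowlevel feature)
    List<Pair<String, String>> keys = Arrays.asList(
        new Pair<>("PEOPLE-LOCATION", "nsubj<-lives->prep_in"),
        new Pair<>("PEOPLE-LOCATION", "nsubj<-lives->prep_in"),
        new Pair<>("PEOPLE-ORGANIZATION", "nsubj<-works->prep_for"));
    Counter<Pair<String, String>> pathCounts = new ClassicCounter<>();
    for (Pair<String, String> key : keys) {
      pathCounts.incrementCount(key);
    }
    for (Pair<String, String> key : keys) {
      // relations whose key occurs exactly once are the "singletons" tallied by printResults
      if (pathCounts.getCount(key) == 1.0) {
        System.out.println("singleton: " + key);
      }
    }
  }
}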
Use of edu.stanford.nlp.ie.machinereading.structure.RelationMention in project CoreNLP by stanfordnlp.
The class ProtobufAnnotationSerializer, method fromProto.
/**
* Read a relation mention from its serialized form. Requires the containing sentence to be
* passed in along with the protocol buffer.
* @param proto The serialized relation mention.
* @param sentence The sentence this mention is attached to.
* @return The relation mention corresponding to the serialized object.
*/
private RelationMention fromProto(CoreNLPProtos.Relation proto, CoreMap sentence) {
List<ExtractionObject> args = proto.getArgList().stream().map(arg -> fromProto(arg, sentence)).collect(Collectors.toList());
RelationMention rtn = new RelationMention(proto.hasObjectID() ? proto.getObjectID() : null, sentence, proto.hasExtentStart() ? new Span(proto.getExtentStart(), proto.getExtentEnd()) : null, proto.hasType() ? proto.getType() : null, proto.hasSubtype() ? proto.getSubtype() : null, args);
if (proto.hasSignature()) {
rtn.setSignature(proto.getSignature());
}
if (proto.getArgNameCount() > 0 || proto.getArgCount() == 0) {
rtn.setArgNames(proto.getArgNameList());
}
return rtn;
}
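For context, a hedged sketch of how a serialized document might be read back so that fromProto gets exercised. The file name is a placeholder, and it is assumed the annotation was written earlier with the same serializer's write method:

import edu.stanford.nlp.ie.machinereading.structure.MachineReadingAnnotations;
import edu.stanford.nlp.ie.machinereading.structure.RelationMention;
import edu.stanford.nlp.ling.CoreAnnotations;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.ProtobufAnnotationSerializer;
import edu.stanford.nlp.util.CoreMap;
import edu.stanford.nlp.util.Pair;
import java.io.FileInputStream;
import java.io.InputStream;
import java.util.List;

public class ReadSerializedRelationsDemo {
  public static void main(String[] args) throws Exception {
    // "doc.ser" is a placeholder path; it is assumed to hold an Annotation written with
    // ProtobufAnnotationSerializer.write(annotation, outputStream)
    ProtobufAnnotationSerializer serializer = new ProtobufAnnotationSerializer();
    try (InputStream in = new FileInputStream("doc.ser")) {
      Pair<Annotation, InputStream> pair = serializer.read(in);
      Annotation doc = pair.first();
      for (CoreMap sentence : doc.get(CoreAnnotations.SentencesAnnotation.class)) {
        // each mention in this list was rebuilt by fromProto(CoreNLPProtos.Relation, CoreMap)
        List<RelationMention> rels = sentence.get(MachineReadingAnnotations.RelationMentionsAnnotation.class);
        if (rels != null) {
          rels.forEach(r -> System.out.println(r.getType()));
        }
      }
    }
  }
}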
Use of edu.stanford.nlp.ie.machinereading.structure.RelationMention in project CoreNLP by stanfordnlp.
The class XMLOutputter, method annotationToDoc.
/**
* Converts the given annotation to an XML document using the specified options
*/
public static Document annotationToDoc(Annotation annotation, Options options) {
//
// create the XML document with the root node pointing to the namespace URL
//
Element root = new Element("root", NAMESPACE_URI);
Document xmlDoc = new Document(root);
ProcessingInstruction pi = new ProcessingInstruction("xml-stylesheet", "href=\"" + STYLESHEET_NAME + "\" type=\"text/xsl\"");
xmlDoc.insertChild(pi, 0);
Element docElem = new Element("document", NAMESPACE_URI);
root.appendChild(docElem);
setSingleElement(docElem, "docId", NAMESPACE_URI, annotation.get(CoreAnnotations.DocIDAnnotation.class));
setSingleElement(docElem, "docDate", NAMESPACE_URI, annotation.get(CoreAnnotations.DocDateAnnotation.class));
setSingleElement(docElem, "docSourceType", NAMESPACE_URI, annotation.get(CoreAnnotations.DocSourceTypeAnnotation.class));
setSingleElement(docElem, "docType", NAMESPACE_URI, annotation.get(CoreAnnotations.DocTypeAnnotation.class));
setSingleElement(docElem, "author", NAMESPACE_URI, annotation.get(CoreAnnotations.AuthorAnnotation.class));
setSingleElement(docElem, "location", NAMESPACE_URI, annotation.get(CoreAnnotations.LocationAnnotation.class));
if (options.includeText) {
setSingleElement(docElem, "text", NAMESPACE_URI, annotation.get(CoreAnnotations.TextAnnotation.class));
}
Element sentencesElem = new Element("sentences", NAMESPACE_URI);
docElem.appendChild(sentencesElem);
//
// add the content for each sentence in this document
//
if (annotation.get(CoreAnnotations.SentencesAnnotation.class) != null) {
int sentCount = 1;
for (CoreMap sentence : annotation.get(CoreAnnotations.SentencesAnnotation.class)) {
Element sentElem = new Element("sentence", NAMESPACE_URI);
sentElem.addAttribute(new Attribute("id", Integer.toString(sentCount)));
Integer lineNumber = sentence.get(CoreAnnotations.LineNumberAnnotation.class);
if (lineNumber != null) {
sentElem.addAttribute(new Attribute("line", Integer.toString(lineNumber)));
}
sentCount++;
// add the word table with all token-level annotations
Element wordTable = new Element("tokens", NAMESPACE_URI);
List<CoreLabel> tokens = sentence.get(CoreAnnotations.TokensAnnotation.class);
for (int j = 0; j < tokens.size(); j++) {
Element wordInfo = new Element("token", NAMESPACE_URI);
addWordInfo(wordInfo, tokens.get(j), j + 1, NAMESPACE_URI, options);
wordTable.appendChild(wordInfo);
}
sentElem.appendChild(wordTable);
// add tree info
Tree tree = sentence.get(TreeCoreAnnotations.TreeAnnotation.class);
if (tree != null) {
// add the constituent tree for this sentence
Element parseInfo = new Element("parse", NAMESPACE_URI);
addConstituentTreeInfo(parseInfo, tree, options.constituencyTreePrinter);
sentElem.appendChild(parseInfo);
}
SemanticGraph basicDependencies = sentence.get(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);
if (basicDependencies != null) {
// add the dependencies for this sentence
Element depInfo = buildDependencyTreeInfo("basic-dependencies", sentence.get(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class), tokens, NAMESPACE_URI);
if (depInfo != null) {
sentElem.appendChild(depInfo);
}
depInfo = buildDependencyTreeInfo("collapsed-dependencies", sentence.get(SemanticGraphCoreAnnotations.CollapsedDependenciesAnnotation.class), tokens, NAMESPACE_URI);
if (depInfo != null) {
sentElem.appendChild(depInfo);
}
depInfo = buildDependencyTreeInfo("collapsed-ccprocessed-dependencies", sentence.get(SemanticGraphCoreAnnotations.CollapsedCCProcessedDependenciesAnnotation.class), tokens, NAMESPACE_URI);
if (depInfo != null) {
sentElem.appendChild(depInfo);
}
depInfo = buildDependencyTreeInfo("enhanced-dependencies", sentence.get(SemanticGraphCoreAnnotations.EnhancedDependenciesAnnotation.class), tokens, NAMESPACE_URI);
if (depInfo != null) {
sentElem.appendChild(depInfo);
}
depInfo = buildDependencyTreeInfo("enhanced-plus-plus-dependencies", sentence.get(SemanticGraphCoreAnnotations.EnhancedPlusPlusDependenciesAnnotation.class), tokens, NAMESPACE_URI);
if (depInfo != null) {
sentElem.appendChild(depInfo);
}
}
// add Open IE triples
Collection<RelationTriple> openieTriples = sentence.get(NaturalLogicAnnotations.RelationTriplesAnnotation.class);
if (openieTriples != null) {
Element openieElem = new Element("openie", NAMESPACE_URI);
addTriples(openieTriples, openieElem, NAMESPACE_URI);
sentElem.appendChild(openieElem);
}
// add KBP triples
Collection<RelationTriple> kbpTriples = sentence.get(CoreAnnotations.KBPTriplesAnnotation.class);
if (kbpTriples != null) {
Element kbpElem = new Element("kbp", NAMESPACE_URI);
addTriples(kbpTriples, kbpElem, NAMESPACE_URI);
sentElem.appendChild(kbpElem);
}
// add the MR entities and relations
List<EntityMention> entities = sentence.get(MachineReadingAnnotations.EntityMentionsAnnotation.class);
List<RelationMention> relations = sentence.get(MachineReadingAnnotations.RelationMentionsAnnotation.class);
if (entities != null && !entities.isEmpty()) {
Element mrElem = new Element("MachineReading", NAMESPACE_URI);
Element entElem = new Element("entities", NAMESPACE_URI);
addEntities(entities, entElem, NAMESPACE_URI);
mrElem.appendChild(entElem);
if (relations != null) {
Element relElem = new Element("relations", NAMESPACE_URI);
addRelations(relations, relElem, NAMESPACE_URI, options.relationsBeam);
mrElem.appendChild(relElem);
}
sentElem.appendChild(mrElem);
}
// Adds sentiment as an attribute of this sentence.
Tree sentimentTree = sentence.get(SentimentCoreAnnotations.SentimentAnnotatedTree.class);
if (sentimentTree != null) {
addSentiment(sentence, sentimentTree, sentElem, NAMESPACE_URI);
}
// add the sentence to the root
sentencesElem.appendChild(sentElem);
}
}
//
// add the coref graph
//
Map<Integer, CorefChain> corefChains = annotation.get(CorefCoreAnnotations.CorefChainAnnotation.class);
if (corefChains != null) {
List<CoreMap> sentences = annotation.get(CoreAnnotations.SentencesAnnotation.class);
Element corefInfo = new Element("coreference", NAMESPACE_URI);
addCorefGraphInfo(options, corefInfo, sentences, corefChains, NAMESPACE_URI);
docElem.appendChild(corefInfo);
}
return xmlDoc;
}
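Callers normally do not invoke annotationToDoc directly; the xmlPrint convenience method on StanfordCoreNLP builds the XML document and writes it out. A hedged usage sketch, assuming the CoreNLP XML support (which relies on XOM) is on the classpath:

import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import java.util.Properties;

public class XmlPrintDemo {
  public static void main(String[] args) throws Exception {
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize,ssplit,pos,lemma,ner,parse");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
    Annotation doc = new Annotation("Barack Obama lives in America.");
    pipeline.annotate(doc);
    // xmlPrint builds the XML document (ultimately via annotationToDoc) and writes it out
    pipeline.xmlPrint(doc, System.out);
  }
}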
Use of edu.stanford.nlp.ie.machinereading.structure.RelationMention in project CoreNLP by stanfordnlp.
The class XMLOutputter, method addRelations.
/**
* Generates the XML content for MachineReading relations.
*/
private static void addRelations(List<RelationMention> relations, Element top, String curNS, double beam) {
for (RelationMention r : relations) {
if (r.printableObject(beam)) {
Element re = toXML(r, curNS);
top.appendChild(re);
}
}
}
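The only filtering here is RelationMention.printableObject(beam). A small sketch of applying the same test outside the XML path; the class and method names below are illustrative, not part of CoreNLP:

import edu.stanford.nlp.ie.machinereading.structure.RelationMention;
import java.util.ArrayList;
import java.util.List;

public class BeamFilterSketch {
  // mirrors the test addRelations applies before serializing each mention;
  // callers pass the same beam value that would be handed to addRelations
  static List<RelationMention> keepPrintable(List<RelationMention> relations, double beam) {
    List<RelationMention> kept = new ArrayList<>();
    for (RelationMention r : relations) {
      if (r.printableObject(beam)) {
        kept.add(r);
      }
    }
    return kept;
  }
}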
Use of edu.stanford.nlp.ie.machinereading.structure.RelationMention in project CoreNLP by stanfordnlp.
The class RelationExtractorAnnotator, method main.
public static void main(String[] args) {
try {
Properties props = StringUtils.argsToProperties(args);
props.setProperty("annotators", "tokenize,ssplit,pos,lemma,ner,parse");
StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
String sentence = "Barack Obama lives in America. Obama works for the Federal Government.";
Annotation doc = new Annotation(sentence);
pipeline.annotate(doc);
RelationExtractorAnnotator r = new RelationExtractorAnnotator(props);
r.annotate(doc);
for (CoreMap s : doc.get(CoreAnnotations.SentencesAnnotation.class)) {
System.out.println("For sentence " + s.get(CoreAnnotations.TextAnnotation.class));
List<RelationMention> rls = s.get(RelationMentionsAnnotation.class);
for (RelationMention rl : rls) {
System.out.println(rl.toString());
}
}
} catch (Exception e) {
e.printStackTrace();
}
}
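Beyond rl.toString(), each RelationMention exposes its label and its arguments. A hedged expansion of the inner loop above that prints them; it additionally needs an import of edu.stanford.nlp.ie.machinereading.structure.ExtractionObject, and getExtentString() is assumed here to return the argument's covered text:

for (RelationMention rl : rls) {
  // label assigned by the relation extractor
  System.out.println("type: " + rl.getType());
  for (ExtractionObject arg : rl.getArgs()) {
    // each argument is an ExtractionObject (typically an EntityMention)
    System.out.println("  arg [" + arg.getType() + "]: " + arg.getExtentString());
  }
}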