Use of edu.stanford.nlp.semgraph.SemanticGraph in the project CoreNLP by stanfordnlp.
From the class SemgrexTest, method testComplicatedGraph.
/**
 * Test that governors, dependents, ancestors, descendants are all
 * returned with multiplicity 1 if there are multiple paths to the
 * same node.
 *
 * Reading the patterns: "{} &lt; {word:X}" matches direct dependents of X,
 * "{} &gt; {word:X}" matches direct governors of X, and "&lt;&lt;" / "&gt;&gt;" are the
 * transitive (any-depth) versions of the same relations.
 */
public void testComplicatedGraph() {
SemanticGraph graph = makeComplicatedGraph();
// Direct dependents / governors. (The graph shape is defined by
// makeComplicatedGraph(), not visible here; the expectations below
// imply A -> {B,C,D} -> E -> ... -> J, with K disconnected.)
runTest("{} < {word:A}", graph, "B", "C", "D");
runTest("{} > {word:E}", graph, "B", "C", "D");
runTest("{} > {word:J}", graph, "I");
runTest("{} < {word:E}", graph, "F", "G", "I");
runTest("{} < {word:I}", graph, "J");
// Transitive descendants: even though E is reachable from A via B, C,
// and D, it appears only once in the expected results.
runTest("{} << {word:A}", graph, "B", "C", "D", "E", "F", "G", "H", "I", "J");
runTest("{} << {word:B}", graph, "E", "F", "G", "H", "I", "J");
runTest("{} << {word:C}", graph, "E", "F", "G", "H", "I", "J");
runTest("{} << {word:D}", graph, "E", "F", "G", "H", "I", "J");
runTest("{} << {word:E}", graph, "F", "G", "H", "I", "J");
runTest("{} << {word:F}", graph, "H", "I", "J");
runTest("{} << {word:G}", graph, "H", "I", "J");
runTest("{} << {word:H}", graph, "I", "J");
runTest("{} << {word:I}", graph, "J");
// Leaf (J) and disconnected node (K) have no descendants.
runTest("{} << {word:J}", graph);
runTest("{} << {word:K}", graph);
// Transitive ancestors, again with multiplicity 1 per node.
runTest("{} >> {word:A}", graph);
runTest("{} >> {word:B}", graph, "A");
runTest("{} >> {word:C}", graph, "A");
runTest("{} >> {word:D}", graph, "A");
runTest("{} >> {word:E}", graph, "A", "B", "C", "D");
runTest("{} >> {word:F}", graph, "A", "B", "C", "D", "E");
runTest("{} >> {word:G}", graph, "A", "B", "C", "D", "E");
runTest("{} >> {word:H}", graph, "A", "B", "C", "D", "E", "F", "G");
runTest("{} >> {word:I}", graph, "A", "B", "C", "D", "E", "F", "G", "H");
runTest("{} >> {word:J}", graph, "A", "B", "C", "D", "E", "F", "G", "H", "I");
runTest("{} >> {word:K}", graph);
}
Use of edu.stanford.nlp.semgraph.SemanticGraph in the project CoreNLP by stanfordnlp.
From the class SemgrexTest, method testExactDepthRelations.
/**
 * Tests the bounded-depth dominance operators: "min,max&lt;&lt;" matches
 * descendants whose path length from the named node lies in [min, max],
 * and "min,max&gt;&gt;" does the same for ancestors.
 */
public void testExactDepthRelations() {
SemanticGraph graph = makeComplicatedGraph();
runTest("{} 2,3<< {word:A}", graph, "E", "F", "G", "I");
runTest("{} 2,2<< {word:A}", graph, "E");
runTest("{} 1,2<< {word:A}", graph, "B", "C", "D", "E");
// A lower bound of 0 apparently behaves the same as 1 here (same
// expectations as the 1,2 case above) — no node matches at depth 0.
runTest("{} 0,2<< {word:A}", graph, "B", "C", "D", "E");
runTest("{} 0,10<< {word:A}", graph, "B", "C", "D", "E", "F", "G", "H", "I", "J");
runTest("{} 0,10>> {word:J}", graph, "A", "B", "C", "D", "E", "F", "G", "H", "I");
runTest("{} 2,3>> {word:J}", graph, "B", "C", "D", "E", "F", "G", "H");
runTest("{} 2,2>> {word:J}", graph, "E", "H");
// A single depth "2>>" is shorthand for "2,2>>".  Use the precompiled
// pattern overload to skip the toString() round-trip check, since the
// pattern prints itself back as 2,2>> rather than 2>>.
runTest(SemgrexPattern.compile("{} 2>> {word:J}"), graph, "E", "H");
runTest("{} 1,2>> {word:J}", graph, "E", "H", "I");
}
Use of edu.stanford.nlp.semgraph.SemanticGraph in the project CoreNLP by stanfordnlp.
From the class SemgrexTest, method testNotEquals.
/**
 * In this test, the graph should find matches with pairs of nodes
 * which are different from each other. Since "muffins" only has
 * one dependent, there should not be any matches with "muffins" as
 * the head, for example.
 *
 * "ate" has three descendants (Bill, muffins, blueberry), so the
 * !== constraint yields all 3 * 2 = 6 ordered pairs (a, b) with a != b,
 * enumerated in a fixed order; the sequence of find() calls below pins
 * both the pair set and the iteration order.
 */
public void testNotEquals() {
SemanticGraph graph = SemanticGraph.valueOf("[ate subj>Bill dobj>[muffins compound>blueberry]]");
SemgrexPattern pattern = SemgrexPattern.compile("{} >> {}=a >> {}=b : {}=a !== {}=b");
SemgrexMatcher matcher = pattern.matcher(graph);
// (Bill, muffins)
assertTrue(matcher.find());
assertEquals(2, matcher.getNodeNames().size());
assertEquals("ate", matcher.getMatch().toString());
assertEquals("Bill", matcher.getNode("a").toString());
assertEquals("muffins", matcher.getNode("b").toString());
// (Bill, blueberry)
assertTrue(matcher.find());
assertEquals(2, matcher.getNodeNames().size());
assertEquals("ate", matcher.getMatch().toString());
assertEquals("Bill", matcher.getNode("a").toString());
assertEquals("blueberry", matcher.getNode("b").toString());
// (muffins, Bill)
assertTrue(matcher.find());
assertEquals(2, matcher.getNodeNames().size());
assertEquals("ate", matcher.getMatch().toString());
assertEquals("muffins", matcher.getNode("a").toString());
assertEquals("Bill", matcher.getNode("b").toString());
// (muffins, blueberry)
assertTrue(matcher.find());
assertEquals(2, matcher.getNodeNames().size());
assertEquals("ate", matcher.getMatch().toString());
assertEquals("muffins", matcher.getNode("a").toString());
assertEquals("blueberry", matcher.getNode("b").toString());
// (blueberry, Bill)
assertTrue(matcher.find());
assertEquals(2, matcher.getNodeNames().size());
assertEquals("ate", matcher.getMatch().toString());
assertEquals("blueberry", matcher.getNode("a").toString());
assertEquals("Bill", matcher.getNode("b").toString());
// (blueberry, muffins)
assertTrue(matcher.find());
assertEquals(2, matcher.getNodeNames().size());
assertEquals("ate", matcher.getMatch().toString());
assertEquals("blueberry", matcher.getNode("a").toString());
assertEquals("muffins", matcher.getNode("b").toString());
// Exactly six matches: no pair with a == b is ever produced.
assertFalse(matcher.find());
// same as the first test, essentially, but with a more compact expression:
// the !== constraint is attached inline to node b instead of being a
// separate ":"-clause.  Expected matches and order are identical.
pattern = SemgrexPattern.compile("{} >> {}=a >> ({}=b !== {}=a)");
matcher = pattern.matcher(graph);
// (Bill, muffins)
assertTrue(matcher.find());
assertEquals(2, matcher.getNodeNames().size());
assertEquals("ate", matcher.getMatch().toString());
assertEquals("Bill", matcher.getNode("a").toString());
assertEquals("muffins", matcher.getNode("b").toString());
// (Bill, blueberry)
assertTrue(matcher.find());
assertEquals(2, matcher.getNodeNames().size());
assertEquals("ate", matcher.getMatch().toString());
assertEquals("Bill", matcher.getNode("a").toString());
assertEquals("blueberry", matcher.getNode("b").toString());
// (muffins, Bill)
assertTrue(matcher.find());
assertEquals(2, matcher.getNodeNames().size());
assertEquals("ate", matcher.getMatch().toString());
assertEquals("muffins", matcher.getNode("a").toString());
assertEquals("Bill", matcher.getNode("b").toString());
// (muffins, blueberry)
assertTrue(matcher.find());
assertEquals(2, matcher.getNodeNames().size());
assertEquals("ate", matcher.getMatch().toString());
assertEquals("muffins", matcher.getNode("a").toString());
assertEquals("blueberry", matcher.getNode("b").toString());
// (blueberry, Bill)
assertTrue(matcher.find());
assertEquals(2, matcher.getNodeNames().size());
assertEquals("ate", matcher.getMatch().toString());
assertEquals("blueberry", matcher.getNode("a").toString());
assertEquals("Bill", matcher.getNode("b").toString());
// (blueberry, muffins)
assertTrue(matcher.find());
assertEquals(2, matcher.getNodeNames().size());
assertEquals("ate", matcher.getMatch().toString());
assertEquals("blueberry", matcher.getNode("a").toString());
assertEquals("muffins", matcher.getNode("b").toString());
assertFalse(matcher.find());
}
Use of edu.stanford.nlp.semgraph.SemanticGraph in the project CoreNLP by stanfordnlp.
From the class SemgrexTest, method testRelationType.
/**
 * Tests dominance operators restricted by edge label:
 * "&lt;&lt;mod" / "&gt;&gt;det" only follow edges whose relation matches the name.
 */
public void testRelationType() {
SemanticGraph graph = makeComplicatedGraph();
// NOTE(review): the duplicated expectations ("I", "I" and "J", "J")
// suggest runTest counts one match per distinct labeled path here,
// unlike the multiplicity-1 behavior of the unlabeled operators —
// confirm against makeComplicatedGraph's edge labels.
runTest("{} <<mod {}", graph, "B", "E", "F", "G", "H", "I", "I", "J", "J");
runTest("{} >>det {}", graph, "A", "B", "C", "D", "E", "F", "G", "H", "I");
runTest("{} >>det {word:J}", graph, "A", "B", "C", "D", "E", "F", "G", "H", "I");
}
Use of edu.stanford.nlp.semgraph.SemanticGraph in the project Info-Evaluation by TechnionYP5777.
From the class AnalyzeParagraph, method AnalyzeArrestsQuery.
/**
 * Analyzes each paragraph in {@code this.Paragraphs} for arrest events:
 * extracts the reason for the arrest, contextual details (location, time,
 * charge), and prints any stated penalty, appending the extracted text to
 * {@code this.Information} and echoing each entry to stdout.
 *
 * NOTE(review): the method name violates Java's lowerCamelCase convention;
 * kept as-is since external callers may depend on it.
 */
public void AnalyzeArrestsQuery() {
/*
 * First step is initiating the Stanford CoreNLP pipeline (the pipeline
 * will be later used to evaluate the text and annotate it). The pipeline is
 * initiated using a Properties object which is used for setting all
 * needed entities, annotations, training data and so on, in order to
 * customize the pipeline initialization to contain only the models
 * you need.
 */
final Properties props = new Properties();
/*
 * The "annotators" property key tells the pipeline which entities
 * should be initiated with our pipeline object. See
 * http://nlp.stanford.edu/software/corenlp.shtml for a complete
 * reference to the "annotators" values you can set here and what they
 * will contribute to the analyzing process.
 */
props.put("annotators", "tokenize,ssplit, pos, regexner, parse,lemma,natlog,openie");
final StanfordCoreNLP pipeLine = new StanfordCoreNLP(props);
// inputText will be the text to evaluate in this example
int index = 0; // index of the next Information entry, used only for echoing
for (final Element paragraph : this.Paragraphs) {
final String inputText = paragraph.text() + "";
final Annotation document = new Annotation(inputText);
System.out.println(document);
// the extracted reason for the arrest
String reason = "";
// more details about the reason, e.g. where/when it happened
String details = "";
// auxiliary words attached to the reason (xcomp dependents)
String aux = "";
// raw sentence span covering the reason phrase, when recoverable
String prefixDetails = "";
// this string tells us what is the penalty for the arrest
String penalty = "";
// Finally we use the pipeline to annotate the document we created
pipeLine.annotate(document);
for (final CoreMap sentence : document.get(SentencesAnnotation.class)) {
Sentence sent = new Sentence(sentence);
// Penalty extraction for sentencing/charging sentences.
// NOTE(review): penalty is printed but never added to Information —
// confirm whether that is intentional.
if (sent.text().contains("sentenced") || sent.text().contains("juried") || sent.text().contains("sent to jail") || sent.text().contains("charged")) {
penalty = ArrestPenalty(sent);
System.out.println("Sentenced for:" + penalty);
}
// Collapsed dependencies merge prepositions into edge labels,
// giving the "nmod:in" / "nmod:during" relations matched below.
final SemanticGraph dependencies = sentence.get(CollapsedDependenciesAnnotation.class);
for (final IndexedWord root : dependencies.getRoots()) for (final SemanticGraphEdge edge : dependencies.getOutEdgesSorted(root)) {
final IndexedWord dep = edge.getDependent();
final String rel = edge.getRelation() + "";
// Governor other than "arrested": harvest location/time modifiers.
if (!"arrested".equals(edge.getGovernor().word()))
switch(rel) {
case "nmod:in":
details += "in" + " " + dep.word() + " ";
break;
case "nmod:during":
details += "during" + " " + dep.word() + " ";
break;
case "nmod:at":
details += "at" + " " + dep.word() + " ";
break;
}
else {
//Finding the reason in the paragraph: the reason phrase hangs off
//"arrested" via an adverbial-clause or "for"-modifier edge.
if ("advcl".equals(rel) || "advcl:for".equals(rel) || "nmod:for".equals(rel)) {
// Walk the reason head's own dependents for descriptive words.
for (final SemanticGraphEdge keshet : dependencies.getOutEdgesSorted(dep)) {
final String rel2 = keshet.getRelation() + "";
final IndexedWord dep2 = keshet.getDependent();
if ("amod".equals(rel2) || "dobj".equals(rel2)) {
reason += dep2.word() + " ";
try {
// NOTE(review): begin/endPosition are character offsets into the
// original document, not into sentence.toString() — presumably the
// catch below exists because they can run past the sentence text.
prefixDetails = ((sentence + "").substring(dep.beginPosition(), dep2.endPosition()));
} catch (IndexOutOfBoundsException e) {
// Offsets out of range: fall back to the whole sentence.
prefixDetails = sentence + "";
}
}
if ("xcomp".equals(rel2))
aux += " " + dep2.word();
// Second-level modifiers add location/time/charge context.
switch(rel2) {
case "nmod:in":
// Prepend any compound parts so multi-word place names stay intact.
final String longLocation = dep2.word();
details += "in ";
for (final SemanticGraphEdge keshet2 : dependencies.getOutEdgesSorted(dep2)) if ("compound".equals(keshet2.getRelation() + ""))
details += keshet2.getDependent().word() + " ";
details += longLocation;
break;
case "nmod:during":
details += "during" + " " + dep2.word() + " ";
break;
case "nmod:under":
details += "under " + dep2.word() + " ";
break;
case "nmod:of":
details += "of " + dep2.word();
break;
case "nmod:at":
details += "at" + " " + dep2.word() + " ";
break;
}
// "on suspicion of X": keep X as part of the details.
if ("suspicion".equals(keshet.getSource().word()) && "acl:of".equals(rel2))
details += dep2.word();
}
// The reason head word itself, plus any xcomp continuation.
reason += dep.word();
reason += aux;
}
}
}
// Record the raw phrase span first, if one was recovered...
if (!"".equals(prefixDetails.trim())) {
this.Information.add(prefixDetails.trim());
System.out.println((this.Information.get(index) + ""));
++index;
}
// ...then the assembled "reason + details" summary for this paragraph.
this.Information.add((reason + " " + details).trim());
System.out.println((this.Information.get(index) + ""));
++index;
}
}
}
Aggregations