use of edu.stanford.nlp.semgraph.SemanticGraph in project Info-Evaluation by TechnionYP5777.
the class AnalyzeParagragh method Analyze.
public TableTuple Analyze() {
/*
 * The first step is initializing the Stanford CoreNLP pipeline (the pipeline
 * will later be used to evaluate the text and annotate it). The pipeline is
 * initialized using a Properties object, which is used for setting all the
 * needed entities, annotations, training data and so on, in order to
 * customize the pipeline initialization so that it contains only the models
 * you need.
 */
final Properties props = new Properties();
/*
 * The "annotators" property key tells the pipeline which entities
 * should be initialized with our pipeline object. See
 * http://nlp.stanford.edu/software/corenlp.shtml for a complete
 * reference to the "annotators" values you can set here and what they
 * will contribute to the analysis process.
 */
props.put("annotators", "tokenize,ssplit, pos, regexner, parse,lemma,natlog,openie");
final StanfordCoreNLP pipeLine = new StanfordCoreNLP(props);
// inputText will be the text to evaluate in this example
final String inputText = input + "";
final Annotation document = new Annotation(inputText);
// Finally we use the pipeline to annotate the document we created
pipeLine.annotate(document);
final String $ = getName();
final String input_date = getDate(year);
String reason = "";
// more details about the reason, e.g. where it happened
String details = "";
String aux = "";
String accurate_name = "";
for (final CoreMap sentence : document.get(SentencesAnnotation.class)) {
final SemanticGraph dependencies = sentence.get(CollapsedDependenciesAnnotation.class);
for (final IndexedWord root : dependencies.getRoots()) for (final SemanticGraphEdge edge : dependencies.getOutEdgesSorted(root)) {
final IndexedWord dep = edge.getDependent();
final String rel = edge.getRelation() + "";
if (!"arrested".equals(edge.getGovernor().word()))
switch(rel) {
case "nmod:in":
details += "in" + " " + dep.word() + " ";
break;
case "nmod:during":
details += "during" + " " + dep.word() + " ";
break;
case "nmod:at":
details += "at" + " " + dep.word() + " ";
break;
}
} else {
//Finding the name in a more accurate manner:
if ("nsubjpass".equals(rel)) {
for (final SemanticGraphEdge keshet : dependencies.getOutEdgesSorted(dep)) {
final IndexedWord dep2 = keshet.getDependent();
final String rel2 = keshet.getRelation() + "";
if ((dep2.ner() != null && "PERSON".equals(dep2.ner())) || "compound".equals(rel2) || "det".equals(rel2))
accurate_name += dep2.word() + " ";
}
accurate_name += dep.word();
}
//Finding the reason in the paragraph
if ("advcl".equals(rel) || "advcl:for".equals(rel) || "nmod:for".equals(rel)) {
for (final SemanticGraphEdge keshet : dependencies.getOutEdgesSorted(dep)) {
final String rel2 = keshet.getRelation() + "";
final IndexedWord dep2 = keshet.getDependent();
if ("amod".equals(rel2) || "dobj".equals(rel2))
reason += dep2.word() + " ";
if ("xcomp".equals(rel2))
aux += " " + dep2.word();
switch(rel2) {
case "nmod:in":
final String longLocation = dep2.word();
details += "in ";
for (final SemanticGraphEdge keshet2 : dependencies.getOutEdgesSorted(dep2)) if ("compound".equals(keshet2.getRelation() + ""))
details += keshet2.getDependent().word() + " ";
details += longLocation;
break;
case "nmod:during":
details += "during" + " " + dep2.word() + " ";
break;
case "nmod:under":
details += "under " + dep2.word() + " ";
break;
case "nmod:of":
details += "of " + dep2.word();
break;
case "nmod:at":
details += "at" + " " + dep2.word() + " ";
break;
}
if ("suspicion".equals(keshet.getSource().word()) && "acl:of".equals(rel2))
details += dep2.word();
}
reason += dep.word();
reason += aux;
}
}
}
}
return new TableTuple(accurate_name.isEmpty() ? $ : accurate_name, input_date, (reason + " " + details).trim());
}
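A minimal, self-contained sketch of the same setup (the class name, example sentence, and reduced annotator list are illustrative, not taken from the project): it builds a pipeline, annotates a short text, and pulls the collapsed-dependencies SemanticGraph out of each sentence.
import java.util.Properties;
import edu.stanford.nlp.ling.CoreAnnotations.SentencesAnnotation;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.semgraph.SemanticGraph;
import edu.stanford.nlp.semgraph.SemanticGraphCoreAnnotations.CollapsedDependenciesAnnotation;
import edu.stanford.nlp.util.CoreMap;
public class SemanticGraphQuickStart {
    public static void main(String[] args) {
        // Only the annotators needed to produce dependency graphs.
        final Properties props = new Properties();
        props.put("annotators", "tokenize, ssplit, pos, lemma, parse");
        final StanfordCoreNLP pipeline = new StanfordCoreNLP(props);
        final Annotation document = new Annotation("John was arrested in Boston for stealing a car.");
        pipeline.annotate(document);
        for (final CoreMap sentence : document.get(SentencesAnnotation.class)) {
            final SemanticGraph dependencies = sentence.get(CollapsedDependenciesAnnotation.class);
            // toList() prints one "reln(governor-idx, dependent-idx)" line per edge.
            System.out.println(dependencies.toList());
        }
    }
}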
use of edu.stanford.nlp.semgraph.SemanticGraph in project cogcomp-nlp by CogComp.
the class StanfordDepHandler method addView.
@Override
public void addView(TextAnnotation textAnnotation) throws AnnotatorException {
// If the sentence is longer than STFRD_MAX_SENTENCE_LENGTH there is no point in trying to
// parse
StanfordParseHandler.checkLength(textAnnotation, throwExceptionOnSentenceLengthCheck, maxParseSentenceLength);
TreeView treeView = new TreeView(ViewNames.DEPENDENCY_STANFORD, "StanfordDepHandler", textAnnotation, 1d);
// The (tokenized) sentence offset in case we have more than one sentence in the record
List<CoreMap> sentences = StanfordParseHandler.buildStanfordSentences(textAnnotation);
Annotation document = new Annotation(sentences);
posAnnotator.annotate(document);
parseAnnotator.annotate(document);
sentences = document.get(CoreAnnotations.SentencesAnnotation.class);
if (sentences.get(0).get(TreeCoreAnnotations.TreeAnnotation.class).nodeString().equals("X")) {
// This is most likely because we ran out of time
throw new AnnotatorException("Unable to parse TextAnnotation " + textAnnotation.getId() + ". " + "This is most likely due to a timeout.");
}
for (int sentenceId = 0; sentenceId < sentences.size(); sentenceId++) {
boolean runtimeExceptionWasThrown = false;
CoreMap sentence = sentences.get(sentenceId);
if (maxParseSentenceLength > 0 && sentence.size() > maxParseSentenceLength) {
logger.warn(HandlerUtils.getSentenceLengthError(textAnnotation.getId(), sentence.toString(), maxParseSentenceLength));
} else {
SemanticGraph depGraph = sentence.get(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);
IndexedWord root = null;
try {
root = depGraph.getFirstRoot();
} catch (RuntimeException e) {
String msg = "ERROR in getting root of dep graph for sentence. Sentence is:\n" + sentence.toString() + "'\nDependency graph is:\n" + depGraph.toCompactString() + "\nText is:\n" + textAnnotation.getText();
logger.error(msg);
System.err.println(msg);
e.printStackTrace();
if (throwExceptionOnSentenceLengthCheck)
throw e;
else
runtimeExceptionWasThrown = true;
}
if (!runtimeExceptionWasThrown) {
int tokenStart = getNodePosition(textAnnotation, root, sentenceId);
Pair<String, Integer> nodePair = new Pair<>(root.originalText(), tokenStart);
Tree<Pair<String, Integer>> tree = new Tree<>(nodePair);
populateChildren(depGraph, root, tree, textAnnotation, sentenceId);
treeView.setDependencyTree(sentenceId, tree);
}
}
}
textAnnotation.addView(getViewName(), treeView);
}
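populateChildren and getNodePosition are private helpers of StanfordDepHandler and are not shown above. The method below is only a plausible sketch of the recursion, meant to sit next to addView inside the same class; it assumes cogcomp's Tree exposes addSubtree and reuses the getNodePosition helper referenced above.
private void populateChildren(SemanticGraph depGraph, IndexedWord parent, Tree<Pair<String, Integer>> tree, TextAnnotation textAnnotation, int sentenceId) {
    // getChildList returns the dependents of 'parent' in the dependency graph.
    for (IndexedWord child : depGraph.getChildList(parent)) {
        int childPosition = getNodePosition(textAnnotation, child, sentenceId);
        Tree<Pair<String, Integer>> childTree = new Tree<>(new Pair<>(child.originalText(), childPosition));
        tree.addSubtree(childTree);
        // Recurse so grandchildren end up below their own parent node.
        populateChildren(depGraph, child, childTree, textAnnotation, sentenceId);
    }
}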
use of edu.stanford.nlp.semgraph.SemanticGraph in project CoreNLP by stanfordnlp.
the class EnglishGrammaticalStructure method correctWHAttachment.
/**
* Tries to correct complicated cases of WH-movement in
* sentences such as "What does Mary seem to have?" in
* which "What" should attach to "have" instead of the
* control verb.
*
* @param sg The Semantic graph to operate on.
*/
private static void correctWHAttachment(SemanticGraph sg) {
/* Semgrexes require a graph with a root. */
if (sg.getRoots().isEmpty())
return;
SemanticGraph sgCopy = sg.makeSoftCopy();
SemgrexMatcher matcher = XCOMP_PATTERN.matcher(sgCopy);
while (matcher.findNextMatchingNode()) {
IndexedWord root = matcher.getNode("root");
IndexedWord embeddedVerb = matcher.getNode("embedded");
IndexedWord wh = matcher.getNode("wh");
IndexedWord dobj = matcher.getNode("obj");
/* Check if the object is a WH-word. */
if (wh.tag().startsWith("W")) {
boolean reattach = false;
/* If the control verb already has an object, then
we have to reattach the WH-word to the verb in the embedded clause. */
if (dobj != null) {
reattach = true;
} else {
/* If the control verb can't have an object, we also have to reattach. */
String lemma = Morphology.lemmaStatic(root.value(), root.tag());
if (lemma.matches(EnglishPatterns.NP_V_S_INF_VERBS_REGEX)) {
reattach = true;
}
}
if (reattach) {
SemanticGraphEdge edge = sg.getEdge(root, wh);
if (edge != null) {
sg.removeEdge(edge);
sg.addEdge(embeddedVerb, wh, DIRECT_OBJECT, Double.NEGATIVE_INFINITY, false);
}
}
}
}
}
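XCOMP_PATTERN is defined elsewhere in EnglishGrammaticalStructure and is not shown here, so the pattern string below is only an illustration that reuses the same node names (root, embedded, wh). The sketch builds a toy graph with SemanticGraph.valueOf and walks the matches the same way correctWHAttachment does.
import edu.stanford.nlp.semgraph.SemanticGraph;
import edu.stanford.nlp.semgraph.semgrex.SemgrexMatcher;
import edu.stanford.nlp.semgraph.semgrex.SemgrexPattern;
public class SemgrexXcompDemo {
    public static void main(String[] args) {
        // Illustrative pattern: a root verb with an xcomp complement that itself has an object.
        SemgrexPattern pattern = SemgrexPattern.compile("{}=root >xcomp ({}=embedded >obj {}=wh)");
        // Toy graph loosely corresponding to "What does Mary seem to have?".
        SemanticGraph sg = SemanticGraph.valueOf("[seem nsubj>Mary xcomp>[have obj>What]]");
        SemgrexMatcher matcher = pattern.matcher(sg);
        while (matcher.findNextMatchingNode()) {
            System.out.println(matcher.getNode("root") + " -> " + matcher.getNode("embedded") + " -> " + matcher.getNode("wh"));
        }
    }
}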
use of edu.stanford.nlp.semgraph.SemanticGraph in project CoreNLP by stanfordnlp.
the class UniversalEnglishGrammaticalStructure method treatCC.
private static void treatCC(SemanticGraph sg) {
// Construct a map from tree nodes to the set of typed
// dependencies in which the node appears as dependent.
Map<IndexedWord, Set<SemanticGraphEdge>> map = Generics.newHashMap();
// Construct a map of tree nodes being governor of a subject grammatical
// relation to that relation
Map<IndexedWord, SemanticGraphEdge> subjectMap = Generics.newHashMap();
// Construct a set of TreeGraphNodes with a passive auxiliary on them
Set<IndexedWord> withPassiveAuxiliary = Generics.newHashSet();
// Construct a map of tree nodes being governor of an object grammatical
// relation to that relation
// Map<TreeGraphNode, TypedDependency> objectMap = new
// HashMap<TreeGraphNode, TypedDependency>();
List<IndexedWord> rcmodHeads = Generics.newArrayList();
List<IndexedWord> prepcDep = Generics.newArrayList();
for (SemanticGraphEdge edge : sg.edgeIterable()) {
if (!map.containsKey(edge.getDependent())) {
// NB: Here and in other places below, we use a TreeSet (which extends
// SortedSet) to guarantee that results are deterministic.
map.put(edge.getDependent(), new TreeSet<>());
}
map.get(edge.getDependent()).add(edge);
if (edge.getRelation().equals(AUX_PASSIVE_MODIFIER)) {
withPassiveAuxiliary.add(edge.getGovernor());
}
// look for subjects
if (edge.getRelation().getParent() == NOMINAL_SUBJECT || edge.getRelation().getParent() == SUBJECT || edge.getRelation().getParent() == CLAUSAL_SUBJECT) {
if (!subjectMap.containsKey(edge.getGovernor())) {
subjectMap.put(edge.getGovernor(), edge);
}
}
// look for rcmod relations
if (edge.getRelation() == RELATIVE_CLAUSE_MODIFIER) {
rcmodHeads.add(edge.getGovernor());
}
// to avoid wrong propagation of dobj
if (edge.getRelation().toString().startsWith("acl:") || edge.getRelation().toString().startsWith("advcl:")) {
prepcDep.add(edge.getDependent());
}
}
// log.info(map);
// if (DEBUG) log.info("Subject map: " + subjectMap);
// if (DEBUG) log.info("Object map: " + objectMap);
// log.info(rcmodHeads);
// create a new list of typed dependencies
//Collection<TypedDependency> newTypedDeps = new ArrayList<TypedDependency>(list);
SemanticGraph sgCopy = sg.makeSoftCopy();
// find typed deps of form conj(gov,dep)
for (SemanticGraphEdge edge : sgCopy.edgeIterable()) {
if (UniversalEnglishGrammaticalRelations.getConjs().contains(edge.getRelation())) {
IndexedWord gov = edge.getGovernor();
IndexedWord dep = edge.getDependent();
// look at the dep in the conjunct
Set<SemanticGraphEdge> gov_relations = map.get(gov);
// log.info("gov " + gov);
if (gov_relations != null) {
for (SemanticGraphEdge edge1 : gov_relations) {
// log.info("gov rel " + td1);
IndexedWord newGov = edge1.getGovernor();
// it is possible to have overlapping newGov & dep
if (newGov.equals(dep)) {
continue;
}
GrammaticalRelation newRel = edge1.getRelation();
//TODO: Do we want to copy case markers here?
if (newRel != ROOT && newRel != CASE_MARKER) {
if (rcmodHeads.contains(gov) && rcmodHeads.contains(dep)) {
// to prevent wrong propagation in the case of long dependencies in relative clauses
if (newRel != DIRECT_OBJECT && newRel != NOMINAL_SUBJECT) {
if (DEBUG) {
log.info("Adding new " + newRel + " dependency from " + newGov + " to " + dep + " (subj/obj case)");
}
sg.addEdge(newGov, dep, newRel, Double.NEGATIVE_INFINITY, true);
}
} else {
if (DEBUG) {
log.info("Adding new " + newRel + " dependency from " + newGov + " to " + dep);
}
sg.addEdge(newGov, dep, newRel, Double.NEGATIVE_INFINITY, true);
}
}
}
}
// propagate subjects
// look at the gov in the conjunct: if it has a subject relation,
// the dep is a verb, and the dep doesn't have a subject relation,
// then we want to add a subject relation for the dep.
// (By testing for the dep to be a verb, we are going to miss subjects of
// copular verbs! But is it safe to relax this assumption, i.e., just test
// for the subject part?)
// CDM 2008: I also added in JJ, since participial verbs are often
// tagged JJ
String tag = dep.tag();
if (subjectMap.containsKey(gov) && (tag.startsWith("VB") || tag.startsWith("JJ")) && !subjectMap.containsKey(dep)) {
SemanticGraphEdge tdsubj = subjectMap.get(gov);
// check for wrong nsubjpass: if the new verb is VB or VBZ or VBP or JJ, then
// add nsubj (if it is tagged correctly, should do this for VBD too, but we don't)
GrammaticalRelation relation = tdsubj.getRelation();
if (relation == NOMINAL_PASSIVE_SUBJECT) {
if (isDefinitelyActive(tag)) {
relation = NOMINAL_SUBJECT;
}
} else if (relation == CLAUSAL_PASSIVE_SUBJECT) {
if (isDefinitelyActive(tag)) {
relation = CLAUSAL_SUBJECT;
}
} else if (relation == NOMINAL_SUBJECT) {
if (withPassiveAuxiliary.contains(dep)) {
relation = NOMINAL_PASSIVE_SUBJECT;
}
} else if (relation == CLAUSAL_SUBJECT) {
if (withPassiveAuxiliary.contains(dep)) {
relation = CLAUSAL_PASSIVE_SUBJECT;
}
}
if (DEBUG) {
log.info("Adding new " + relation + " dependency from " + dep + " to " + tdsubj.getDependent() + " (subj propagation case)");
}
sg.addEdge(dep, tdsubj.getDependent(), relation, Double.NEGATIVE_INFINITY, true);
}
// propagate objects
// cdm july 2010: This bit of code would copy a dobj from the first
// clause to a later conjoined clause if it didn't
// contain its own dobj or prepc. But this is too aggressive and wrong
// if the later clause is intransitive (including passivized cases), so
// I think we can't always do this, and I see no good "sometimes" heuristic.
// IF WE WERE TO REINSTATE, SHOULD ALSO NOT ADD OBJ IF THERE IS A ccomp
// (SBAR).
// if (objectMap.containsKey(gov) &&
// dep.tag().startsWith("VB") && ! objectMap.containsKey(dep)
// && ! prepcDep.contains(gov)) {
// TypedDependency tdobj = objectMap.get(gov);
// if (DEBUG) {
// log.info("Adding new " + tdobj.reln() + " dependency from "
// + dep + " to " + tdobj.dep() + " (obj propagation case)");
// }
// newTypedDeps.add(new TypedDependency(tdobj.reln(), dep,
// tdobj.dep()));
// }
}
}
}
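To make the subject-propagation branch concrete: for a sentence like "Bill bought and ate an apple" the graph starts with nsubj(bought, Bill) and conj:and(bought, ate), and treatCC copies the subject onto the second conjunct. Since treatCC is private, the sketch below (a toy graph, not the real call path) reproduces the effect by hand, adding the copied edge with isExtra = true exactly as in the addEdge calls above.
import edu.stanford.nlp.ling.IndexedWord;
import edu.stanford.nlp.semgraph.SemanticGraph;
import edu.stanford.nlp.semgraph.SemanticGraphEdge;
import edu.stanford.nlp.trees.UniversalEnglishGrammaticalRelations;
public class ConjSubjectPropagationDemo {
    public static void main(String[] args) {
        // "Bill bought and ate an apple"
        SemanticGraph sg = SemanticGraph.valueOf("[bought nsubj>Bill conj:and>ate obj>apple]");
        IndexedWord bought = sg.getFirstRoot();
        IndexedWord bill = null, ate = null;
        for (SemanticGraphEdge e : sg.outgoingEdgeIterable(bought)) {
            if (e.getRelation().toString().startsWith("nsubj")) bill = e.getDependent();
            if (e.getRelation().toString().startsWith("conj")) ate = e.getDependent();
        }
        // What the propagation step does: give the second conjunct its own subject,
        // marked as an "extra" edge so the underlying tree structure is preserved.
        sg.addEdge(ate, bill, UniversalEnglishGrammaticalRelations.NOMINAL_SUBJECT, Double.NEGATIVE_INFINITY, true);
        System.out.println(sg.toList());
    }
}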
use of edu.stanford.nlp.semgraph.SemanticGraph in project CoreNLP by stanfordnlp.
the class UniversalEnglishGrammaticalStructure method correctSubjPass.
/**
* This method corrects subjects of verbs for which we identified an auxpass,
* but didn't identify the subject as passive.
*
* @param sg SemanticGraph to work on
*/
private static void correctSubjPass(SemanticGraph sg) {
/* If the graph doesn't have a root (most likely because
 * of a parsing error), we can't match Semgrexes, so do
 * nothing. */
if (sg.getRoots().isEmpty())
return;
SemanticGraph sgCopy = sg.makeSoftCopy();
SemgrexMatcher matcher = CORRECT_SUBJPASS_PATTERN.matcher(sgCopy);
while (matcher.find()) {
IndexedWord gov = matcher.getNode("gov");
IndexedWord subj = matcher.getNode("subj");
SemanticGraphEdge edge = sg.getEdge(gov, subj);
GrammaticalRelation reln = null;
if (edge.getRelation() == NOMINAL_SUBJECT) {
reln = NOMINAL_PASSIVE_SUBJECT;
} else if (edge.getRelation() == CLAUSAL_SUBJECT) {
reln = CLAUSAL_PASSIVE_SUBJECT;
} else if (edge.getRelation() == CONTROLLING_NOMINAL_SUBJECT) {
reln = CONTROLLING_NOMINAL_PASSIVE_SUBJECT;
} else if (edge.getRelation() == CONTROLLING_CLAUSAL_SUBJECT) {
reln = CONTROLLING_CLAUSAL_PASSIVE_SUBJECT;
}
if (reln != null) {
sg.removeEdge(edge);
sg.addEdge(gov, subj, reln, Double.NEGATIVE_INFINITY, false);
}
}
}
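CORRECT_SUBJPASS_PATTERN is likewise defined elsewhere in UniversalEnglishGrammaticalStructure; the pattern string below is only a guess at its shape (a governor with an aux:pass dependent and a still-active subject), not the real pattern. The sketch applies the same match-and-relabel step as the loop above to a toy graph for "The cake was eaten".
import edu.stanford.nlp.ling.IndexedWord;
import edu.stanford.nlp.semgraph.SemanticGraph;
import edu.stanford.nlp.semgraph.SemanticGraphEdge;
import edu.stanford.nlp.semgraph.semgrex.SemgrexMatcher;
import edu.stanford.nlp.semgraph.semgrex.SemgrexPattern;
import edu.stanford.nlp.trees.UniversalEnglishGrammaticalRelations;
public class SubjPassCorrectionDemo {
    public static void main(String[] args) {
        // Guessed pattern shape, not the real CORRECT_SUBJPASS_PATTERN.
        SemgrexPattern pattern = SemgrexPattern.compile("{}=gov >aux:pass {} >nsubj {}=subj");
        // Toy graph for "The cake was eaten" with the subject not yet marked passive.
        SemanticGraph sg = SemanticGraph.valueOf("[eaten nsubj>cake aux:pass>was]");
        // As in correctSubjPass, match on a soft copy and edit the original graph.
        SemgrexMatcher matcher = pattern.matcher(sg.makeSoftCopy());
        while (matcher.find()) {
            IndexedWord gov = matcher.getNode("gov");
            IndexedWord subj = matcher.getNode("subj");
            SemanticGraphEdge edge = sg.getEdge(gov, subj);
            // Relabel nsubj as nsubj:pass, mirroring the removeEdge/addEdge step above.
            sg.removeEdge(edge);
            sg.addEdge(gov, subj, UniversalEnglishGrammaticalRelations.NOMINAL_PASSIVE_SUBJECT, Double.NEGATIVE_INFINITY, false);
        }
        System.out.println(sg.toList());
    }
}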