Use of com.graphaware.nlp.processor.stanford.StanfordTextProcessor in project neo4j-nlp-stanfordnlp by graphaware.
Class TextProcessorTest, method init().
@BeforeClass
public static void init() {
    textProcessor = new StanfordTextProcessor();
    textProcessor.init();
    // Enable only the tokenization and NER steps for the shared test pipeline
    Map<String, Object> processingSteps = new HashMap<>();
    processingSteps.put(AbstractTextProcessor.STEP_TOKENIZE, true);
    processingSteps.put(AbstractTextProcessor.STEP_NER, true);
    PipelineSpecification pipelineSpecification = new PipelineSpecification("default", StanfordTextProcessor.class.getName(), processingSteps, null, 1L, Collections.emptyList(), Collections.emptyList());
    PIPELINE_DEFAULT = pipelineSpecification;
    // Register the specification so the tests below can annotate with the "default" pipeline
    textProcessor.createPipeline(PIPELINE_DEFAULT);
}
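A minimal sketch of how a test built on this fixture could exercise the default pipeline. It is not part of the project; the JUnit static imports and the getSentences() accessor on AnnotatedText are assumptions made for illustration.

@Test
public void testDefaultPipelineAnnotatesText() {
    // Annotate with the pipeline created in init(); PIPELINE_DEFAULT comes from the fixture above
    AnnotatedText at = textProcessor.annotateText("John Smith lives in London", "en", PIPELINE_DEFAULT);
    assertNotNull(at);
    // Assumption: AnnotatedText exposes its sentences via getSentences()
    assertFalse(at.getSentences().isEmpty());
}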
Use of com.graphaware.nlp.processor.stanford.StanfordTextProcessor in project neo4j-nlp-stanfordnlp by graphaware.
Class DependencyParserTest, method testTagMerging().
@Test
public void testTagMerging() throws Exception {
    StanfordCoreNLP pipeline = ((StanfordTextProcessor) textProcessor).getPipeline("default");
    String text = "Donald Trump flew yesterday to New York City";
    // Annotate through the GraphAware wrapper, which merges multi-word entities such as "Donald Trump" into single tags
    AnnotatedText at = textProcessor.annotateText(text, "en", PIPELINE_DEFAULT);
}
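The test annotates the sentence but does not inspect the result. A sketch like the following, reusing the pipeline and text variables from the test body, shows how the per-token NER labels from the underlying CoreNLP pipeline could be printed to see the multi-word spans that tag merging is meant to collapse. The snippet is an illustration, not code from the project.

Annotation doc = new Annotation(text);
pipeline.annotate(doc);
for (CoreMap sent : doc.get(CoreAnnotations.SentencesAnnotation.class)) {
    for (CoreLabel token : sent.get(CoreAnnotations.TokensAnnotation.class)) {
        // Expect PERSON for "Donald"/"Trump" and a location-type label for "New"/"York"/"City",
        // depending on the NER model in use
        System.out.println(token.word() + " -> " + token.get(CoreAnnotations.NamedEntityTagAnnotation.class));
    }
}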
Use of com.graphaware.nlp.processor.stanford.StanfordTextProcessor in project neo4j-nlp-stanfordnlp by graphaware.
Class DependencyParserTest, method testStanfordTypedDependenciesParsing().
@Test
public void testStanfordTypedDependenciesParsing() {
    StanfordCoreNLP pipeline = ((StanfordTextProcessor) textProcessor).getPipeline("default");
    String text = "Show me Josh Wedhon latest movies";
    Annotation document = new Annotation(text);
    pipeline.annotate(document);
    List<CoreMap> sentences = document.get(CoreAnnotations.SentencesAnnotation.class);
    CoreMap sentence = sentences.get(0);
    System.out.println(sentence.toString());
    // Enhanced++ dependencies: the richest of Stanford's dependency representations
    SemanticGraph graph = sentence.get(SemanticGraphCoreAnnotations.EnhancedPlusPlusDependenciesAnnotation.class);
    System.out.println(graph);
    List<SemanticGraphEdge> edges = graph.edgeListSorted();
    for (SemanticGraphEdge edge : edges) {
        System.out.println(edge.getRelation().getSpecific());
        System.out.println(edge.getRelation().getShortName());
        System.out.println(String.format("Source is : %s - Target is : %s - Relation is : %s", edge.getSource(), edge.getTarget(), edge.getRelation()));
    }
}
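Beyond dumping every edge, a test often wants one specific grammatical relation. The sketch below, which would continue the test body above where graph is in scope, filters edges by their short relation name; the choice of "dobj"/"obj" is an assumption that depends on the dependency scheme produced by the CoreNLP version in use.

// Illustration only: pull out the direct-object relation from the parsed graph
for (SemanticGraphEdge edge : graph.edgeListSorted()) {
    String rel = edge.getRelation().getShortName();
    if ("dobj".equals(rel) || "obj".equals(rel)) {
        // For this sample sentence the governor is the verb "Show" and the dependent "movies"
        System.out.println(edge.getGovernor().word() + " -> " + edge.getDependent().word());
    }
}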
Use of com.graphaware.nlp.processor.stanford.StanfordTextProcessor in project neo4j-nlp-stanfordnlp by graphaware.
Class DependencyParserTest, method testStanfordNLPWithPredefinedProcessors().
@Test
public void testStanfordNLPWithPredefinedProcessors() throws Exception {
    StanfordCoreNLP pipeline = ((StanfordTextProcessor) textProcessor).getPipeline("default");
    String text = "Donald Trump flew yesterday to New York City";
    AnnotatedText at = textProcessor.annotateText(text, "en", PIPELINE_DEFAULT);
    Annotation document = new Annotation(text);
    pipeline.annotate(document);
    List<CoreMap> sentences = document.get(CoreAnnotations.SentencesAnnotation.class);
    CoreMap sentence = sentences.get(0);
    System.out.println(sentence.toString());
    // Enhanced (not Enhanced++) dependencies this time
    SemanticGraph graph = sentence.get(SemanticGraphCoreAnnotations.EnhancedDependenciesAnnotation.class);
    System.out.println(graph);
    List<SemanticGraphEdge> edges = graph.edgeListSorted();
    for (SemanticGraphEdge edge : edges) {
        System.out.println(edge.getRelation().getShortName());
        System.out.println(String.format("Source is : %s - Target is : %s - Relation is : %s", edge.getSource(), edge.getTarget(), edge.getRelation()));
    }
}
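CoreNLP attaches three dependency graphs to each sentence (basic, enhanced, enhanced++). As a rough comparison, a few extra lines like the following could be appended to the test body above, where sentence and graph are in scope; this is a sketch for illustration, not part of the original test.

// Compare edge counts across the three dependency representations of the same sentence
SemanticGraph basic = sentence.get(SemanticGraphCoreAnnotations.BasicDependenciesAnnotation.class);
SemanticGraph enhancedPlusPlus = sentence.get(SemanticGraphCoreAnnotations.EnhancedPlusPlusDependenciesAnnotation.class);
System.out.println("basic edges: " + basic.edgeCount());
System.out.println("enhanced edges: " + graph.edgeCount());
System.out.println("enhanced++ edges: " + enhancedPlusPlus.edgeCount());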
Use of com.graphaware.nlp.processor.stanford.StanfordTextProcessor in project neo4j-nlp-stanfordnlp by graphaware.
Class DependencyParserTest, method testEnhancedDependencyParsingWithQuestion().
@Test
public void testEnhancedDependencyParsingWithQuestion() throws Exception {
    String text = "In what area was Frederic born in";
    StanfordCoreNLP pipeline = ((StanfordTextProcessor) textProcessor).getPipeline("default");
    // Build a custom pipeline specification from a plain map, declaring only the dependency step
    Map<String, Object> customPipeline = new HashMap<>();
    customPipeline.put("textProcessor", "com.graphaware.nlp.processor.stanford.StanfordTextProcessor");
    customPipeline.put("name", "custom");
    customPipeline.put("stopWords", "start,starts");
    customPipeline.put("processingSteps", Collections.singletonMap("dependency", true));
    PipelineSpecification pipelineSpecification = PipelineSpecification.fromMap(customPipeline);
    ((StanfordTextProcessor) textProcessor).createPipeline(pipelineSpecification);
    textProcessor.annotateText(text, "en", pipelineSpecification);
    Annotation document = new Annotation(text);
    pipeline.annotate(document);
    List<CoreMap> sentences = document.get(CoreAnnotations.SentencesAnnotation.class);
    for (CoreMap sentence : sentences) {
        System.out.println(sentence.toString());
        SemanticGraph graph = sentence.get(SemanticGraphCoreAnnotations.EnhancedDependenciesAnnotation.class);
        graph.getRoots().forEach(root -> {
            System.out.println(root);
        });
        System.out.println(graph);
        for (SemanticGraphEdge edge : graph.edgeListSorted()) {
            System.out.println(String.format("Source is : %s - Target is : %s - Relation is : %s", edge.getSource(), edge.getTarget(), edge.getRelation()));
        }
    }
}
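For a question like this, the interesting structure usually hangs directly off the root. A small sketch such as the one below could sit inside the per-sentence loop above, where graph is in scope; it is an illustration only and assumes edu.stanford.nlp.ling.IndexedWord is imported.

// Walk one level out from the first root of the dependency graph
IndexedWord root = graph.getFirstRoot();
for (SemanticGraphEdge edge : graph.outgoingEdgeIterable(root)) {
    System.out.println(root.word() + " -" + edge.getRelation().getShortName() + "-> " + edge.getDependent().word());
}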