Example 21 with AnalysisEngineDescription

Use of org.apache.uima.analysis_engine.AnalysisEngineDescription in project dkpro-tc by dkpro.

In class KeywordNGramTest, method initialize.

private List<Instance> initialize(boolean includeComma, boolean markSentenceLocation) throws Exception {
    File luceneFolder = folder.newFolder();
    File outputPath = folder.newFolder();
    Object[] parameters = new Object[] {
            KeywordNGram.PARAM_UNIQUE_EXTRACTOR_NAME, "123",
            KeywordNGram.PARAM_NGRAM_KEYWORDS_FILE, "src/test/resources/data/keywordlist.txt",
            KeywordNGram.PARAM_SOURCE_LOCATION, luceneFolder,
            KeywordNGramMC.PARAM_TARGET_LOCATION, luceneFolder,
            KeywordNGram.PARAM_KEYWORD_NGRAM_MARK_SENTENCE_LOCATION, markSentenceLocation,
            KeywordNGram.PARAM_KEYWORD_NGRAM_INCLUDE_COMMAS, includeComma };
    CollectionReaderDescription reader = CollectionReaderFactory.createReaderDescription(TestReaderSingleLabelDocumentReader.class, TestReaderSingleLabelDocumentReader.PARAM_SOURCE_LOCATION, "src/test/resources/ngrams/trees.txt");
    AnalysisEngineDescription segmenter = AnalysisEngineFactory.createEngineDescription(BreakIteratorSegmenter.class);
    AnalysisEngineDescription metaCollector = AnalysisEngineFactory.createEngineDescription(KeywordNGramMC.class, parameters);
    ExternalResourceDescription featureExtractor = ExternalResourceFactory.createExternalResourceDescription(KeywordNGram.class, toString(parameters));
    List<ExternalResourceDescription> fes = new ArrayList<>();
    fes.add(featureExtractor);
    AnalysisEngineDescription featExtractorConnector = TaskUtils.getFeatureExtractorConnector(
            outputPath.getAbsolutePath(), JsonDataWriter.class.getName(),
            Constants.LM_SINGLE_LABEL, Constants.FM_DOCUMENT,
            false, false, false, false, Collections.emptyList(), fes, new String[] {});
    // run meta collector
    SimplePipeline.runPipeline(reader, segmenter, metaCollector);
    // run FE(s)
    SimplePipeline.runPipeline(reader, segmenter, featExtractorConnector);
    Gson gson = new Gson();
    List<String> lines = FileUtils.readLines(new File(outputPath, JsonDataWriter.JSON_FILE_NAME), "utf-8");
    List<Instance> instances = new ArrayList<>();
    for (String l : lines) {
        instances.add(gson.fromJson(l, Instance.class));
    }
    assertEquals(1, instances.size());
    return instances;
}
Also used : JsonDataWriter(org.dkpro.tc.core.io.JsonDataWriter) Instance(org.dkpro.tc.api.features.Instance) ArrayList(java.util.ArrayList) Gson(com.google.gson.Gson) CollectionReaderDescription(org.apache.uima.collection.CollectionReaderDescription) AnalysisEngineDescription(org.apache.uima.analysis_engine.AnalysisEngineDescription) File(java.io.File) ExternalResourceDescription(org.apache.uima.resource.ExternalResourceDescription)
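The helper above calls folder.newFolder(), so it assumes a JUnit 4 TemporaryFolder rule named folder in the surrounding test class. A minimal sketch of that scaffolding, with a hypothetical test method name (the real KeywordNGramTest defines its own test methods):

import java.util.List;

import org.dkpro.tc.api.features.Instance;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;

import static org.junit.Assert.assertEquals;

public class KeywordNGramTest {

    // initialize() calls folder.newFolder(), so the rule must use exactly this field name
    @Rule
    public TemporaryFolder folder = new TemporaryFolder();

    // hypothetical test showing how the helper is consumed
    @Test
    public void keywordNgramsAreExtracted() throws Exception {
        List<Instance> instances = initialize(false, false);
        assertEquals(1, instances.size());
    }

    // ... private List<Instance> initialize(boolean, boolean) as shown above ...
}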

Example 22 with AnalysisEngineDescription

Use of org.apache.uima.analysis_engine.AnalysisEngineDescription in project dkpro-tc by dkpro.

In class LuceneNGramCPMetaCollectorTest, method combinedNgramPairMetaCollectorTest.

@Test
public void combinedNgramPairMetaCollectorTest() throws Exception {
    File tmpDir = folder.newFolder();
    CollectionReaderDescription reader = CollectionReaderFactory.createReaderDescription(TestPairReader.class, TestPairReader.PARAM_INPUT_FILE, "src/test/resources/data/textpairs.txt");
    AnalysisEngineDescription segmenter = AnalysisEngineFactory.createEngineDescription(BreakIteratorSegmenter.class);
    AnalysisEngineDescription doc = AnalysisEngineFactory.createEngineDescription(DocumentModeAnnotator.class, DocumentModeAnnotator.PARAM_FEATURE_MODE, Constants.FM_PAIR);
    AggregateBuilder builder = new AggregateBuilder();
    builder.add(segmenter, Constants.INITIAL_VIEW, Constants.PART_ONE);
    builder.add(doc, Constants.INITIAL_VIEW, Constants.PART_ONE);
    builder.add(segmenter, Constants.INITIAL_VIEW, Constants.PART_TWO);
    builder.add(doc, Constants.INITIAL_VIEW, Constants.PART_TWO);
    AnalysisEngineDescription metaCollector = AnalysisEngineFactory.createEngineDescription(
            LuceneNGramCPMetaCollector.class,
            LuceneNGramCPFE.PARAM_UNIQUE_EXTRACTOR_NAME, "123",
            LuceneNGramCPFE.PARAM_SOURCE_LOCATION, tmpDir,
            LuceneNGramPMetaCollector.PARAM_TARGET_LOCATION, tmpDir);
    // iterating the JCasIterable is what actually runs the pipeline; the test fails if this loop is removed
    for (@SuppressWarnings("unused") JCas jcas : new JCasIterable(reader, builder.createAggregateDescription(), metaCollector)) {
    // System.out.println(jcas.getDocumentText().length());
    }
    int i = 0;
    IndexReader index;
    try {
        index = DirectoryReader.open(FSDirectory.open(tmpDir));
        Fields fields = MultiFields.getFields(index);
        if (fields != null) {
            Terms terms = fields.terms(LuceneNGramCPFE.LUCENE_NGRAM_FIELDCOMBO);
            if (terms != null) {
                TermsEnum termsEnum = terms.iterator(null);
                BytesRef text = null;
                while ((text = termsEnum.next()) != null) {
                    // spot-check one representative combined ngram
                    if (text.utf8ToString().equals("mice_ANDcats_.")) {
                        assertEquals(1, termsEnum.docFreq());
                        assertEquals(1, termsEnum.totalTermFreq());
                    }
                    i++;
                }
            }
        }
    } catch (Exception e) {
        throw new ResourceInitializationException(e);
    }
    assertEquals(65, i);
}
Also used : JCasIterable(org.apache.uima.fit.pipeline.JCasIterable) Terms(org.apache.lucene.index.Terms) JCas(org.apache.uima.jcas.JCas) ResourceInitializationException(org.apache.uima.resource.ResourceInitializationException) TermsEnum(org.apache.lucene.index.TermsEnum) CollectionReaderDescription(org.apache.uima.collection.CollectionReaderDescription) Fields(org.apache.lucene.index.Fields) MultiFields(org.apache.lucene.index.MultiFields) AggregateBuilder(org.apache.uima.fit.factory.AggregateBuilder) AnalysisEngineDescription(org.apache.uima.analysis_engine.AnalysisEngineDescription) IndexReader(org.apache.lucene.index.IndexReader) File(java.io.File) BytesRef(org.apache.lucene.util.BytesRef) Test(org.junit.Test)
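The index-probing loop in this test follows a reusable idiom: open the Lucene index that the meta collector wrote and walk the terms of one field. A hedged sketch of the counting part as a standalone helper, using the same pre-5.x Lucene API as the test (FSDirectory.open(File), MultiFields, terms.iterator(null)); the class and method names are hypothetical:

import java.io.File;
import java.io.IOException;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.Fields;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.store.FSDirectory;

public final class LuceneIndexProbe {

    // Counts the terms stored in one field of the given index directory;
    // returns 0 if the index has no fields or the field does not exist.
    public static int countTerms(File indexDir, String field) throws IOException {
        IndexReader index = DirectoryReader.open(FSDirectory.open(indexDir));
        try {
            Fields fields = MultiFields.getFields(index);
            if (fields == null) {
                return 0;
            }
            Terms terms = fields.terms(field);
            if (terms == null) {
                return 0;
            }
            TermsEnum termsEnum = terms.iterator(null);
            int n = 0;
            while (termsEnum.next() != null) {
                n++;
            }
            return n;
        } finally {
            index.close();
        }
    }
}

With such a helper the final assertion could be written as assertEquals(65, LuceneIndexProbe.countTerms(tmpDir, LuceneNGramCPFE.LUCENE_NGRAM_FIELDCOMBO)); the per-term docFreq/totalTermFreq checks would still need the explicit loop shown in the test.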

Example 23 with AnalysisEngineDescription

Use of org.apache.uima.analysis_engine.AnalysisEngineDescription in project dkpro-tc by dkpro.

In class PPipelineTestBase, method runPipeline.

protected void runPipeline() throws Exception {
    List<Object> parameterList = new ArrayList<Object>(Arrays.asList(parameters));
    CollectionReaderDescription reader = CollectionReaderFactory.createReaderDescription(TestPairReader.class, TestPairReader.PARAM_INPUT_FILE, setTestPairsLocation());
    AnalysisEngineDescription segmenter = AnalysisEngineFactory.createEngineDescription(BreakIteratorSegmenter.class);
    AggregateBuilder builder = new AggregateBuilder();
    builder.add(segmenter, Constants.INITIAL_VIEW, Constants.PART_ONE);
    builder.add(segmenter, Constants.INITIAL_VIEW, Constants.PART_TWO);
    getMetaCollector(parameterList);
    getFeatureExtractorCollector(parameterList);
    // run meta collector
    SimplePipeline.runPipeline(reader, builder.createAggregateDescription(), metaCollector);
    // run FE(s)
    SimplePipeline.runPipeline(reader, builder.createAggregateDescription(), featExtractorConnector);
    Gson gson = new Gson();
    List<String> lines = FileUtils.readLines(new File(outputPath, JsonDataWriter.JSON_FILE_NAME), "utf-8");
    for (String l : lines) {
        instanceList.add(gson.fromJson(l, Instance.class));
    }
    assertEquals(1, lines.size());
    assertEquals(1, getUniqueOutcomes(instanceList).size());
    featureNames = getFeatureNames(instanceList);
    for (int i = 0; i < instanceList.size(); i++) {
        outcomeList.add(instanceList.get(i).getOutcomes());
    }
}
Also used : CollectionReaderDescription(org.apache.uima.collection.CollectionReaderDescription) AggregateBuilder(org.apache.uima.fit.factory.AggregateBuilder) Instance(org.dkpro.tc.api.features.Instance) ArrayList(java.util.ArrayList) AnalysisEngineDescription(org.apache.uima.analysis_engine.AnalysisEngineDescription) Gson(com.google.gson.Gson) File(java.io.File)
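runPipeline() relies on several members that are not declared in the snippet (parameters, outputPath, metaCollector, featExtractorConnector, instanceList, outcomeList, featureNames) and on hooks such as getMetaCollector(), getFeatureExtractorCollector() and setTestPairsLocation(). A rough sketch of what the base class must declare, with the names taken from the snippet; the exact types, modifiers and initial values are assumptions, not the actual dkpro-tc declarations:

import java.io.File;
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

import org.apache.uima.analysis_engine.AnalysisEngineDescription;
import org.dkpro.tc.api.features.Instance;

public abstract class PPipelineTestBase {

    protected Object[] parameters;                                 // filled in by subclasses
    protected File outputPath;                                     // initialized elsewhere (not shown)
    protected AnalysisEngineDescription metaCollector;             // set in getMetaCollector()
    protected AnalysisEngineDescription featExtractorConnector;    // set in getFeatureExtractorCollector()
    protected List<Instance> instanceList = new ArrayList<>();
    protected List<List<String>> outcomeList = new ArrayList<>();  // Instance.getOutcomes() yields a list per instance
    protected Set<String> featureNames;                            // assumed collection type

    // Hooks called from runPipeline(); the signatures below are assumptions.
    protected abstract void getMetaCollector(List<Object> parameterList) throws Exception;

    protected abstract void getFeatureExtractorCollector(List<Object> parameterList) throws Exception;

    protected abstract String setTestPairsLocation();

    // getUniqueOutcomes(...) and getFeatureNames(...) helpers are not sketched here.
}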

Example 24 with AnalysisEngineDescription

Use of org.apache.uima.analysis_engine.AnalysisEngineDescription in project dkpro-tc by dkpro.

In class LuceneNGramPMetaCollectorTest, method lucenePairNgramMetaCollectorTest.

@Test
public void lucenePairNgramMetaCollectorTest() throws Exception {
    File tmpDir = folder.newFolder();
    CollectionReaderDescription reader = CollectionReaderFactory.createReaderDescription(TestPairReader.class, TestPairReader.PARAM_INPUT_FILE, "src/test/resources/data/textpairs.txt");
    AnalysisEngineDescription segmenter = AnalysisEngineFactory.createEngineDescription(BreakIteratorSegmenter.class);
    AnalysisEngineDescription doc = AnalysisEngineFactory.createEngineDescription(DocumentModeAnnotator.class, DocumentModeAnnotator.PARAM_FEATURE_MODE, Constants.FM_PAIR);
    AggregateBuilder builder = new AggregateBuilder();
    builder.add(segmenter, Constants.INITIAL_VIEW, Constants.PART_ONE);
    builder.add(doc, Constants.INITIAL_VIEW, Constants.PART_ONE);
    builder.add(segmenter, Constants.INITIAL_VIEW, Constants.PART_TWO);
    builder.add(doc, Constants.INITIAL_VIEW, Constants.PART_TWO);
    AnalysisEngineDescription metaCollector = AnalysisEngineFactory.createEngineDescription(
            LuceneNGramPMetaCollector.class,
            LuceneNGramPFE.PARAM_UNIQUE_EXTRACTOR_NAME, "123",
            LuceneNGramPFE.PARAM_SOURCE_LOCATION, tmpDir,
            LuceneNGramPMetaCollector.PARAM_TARGET_LOCATION, tmpDir);
    // iterating the JCasIterable is what actually runs the pipeline; the test fails if this loop is removed
    for (@SuppressWarnings("unused") JCas jcas : new JCasIterable(reader, builder.createAggregateDescription(), metaCollector)) {
    // System.out.println(jcas.getDocumentText().length());
    }
    int i = 0;
    IndexReader index;
    try {
        index = DirectoryReader.open(FSDirectory.open(tmpDir));
        Fields fields = MultiFields.getFields(index);
        if (fields != null) {
            Terms terms = fields.terms(WordNGram.LUCENE_NGRAM_FIELD);
            if (terms != null) {
                TermsEnum termsEnum = terms.iterator(null);
                BytesRef text = null;
                while ((text = termsEnum.next()) != null) {
                    if (text.utf8ToString().equals("this")) {
                        assertEquals(2, termsEnum.docFreq());
                        assertEquals(3, termsEnum.totalTermFreq());
                    }
                    i++;
                }
            }
        }
    } catch (Exception e) {
        throw new ResourceInitializationException(e);
    }
    assertEquals(16, i);
}
Also used : JCasIterable(org.apache.uima.fit.pipeline.JCasIterable) Terms(org.apache.lucene.index.Terms) JCas(org.apache.uima.jcas.JCas) ResourceInitializationException(org.apache.uima.resource.ResourceInitializationException) TermsEnum(org.apache.lucene.index.TermsEnum) CollectionReaderDescription(org.apache.uima.collection.CollectionReaderDescription) Fields(org.apache.lucene.index.Fields) MultiFields(org.apache.lucene.index.MultiFields) AggregateBuilder(org.apache.uima.fit.factory.AggregateBuilder) AnalysisEngineDescription(org.apache.uima.analysis_engine.AnalysisEngineDescription) IndexReader(org.apache.lucene.index.IndexReader) File(java.io.File) BytesRef(org.apache.lucene.util.BytesRef) Test(org.junit.Test)

Example 25 with AnalysisEngineDescription

Use of org.apache.uima.analysis_engine.AnalysisEngineDescription in project dkpro-tc by dkpro.

In class TypeTokenPairFeatureExtractorTest, method setUp.

@Before
public void setUp() throws ResourceInitializationException, AnalysisEngineProcessException {
    AnalysisEngineDescription desc = createEngineDescription(BreakIteratorSegmenter.class);
    AnalysisEngine engine = createEngine(desc);
    jcas1 = engine.newJCas();
    jcas1.setDocumentLanguage("en");
    jcas1.setDocumentText("This is text");
    engine.process(jcas1);
    Lemma lemma1 = new Lemma(jcas1, 0, 4);
    lemma1.setValue("text");
    lemma1.addToIndexes();
    Lemma lemma2 = new Lemma(jcas1, 5, 7);
    lemma2.setValue("is");
    lemma2.addToIndexes();
    Lemma lemma3 = new Lemma(jcas1, 8, 10);
    lemma3.setValue("text");
    lemma3.addToIndexes();
    jcas2 = engine.newJCas();
    jcas2.setDocumentLanguage("en");
    jcas2.setDocumentText("Text is text");
    engine.process(jcas2);
    Lemma lemma4 = new Lemma(jcas2, 0, 4);
    lemma4.setValue("text");
    lemma4.addToIndexes();
    Lemma lemma5 = new Lemma(jcas2, 8, 10);
    lemma5.setValue("text");
    lemma5.addToIndexes();
}
Also used : Lemma(de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Lemma) AnalysisEngineDescription(org.apache.uima.analysis_engine.AnalysisEngineDescription) AnalysisEngine(org.apache.uima.analysis_engine.AnalysisEngine) Before(org.junit.Before)
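setUp() assigns to jcas1 and jcas2 without declaring them, so both must be fields of the test class. A minimal sketch of that surrounding class, limited to what the snippet implies; the actual test methods of TypeTokenPairFeatureExtractorTest are not reproduced here:

import org.apache.uima.jcas.JCas;

public class TypeTokenPairFeatureExtractorTest {

    // the two document views prepared by setUp(): "This is text" and "Text is text",
    // each carrying Lemma annotations whose values deliberately repeat "text"
    private JCas jcas1;
    private JCas jcas2;

    // @Before public void setUp() ... as shown above

    // @Test methods exercising the pair feature extractor go here (not shown)
}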

Aggregations

AnalysisEngineDescription (org.apache.uima.analysis_engine.AnalysisEngineDescription): 62
Test (org.junit.Test): 32
File (java.io.File): 27
CollectionReaderDescription (org.apache.uima.collection.CollectionReaderDescription): 25
ArrayList (java.util.ArrayList): 22
AnalysisEngine (org.apache.uima.analysis_engine.AnalysisEngine): 18
JCas (org.apache.uima.jcas.JCas): 16
Feature (org.dkpro.tc.api.features.Feature): 13
FeatureTestUtil.assertFeature (org.dkpro.tc.testing.FeatureTestUtil.assertFeature): 11
ExternalResourceDescription (org.apache.uima.resource.ExternalResourceDescription): 10
AggregateBuilder (org.apache.uima.fit.factory.AggregateBuilder): 8
ResourceInitializationException (org.apache.uima.resource.ResourceInitializationException): 8
JsonDataWriter (org.dkpro.tc.core.io.JsonDataWriter): 8
TextClassificationTarget (org.dkpro.tc.api.type.TextClassificationTarget): 7
Gson (com.google.gson.Gson): 6
IOException (java.io.IOException): 6
Instance (org.dkpro.tc.api.features.Instance): 6
OpenNlpPosTagger (de.tudarmstadt.ukp.dkpro.core.opennlp.OpenNlpPosTagger): 4
BreakIteratorSegmenter (de.tudarmstadt.ukp.dkpro.core.tokit.BreakIteratorSegmenter): 4
CAS (org.apache.uima.cas.CAS): 4