Search in sources :

Example 1 with POS

use of de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.pos.POS in project webanno by webanno.

The following example shows the configure method of the class PartOfSpeechLayerInitializer.

@Override
public void configure(Project aProject) throws IOException {
    // Load the Universal Dependencies POS tagset that ships on the classpath.
    TagSet udPosTagSet = JsonImportUtil.importTagSetFromJson(aProject,
            new ClassPathResource("/tagsets/mul-pos-ud.json").getInputStream(),
            annotationSchemaService);

    // POS annotations attach to tokens through the "pos" feature on the Token layer,
    // so that feature has to exist before the POS layer can reference it.
    AnnotationLayer tokenLayer = annotationSchemaService.getLayer(Token.class.getName(), aProject);
    AnnotationFeature posFeatureOnToken = new AnnotationFeature(aProject, tokenLayer, "pos", "pos",
            POS.class.getName());
    annotationSchemaService.createFeature(posFeatureOnToken);

    // Create the POS span layer and wire it to the token attach feature.
    AnnotationLayer posLayer = new AnnotationLayer(POS.class.getName(), "POS", SPAN_TYPE, aProject,
            true);
    posLayer.setAttachType(tokenLayer);
    posLayer.setAttachFeature(posFeatureOnToken);
    annotationSchemaService.createLayer(posLayer);

    // The actual tag value is a string feature backed by the imported tagset.
    annotationSchemaService.createFeature(new AnnotationFeature(aProject, posLayer, "PosValue",
            "PosValue", CAS.TYPE_NAME_STRING, "Part-of-speech tag", udPosTagSet));
}
Also used : POS(de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.pos.POS) TagSet(de.tudarmstadt.ukp.clarin.webanno.model.TagSet) Token(de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Token) AnnotationLayer(de.tudarmstadt.ukp.clarin.webanno.model.AnnotationLayer) ClassPathResource(org.springframework.core.io.ClassPathResource) AnnotationFeature(de.tudarmstadt.ukp.clarin.webanno.model.AnnotationFeature)

Example 2 with POS

use of de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.pos.POS in project webanno by webanno.

The following example shows the getOrCreateSpanAnnotation method of the class Tsv3XDeserializer.

/**
 * Looks up the span annotation for the given column/unit, re-using an annotation already
 * seen in another column or a previous unit (via its disambiguation ID), or creating a new
 * one if necessary.
 *
 * @param aCol the TSV column being processed.
 * @param aUnit the current unit (token/sub-token).
 * @param aStackingIndex index distinguishing stacked annotations of the same type.
 * @param aDisambiguationInfo optional numeric disambiguation ID, may be {@code null}.
 * @return the existing or newly created annotation.
 */
private AnnotationFS getOrCreateSpanAnnotation(TsvColumn aCol, TsvUnit aUnit, int aStackingIndex, String aDisambiguationInfo) {
    // parseInt instead of valueOf: only the primitive is needed, so avoid boxing.
    int disambiguationId = aDisambiguationInfo != null ? Integer.parseInt(aDisambiguationInfo) : -1;
    // Check if we have seen the same annotation already in the current unit but in another
    // column.
    AnnotationFS annotation = aUnit.getUimaAnnotation(aCol.uimaType, aStackingIndex);
    // If not, check if we have seen the same annotation already in a previous unit
    if (annotation == null && disambiguationId != -1) {
        annotation = aUnit.getDocument().getDisambiguatedAnnotation(disambiguationId);
        if (annotation != null) {
            aUnit.addUimaAnnotation(annotation);
            // Extend the span of the existing annotation
            // Unfortunately, the AnnotationFS interface does not define a setEnd() method.
            setFeature(annotation, CAS.FEATURE_BASE_NAME_END, aUnit.getEnd());
        }
    }
    // Still no annotation? Then we have to create one
    if (annotation == null) {
        annotation = aUnit.getDocument().getJCas().getCas().createAnnotation(aCol.uimaType,
                aUnit.getBegin(), aUnit.getEnd());
        aUnit.addUimaAnnotation(annotation);
        // Check if there are slot features that need to be initialized
        List<TsvColumn> otherColumnsForType = aUnit.getDocument().getSchema()
                .getColumns(aCol.uimaType);
        for (TsvColumn col : otherColumnsForType) {
            if (SLOT_TARGET.equals(col.featureType)) {
                setFeature(annotation, col.uimaFeature.getShortName(), emptyList());
            }
        }
        // Special handling of DKPro Core Token-attached annotations. The type names are
        // mutually exclusive, so an else-if chain is used and the type name is computed
        // once instead of once per comparison.
        String typeName = aCol.uimaType.getName();
        if (Lemma.class.getName().equals(typeName)) {
            TsvToken token = (TsvToken) aUnit;
            token.getUimaToken().setLemma((Lemma) annotation);
        }
        else if (Stem.class.getName().equals(typeName)) {
            TsvToken token = (TsvToken) aUnit;
            token.getUimaToken().setStem((Stem) annotation);
        }
        else if (MorphologicalFeatures.class.getName().equals(typeName)) {
            TsvToken token = (TsvToken) aUnit;
            token.getUimaToken().setMorph((MorphologicalFeatures) annotation);
        }
        else if (POS.class.getName().equals(typeName)) {
            TsvToken token = (TsvToken) aUnit;
            token.getUimaToken().setPos((POS) annotation);
        }
    }
    // Register the annotation under its disambiguation ID so that later units are able
    // to extend the range of multi-token IDs.
    if (disambiguationId != -1) {
        aUnit.getDocument().addDisambiguationId(annotation, disambiguationId);
    }
    return annotation;
}
Also used : AnnotationFS(org.apache.uima.cas.text.AnnotationFS) TsvColumn(de.tudarmstadt.ukp.clarin.webanno.tsv.internal.tsv3x.model.TsvColumn) MorphologicalFeatures(de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.morph.MorphologicalFeatures) POS(de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.pos.POS) Lemma(de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Lemma) TsvToken(de.tudarmstadt.ukp.clarin.webanno.tsv.internal.tsv3x.model.TsvToken) Stem(de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Stem)

Example 3 with POS

use of de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.pos.POS in project webanno by webanno.

The following example shows the convert method of the class Conll2009Reader.

/**
 * Reads CoNLL-2009 data from the given reader into the CAS, creating tokens, lemmas,
 * POS tags, morphological features, dependencies and semantic predicates/arguments
 * according to the enabled read flags.
 *
 * @param aJCas the CAS to fill.
 * @param aReader the CoNLL-2009 input.
 * @throws IOException if the POS mapping cannot be configured or reading fails.
 */
public void convert(JCas aJCas, BufferedReader aReader) throws IOException {
    if (readPos) {
        try {
            posMappingProvider.configure(aJCas.getCas());
        } catch (AnalysisEngineProcessException e) {
            // Surface mapping problems through the reader's IOException contract
            throw new IOException(e);
        }
    }
    JCasBuilder doc = new JCasBuilder(aJCas);
    List<String[]> words;
    while ((words = readSentence(aReader)) != null) {
        if (words.isEmpty()) {
            // Ignore empty sentences which may result from multiple sentence end
            // markers following each other.
            continue;
        }
        int sentenceBegin = doc.getPosition();
        int sentenceEnd = sentenceBegin;
        // Tokens, Lemma, POS
        Map<Integer, Token> tokens = new HashMap<>();
        List<SemPred> preds = new ArrayList<>();
        Iterator<String[]> wordIterator = words.iterator();
        while (wordIterator.hasNext()) {
            String[] word = wordIterator.next();
            // Read token
            Token token = doc.add(word[FORM], Token.class);
            tokens.put(Integer.valueOf(word[ID]), token);
            if (wordIterator.hasNext()) {
                doc.add(" ");
            }
            // Read lemma
            if (!UNUSED.equals(word[LEMMA]) && readLemma) {
                Lemma lemma = new Lemma(aJCas, token.getBegin(), token.getEnd());
                lemma.setValue(word[LEMMA]);
                lemma.addToIndexes();
                token.setLemma(lemma);
            }
            // Read part-of-speech tag
            if (!UNUSED.equals(word[POS]) && readPos) {
                Type posTag = posMappingProvider.getTagType(word[POS]);
                POS pos = (POS) aJCas.getCas().createAnnotation(posTag, token.getBegin(), token.getEnd());
                // intern() because the same tag string recurs for many tokens
                pos.setPosValue(word[POS].intern());
                // WebAnno did not yet backport the coarse grained POS feature from
                // DKPro Core 1.9.0
                // POSUtils.assignCoarseValue(pos);
                pos.addToIndexes();
                token.setPos(pos);
            }
            // Read morphological features
            if (!UNUSED.equals(word[FEAT]) && readMorph) {
                MorphologicalFeatures morphtag = new MorphologicalFeatures(aJCas, token.getBegin(), token.getEnd());
                morphtag.setValue(word[FEAT]);
                morphtag.addToIndexes();
            }
            // Read semantic predicate
            if (!UNUSED.equals(word[PRED]) && readSemanticPredicate) {
                SemPred pred = new SemPred(aJCas, token.getBegin(), token.getEnd());
                pred.setCategory(word[PRED]);
                pred.addToIndexes();
                preds.add(pred);
            }
            sentenceEnd = token.getEnd();
        }
        // Dependencies
        if (readDependency) {
            for (String[] word : words) {
                if (!UNUSED.equals(word[DEPREL])) {
                    // parseInt instead of valueOf: only the primitives are needed here.
                    int depId = Integer.parseInt(word[ID]);
                    int govId = Integer.parseInt(word[HEAD]);
                    Dependency rel = new Dependency(aJCas);
                    // Model the root as a loop onto itself - not using ROOT here because
                    // WebAnno cannot deal with elevated types. (This folds the two
                    // previously duplicated root/non-root branches into one.)
                    rel.setGovernor(tokens.get(govId == 0 ? depId : govId));
                    rel.setDependent(tokens.get(depId));
                    rel.setDependencyType(word[DEPREL]);
                    rel.setBegin(rel.getDependent().getBegin());
                    rel.setEnd(rel.getDependent().getEnd());
                    // This is set via FSUtil because we still use the DKPro Core 1.7.0 JCas
                    // classes
                    FSUtil.setFeature(rel, "flavor", DependencyFlavor.BASIC);
                    rel.addToIndexes();
                }
            }
        }
        // Semantic arguments
        if (readSemanticPredicate) {
            // Get arguments for one predicate at a time
            for (int p = 0; p < preds.size(); p++) {
                List<SemArgLink> args = new ArrayList<>();
                for (String[] word : words) {
                    if (!UNUSED.equals(word[APRED + p])) {
                        Token token = tokens.get(Integer.valueOf(word[ID]));
                        SemArg arg = new SemArg(aJCas, token.getBegin(), token.getEnd());
                        arg.addToIndexes();
                        SemArgLink link = new SemArgLink(aJCas);
                        link.setRole(word[APRED + p]);
                        link.setTarget(arg);
                        args.add(link);
                    }
                }
                SemPred pred = preds.get(p);
                pred.setArguments(FSCollectionFactory.createFSArray(aJCas, args));
            }
        }
        // Sentence
        Sentence sentence = new Sentence(aJCas, sentenceBegin, sentenceEnd);
        sentence.addToIndexes();
        // One sentence per line.
        doc.add("\n");
    }
    doc.close();
}
Also used : MorphologicalFeatures(de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.morph.MorphologicalFeatures) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) SemArgLink(de.tudarmstadt.ukp.dkpro.core.api.semantics.type.SemArgLink) Token(de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Token) IOException(java.io.IOException) Dependency(de.tudarmstadt.ukp.dkpro.core.api.syntax.type.dependency.Dependency) AnalysisEngineProcessException(org.apache.uima.analysis_engine.AnalysisEngineProcessException) Type(org.apache.uima.cas.Type) POS(de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.pos.POS) JCasBuilder(org.apache.uima.fit.factory.JCasBuilder) Lemma(de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Lemma) SemPred(de.tudarmstadt.ukp.dkpro.core.api.semantics.type.SemPred) Sentence(de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Sentence) SemArg(de.tudarmstadt.ukp.dkpro.core.api.semantics.type.SemArg)

Example 4 with POS

use of de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.pos.POS in project webanno by webanno.

The following example shows the convert method of the class Conll2009Writer.

/**
 * Writes the annotations of the given CAS to the output in CoNLL-2009 format: one token
 * per line, tab-separated columns, and a blank line after each sentence. The "predicted"
 * columns (PLEMMA, PPOS, PFEAT, PHEAD, PDEPREL) are emitted identically to their gold
 * counterparts.
 *
 * @param aJCas the CAS to export.
 * @param aOut the writer receiving the CoNLL output.
 */
private void convert(JCas aJCas, PrintWriter aOut) {
    // Pre-compute which predicates cover which tokens, and which tokens each semantic
    // argument covers, to avoid repeated index scans in the per-sentence loops below.
    Map<Token, Collection<SemPred>> predIdx = indexCovered(aJCas, Token.class, SemPred.class);
    Map<SemArg, Collection<Token>> argIdx = indexCovered(aJCas, SemArg.class, Token.class);
    for (Sentence sentence : select(aJCas, Sentence.class)) {
        // LinkedHashMap keeps rows in token order for the output loop at the end.
        HashMap<Token, Row> ctokens = new LinkedHashMap<Token, Row>();
        // Tokens
        List<Token> tokens = selectCovered(Token.class, sentence);
        // Check if we should try to include the FEATS in output
        // (only safe when tokens and morphology annotations align one-to-one)
        List<MorphologicalFeatures> morphology = selectCovered(MorphologicalFeatures.class, sentence);
        boolean useFeats = tokens.size() == morphology.size();
        List<SemPred> preds = selectCovered(SemPred.class, sentence);
        for (int i = 0; i < tokens.size(); i++) {
            Row row = new Row();
            // CoNLL token IDs are 1-based
            row.id = i + 1;
            row.token = tokens.get(i);
            // One argument slot per predicate in this sentence (APRED columns)
            row.args = new SemArgLink[preds.size()];
            if (useFeats) {
                row.feats = morphology.get(i);
            }
            // If there are multiple semantic predicates for the current token, then
            // we keep only the first
            Collection<SemPred> predsForToken = predIdx.get(row.token);
            if (predsForToken != null && !predsForToken.isEmpty()) {
                row.pred = predsForToken.iterator().next();
            }
            ctokens.put(row.token, row);
        }
        // Dependencies
        // Only BASIC-flavor dependencies are written; a missing flavor feature counts as basic.
        List<Dependency> basicDeps = selectCovered(Dependency.class, sentence).stream().filter(dep -> {
            String flavor = FSUtil.getFeature(dep, "flavor", String.class);
            return flavor == null || DependencyFlavor.BASIC.equals(flavor);
        }).collect(Collectors.toList());
        for (Dependency rel : basicDeps) {
            Row row = ctokens.get(rel.getDependent());
            // CoNLL-2009 allows only a single head per token
            if (row.deprel != null) {
                throw new IllegalStateException("Illegal basic dependency structure - token [" + row.token.getCoveredText() + "] is dependent of more than one dependency.");
            }
            row.deprel = rel;
        }
        // Semantic arguments
        for (int p = 0; p < preds.size(); p++) {
            FSArray args = preds.get(p).getArguments();
            for (SemArgLink arg : select(args, SemArgLink.class)) {
                // An argument may span several tokens; record it on every covered token
                for (Token t : argIdx.get(arg.getTarget())) {
                    Row row = ctokens.get(t);
                    row.args[p] = arg;
                }
            }
        }
        // Write sentence in CONLL 2009 format
        for (Row row : ctokens.values()) {
            int id = row.id;
            String form = row.token.getCoveredText();
            String lemma = UNUSED;
            if (writeLemma && (row.token.getLemma() != null)) {
                lemma = row.token.getLemma().getValue();
            }
            String plemma = lemma;
            String pos = UNUSED;
            if (writePos && (row.token.getPos() != null)) {
                POS posAnno = row.token.getPos();
                pos = posAnno.getPosValue();
            }
            String ppos = pos;
            String feat = UNUSED;
            if (writeMorph && (row.feats != null)) {
                feat = row.feats.getValue();
            }
            String pfeat = feat;
            int headId = UNUSED_INT;
            String deprel = UNUSED;
            if (writeDependency && (row.deprel != null)) {
                deprel = row.deprel.getDependencyType();
                headId = ctokens.get(row.deprel.getGovernor()).id;
                if (headId == row.id) {
                    // ROOT dependencies may be modeled as a loop, ignore these.
                    headId = 0;
                }
            }
            String head = UNUSED;
            if (headId != UNUSED_INT) {
                head = Integer.toString(headId);
            }
            String phead = head;
            String pdeprel = deprel;
            String fillpred = UNUSED;
            String pred = UNUSED;
            // APRED columns: one role (or UNUSED) per predicate, tab-separated
            StringBuilder apreds = new StringBuilder();
            if (writeSemanticPredicate) {
                if (row.pred != null) {
                    fillpred = "Y";
                    pred = row.pred.getCategory();
                }
                for (SemArgLink arg : row.args) {
                    if (apreds.length() > 0) {
                        apreds.append('\t');
                    }
                    apreds.append(arg != null ? arg.getRole() : UNUSED);
                }
            }
            aOut.printf("%d\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n", id, form, lemma, plemma, pos, ppos, feat, pfeat, head, phead, deprel, pdeprel, fillpred, pred, apreds);
        }
        // Blank line terminates the sentence
        aOut.println();
    }
}
Also used : FSArray(org.apache.uima.jcas.cas.FSArray) JCasFileWriter_ImplBase(de.tudarmstadt.ukp.dkpro.core.api.io.JCasFileWriter_ImplBase) SemArgLink(de.tudarmstadt.ukp.dkpro.core.api.semantics.type.SemArgLink) HashMap(java.util.HashMap) Sentence(de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Sentence) FSUtil(org.apache.uima.fit.util.FSUtil) LinkedHashMap(java.util.LinkedHashMap) TypeCapability(org.apache.uima.fit.descriptor.TypeCapability) Token(de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Token) SemPred(de.tudarmstadt.ukp.dkpro.core.api.semantics.type.SemPred) Dependency(de.tudarmstadt.ukp.dkpro.core.api.syntax.type.dependency.Dependency) ConfigurationParameter(org.apache.uima.fit.descriptor.ConfigurationParameter) DependencyFlavor(de.tudarmstadt.ukp.dkpro.core.api.syntax.type.dependency.DependencyFlavor) Map(java.util.Map) OutputStreamWriter(java.io.OutputStreamWriter) SemArg(de.tudarmstadt.ukp.dkpro.core.api.semantics.type.SemArg) PrintWriter(java.io.PrintWriter) JCas(org.apache.uima.jcas.JCas) ResourceMetaData(org.apache.uima.fit.descriptor.ResourceMetaData) MorphologicalFeatures(de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.morph.MorphologicalFeatures) JCasUtil.indexCovered(org.apache.uima.fit.util.JCasUtil.indexCovered) POS(de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.pos.POS) ComponentParameters(de.tudarmstadt.ukp.dkpro.core.api.parameter.ComponentParameters) IOUtils.closeQuietly(org.apache.commons.io.IOUtils.closeQuietly) Collection(java.util.Collection) Collectors(java.util.stream.Collectors) List(java.util.List) AnalysisEngineProcessException(org.apache.uima.analysis_engine.AnalysisEngineProcessException) JCasUtil.select(org.apache.uima.fit.util.JCasUtil.select) JCasUtil.selectCovered(org.apache.uima.fit.util.JCasUtil.selectCovered) SemArgLink(de.tudarmstadt.ukp.dkpro.core.api.semantics.type.SemArgLink) Token(de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Token) LinkedHashMap(java.util.LinkedHashMap) 
FSArray(org.apache.uima.jcas.cas.FSArray) SemPred(de.tudarmstadt.ukp.dkpro.core.api.semantics.type.SemPred) Sentence(de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Sentence) MorphologicalFeatures(de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.morph.MorphologicalFeatures) Dependency(de.tudarmstadt.ukp.dkpro.core.api.syntax.type.dependency.Dependency) POS(de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.pos.POS) Collection(java.util.Collection) SemArg(de.tudarmstadt.ukp.dkpro.core.api.semantics.type.SemArg)

Example 5 with POS

use of de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.pos.POS in project webanno by webanno.

The following example shows the convert method of the class ConllUReader.

/**
 * Reads CoNLL-U data from the given reader into the CAS, creating tokens, surface forms,
 * lemmas, POS tags, morphological features, and basic/enhanced dependencies according to
 * the enabled read flags.
 *
 * @param aJCas the CAS to fill.
 * @param aReader the CoNLL-U input.
 * @throws IOException if the POS mapping cannot be configured or reading fails.
 */
public void convert(JCas aJCas, BufferedReader aReader) throws IOException {
    if (readPos) {
        try {
            posMappingProvider.configure(aJCas.getCas());
        } catch (AnalysisEngineProcessException e) {
            // Surface mapping problems through the reader's IOException contract
            throw new IOException(e);
        }
    }
    JCasBuilder doc = new JCasBuilder(aJCas);
    List<String[]> words;
    while ((words = readSentence(aReader)) != null) {
        if (words.isEmpty()) {
            // Ignore empty sentences which may result from multiple sentence end
            // markers following each other.
            continue;
        }
        int sentenceBegin = doc.getPosition();
        int sentenceEnd = sentenceBegin;
        // Pending multi-word ("surface") token range; -1 means none is open.
        int surfaceBegin = -1;
        int surfaceEnd = -1;
        String surfaceString = null;
        // Tokens, Lemma, POS
        Int2ObjectMap<Token> tokens = new Int2ObjectOpenHashMap<>();
        for (String[] word : words) {
            if (word[ID].contains("-")) {
                // A range line like "3-4" announces a multi-word surface token; remember
                // it and attach it once the last covered token has been materialized.
                String[] fragments = word[ID].split("-");
                // parseInt instead of valueOf: the map is int-keyed, boxing is pure waste.
                surfaceBegin = Integer.parseInt(fragments[0]);
                surfaceEnd = Integer.parseInt(fragments[1]);
                surfaceString = word[FORM];
                continue;
            }
            // Read token
            int tokenIdx = Integer.parseInt(word[ID]);
            Token token = doc.add(word[FORM], Token.class);
            tokens.put(tokenIdx, token);
            if (!StringUtils.contains(word[MISC], "SpaceAfter=No")) {
                doc.add(" ");
            }
            // Read lemma
            if (!UNUSED.equals(word[LEMMA]) && readLemma) {
                Lemma lemma = new Lemma(aJCas, token.getBegin(), token.getEnd());
                lemma.setValue(word[LEMMA]);
                lemma.addToIndexes();
                token.setLemma(lemma);
            }
            // Read part-of-speech tag
            if (!UNUSED.equals(word[POSTAG]) && readPos) {
                Type posTag = posMappingProvider.getTagType(word[POSTAG]);
                POS pos = (POS) aJCas.getCas().createAnnotation(posTag, token.getBegin(), token.getEnd());
                pos.setPosValue(word[POSTAG]);
                pos.addToIndexes();
                token.setPos(pos);
            }
            // Read morphological features
            if (!UNUSED.equals(word[FEATS]) && readMorph) {
                MorphologicalFeatures morphtag = new MorphologicalFeatures(aJCas, token.getBegin(), token.getEnd());
                morphtag.setValue(word[FEATS]);
                morphtag.addToIndexes();
                token.setMorph(morphtag);
                // Try parsing out individual feature values. Since the DKPro Core
                // MorphologicalFeatures type is based on the definition from the UD project,
                // we can do this rather straightforwardly.
                Type morphType = morphtag.getType();
                String[] items = word[FEATS].split("\\|");
                for (String item : items) {
                    String[] keyValue = item.split("=");
                    if (keyValue.length < 2 || keyValue[0].isEmpty()) {
                        // Malformed entry (no "=" or empty key) - skip it instead of
                        // failing with an ArrayIndexOutOfBoundsException.
                        continue;
                    }
                    // Lower-case the first letter: UD uses "Case", the feature is "case".
                    StringBuilder key = new StringBuilder(keyValue[0]);
                    key.setCharAt(0, Character.toLowerCase(key.charAt(0)));
                    String value = keyValue[1];
                    Feature feat = morphType.getFeatureByBaseName(key.toString());
                    if (feat != null) {
                        morphtag.setStringValue(feat, value);
                    }
                }
            }
            // Read surface form
            if (tokenIdx == surfaceEnd) {
                int begin = tokens.get(surfaceBegin).getBegin();
                int end = tokens.get(surfaceEnd).getEnd();
                SurfaceForm surfaceForm = new SurfaceForm(aJCas, begin, end);
                surfaceForm.setValue(surfaceString);
                surfaceForm.addToIndexes();
                surfaceBegin = -1;
                surfaceEnd = -1;
                surfaceString = null;
            }
            sentenceEnd = token.getEnd();
        }
        // Dependencies
        if (readDependency) {
            for (String[] word : words) {
                if (!UNUSED.equals(word[DEPREL])) {
                    int depId = Integer.parseInt(word[ID]);
                    int govId = Integer.parseInt(word[HEAD]);
                    // Model the root as a loop onto itself
                    makeDependency(aJCas, govId, depId, word[DEPREL], DependencyFlavor.BASIC, tokens, word);
                }
                if (!UNUSED.equals(word[DEPS])) {
                    // Enhanced dependencies: list items separated by vertical bar
                    String[] items = word[DEPS].split("\\|");
                    for (String item : items) {
                        String[] sItem = item.split(":");
                        int depId = Integer.parseInt(word[ID]);
                        int govId = Integer.parseInt(sItem[0]);
                        makeDependency(aJCas, govId, depId, sItem[1], DependencyFlavor.ENHANCED, tokens, word);
                    }
                }
            }
        }
        // Sentence
        Sentence sentence = new Sentence(aJCas, sentenceBegin, sentenceEnd);
        sentence.addToIndexes();
        // One sentence per line.
        doc.add("\n");
    }
    doc.close();
}
Also used : Int2ObjectOpenHashMap(it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap) MorphologicalFeatures(de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.morph.MorphologicalFeatures) Token(de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Token) IOException(java.io.IOException) AnalysisEngineProcessException(org.apache.uima.analysis_engine.AnalysisEngineProcessException) Feature(org.apache.uima.cas.Feature) Type(org.apache.uima.cas.Type) POS(de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.pos.POS) SurfaceForm(de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.SurfaceForm) JCasBuilder(org.apache.uima.fit.factory.JCasBuilder) Lemma(de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Lemma) Sentence(de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Sentence)

Aggregations

POS (de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.pos.POS)35 Token (de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Token)21 ArrayList (java.util.ArrayList)15 JCas (org.apache.uima.jcas.JCas)14 Test (org.junit.Test)12 Lemma (de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Lemma)11 Dependency (de.tudarmstadt.ukp.dkpro.core.api.syntax.type.dependency.Dependency)9 Sentence (de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Sentence)8 List (java.util.List)8 Type (org.apache.uima.cas.Type)8 AnnotationFeature (de.tudarmstadt.ukp.clarin.webanno.model.AnnotationFeature)7 MorphologicalFeatures (de.tudarmstadt.ukp.dkpro.core.api.lexmorph.type.morph.MorphologicalFeatures)7 LinkedHashMap (java.util.LinkedHashMap)7 Feature (org.apache.uima.cas.Feature)7 AnnotationFS (org.apache.uima.cas.text.AnnotationFS)7 Stem (de.tudarmstadt.ukp.dkpro.core.api.segmentation.type.Stem)5 HashMap (java.util.HashMap)5 Evaluator (de.tudarmstadt.ukp.clarin.webanno.constraints.evaluator.Evaluator)3 PossibleValue (de.tudarmstadt.ukp.clarin.webanno.constraints.evaluator.PossibleValue)3 ValuesGenerator (de.tudarmstadt.ukp.clarin.webanno.constraints.evaluator.ValuesGenerator)3