
Example 6 with CorefCluster

Use of edu.stanford.nlp.coref.data.CorefCluster in project CoreNLP by stanfordnlp.

From the class SingletonPredictor, method generateFeatureVectors:

/**
   * Generate the training features from the CoNLL input file.
   * @return Dataset of feature vectors
   * @throws Exception if a document cannot be read or processed
   */
private static GeneralDataset<String, String> generateFeatureVectors(Properties props) throws Exception {
    GeneralDataset<String, String> dataset = new Dataset<>();
    Dictionaries dict = new Dictionaries(props);
    DocumentMaker docMaker = new DocumentMaker(props, dict);
    Document document;
    while ((document = docMaker.nextDoc()) != null) {
        setTokenIndices(document);
        Map<Integer, CorefCluster> entities = document.goldCorefClusters;
        // Generate features for coreferent mentions with class label 1
        for (CorefCluster entity : entities.values()) {
            for (Mention mention : entity.getCorefMentions()) {
                // Ignore verbal mentions
                if (mention.headWord.tag().startsWith("V"))
                    continue;
                IndexedWord head = mention.enhancedDependency.getNodeByIndexSafe(mention.headWord.index());
                if (head == null)
                    continue;
                ArrayList<String> feats = mention.getSingletonFeatures(dict);
                dataset.add(new BasicDatum<>(feats, "1"));
            }
        }
        // Generate features for singletons with class label 0
        ArrayList<CoreLabel> gold_heads = new ArrayList<>();
        for (Mention gold_men : document.goldMentionsByID.values()) {
            gold_heads.add(gold_men.headWord);
        }
        for (Mention predicted_men : document.predictedMentionsByID.values()) {
            SemanticGraph dep = predicted_men.enhancedDependency;
            IndexedWord head = dep.getNodeByIndexSafe(predicted_men.headWord.index());
            if (head == null || !dep.vertexSet().contains(head))
                continue;
            // Ignore verbal mentions
            if (predicted_men.headWord.tag().startsWith("V"))
                continue;
            // If the mention's head appears among the gold mentions, it is not a singleton, so skip it
            if (gold_heads.contains(predicted_men.headWord))
                continue;
            dataset.add(new BasicDatum<>(predicted_men.getSingletonFeatures(dict), "0"));
        }
    }
    dataset.summaryStatistics();
    return dataset;
}
Also used: Dictionaries(edu.stanford.nlp.coref.data.Dictionaries) GeneralDataset(edu.stanford.nlp.classify.GeneralDataset) Dataset(edu.stanford.nlp.classify.Dataset) ArrayList(java.util.ArrayList) Document(edu.stanford.nlp.coref.data.Document) CoreLabel(edu.stanford.nlp.ling.CoreLabel) DocumentMaker(edu.stanford.nlp.coref.data.DocumentMaker) CorefCluster(edu.stanford.nlp.coref.data.CorefCluster) Mention(edu.stanford.nlp.coref.data.Mention) SemanticGraph(edu.stanford.nlp.semgraph.SemanticGraph) IndexedWord(edu.stanford.nlp.ling.IndexedWord)
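
A plausible driver for this method, modeled on SingletonPredictor's own training setup: the main below is an illustrative sketch, not code from the source file, and assumes props carries the standard CoNLL coreference settings that DocumentMaker expects (imports needed: edu.stanford.nlp.classify.LogisticClassifier, edu.stanford.nlp.classify.LogisticClassifierFactory, edu.stanford.nlp.util.StringUtils, java.util.Properties).

// Hypothetical driver placed in the same class, since generateFeatureVectors
// is private. CoreNLP's SingletonPredictor trains a logistic classifier over
// the resulting 0/1-labeled feature vectors; the main itself is an assumption.
public static void main(String[] args) throws Exception {
    Properties props = StringUtils.argsToProperties(args);
    GeneralDataset<String, String> data = generateFeatureVectors(props);
    LogisticClassifier<String, String> classifier =
            new LogisticClassifierFactory<String, String>().trainClassifier(data);
    System.out.println("Trained singleton classifier on " + data.size() + " datums");
}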

Example 7 with CorefCluster

Use of edu.stanford.nlp.coref.data.CorefCluster in project CoreNLP by stanfordnlp.

From the class FeatureExtractor, method getFeatures:

private Counter<String> getFeatures(Document doc, Mention m1, Mention m2) {
    assert (m1.appearEarlierThan(m2));
    Counter<String> features = new ClassicCounter<>();
    // global features
    features.incrementCount("bias");
    if (useDocSource) {
        features.incrementCount("doc-type=" + doc.docType);
        if (doc.docInfo != null && doc.docInfo.containsKey("DOC_ID")) {
            features.incrementCount("doc-source=" + doc.docInfo.get("DOC_ID").split("/")[1]);
        }
    }
    // singleton feature conjunctions
    List<String> singletonFeatures1 = m1.getSingletonFeatures(dictionaries);
    List<String> singletonFeatures2 = m2.getSingletonFeatures(dictionaries);
    for (Map.Entry<Integer, String> e : SINGLETON_FEATURES.entrySet()) {
        if (e.getKey() < singletonFeatures1.size() && e.getKey() < singletonFeatures2.size()) {
            features.incrementCount(e.getValue() + "=" + singletonFeatures1.get(e.getKey()) + "_" + singletonFeatures2.get(e.getKey()));
        }
    }
    SemanticGraphEdge p1 = getDependencyParent(m1);
    SemanticGraphEdge p2 = getDependencyParent(m2);
    features.incrementCount("dep-relations=" + (p1 == null ? "null" : p1.getRelation()) + "_" + (p2 == null ? "null" : p2.getRelation()));
    features.incrementCount("roles=" + getRole(m1) + "_" + getRole(m2));
    CoreLabel headCL1 = headWord(m1);
    CoreLabel headCL2 = headWord(m2);
    String headPOS1 = getPOS(headCL1);
    String headPOS2 = getPOS(headCL2);
    features.incrementCount("head-pos-s=" + headPOS1 + "_" + headPOS2);
    features.incrementCount("head-words=" + wordIndicator("h_" + headCL1.word().toLowerCase() + "_" + headCL2.word().toLowerCase(), headPOS1 + "_" + headPOS2));
    // agreement features
    addFeature(features, "animacies-agree", m2.animaciesAgree(m1));
    addFeature(features, "attributes-agree", m2.attributesAgree(m1, dictionaries));
    addFeature(features, "entity-types-agree", m2.entityTypesAgree(m1, dictionaries));
    addFeature(features, "numbers-agree", m2.numbersAgree(m1));
    addFeature(features, "genders-agree", m2.gendersAgree(m1));
    addFeature(features, "ner-strings-equal", m1.nerString.equals(m2.nerString));
    // string matching features
    addFeature(features, "antecedent-head-in-anaphor", headContainedIn(m1, m2));
    addFeature(features, "anaphor-head-in-antecedent", headContainedIn(m2, m1));
    if (m1.mentionType != MentionType.PRONOMINAL && m2.mentionType != MentionType.PRONOMINAL) {
        addFeature(features, "antecedent-in-anaphor", m2.spanToString().toLowerCase().contains(m1.spanToString().toLowerCase()));
        addFeature(features, "anaphor-in-antecedent", m1.spanToString().toLowerCase().contains(m2.spanToString().toLowerCase()));
        addFeature(features, "heads-equal", m1.headString.equalsIgnoreCase(m2.headString));
        addFeature(features, "heads-agree", m2.headsAgree(m1));
        addFeature(features, "exact-match", m1.toString().trim().toLowerCase().equals(m2.toString().trim().toLowerCase()));
        addFeature(features, "partial-match", relaxedStringMatch(m1, m2));
        double editDistance = StringUtils.editDistance(m1.spanToString(), m2.spanToString()) / (double) (m1.spanToString().length() + m2.spanToString().length());
        features.incrementCount("edit-distance", editDistance);
        features.incrementCount("edit-distance=" + ((int) (editDistance * 10) / 10.0));
        double headEditDistance = StringUtils.editDistance(m1.headString, m2.headString) / (double) (m1.headString.length() + m2.headString.length());
        features.incrementCount("head-edit-distance", headEditDistance);
        features.incrementCount("head-edit-distance=" + ((int) (headEditDistance * 10) / 10.0));
    }
    // distance features
    addNumeric(features, "mention-distance", m2.mentionNum - m1.mentionNum);
    addNumeric(features, "sentence-distance", m2.sentNum - m1.sentNum);
    if (m2.sentNum == m1.sentNum) {
        addNumeric(features, "word-distance", m2.startIndex - m1.endIndex);
        if (m1.endIndex > m2.startIndex) {
            features.incrementCount("spans-intersect");
        }
    }
    // setup for dcoref features
    Set<Mention> ms1 = new HashSet<>();
    ms1.add(m1);
    Set<Mention> ms2 = new HashSet<>();
    ms2.add(m2);
    Random r = new Random();
    CorefCluster c1 = new CorefCluster(20000 + r.nextInt(10000), ms1);
    CorefCluster c2 = new CorefCluster(10000 + r.nextInt(10000), ms2);
    String s2 = m2.lowercaseNormalizedSpanString();
    String s1 = m1.lowercaseNormalizedSpanString();
    // discourse dcoref features
    addFeature(features, "mention-speaker-PER0", m2.headWord.get(SpeakerAnnotation.class).equalsIgnoreCase("PER0"));
    addFeature(features, "antecedent-is-anaphor-speaker", CorefRules.antecedentIsMentionSpeaker(doc, m2, m1, dictionaries));
    addFeature(features, "same-speaker", CorefRules.entitySameSpeaker(doc, m2, m1));
    addFeature(features, "person-disagree-same-speaker", CorefRules.entityPersonDisagree(doc, m2, m1, dictionaries) && CorefRules.entitySameSpeaker(doc, m2, m1));
    addFeature(features, "antecedent-matches-anaphor-speaker", CorefRules.antecedentMatchesMentionSpeakerAnnotation(m2, m1, doc));
    addFeature(features, "discourse-you-PER0", m2.person == Person.YOU && doc.docType == DocType.ARTICLE && m2.headWord.get(CoreAnnotations.SpeakerAnnotation.class).equals("PER0"));
    addFeature(features, "speaker-match-i-i", m2.number == Number.SINGULAR && dictionaries.firstPersonPronouns.contains(s1) && m1.number == Number.SINGULAR && dictionaries.firstPersonPronouns.contains(s2) && CorefRules.entitySameSpeaker(doc, m2, m1));
    addFeature(features, "speaker-match-speaker-i", m2.number == Number.SINGULAR && dictionaries.firstPersonPronouns.contains(s2) && CorefRules.antecedentIsMentionSpeaker(doc, m2, m1, dictionaries));
    addFeature(features, "speaker-match-i-speaker", m1.number == Number.SINGULAR && dictionaries.firstPersonPronouns.contains(s1) && CorefRules.antecedentIsMentionSpeaker(doc, m1, m2, dictionaries));
    addFeature(features, "speaker-match-you-you", dictionaries.secondPersonPronouns.contains(s1) && dictionaries.secondPersonPronouns.contains(s2) && CorefRules.entitySameSpeaker(doc, m2, m1));
    addFeature(features, "discourse-between-two-person", ((m2.person == Person.I && m1.person == Person.YOU || (m2.person == Person.YOU && m1.person == Person.I)) && (m2.headWord.get(CoreAnnotations.UtteranceAnnotation.class) - m1.headWord.get(CoreAnnotations.UtteranceAnnotation.class) == 1) && doc.docType == DocType.CONVERSATION));
    addFeature(features, "incompatible-not-match", m1.person != Person.I && m2.person != Person.I && (CorefRules.antecedentIsMentionSpeaker(doc, m1, m2, dictionaries) || CorefRules.antecedentIsMentionSpeaker(doc, m2, m1, dictionaries)));
    int utteranceDist = Math.abs(m1.headWord.get(CoreAnnotations.UtteranceAnnotation.class) - m2.headWord.get(CoreAnnotations.UtteranceAnnotation.class));
    if (doc.docType != DocType.ARTICLE && utteranceDist == 1 && !CorefRules.entitySameSpeaker(doc, m2, m1)) {
        addFeature(features, "speaker-mismatch-i-i", m1.person == Person.I && m2.person == Person.I);
        addFeature(features, "speaker-mismatch-you-you", m1.person == Person.YOU && m2.person == Person.YOU);
        addFeature(features, "speaker-mismatch-we-we", m1.person == Person.WE && m2.person == Person.WE);
    }
    // other dcoref features
    String firstWord1 = firstWord(m1).word().toLowerCase();
    addFeature(features, "indefinite-article-np", (m1.appositions == null && m1.predicateNominatives == null && (firstWord1.equals("a") || firstWord1.equals("an"))));
    addFeature(features, "far-this", m2.lowercaseNormalizedSpanString().equals("this") && Math.abs(m2.sentNum - m1.sentNum) > 3);
    addFeature(features, "per0-you-in-article", m2.person == Person.YOU && doc.docType == DocType.ARTICLE && m2.headWord.get(CoreAnnotations.SpeakerAnnotation.class).equals("PER0"));
    addFeature(features, "inside-in", m2.insideIn(m1) || m1.insideIn(m2));
    addFeature(features, "indefinite-determiners", dictionaries.indefinitePronouns.contains(m1.originalSpan.get(0).lemma()) || dictionaries.indefinitePronouns.contains(m2.originalSpan.get(0).lemma()));
    addFeature(features, "entity-attributes-agree", CorefRules.entityAttributesAgree(c2, c1));
    addFeature(features, "entity-token-distance", CorefRules.entityTokenDistance(m2, m1));
    addFeature(features, "i-within-i", CorefRules.entityIWithinI(m2, m1, dictionaries));
    addFeature(features, "exact-string-match", CorefRules.entityExactStringMatch(c2, c1, dictionaries, doc.roleSet));
    addFeature(features, "entity-relaxed-heads-agree", CorefRules.entityRelaxedHeadsAgreeBetweenMentions(c2, c1, m2, m1));
    addFeature(features, "is-acronym", CorefRules.entityIsAcronym(doc, c2, c1));
    addFeature(features, "demonym", m2.isDemonym(m1, dictionaries));
    addFeature(features, "incompatible-modifier", CorefRules.entityHaveIncompatibleModifier(m2, m1));
    addFeature(features, "head-lemma-match", m1.headWord.lemma().equals(m2.headWord.lemma()));
    addFeature(features, "words-included", CorefRules.entityWordsIncluded(c2, c1, m2, m1));
    addFeature(features, "extra-proper-noun", CorefRules.entityHaveExtraProperNoun(m2, m1, new HashSet<>()));
    addFeature(features, "number-in-later-mentions", CorefRules.entityNumberInLaterMention(m2, m1));
    addFeature(features, "sentence-context-incompatible", CorefRules.sentenceContextIncompatible(m2, m1, dictionaries));
    // syntax features
    if (useConstituencyParse) {
        if (m1.sentNum == m2.sentNum) {
            int clauseCount = 0;
            Tree tree = m2.contextParseTree;
            Tree current = m2.mentionSubTree;
            while (true) {
                current = current.ancestor(1, tree);
                if (current.label().value().startsWith("S")) {
                    clauseCount++;
                }
                if (current.dominates(m1.mentionSubTree)) {
                    break;
                }
                if (current.label().value().equals("ROOT") || current.ancestor(1, tree) == null) {
                    break;
                }
            }
            features.incrementCount("clause-count", clauseCount);
            features.incrementCount("clause-count=" + bin(clauseCount));
        }
        if (RuleBasedCorefMentionFinder.isPleonastic(m2, m2.contextParseTree) || RuleBasedCorefMentionFinder.isPleonastic(m1, m1.contextParseTree)) {
            features.incrementCount("pleonastic-it");
        }
        if (maximalNp(m1.mentionSubTree) == maximalNp(m2.mentionSubTree)) {
            features.incrementCount("same-maximal-np");
        }
        boolean m1Embedded = headEmbeddingLevel(m1.mentionSubTree, m1.headIndex - m1.startIndex) > 1;
        boolean m2Embedded = headEmbeddingLevel(m2.mentionSubTree, m2.headIndex - m2.startIndex) > 1;
        features.incrementCount("embedding=" + m1Embedded + "_" + m2Embedded);
    }
    return features;
}
Also used: SemanticGraphEdge(edu.stanford.nlp.semgraph.SemanticGraphEdge) CoreLabel(edu.stanford.nlp.ling.CoreLabel) Random(java.util.Random) CorefCluster(edu.stanford.nlp.coref.data.CorefCluster) Mention(edu.stanford.nlp.coref.data.Mention) ClassicCounter(edu.stanford.nlp.stats.ClassicCounter) CoreAnnotations(edu.stanford.nlp.ling.CoreAnnotations) Tree(edu.stanford.nlp.trees.Tree) HashMap(java.util.HashMap) Map(java.util.Map) HashSet(java.util.HashSet) SpeakerAnnotation(edu.stanford.nlp.ling.CoreAnnotations.SpeakerAnnotation)
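
Because getFeatures returns a plain Counter<String>, the features fired for a candidate pair are easy to inspect. The helper below is hypothetical (it is not part of FeatureExtractor), uses edu.stanford.nlp.stats.Counters for sorting, and keeps the invariant from the assert above that the first mention appears earlier.

// Hypothetical debugging helper: print the pair's features sorted by value.
private void dumpFeatures(Document doc, Mention m1, Mention m2) {
    Counter<String> feats = getFeatures(doc, m1, m2);
    for (String f : Counters.toSortedList(feats)) {
        System.out.printf("%-40s %.3f%n", f, feats.getCount(f));
    }
}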

Example 8 with CorefCluster

Use of edu.stanford.nlp.coref.data.CorefCluster in project CoreNLP by stanfordnlp.

From the class NeuralCorefDataExporter, method process:

@Override
public void process(int id, Document document) {
    JsonArrayBuilder clusters = Json.createArrayBuilder();
    for (CorefCluster gold : document.goldCorefClusters.values()) {
        JsonArrayBuilder c = Json.createArrayBuilder();
        for (Mention m : gold.corefMentions) {
            c.add(m.mentionID);
        }
        clusters.add(c.build());
    }
    goldClusterWriter.println(Json.createObjectBuilder().add(String.valueOf(id), clusters.build()).build());
    Map<Pair<Integer, Integer>, Boolean> mentionPairs = CorefUtils.getLabeledMentionPairs(document);
    List<Mention> mentionsList = CorefUtils.getSortedMentions(document);
    Map<Integer, List<Mention>> mentionsByHeadIndex = new HashMap<>();
    for (int i = 0; i < mentionsList.size(); i++) {
        Mention m = mentionsList.get(i);
        List<Mention> withIndex = mentionsByHeadIndex.get(m.headIndex);
        if (withIndex == null) {
            withIndex = new ArrayList<>();
            mentionsByHeadIndex.put(m.headIndex, withIndex);
        }
        withIndex.add(m);
    }
    JsonObjectBuilder docFeatures = Json.createObjectBuilder();
    docFeatures.add("doc_id", id);
    docFeatures.add("type", document.docType == DocType.ARTICLE ? 1 : 0);
    docFeatures.add("source", document.docInfo.get("DOC_ID").split("/")[0]);
    JsonArrayBuilder sentences = Json.createArrayBuilder();
    for (CoreMap sentence : document.annotation.get(SentencesAnnotation.class)) {
        sentences.add(getSentenceArray(sentence.get(CoreAnnotations.TokensAnnotation.class)));
    }
    JsonObjectBuilder mentions = Json.createObjectBuilder();
    for (Mention m : document.predictedMentionsByID.values()) {
        Iterator<SemanticGraphEdge> iterator = m.enhancedDependency.incomingEdgeIterator(m.headIndexedWord);
        SemanticGraphEdge relation = iterator.hasNext() ? iterator.next() : null;
        String depRelation = relation == null ? "no-parent" : relation.getRelation().toString();
        String depParent = relation == null ? "<missing>" : relation.getSource().word();
        mentions.add(String.valueOf(m.mentionNum), Json.createObjectBuilder()
                .add("doc_id", id)
                .add("mention_id", m.mentionID)
                .add("mention_num", m.mentionNum)
                .add("sent_num", m.sentNum)
                .add("start_index", m.startIndex)
                .add("end_index", m.endIndex)
                .add("head_index", m.headIndex)
                .add("mention_type", m.mentionType.toString())
                .add("dep_relation", depRelation)
                .add("dep_parent", depParent)
                .add("sentence", getSentenceArray(m.sentenceWords))
                .add("contained-in-other-mention", mentionsByHeadIndex.get(m.headIndex).stream().anyMatch(m2 -> m != m2 && m.insideIn(m2)) ? 1 : 0)
                .build());
    }
    JsonArrayBuilder featureNames = Json.createArrayBuilder().add("same-speaker").add("antecedent-is-mention-speaker").add("mention-is-antecedent-speaker").add("relaxed-head-match").add("exact-string-match").add("relaxed-string-match");
    JsonObjectBuilder features = Json.createObjectBuilder();
    JsonObjectBuilder labels = Json.createObjectBuilder();
    for (Map.Entry<Pair<Integer, Integer>, Boolean> e : mentionPairs.entrySet()) {
        Mention m1 = document.predictedMentionsByID.get(e.getKey().first);
        Mention m2 = document.predictedMentionsByID.get(e.getKey().second);
        String key = m1.mentionNum + " " + m2.mentionNum;
        JsonArrayBuilder builder = Json.createArrayBuilder();
        for (int val : CategoricalFeatureExtractor.pairwiseFeatures(document, m1, m2, dictionaries, conll)) {
            builder.add(val);
        }
        features.add(key, builder.build());
        labels.add(key, e.getValue() ? 1 : 0);
    }
    JsonObject docData = Json.createObjectBuilder()
            .add("sentences", sentences.build())
            .add("mentions", mentions.build())
            .add("labels", labels.build())
            .add("pair_feature_names", featureNames.build())
            .add("pair_features", features.build())
            .add("document_features", docFeatures.build())
            .build();
    dataWriter.println(docData);
}
Also used: HashMap(java.util.HashMap) JsonObject(javax.json.JsonObject) SemanticGraphEdge(edu.stanford.nlp.semgraph.SemanticGraphEdge) CorefCluster(edu.stanford.nlp.coref.data.CorefCluster) Mention(edu.stanford.nlp.coref.data.Mention) CoreAnnotations(edu.stanford.nlp.ling.CoreAnnotations) ArrayList(java.util.ArrayList) List(java.util.List) JsonArrayBuilder(javax.json.JsonArrayBuilder) JsonObjectBuilder(javax.json.JsonObjectBuilder) CoreMap(edu.stanford.nlp.util.CoreMap) Map(java.util.Map) Pair(edu.stanford.nlp.util.Pair)
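
Each call emits one JSON object per line on the two writers. Below is a minimal sketch of reading the gold-cluster stream back in, assuming the one-object-per-line layout produced by goldClusterWriter.println above; the file name is hypothetical, and the snippet needs javax.json (Json, JsonArray, JsonObject) plus java.io (BufferedReader, FileReader, StringReader).

// Hypothetical consumer: each line maps a document id to an array of gold
// clusters, each cluster being an array of mention ids (see the builder above).
try (BufferedReader in = new BufferedReader(new FileReader("gold-clusters.jsonl"))) {
    String line;
    while ((line = in.readLine()) != null) {
        JsonObject doc = Json.createReader(new StringReader(line)).readObject();
        for (String docId : doc.keySet()) {
            JsonArray clusters = doc.getJsonArray(docId);
            System.out.println("doc " + docId + ": " + clusters.size() + " gold clusters");
        }
    }
}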

Example 9 with CorefCluster

Use of edu.stanford.nlp.coref.data.CorefCluster in project CoreNLP by stanfordnlp.

From the class MetadataWriter, method process:

@Override
public void process(int id, Document document) {
    // Mention types
    mentionTypes.put(id, document.predictedMentionsByID.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().mentionType.toString())));
    // Gold clusters
    List<List<Integer>> clusters = new ArrayList<>();
    for (CorefCluster c : document.goldCorefClusters.values()) {
        List<Integer> cluster = new ArrayList<>();
        for (Mention m : c.getCorefMentions()) {
            cluster.add(m.mentionID);
        }
        clusters.add(cluster);
    }
    goldClusters.put(id, clusters);
    // Word counting
    if (countWords && mentionPairs.containsKey(id)) {
        Set<Pair<Integer, Integer>> pairs = mentionPairs.get(id).keySet();
        Set<Integer> mentions = new HashSet<>();
        for (Pair<Integer, Integer> pair : pairs) {
            mentions.add(pair.first);
            mentions.add(pair.second);
            Mention m1 = document.predictedMentionsByID.get(pair.first);
            Mention m2 = document.predictedMentionsByID.get(pair.second);
            wordCounts.incrementCount("h_" + m1.headWord.word().toLowerCase() + "_" + m2.headWord.word().toLowerCase());
        }
        Map<Integer, List<CoreLabel>> sentences = new HashMap<>();
        for (int mention : mentions) {
            Mention m = document.predictedMentionsByID.get(mention);
            if (!sentences.containsKey(m.sentNum)) {
                sentences.put(m.sentNum, m.sentenceWords);
            }
        }
        for (List<CoreLabel> sentence : sentences.values()) {
            for (int i = 0; i < sentence.size(); i++) {
                CoreLabel cl = sentence.get(i);
                if (cl == null) {
                    continue;
                }
                String w = cl.word().toLowerCase();
                wordCounts.incrementCount(w);
                if (i > 0) {
                    CoreLabel clp = sentence.get(i - 1);
                    if (clp == null) {
                        continue;
                    }
                    String wp = clp.word().toLowerCase();
                    wordCounts.incrementCount(wp + "_" + w);
                }
            }
        }
    }
}
Also used: HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) CoreLabel(edu.stanford.nlp.ling.CoreLabel) CorefCluster(edu.stanford.nlp.coref.data.CorefCluster) Mention(edu.stanford.nlp.coref.data.Mention) List(java.util.List) Pair(edu.stanford.nlp.util.Pair) HashSet(java.util.HashSet)
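
The unigram and bigram counts accumulated in wordCounts would typically be thresholded before serving as a feature vocabulary. The pass below is an assumption rather than part of MetadataWriter, and the cutoff value is purely illustrative.

// Hypothetical pruning pass over the wordCounts counter built above; keeps
// only items seen at least minCount times.
double minCount = 5.0;
Counter<String> vocab = new ClassicCounter<>();
for (Map.Entry<String, Double> e : wordCounts.entrySet()) {
    if (e.getValue() >= minCount) {
        vocab.setCount(e.getKey(), e.getValue());
    }
}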

Example 10 with CorefCluster

Use of edu.stanford.nlp.coref.data.CorefCluster in project CoreNLP by stanfordnlp.

From the class MentionDetectionEvaluator, method process:

@Override
public void process(int id, Document document) {
    for (CorefCluster gold : document.goldCorefClusters.values()) {
        for (Mention m : gold.corefMentions) {
            if (document.predictedMentionsByID.containsKey(m.mentionID)) {
                correctSystemMentions += 1;
            }
            goldMentions += 1;
        }
    }
    systemMentions += document.predictedMentionsByID.size();
    double precision = correctSystemMentions / (double) systemMentions;
    double recall = correctSystemMentions / (double) goldMentions;
    log.info("Precision: " + correctSystemMentions + " / " + systemMentions + " = " + String.format("%.4f", precision));
    log.info("Recall: " + correctSystemMentions + " / " + goldMentions + " = " + String.format("%.4f", recall));
    log.info(String.format("F1: %.4f", 2 * precision * recall / (precision + recall)));
}
Also used: CorefCluster(edu.stanford.nlp.coref.data.CorefCluster) Mention(edu.stanford.nlp.coref.data.Mention)
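
A quick worked example of the arithmetic behind the log lines above, using made-up counts:

// Illustrative numbers only; mirrors the precision/recall/F1 computation above.
int correct = 80, system = 100, gold = 90;
double precision = correct / (double) system;               // 0.8000
double recall = correct / (double) gold;                    // 0.8889
double f1 = 2 * precision * recall / (precision + recall);  // 0.8421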

Aggregations

CorefCluster (edu.stanford.nlp.coref.data.CorefCluster): 13 uses
Mention (edu.stanford.nlp.coref.data.Mention): 11 uses
ArrayList (java.util.ArrayList): 6 uses
CoreLabel (edu.stanford.nlp.ling.CoreLabel): 4 uses
CoreAnnotations (edu.stanford.nlp.ling.CoreAnnotations): 3 uses
HashMap (java.util.HashMap): 3 uses
CorefChain (edu.stanford.nlp.coref.data.CorefChain): 2 uses
Document (edu.stanford.nlp.coref.data.Document): 2 uses
SpeakerAnnotation (edu.stanford.nlp.ling.CoreAnnotations.SpeakerAnnotation): 2 uses
SemanticGraphEdge (edu.stanford.nlp.semgraph.SemanticGraphEdge): 2 uses
ClassicCounter (edu.stanford.nlp.stats.ClassicCounter): 2 uses
Tree (edu.stanford.nlp.trees.Tree): 2 uses
Pair (edu.stanford.nlp.util.Pair): 2 uses
HashSet (java.util.HashSet): 2 uses
List (java.util.List): 2 uses
Map (java.util.Map): 2 uses
Dataset (edu.stanford.nlp.classify.Dataset): 1 use
GeneralDataset (edu.stanford.nlp.classify.GeneralDataset): 1 use
Dictionaries (edu.stanford.nlp.coref.data.Dictionaries): 1 use
Animacy (edu.stanford.nlp.coref.data.Dictionaries.Animacy): 1 use