Example 46 with WordAnalysis

Use of zemberek.morphology.analysis.WordAnalysis in project zemberek-nlp by ahmetaa.

The class MorphologicalAmbiguityResolverExperiment, method collect.

private List<SingleAnalysisSentence> collect(Path p, int maxAnalysisCount) throws IOException {
    List<String> sentences = getSentences(p);
    TurkishMorphology analyzer = TurkishMorphology.createWithDefaults();
    int tokenCount = 0;
    int sentenceCount = 0;
    List<SingleAnalysisSentence> result = new ArrayList<>();
    for (String sentence : sentences) {
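        // Normalize whitespace (including no-break spaces), strip soft hyphens and replace the ellipsis character before tokenization.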
        sentence = sentence.replaceAll("\\s+|\\u00a0", " ");
        sentence = sentence.replaceAll("[\\u00ad]", "");
        sentence = sentence.replaceAll("[…]", "...");
        List<Single> singleAnalysisWords = new ArrayList<>();
        List<Token> tokens = TurkishTokenizer.DEFAULT.tokenize(sentence);
        boolean failed = false;
        int i = 0;
        for (Token token : tokens) {
            tokenCount++;
            String rawWord = token.getText();
            String word = Character.isUpperCase(rawWord.charAt(0)) ? Turkish.capitalize(rawWord) : rawWord.toLowerCase(Turkish.LOCALE);
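            // Reuse cached analyses so repeated surface forms are analyzed only once.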
            WordAnalysis results;
            if (cache.containsKey(word)) {
                results = cache.get(word);
            } else {
                results = analyzer.analyze(word);
                cache.put(word, results);
            }
            if (results.analysisCount() == 0) {
                if (Strings.containsNone(word, "0123456789-.")) {
                    failedWords.add(word);
                }
            }
            if (results.analysisCount() < 1 || results.analysisCount() > maxAnalysisCount) {
                failed = true;
                break;
            } else {
                List<SingleAnalysis> filtered = results.stream()
                        .filter(s -> !(s.getDictionaryItem().secondaryPos == SecondaryPos.ProperNoun
                                && Character.isLowerCase(rawWord.charAt(0))))
                        .collect(Collectors.toList());
                if (filtered.size() == 0) {
                    failed = true;
                    break;
                }
                singleAnalysisWords.add(new Single(word, i, results.copyFor(filtered)));
                i++;
            }
        }
        if (!failed) {
            result.add(new SingleAnalysisSentence(sentence, singleAnalysisWords));
        }
        sentenceCount++;
        if (sentenceCount % 2000 == 0) {
            Log.info("%d sentences %d tokens analyzed. %d found", sentenceCount, tokenCount, result.size());
        }
    }
    return result;
}
Also used : Strings(zemberek.core.io.Strings) HashMap(java.util.HashMap) ArrayList(java.util.ArrayList) Turkish(zemberek.core.turkish.Turkish) Token(zemberek.tokenization.Token) SingleAnalysis(zemberek.morphology.analysis.SingleAnalysis) Map(java.util.Map) TurkishTokenizer(zemberek.tokenization.TurkishTokenizer) Log(zemberek.core.logging.Log) Path(java.nio.file.Path) LinkedHashSet(java.util.LinkedHashSet) Histogram(zemberek.core.collections.Histogram) SecondaryPos(zemberek.core.turkish.SecondaryPos) PrintWriter(java.io.PrintWriter) Files(java.nio.file.Files) IOException(java.io.IOException) Collectors(java.util.stream.Collectors) StandardCharsets(java.nio.charset.StandardCharsets) WordAnalysis(zemberek.morphology.analysis.WordAnalysis) Objects(java.util.Objects) List(java.util.List) Paths(java.nio.file.Paths) TurkishSentenceExtractor(zemberek.tokenization.TurkishSentenceExtractor) LanguageIdentifier(zemberek.langid.LanguageIdentifier) Pattern(java.util.regex.Pattern)
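
The cache handling in collect() can be expressed more compactly. Below is a minimal sketch, not part of zemberek-nlp: a hypothetical CachingAnalyzer class that memoizes WordAnalysis results per surface form, using only the TurkishMorphology.analyze(String) call shown above.

import java.util.HashMap;
import java.util.Map;
import zemberek.morphology.TurkishMorphology;
import zemberek.morphology.analysis.WordAnalysis;

// Hypothetical helper (illustrative name), not part of the project.
class CachingAnalyzer {

    private final TurkishMorphology morphology;
    private final Map<String, WordAnalysis> cache = new HashMap<>();

    CachingAnalyzer(TurkishMorphology morphology) {
        this.morphology = morphology;
    }

    WordAnalysis analyze(String word) {
        // Equivalent to the containsKey/get/put sequence in collect(), in a single call.
        return cache.computeIfAbsent(word, morphology::analyze);
    }
}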

Example 47 with WordAnalysis

Use of zemberek.morphology.analysis.WordAnalysis in project zemberek-nlp by ahmetaa.

The class ZemberekNlpScripts, method readmeExample1.

@Test
@Ignore("Not a Test")
public void readmeExample1() throws IOException {
    TurkishMorphology morphology = TurkishMorphology.createWithDefaults();
    WordAnalysis results = morphology.analyze("kalemin");
    results.forEach(s -> System.out.println(s.formatLong()));
}
Also used : WordAnalysis(zemberek.morphology.analysis.WordAnalysis) TurkishMorphology(zemberek.morphology.TurkishMorphology) Ignore(org.junit.Ignore) Test(org.junit.Test)
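
A minimal variation on readmeExample1 (a sketch, not taken from the project): iterate the analyses and print their lemmas instead of the long format. Imports of TurkishMorphology, WordAnalysis and SingleAnalysis are assumed; getLemmas() is used the same way in the normalization example further below.

TurkishMorphology morphology = TurkishMorphology.createWithDefaults();
WordAnalysis results = morphology.analyze("kalemin");
for (SingleAnalysis analysis : results) {
    // Each SingleAnalysis is one morphological reading of the surface form.
    System.out.println(analysis.getLemmas());
}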

Example 48 with WordAnalysis

Use of zemberek.morphology.analysis.WordAnalysis in project zemberek-nlp by ahmetaa.

The class AmbiguityStats, method ambiguousWordStats.

public void ambiguousWordStats(String filename) throws IOException {
    List<String> lines = readAll(filename);
    Histogram<String> uniques = new Histogram<>(1000000);
    int total = 0;
    Splitter splitter = Splitter.on(" ").omitEmptyStrings().trimResults();
    for (String line : lines) {
        for (String s : splitter.split(line)) {
            WordAnalysis results = parser.analyze(s);
            total++;
            if (total % 50000 == 0) {
                System.out.println("Processed: " + total);
            }
            if (results.analysisCount() > 1) {
                uniques.add(s);
            }
        }
    }
    System.out.println("Total: " + total);
    Stats st = new Stats(0.002);
    st.allCounts = (int) uniques.totalCount();
    st.allUniques = uniques.size();
    for (String s : uniques.getSortedList()) {
        int count = uniques.getCount(s);
        if (st.overCutoff(count)) {
            String p1 = percentStr3(count, st.allCounts);
            st.significantCounts += count;
            st.significantUniques++;
            System.out.println(s + " : " + count + "    " + pp(p1));
        }
    }
    st.dump();
}
Also used : Histogram(zemberek.core.collections.Histogram) Splitter(com.google.common.base.Splitter) WordAnalysis(zemberek.morphology.analysis.WordAnalysis)
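
The heart of ambiguousWordStats is the results.analysisCount() > 1 test. A self-contained sketch of that check, assuming a default TurkishMorphology instance; the class name and sample tokens below are illustrative only.

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import zemberek.morphology.TurkishMorphology;
import zemberek.morphology.analysis.WordAnalysis;

public class AmbiguityCheck {

    public static void main(String[] args) throws IOException {
        TurkishMorphology morphology = TurkishMorphology.createWithDefaults();
        List<String> tokens = Arrays.asList("kalemin", "yüz", "masa"); // illustrative tokens
        int ambiguous = 0;
        for (String token : tokens) {
            WordAnalysis results = morphology.analyze(token);
            if (results.analysisCount() > 1) { // more than one reading: ambiguous
                ambiguous++;
            }
        }
        System.out.printf("%d of %d tokens are ambiguous%n", ambiguous, tokens.size());
    }
}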

Example 49 with WordAnalysis

Use of zemberek.morphology.analysis.WordAnalysis in project zemberek-nlp by ahmetaa.

The class NormalizationScripts, method generateNormalizationVocabularies.

static void generateNormalizationVocabularies(TurkishMorphology morphology, Path cleanRoot, Path noisyRoot, Path outRoot) throws IOException {
    Files.createDirectories(outRoot);
    Histogram<String> correctFromNoisy = Histogram.loadFromUtf8File(noisyRoot.resolve("correct"), ' ');
    Log.info("Correct from noisy Loaded");
    Histogram<String> correctFromClean = Histogram.loadFromUtf8File(cleanRoot.resolve("correct"), ' ');
    Log.info("Correct from clean Loaded");
    correctFromClean.removeSmaller(2);
    correctFromNoisy.removeSmaller(2);
    Histogram<String> zero = new Histogram<>();
    Histogram<String> zeroWordZeroLemma = new Histogram<>();
    Histogram<String> zeroWordLowLemma = new Histogram<>();
    Histogram<String> lowFreq = new Histogram<>();
    Histogram<String> lowFreqLowLemmaFreq = new Histogram<>();
    Histogram<String> unusualProper = new Histogram<>();
    Histogram<String> unusualRoots = new Histogram<>();
    Histogram<String> ignore = new Histogram<>();
    double nTotal = correctFromNoisy.totalCount();
    double cTotal = correctFromClean.totalCount();
    for (String s : correctFromNoisy) {
        if (s.contains(".")) {
            ignore.add(s);
            continue;
        }
        int nCount = correctFromNoisy.getCount(s);
        double nFreq = nCount / nTotal;
        WordAnalysis an = morphology.analyze(s);
        if (unusualProper(an)) {
            unusualProper.add(s, correctFromNoisy.getCount(s));
            continue;
        }
        if (unusualRoot(an)) {
            unusualRoots.add(s, correctFromNoisy.getCount(s));
            continue;
        }
        if (!correctFromClean.contains(s)) {
            zero.add(s, nCount);
            if (an.analysisCount() > 0) {
                Set<String> allLemmas = new HashSet<>();
                for (SingleAnalysis analysis : an) {
                    allLemmas.addAll(analysis.getLemmas());
                }
                boolean none = true;
                boolean lowLemmaRatio = true;
                // TODO: this is not the best way. try extracting lemma frequencies from correct from clean
                for (String l : allLemmas) {
                    if (correctFromClean.contains(l)) {
                        none = false;
                        double lnf = correctFromNoisy.getCount(l) / nTotal;
                        double lcf = correctFromClean.getCount(l) / nTotal;
                        if (lnf / lcf > 10) {
                            lowLemmaRatio = false;
                            break;
                        }
                    }
                }
                if (none) {
                    zeroWordZeroLemma.add(s, nCount);
                }
                if (lowLemmaRatio) {
                    zeroWordLowLemma.add(s, nCount);
                }
            }
            continue;
        }
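        // Words that are disproportionately frequent in the noisy corpus, relative to corpus size, are flagged as suspicious.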
        double cFreq = correctFromClean.getCount(s) / cTotal;
        if (nFreq / cFreq > 30) {
            lowFreq.add(s, nCount);
        }
    }
    Log.info("Saving Possibly incorrect words.");
    zero.saveSortedByCounts(noisyRoot.resolve("possibly-incorrect-zero"), " ");
    zeroWordZeroLemma.saveSortedByCounts(noisyRoot.resolve("possibly-incorrect-zero-no-lemma"), " ");
    zeroWordLowLemma.saveSortedByCounts(noisyRoot.resolve("possibly-incorrect-zero-low-lemma"), " ");
    lowFreq.saveSortedByCounts(noisyRoot.resolve("possibly-incorrect-lowfreq"), " ");
    Log.info("Creating vocabularies");
    // ----------- noisy ------------
    Histogram<String> noisy = new Histogram<>(1_000_000);
    Histogram<String> noisyFromCleanCorpora = Histogram.loadFromUtf8File(cleanRoot.resolve("incorrect"), ' ');
    Histogram<String> noisyFromNoisyCorpora = Histogram.loadFromUtf8File(noisyRoot.resolve("incorrect"), ' ');
    Log.info("Incorrect words loaded.");
    noisyFromCleanCorpora.removeSmaller(2);
    noisyFromNoisyCorpora.removeSmaller(2);
    noisy.add(noisyFromCleanCorpora);
    noisy.add(noisyFromNoisyCorpora);
    Histogram<String> possiblyIncorrect = new Histogram<>(1_000_000);
    possiblyIncorrect.add(zeroWordZeroLemma);
    for (String lf : lowFreq) {
        if (!possiblyIncorrect.contains(lf)) {
            possiblyIncorrect.add(lf, lowFreq.getCount(lf));
        }
    }
    int threshold = 2;
    for (String z : zero) {
        int c = zero.getCount(z);
        if (!possiblyIncorrect.contains(z) && c > threshold) {
            possiblyIncorrect.add(z, c);
        }
    }
    Histogram<String> clean = new Histogram<>(1_000_000);
    clean.add(correctFromClean);
    clean.add(correctFromNoisy);
    for (String s : clean) {
        if (s.contains(".")) {
            ignore.add(s);
        }
    }
    clean.removeAll(ignore);
    Histogram<String> asciiDuplicates = getAsciiDuplicates(clean);
    asciiDuplicates.saveSortedByCounts(outRoot.resolve("ascii-dups"), " ");
    possiblyIncorrect.add(asciiDuplicates);
    unusualProper.saveSortedByCounts(outRoot.resolve("unusual-proper"), " ");
    for (String s : unusualProper) {
        if (!possiblyIncorrect.contains(s)) {
            possiblyIncorrect.add(s, unusualProper.getCount(s));
        }
    }
    unusualRoots.saveSortedByCounts(outRoot.resolve("unusual-root"), " ");
    for (String s : unusualRoots) {
        if (!possiblyIncorrect.contains(s)) {
            possiblyIncorrect.add(s, unusualRoots.getCount(s));
        }
    }
    possiblyIncorrect.removeAll(ignore);
    clean.removeAll(asciiDuplicates);
    clean.removeAll(unusualProper);
    clean.removeAll(unusualRoots);
    clean.removeAll(possiblyIncorrect);
    Set<String> intersectionOfKeys = noisy.getIntersectionOfKeys(clean);
    int sharedKeyCount = intersectionOfKeys.size();
    if (sharedKeyCount > 0) {
        Log.warn("Incorrect and correct sets share %d keys", sharedKeyCount);
    }
    sharedKeyCount = noisy.getIntersectionOfKeys(possiblyIncorrect).size();
    if (sharedKeyCount > 0) {
        Log.warn("Incorrect and possibly incorrect sets share %d keys", sharedKeyCount);
    }
    sharedKeyCount = clean.getIntersectionOfKeys(possiblyIncorrect).size();
    if (sharedKeyCount > 0) {
        Log.warn("Correct and possibly incorrect sets share %d keys", sharedKeyCount);
    }
    Log.info("Saving sets.");
    clean.saveSortedByCounts(outRoot.resolve("correct"), " ");
    Log.info("Correct words saved.");
    noisy.saveSortedByCounts(outRoot.resolve("incorrect"), " ");
    Log.info("Incorrect words saved.");
    possiblyIncorrect.saveSortedByCounts(outRoot.resolve("possibly-incorrect"), " ");
    Log.info("Possibly Incorrect words saved.");
}
Also used : SingleAnalysis(zemberek.morphology.analysis.SingleAnalysis) Histogram(zemberek.core.collections.Histogram) WordAnalysis(zemberek.morphology.analysis.WordAnalysis) HashSet(java.util.HashSet) LinkedHashSet(java.util.LinkedHashSet)
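
generateNormalizationVocabularies relies on a frequency-ratio heuristic: a word that is far more frequent, relative to corpus size, in the noisy corpus than in the clean corpus is treated as possibly incorrect. Below is a stripped-down sketch of just that comparison, assuming the Histogram API used above; the method name and file-path parameters are placeholders, and the threshold of 30 is taken from the method itself.

static void flagSuspiciousWords(Path noisyCountFile, Path cleanCountFile) throws IOException {
    Histogram<String> noisy = Histogram.loadFromUtf8File(noisyCountFile, ' ');
    Histogram<String> clean = Histogram.loadFromUtf8File(cleanCountFile, ' ');
    double nTotal = noisy.totalCount();
    double cTotal = clean.totalCount();
    for (String word : noisy) {
        if (!clean.contains(word)) {
            continue; // words absent from the clean corpus are handled separately above
        }
        double nFreq = noisy.getCount(word) / nTotal;
        double cFreq = clean.getCount(word) / cTotal;
        if (nFreq / cFreq > 30) { // same ratio threshold as in the method above
            System.out.println("possibly incorrect: " + word);
        }
    }
}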

Example 50 with WordAnalysis

Use of zemberek.morphology.analysis.WordAnalysis in project zemberek-nlp by ahmetaa.

The class CorpusNerCollector, method main.

public static void main(String[] args) throws IOException {
    Path corporaRoot = Paths.get("/media/ahmetaa/depo/corpora");
    Path corpusDirList = corporaRoot.resolve("ner-list");
    Path outRoot = Paths.get("/media/ahmetaa/depo/ner/out");
    Files.createDirectories(outRoot);
    BlockTextLoader corpusProvider = BlockTextLoader.fromDirectoryRoot(corporaRoot, corpusDirList, 10_000);
    // assumes you generated a model in my-model directory.
    Path modelRoot = Paths.get("my-model");
    TurkishMorphology morphology = TurkishMorphology.builder().setLexicon(RootLexicon.getDefault()).disableUnidentifiedTokenAnalyzer().build();
    PerceptronNer ner = PerceptronNer.loadModel(modelRoot, morphology);
    Set<String> illegal = Sets.newHashSet(".", ",", "!", "?", ":");
    List<String> lines = new ArrayList<>();
    int c = 0;
    int k = 0;
    for (TextChunk chunk : corpusProvider) {
        LinkedHashSet<String> sentences = new LinkedHashSet<>(TextCleaner.cleanAndExtractSentences(chunk.getData()));
        for (String sentence : sentences) {
            if (sentence.length() > 100) {
                continue;
            }
            NerSentence result = ner.findNamedEntities(sentence);
            List<NamedEntity> nes = result.getNamedEntities();
            int neCount = nes.size();
            boolean badNamedEntity = false;
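            // Reject the sentence if any named entity contains punctuation or a word with a reading that is not a proper noun or abbreviation.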
            for (NamedEntity ne : nes) {
                for (NerToken token : ne.tokens) {
                    if (illegal.contains(token.word)) {
                        badNamedEntity = true;
                        break;
                    }
                    WordAnalysis a = morphology.analyze(token.word);
                    for (SingleAnalysis analysis : a) {
                        DictionaryItem item = analysis.getDictionaryItem();
                        if (item.secondaryPos != SecondaryPos.Abbreviation && item.secondaryPos != SecondaryPos.ProperNoun) {
                            badNamedEntity = true;
                            break;
                        }
                    }
                }
                if (badNamedEntity) {
                    break;
                }
            }
            if (badNamedEntity) {
                continue;
            }
            if (neCount > 0 && neCount < 3) {
                lines.add(result.getAsTrainingSentence(AnnotationStyle.BRACKET));
                c++;
                if (c == 1000) {
                    Path out = outRoot.resolve(chunk.id + "-" + k);
                    Files.write(out, lines);
                    Log.info("%s created. ", out);
                    lines = new ArrayList<>();
                    c = 0;
                    k++;
                    if (k > 10) {
                        System.exit(0);
                    }
                }
            }
        }
    }
}
Also used : Path(java.nio.file.Path) LinkedHashSet(java.util.LinkedHashSet) SingleAnalysis(zemberek.morphology.analysis.SingleAnalysis) BlockTextLoader(zemberek.core.text.BlockTextLoader) WordAnalysis(zemberek.morphology.analysis.WordAnalysis) ArrayList(java.util.ArrayList) TextChunk(zemberek.core.text.TextChunk) TurkishMorphology(zemberek.morphology.TurkishMorphology) DictionaryItem(zemberek.morphology.lexicon.DictionaryItem)
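
The analysis check inside the named-entity loop can be read as a small predicate: a token is kept only if every analysis of it is a proper noun or an abbreviation. Here is a sketch of that predicate with a made-up helper name; imports match those used above, and the logic mirrors the inner loop of main, including the fact that a word with no analyses passes the check.

// Hypothetical helper mirroring the filter in main(); not part of zemberek-nlp.
static boolean onlyProperOrAbbreviation(TurkishMorphology morphology, String word) {
    WordAnalysis analyses = morphology.analyze(word);
    for (SingleAnalysis analysis : analyses) {
        DictionaryItem item = analysis.getDictionaryItem();
        if (item.secondaryPos != SecondaryPos.Abbreviation
                && item.secondaryPos != SecondaryPos.ProperNoun) {
            return false; // at least one reading is an ordinary word
        }
    }
    return true; // every reading (or none at all) is a proper noun or abbreviation
}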

Aggregations

WordAnalysis (zemberek.morphology.analysis.WordAnalysis): 96
Test (org.junit.Test): 42
SingleAnalysis (zemberek.morphology.analysis.SingleAnalysis): 36
TurkishMorphology (zemberek.morphology.TurkishMorphology): 22
ArrayList (java.util.ArrayList): 21
SentenceAnalysis (zemberek.morphology.analysis.SentenceAnalysis): 19
LinkedHashSet (java.util.LinkedHashSet): 13
Ignore (org.junit.Ignore): 13
Histogram (zemberek.core.collections.Histogram): 12
Path (java.nio.file.Path): 11
PrintWriter (java.io.PrintWriter): 10
SentenceWordAnalysis (zemberek.morphology.analysis.SentenceWordAnalysis): 10
IOException (java.io.IOException): 6
HashSet (java.util.HashSet): 6
List (java.util.List): 6
WordAnalyzer (zemberek.morphology.analysis.WordAnalyzer): 6
SimpleGenerator (zemberek.morphology.generator.SimpleGenerator): 6
DictionaryItem (zemberek.morphology.lexicon.DictionaryItem): 6
DynamicLexiconGraph (zemberek.morphology.lexicon.graph.DynamicLexiconGraph): 6
Log (zemberek.core.logging.Log): 5