Use of zemberek.morphology.analysis.WordAnalysis in project zemberek-nlp by ahmetaa.
The class _MorphologicalAmbiguityResolverExperiment, method collect.
/**
 * Collects sentences from file {@code p} in which every token can be morphologically
 * analyzed with at least one and at most {@code maxAnalysisCount} analyses.
 * Sentences containing an unanalyzable token, a token with too many analyses, or a
 * token whose only analyses are proper-noun readings of a lower-case word are skipped.
 *
 * @param p path of the input file; read with {@code getSentences}.
 * @param maxAnalysisCount maximum allowed analysis count per token (inclusive).
 * @return sentences whose every token passed the analysis-count filter.
 * @throws IOException if the input file cannot be read.
 */
private List<SingleAnalysisSentence> collect(Path p, int maxAnalysisCount) throws IOException {
  List<String> sentences = getSentences(p);
  TurkishMorphology analyzer = TurkishMorphology.createWithDefaults();
  int tokenCount = 0;
  int sentenceCount = 0;
  List<SingleAnalysisSentence> result = new ArrayList<>();
  for (String sentence : sentences) {
    // Normalize: collapse whitespace (incl. non-breaking space U+00A0), drop soft
    // hyphens (U+00AD), expand the single ellipsis character to three dots.
    sentence = sentence.replaceAll("\\s+|\\u00a0", " ");
    sentence = sentence.replaceAll("[\\u00ad]", "");
    sentence = sentence.replaceAll("[…]", "...");
    List<Single> singleAnalysisWords = new ArrayList<>();
    List<Token> tokens = TurkishTokenizer.DEFAULT.tokenize(sentence);
    boolean failed = false;
    int i = 0; // index of the token among the successfully analyzed words.
    for (Token token : tokens) {
      tokenCount++;
      String rawWord = token.getText();
      // Normalize casing with Turkish locale rules (dotted/dotless i).
      String word = Character.isUpperCase(rawWord.charAt(0))
          ? Turkish.capitalize(rawWord)
          : rawWord.toLowerCase(Turkish.LOCALE);
      // Single cache lookup instead of containsKey + get.
      WordAnalysis results = cache.get(word);
      if (results == null) {
        results = analyzer.analyze(word);
        cache.put(word, results);
      }
      int analysisCount = results.analysisCount();
      if (analysisCount == 0) {
        // Record unanalyzable words, except those containing digits or punctuation.
        if (Strings.containsNone(word, "0123456789-.")) {
          failedWords.add(word);
        }
      }
      // Reject the whole sentence if a token has no analysis or too many.
      if (analysisCount == 0 || analysisCount > maxAnalysisCount) {
        failed = true;
        break;
      }
      // Drop proper-noun analyses of words that start lower-case in the raw text.
      List<SingleAnalysis> filtered = results.stream()
          .filter(s -> !(s.getDictionaryItem().secondaryPos == SecondaryPos.ProperNoun
              && Character.isLowerCase(rawWord.charAt(0))))
          .collect(Collectors.toList());
      if (filtered.size() == 0) {
        failed = true;
        break;
      }
      singleAnalysisWords.add(new Single(word, i, results.copyFor(filtered)));
      i++;
    }
    if (!failed) {
      result.add(new SingleAnalysisSentence(sentence, singleAnalysisWords));
    }
    sentenceCount++;
    if (sentenceCount % 2000 == 0) {
      Log.info("%d sentences %d tokens analyzed. %d found", sentenceCount, tokenCount, result.size());
    }
  }
  return result;
}
Use of zemberek.morphology.analysis.WordAnalysis in project zemberek-nlp by ahmetaa.
The class ZemberekNlpScripts, method readmeExample1.
@Test
@Ignore("Not a Test")
public void readmeExample1() throws IOException {
  // Analyze a single word and print every morphological analysis in long format.
  TurkishMorphology morphology = TurkishMorphology.createWithDefaults();
  WordAnalysis analyses = morphology.analyze("kalemin");
  for (SingleAnalysis analysis : analyses) {
    System.out.println(analysis.formatLong());
  }
}
Use of zemberek.morphology.analysis.WordAnalysis in project zemberek-nlp by ahmetaa.
The class AmbiguityStats, method ambiguousWordStats.
/**
 * Counts, for every whitespace-separated token in the given file, how often it is
 * morphologically ambiguous (more than one analysis), then prints the most frequent
 * ambiguous words with their percentage share and summary statistics.
 */
public void ambiguousWordStats(String filename) throws IOException {
  List<String> lines = readAll(filename);
  Histogram<String> ambiguous = new Histogram<>(1000000);
  Splitter tokenizer = Splitter.on(" ").omitEmptyStrings().trimResults();
  int processed = 0;
  for (String line : lines) {
    for (String token : tokenizer.split(line)) {
      WordAnalysis analyses = parser.analyze(token);
      processed++;
      if (processed % 50000 == 0) {
        System.out.println("Processed: " + processed);
      }
      // A word is ambiguous when it has more than one analysis.
      if (analyses.analysisCount() > 1) {
        ambiguous.add(token);
      }
    }
  }
  System.out.println("Total: " + processed);
  Stats st = new Stats(0.002);
  st.allCounts = (int) ambiguous.totalCount();
  st.allUniques = ambiguous.size();
  for (String word : ambiguous.getSortedList()) {
    int count = ambiguous.getCount(word);
    if (!st.overCutoff(count)) {
      continue; // skip words below the significance cutoff.
    }
    String p1 = percentStr3(count, st.allCounts);
    st.significantCounts += count;
    st.significantUniques++;
    System.out.println(word + " : " + count + " " + pp(p1));
  }
  st.dump();
}
Use of zemberek.morphology.analysis.WordAnalysis in project zemberek-nlp by ahmetaa.
The class NormalizationScripts, method generateNormalizationVocabularies.
/**
 * Builds three vocabularies for normalization from clean and noisy corpora counts:
 * "correct", "incorrect" and "possibly-incorrect" word sets, saved under {@code outRoot}.
 * Several diagnostic histograms (zero-frequency words, unusual proper nouns / roots,
 * ascii duplicates) are also saved along the way.
 *
 * @param morphology analyzer used to classify words (unusual proper nouns / roots).
 * @param cleanRoot  directory containing "correct" / "incorrect" counts from clean corpora.
 * @param noisyRoot  directory containing "correct" / "incorrect" counts from noisy corpora.
 * @param outRoot    output directory; created if missing.
 * @throws IOException on any file read/write failure.
 */
static void generateNormalizationVocabularies(
    TurkishMorphology morphology,
    Path cleanRoot,
    Path noisyRoot,
    Path outRoot) throws IOException {
  Files.createDirectories(outRoot);
  Histogram<String> correctFromNoisy = Histogram.loadFromUtf8File(noisyRoot.resolve("correct"), ' ');
  Log.info("Correct from noisy Loaded");
  Histogram<String> correctFromClean = Histogram.loadFromUtf8File(cleanRoot.resolve("correct"), ' ');
  Log.info("Correct from clean Loaded");
  // Drop singletons from both sides.
  correctFromClean.removeSmaller(2);
  correctFromNoisy.removeSmaller(2);
  Histogram<String> zero = new Histogram<>();
  Histogram<String> zeroWordZeroLemma = new Histogram<>();
  Histogram<String> zeroWordLowLemma = new Histogram<>();
  Histogram<String> lowFreq = new Histogram<>();
  Histogram<String> lowFreqLowLemmaFreq = new Histogram<>();
  Histogram<String> unusualProper = new Histogram<>();
  Histogram<String> unusualRoots = new Histogram<>();
  Histogram<String> ignore = new Histogram<>();
  double nTotal = correctFromNoisy.totalCount();
  double cTotal = correctFromClean.totalCount();
  for (String s : correctFromNoisy) {
    // Words containing '.' (abbreviations, URLs...) are excluded from all sets.
    if (s.contains(".")) {
      ignore.add(s);
      continue;
    }
    int nCount = correctFromNoisy.getCount(s);
    double nFreq = nCount / nTotal;
    WordAnalysis an = morphology.analyze(s);
    if (unusualProper(an)) {
      unusualProper.add(s, correctFromNoisy.getCount(s));
      continue;
    }
    if (unusualRoot(an)) {
      unusualRoots.add(s, correctFromNoisy.getCount(s));
      continue;
    }
    if (!correctFromClean.contains(s)) {
      // Word never seen in clean corpora.
      zero.add(s, nCount);
      if (an.analysisCount() > 0) {
        Set<String> allLemmas = new HashSet<>();
        for (SingleAnalysis analysis : an) {
          allLemmas.addAll(analysis.getLemmas());
        }
        boolean none = true;
        boolean lowLemmaRatio = true;
        // TODO: this is not the best way. try extracting lemma frequencies from correct from clean
        for (String l : allLemmas) {
          if (correctFromClean.contains(l)) {
            none = false;
            double lnf = correctFromNoisy.getCount(l) / nTotal;
            // Fix: clean-corpus lemma frequency must be normalized by the clean
            // total (cTotal), not the noisy total (nTotal).
            double lcf = correctFromClean.getCount(l) / cTotal;
            if (lnf / lcf > 10) {
              lowLemmaRatio = false;
              break;
            }
          }
        }
        if (none) {
          zeroWordZeroLemma.add(s, nCount);
        }
        if (lowLemmaRatio) {
          zeroWordLowLemma.add(s, nCount);
        }
      }
      continue;
    }
    // Seen in both corpora: flag words far more frequent in noisy than clean text.
    double cFreq = correctFromClean.getCount(s) / cTotal;
    if (nFreq / cFreq > 30) {
      lowFreq.add(s, nCount);
    }
  }
  Log.info("Saving Possibly incorrect words.");
  zero.saveSortedByCounts(noisyRoot.resolve("possibly-incorrect-zero"), " ");
  zeroWordZeroLemma.saveSortedByCounts(noisyRoot.resolve("possibly-incorrect-zero-no-lemma"), " ");
  zeroWordLowLemma.saveSortedByCounts(noisyRoot.resolve("possibly-incorrect-zero-low-lemma"), " ");
  lowFreq.saveSortedByCounts(noisyRoot.resolve("possibly-incorrect-lowfreq"), " ");
  Log.info("Creating vocabularies");
  // ----------- noisy ------------
  Histogram<String> noisy = new Histogram<>(1_000_000);
  Histogram<String> noisyFromCleanCorpora = Histogram.loadFromUtf8File(cleanRoot.resolve("incorrect"), ' ');
  Histogram<String> noisyFromNoisyCorpora = Histogram.loadFromUtf8File(noisyRoot.resolve("incorrect"), ' ');
  Log.info("Incorrect words loaded.");
  noisyFromCleanCorpora.removeSmaller(2);
  noisyFromNoisyCorpora.removeSmaller(2);
  noisy.add(noisyFromCleanCorpora);
  noisy.add(noisyFromNoisyCorpora);
  // ----------- possibly incorrect ------------
  Histogram<String> possiblyIncorrect = new Histogram<>(1000_000);
  possiblyIncorrect.add(zeroWordZeroLemma);
  for (String lf : lowFreq) {
    if (!possiblyIncorrect.contains(lf)) {
      // Fix: take the count from lowFreq itself; the original read it from
      // zeroWordZeroLemma, which returns 0 for words not in that histogram.
      possiblyIncorrect.add(lf, lowFreq.getCount(lf));
    }
  }
  int threshold = 2;
  for (String z : zero) {
    int c = zero.getCount(z);
    if (!possiblyIncorrect.contains(z) && c > threshold) {
      possiblyIncorrect.add(z, c);
    }
  }
  // ----------- clean ------------
  Histogram<String> clean = new Histogram<>(1000_000);
  clean.add(correctFromClean);
  clean.add(correctFromNoisy);
  for (String s : clean) {
    if (s.contains(".")) {
      ignore.add(s);
    }
  }
  clean.removeAll(ignore);
  Histogram<String> asciiDuplicates = getAsciiDuplicates(clean);
  asciiDuplicates.saveSortedByCounts(outRoot.resolve("ascii-dups"), " ");
  possiblyIncorrect.add(asciiDuplicates);
  unusualProper.saveSortedByCounts(outRoot.resolve("unusual-proper"), " ");
  for (String s : unusualProper) {
    if (!possiblyIncorrect.contains(s)) {
      possiblyIncorrect.add(s, unusualProper.getCount(s));
    }
  }
  unusualRoots.saveSortedByCounts(outRoot.resolve("unusual-root"), " ");
  for (String s : unusualRoots) {
    if (!possiblyIncorrect.contains(s)) {
      possiblyIncorrect.add(s, unusualRoots.getCount(s));
    }
  }
  possiblyIncorrect.removeAll(ignore);
  // Anything moved into "possibly incorrect" (or flagged unusual) leaves "clean".
  clean.removeAll(asciiDuplicates);
  clean.removeAll(unusualProper);
  clean.removeAll(unusualRoots);
  clean.removeAll(possiblyIncorrect);
  // Sanity checks: the three sets should be pairwise disjoint.
  Set<String> intersectionOfKeys = noisy.getIntersectionOfKeys(clean);
  int sharedKeyCount = intersectionOfKeys.size();
  if (sharedKeyCount > 0) {
    Log.warn("Incorrect and correct sets share %d keys", sharedKeyCount);
  }
  sharedKeyCount = noisy.getIntersectionOfKeys(possiblyIncorrect).size();
  if (sharedKeyCount > 0) {
    Log.warn("Incorrect and possibly incorrect sets share %d keys", sharedKeyCount);
  }
  sharedKeyCount = clean.getIntersectionOfKeys(possiblyIncorrect).size();
  if (sharedKeyCount > 0) {
    Log.warn("Correct and possibly incorrect sets share %d keys", sharedKeyCount);
  }
  Log.info("Saving sets.");
  clean.saveSortedByCounts(outRoot.resolve("correct"), " ");
  Log.info("Correct words saved.");
  noisy.saveSortedByCounts(outRoot.resolve("incorrect"), " ");
  Log.info("Incorrect words saved.");
  possiblyIncorrect.saveSortedByCounts(outRoot.resolve("possibly-incorrect"), " ");
  Log.info("Possibly Incorrect words saved.");
}
Use of zemberek.morphology.analysis.WordAnalysis in project zemberek-nlp by ahmetaa.
The class CorpusNerCollector, method main.
/**
 * Scans a corpus, runs NER over short sentences, and writes sentences containing
 * 1-2 "good" named entities as training data (bracket annotation style).
 * Paths are hard-coded to the author's machine; exits after writing 11 files of
 * up to 1000 sentences each.
 */
public static void main(String[] args) throws IOException {
Path corporaRoot = Paths.get("/media/ahmetaa/depo/corpora");
Path corpusDirList = corporaRoot.resolve("ner-list");
Path outRoot = Paths.get("/media/ahmetaa/depo/ner/out");
Files.createDirectories(outRoot);
// Load corpus in chunks of 10_000 lines.
BlockTextLoader corpusProvider = BlockTextLoader.fromDirectoryRoot(corporaRoot, corpusDirList, 10_000);
// assumes you generated a model in my-model directory.
Path modelRoot = Paths.get("my-model");
TurkishMorphology morphology = TurkishMorphology.builder().setLexicon(RootLexicon.getDefault()).disableUnidentifiedTokenAnalyzer().build();
PerceptronNer ner = PerceptronNer.loadModel(modelRoot, morphology);
// Punctuation tokens that disqualify a named entity.
Set<String> illegal = Sets.newHashSet(".", ",", "!", "?", ":");
List<String> lines = new ArrayList<>();
int c = 0; // sentences accumulated in the current output file.
int k = 0; // output file counter.
for (TextChunk chunk : corpusProvider) {
// De-duplicate sentences while preserving order.
LinkedHashSet<String> sentences = new LinkedHashSet<>(TextCleaner.cleanAndExtractSentences(chunk.getData()));
for (String sentence : sentences) {
// Only keep short sentences.
if (sentence.length() > 100) {
continue;
}
NerSentence result = ner.findNamedEntities(sentence);
int neCount = result.getNamedEntities().size();
List<NamedEntity> nes = result.getNamedEntities();
boolean badNamedEntity = false;
for (NamedEntity ne : nes) {
for (NerToken token : ne.tokens) {
// Reject entities containing punctuation tokens.
if (illegal.contains(token.word)) {
badNamedEntity = true;
break;
}
WordAnalysis a = morphology.analyze(token.word);
// Reject if any analysis is neither an abbreviation nor a proper noun.
// NOTE(review): this fires on ANY non-proper analysis; if the intent was
// "no proper-noun analysis at all", the condition should aggregate over
// all analyses instead — confirm against training data expectations.
for (SingleAnalysis analysis : a) {
DictionaryItem item = analysis.getDictionaryItem();
if (item.secondaryPos != SecondaryPos.Abbreviation && item.secondaryPos != SecondaryPos.ProperNoun) {
badNamedEntity = true;
break;
}
}
}
if (badNamedEntity) {
break;
}
}
if (badNamedEntity) {
continue;
}
// Keep sentences with 1 or 2 named entities.
if (neCount > 0 && neCount < 3) {
lines.add(result.getAsTrainingSentence(AnnotationStyle.BRACKET));
c++;
// Flush every 1000 sentences to a new file named "<chunk-id>-<k>".
if (c == 1000) {
Path out = outRoot.resolve(chunk.id + "-" + k);
Files.write(out, lines);
Log.info("%s created. ", out);
lines = new ArrayList<>();
c = 0;
k++;
// Stop after 11 output files.
if (k > 10) {
System.exit(0);
}
}
}
}
}
}
Aggregations