Use of zemberek.morphology.analysis.WordAnalysis in project zemberek-nlp by ahmetaa.
From class HunspellOperations, method generateAnnotationFileSingleSplit:
private static void generateAnnotationFileSingleSplit(Path vocab) throws IOException {
  List<String> words = Files.readAllLines(vocab, StandardCharsets.UTF_8);
  TurkishMorphology morphology = TurkishMorphology.createWithDefaults();
  List<String> annotations = new ArrayList<>();
  for (String word : words) {
    WordAnalysis analysis = morphology.analyze(word);
    if (!analysis.isCorrect()) {
      Log.warn("Cannot analyze %s", word);
      continue;
    }
    // Collect distinct "word stem ending" annotations, skipping proper nouns and abbreviations.
    LinkedHashSet<String> stemEndings = new LinkedHashSet<>();
    for (SingleAnalysis s : analysis) {
      if (s.getDictionaryItem().secondaryPos == SecondaryPos.ProperNoun
          || s.getDictionaryItem().secondaryPos == SecondaryPos.Abbreviation) {
        continue;
      }
      List<String> stems = s.getStems();
      for (String stem : stems) {
        String ending = word.substring(stem.length());
        // Sanity check: stem + ending must reconstruct the original word.
        if (!(stem + ending).equals(word)) {
          Log.warn("Stem + Ending %s+%s does not match word %s", stem, ending, word);
          continue;
        }
        if (ending.length() > 0) {
          stemEndings.add(word + " " + stem + " " + ending);
        } else {
          stemEndings.add(word + " " + stem);
        }
      }
    }
    annotations.add(String.join(",", stemEndings));
  }
  Files.write(Paths.get("data/vocabulary/annonations.txt"), annotations, StandardCharsets.UTF_8);
}
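For reference, a minimal standalone sketch of the WordAnalysis API the method relies on; the input word and printout format are illustrative only:

TurkishMorphology morphology = TurkishMorphology.createWithDefaults();
WordAnalysis analysis = morphology.analyze("evleri"); // arbitrary example word
for (SingleAnalysis s : analysis) {
  // getStems() lists every stem prefix this analysis permits.
  System.out.println(s.getStems() + " -> " + s.formatLong());
}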
Use of zemberek.morphology.analysis.WordAnalysis in project zemberek-nlp by ahmetaa.
From class HunspellOperations, method filterVocab:
private static void filterVocab(Path vocabFile, Path outFile) throws IOException {
  List<String> words = Files.readAllLines(vocabFile, StandardCharsets.UTF_8);
  TurkishMorphology morphology = TurkishMorphology.createWithDefaults();
  List<String> result = new ArrayList<>();
  for (String word : words) {
    WordAnalysis analysis = morphology.analyze(word);
    // Keep only words the analyzer can parse; isCorrect() is false for unknown words.
    if (!analysis.isCorrect()) {
      Log.warn("Cannot analyze %s", word);
      continue;
    }
    result.add(word);
  }
  Files.write(outFile, result, StandardCharsets.UTF_8);
}
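A hypothetical invocation; since filterVocab is private and static, the call would sit elsewhere in HunspellOperations, and both paths are placeholders:

filterVocab(Paths.get("data/vocabulary/vocab.txt"), Paths.get("data/vocabulary/vocab-filtered.txt"));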
Use of zemberek.morphology.analysis.WordAnalysis in project zemberek-nlp by ahmetaa.
From class DataConverter, method extract:
private static void extract(Path dataPath, Path output) throws IOException {
  DataSet set = com.google.common.io.Files.asCharSource(dataPath.toFile(), Charsets.UTF_8)
      .readLines(new DataSetLoader());
  TurkishMorphology morphology = TurkishMorphology.create(
      RootLexicon.builder()
          .addTextDictionaryResources(
              "tr/master-dictionary.dict",
              "tr/non-tdk.dict",
              "tr/proper.dict",
              "tr/proper-from-corpus.dict",
              "tr/abbreviations.dict",
              "tr/person-names.dict")
          .build());
  List<SentenceAnalysis> result = new ArrayList<>();
  Histogram<String> parseFails = new Histogram<>();
  for (SentenceData sentenceData : set) {
    List<String> tokens = Splitter.on(" ").splitToList(sentenceData.sentence());
    // Skip sentences whose token count does not match the gold parse count.
    if (tokens.size() == 0 || tokens.size() != sentenceData.correctParse.size()) {
      continue;
    }
    List<SentenceWordAnalysis> correctList = new ArrayList<>();
    for (int i = 0; i < tokens.size(); i++) {
      String s = tokens.get(i);
      String p = sentenceData.correctParse.get(i);
      // Normalize the gold parse string toward zemberek's analysis format.
      p = p.replaceAll("PCNom", "PCNOM");
      p = p.replaceAll("Pnon|Nom", "");
      p = p.replaceAll("\\+Pos\\+", "+");
      p = p.replaceAll("\\+Pos\\^DB", "^DB");
      p = p.replaceAll("[+]+", "+");
      p = p.replaceAll("[+]$", "");
      p = p.replaceAll("[+]\\^DB", "^DB");
      p = p.replaceAll("[.]", "");
      p = p.toLowerCase(Turkish.LOCALE);
      p = p.replaceAll("adverb", "adv");
      p = p.replaceAll("\\+cop\\+a3sg", "+a3sg+cop");
      p = p.replaceAll("\\+Unable", "^DB+Verb+Able+Neg");
      if (lookup.containsKey(p)) {
        p = lookup.get(p);
      }
      WordAnalysis a = morphology.analyze(s);
      if (!a.isCorrect()) {
        break;
      }
      // Pick the analysis whose converted form matches the normalized gold parse.
      SingleAnalysis best = null;
      for (SingleAnalysis analysis : a) {
        String of = convert(analysis);
        if (of.equals(p)) {
          best = analysis;
          break;
        }
      }
      if (best == null) {
        // Retry capitalized common nouns as proper nouns.
        if (Character.isUpperCase(s.charAt(0)) && (p.contains("+noun") && !p.contains("prop"))) {
          String pp = p.replaceFirst("\\+noun", "\\+noun+prop");
          for (SingleAnalysis analysis : a) {
            String of = convert(analysis);
            if (of.equals(pp)) {
              best = analysis;
              break;
            }
          }
        }
      }
      if (best == null) {
        parseFails.add(s + " " + p);
      } else {
        correctList.add(new SentenceWordAnalysis(best, a));
      }
    }
    // Keep the sentence only if every token was matched.
    if (correctList.size() == tokens.size()) {
      result.add(new SentenceAnalysis(sentenceData.sentence(), correctList));
    }
  }
  Scripts.saveUnambiguous(result, output);
  parseFails.removeSmaller(3);
  parseFails.saveSortedByCounts(Paths.get("parse-fails.txt"), " ");
  System.out.format("Full Sentence Match = %d in %d%n", result.size(), set.sentences.size());
}
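To see what the chain of replaceAll calls does, here is a hypothetical gold parse string traced through the relevant steps (the input is invented, and only the replacements that fire on it are shown):

String p = "ev+Noun+A3sg+Pnon+Nom"; // hypothetical gold parse
p = p.replaceAll("Pnon|Nom", "");   // "ev+Noun+A3sg++"
p = p.replaceAll("[+]+", "+");      // "ev+Noun+A3sg+"
p = p.replaceAll("[+]$", "");       // "ev+Noun+A3sg"
p = p.toLowerCase(Turkish.LOCALE);  // "ev+noun+a3sg"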
Use of zemberek.morphology.analysis.WordAnalysis in project zemberek-nlp by ahmetaa.
From class Scripts, method saveUnambiguous:
public static void saveUnambiguous(List<String> sentences, TurkishMorphology morphology, Path out)
    throws IOException {
  try (PrintWriter pwMorph = new PrintWriter(out.toFile(), "utf-8")) {
    for (String sentence : sentences) {
      SentenceAnalysis analysis = morphology.analyzeAndDisambiguate(sentence);
      // Skip sentences that contain any unknown word.
      if (analysis.bestAnalysis().stream().anyMatch(SingleAnalysis::isUnknown)) {
        continue;
      }
      pwMorph.format("S:%s%n", sentence);
      for (SentenceWordAnalysis sw : analysis) {
        WordAnalysis wa = sw.getWordAnalysis();
        pwMorph.println(wa.getInput());
        SingleAnalysis best = sw.getBestAnalysis();
        for (SingleAnalysis singleAnalysis : wa) {
          boolean isBest = singleAnalysis.equals(best);
          if (wa.analysisCount() == 1) {
            pwMorph.println(singleAnalysis.formatLong());
          } else {
            // For ambiguous words, mark the disambiguated (best) analysis with '*'.
            pwMorph.format("%s%s%n", singleAnalysis.formatLong(), isBest ? "*" : "");
          }
        }
      }
      pwMorph.println();
    }
  }
}
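A minimal sketch of driving saveUnambiguous; the sentence and output path are invented for illustration:

TurkishMorphology morphology = TurkishMorphology.createWithDefaults();
List<String> sentences = Collections.singletonList("Bugün hava çok güzel."); // hypothetical input
Scripts.saveUnambiguous(sentences, morphology, Paths.get("unambiguous.txt"));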
Use of zemberek.morphology.analysis.WordAnalysis in project zemberek-nlp by ahmetaa.
From class AmbiguityStats, method ambiguousWordStats:
public void ambiguousWordStats(String filename) throws IOException {
  List<String> lines = readAll(filename);
  Histogram<String> uniques = new Histogram<>(1000000);
  int total = 0;
  Splitter splitter = Splitter.on(" ").omitEmptyStrings().trimResults();
  for (String line : lines) {
    for (String s : splitter.split(line)) {
      List<WordAnalysis> results = parser.getWordAnalyzer().analyze(TurkishAlphabet.INSTANCE.normalize(s));
      total++;
      if (total % 50000 == 0) {
        System.out.println("Processed: " + total);
      }
      // A word is ambiguous if it has more than one analysis.
      if (results.size() > 1) {
        uniques.add(s);
      }
    }
  }
  System.out.println("Total: " + total);
  Stats st = new Stats(0.002);
  st.allCounts = (int) uniques.totalCount();
  st.allUniques = uniques.size();
  for (String s : uniques.getSortedList()) {
    int count = uniques.getCount(s);
    if (st.overCutoff(count)) {
      String p1 = percentStr3(count, st.allCounts);
      st.significantCounts += count;
      st.significantUniques++;
      System.out.println(s + " : " + count + " " + pp(p1));
    }
  }
  st.dump();
}
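For comparison, the same ambiguity test written against the current TurkishMorphology API (a sketch; the parser.getWordAnalyzer() call above belongs to an older zemberek API, and the example word is arbitrary):

TurkishMorphology morphology = TurkishMorphology.createWithDefaults();
WordAnalysis results = morphology.analyze("yüzü"); // arbitrary example word
if (results.analysisCount() > 1) {
  System.out.println("ambiguous: " + results.analysisCount() + " analyses");
}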