Example usage of zemberek.morphology.analysis.WordAnalysis in the zemberek-nlp project by ahmetaa.
From class DistanceBasedStemmer, method findStems:
/**
 * Prints stem candidates for every word of the input sentence, scored by distributional
 * distance against the word's two-word context window, then prints the sentence
 * analyzer's disambiguation result for comparison. Output goes to the log only.
 *
 * @param str raw input sentence; boundary tokens are added internally.
 */
public void findStems(String str) {
  // Pad with two boundary tokens on each side so every real word at index i
  // has valid neighbors at i-2 .. i+2.
  str = "<s> <s> " + str + " </s> </s>";
  SentenceAnalysis analysis = sentenceAnalyzer.analyze(str);
  for (int i = 2; i < analysis.size() - 2; i++) {
    String s = analysis.getInput(i);
    // Context = two words on each side, normalized.
    List<String> bigramContext = Lists.newArrayList(
        normalize(analysis.getInput(i - 1)),
        normalize(analysis.getInput(i - 2)),
        normalize(analysis.getInput(i + 1)),
        normalize(analysis.getInput(i + 2)));
    // Candidate stems: normalized lemmas of all morphological parses of the word.
    Set<String> stems = new HashSet<>();
    List<WordAnalysis> wordResults = analysis.getParses(i);
    for (WordAnalysis a : wordResults) {
      stems.add(normalize(a.getLemma()));
    }
    List<ScoredItem<String>> scores = new ArrayList<>();
    for (String stem : stems) {
      if (!distances.containsWord(stem)) {
        Log.info("Cannot find %s in vocab.", stem);
        continue;
      }
      // Renamed from `distances` to avoid shadowing the field of the same name.
      List<WordDistances.Distance> stemDistances = distances.getDistance(stem);
      float score = totalDistance(stem, bigramContext);
      // Accumulate distance of the word to the stem's nearest neighbor words.
      // NOTE(review): `k++ == 10` stops after the 11th element — confirm 11
      // (rather than 10) neighbors is intended.
      int k = 0;
      for (WordDistances.Distance distance : stemDistances) {
        score += distance(s, distance.word);
        if (k++ == 10) {
          break;
        }
      }
      scores.add(new ScoredItem<>(stem, score));
    }
    Collections.sort(scores);
    Log.info("%n%s : ", s);
    for (ScoredItem<String> score : scores) {
      Log.info("Lemma = %s Score = %.7f", score.item, score.score);
    }
  }
  Log.info("==== Z disambiguation result ===== ");
  sentenceAnalyzer.disambiguate(analysis);
  for (SentenceAnalysis.Entry a : analysis) {
    Log.info("%n%s : ", a.input);
    // LinkedHashSet de-duplicates while preserving first-seen order.
    LinkedHashSet<String> items = new LinkedHashSet<>();
    for (WordAnalysis wa : a.parses) {
      items.add(wa.dictionaryItem.toString());
    }
    for (String item : items) {
      Log.info("%s", item);
    }
  }
}
Example usage of zemberek.morphology.analysis.WordAnalysis in the zemberek-nlp project by ahmetaa.
From class WordHistogram, method generateHistograms:
/**
 * Analyzes and disambiguates every sentence of the given paragraphs, then writes
 * frequency histograms of roots (dictionary lemmas) and surface word forms under
 * {@code outRoot}: sorted by count and by key, plus a count>=10 filtered word set.
 *
 * @param paragraphs input text, one paragraph per element
 * @param outRoot output directory; created if it does not exist
 * @throws IOException if the output files cannot be written
 */
static void generateHistograms(List<String> paragraphs, Path outRoot) throws IOException {
  TurkishMorphology morphology = TurkishMorphology.builder()
      .addDefaultDictionaries()
      .cacheParameters(75_000, 150_000)
      .build();
  TurkishSentenceAnalyzer analyzer =
      new TurkishSentenceAnalyzer(morphology, new Z3MarkovModelDisambiguator());
  Histogram<String> roots = new Histogram<>(1_000_000);
  Histogram<String> words = new Histogram<>(1_000_000);
  int paragraphCounter = 0;
  int sentenceCounter = 0;
  int tokenCounter = 0;
  for (String paragraph : paragraphs) {
    List<String> sentences = TurkishSentenceExtractor.DEFAULT.fromParagraph(paragraph);
    sentenceCounter += sentences.size();
    for (String sentence : sentences) {
      List<Token> tokens = TurkishTokenizer.DEFAULT.tokenize(sentence);
      tokenCounter += tokens.size();
      SentenceAnalysis analysis = analyzer.analyze(sentence);
      analyzer.disambiguate(analysis);
      for (SentenceAnalysis.Entry e : analysis) {
        // After disambiguation, the first parse is the best one.
        WordAnalysis best = e.parses.get(0);
        if (best.getPos() == PrimaryPos.Numeral || best.getPos() == PrimaryPos.Punctuation) {
          continue;
        }
        if (best.isUnknown()) {
          continue;
        }
        // Skip runtime-generated parses whose surface form contains a digit.
        if (best.isRuntime() && !Strings.containsNone(e.input, "01234567890")) {
          continue;
        }
        List<String> lemmas = best.getLemmas();
        if (lemmas.isEmpty()) {
          continue;
        }
        roots.add(best.getDictionaryItem().lemma);
        // Proper nouns are capitalized; everything else is lowercased with the
        // Turkish locale (dotted/dotless i handling).
        String w = e.input;
        if (best.getDictionaryItem().secondaryPos != SecondaryPos.ProperNoun) {
          w = w.toLowerCase(Turkish.LOCALE);
        } else {
          w = Turkish.capitalize(w);
        }
        words.add(w);
      }
    }
    paragraphCounter++;
    if (paragraphCounter % 1000 == 0) {
      System.out.println(paragraphCounter + " of " + paragraphs.size());
    }
  }
  System.out.println("tokenCounter = " + tokenCounter);
  System.out.println("sentenceCounter = " + sentenceCounter);
  Files.createDirectories(outRoot);
  roots.saveSortedByCounts(outRoot.resolve("roots.freq.txt"), " ");
  roots.saveSortedByKeys(outRoot.resolve("roots.keys.txt"), " ", Turkish.STRING_COMPARATOR_ASC);
  words.saveSortedByCounts(outRoot.resolve("words.freq.txt"), " ");
  words.saveSortedByKeys(outRoot.resolve("words.keys.txt"), " ", Turkish.STRING_COMPARATOR_ASC);
  words.removeSmaller(10);
  words.saveSortedByCounts(outRoot.resolve("words10.freq.txt"), " ");
  words.saveSortedByKeys(outRoot.resolve("words10.keys.txt"), " ", Turkish.STRING_COMPARATOR_ASC);
}
Example usage of zemberek.morphology.analysis.WordAnalysis in the zemberek-nlp project by ahmetaa.
From class CategoryPredictionExperiment, method generateSets:
/**
 * Builds fastText-style train/test category sets from a web corpus. Each output
 * line is: {@code #<docId> __label__<category> <content>}. Categories occurring
 * fewer than 20 times are dropped; numeric/punctuation/unknown tokens are removed.
 *
 * @param input corpus file to load documents from
 * @param train output path for the training split
 * @param test output path for the test split
 * @param useOnlyTitle if true, only document titles are used as content
 * @param useRoots if true, tokens are replaced by their last (longest) lemma
 * @throws IOException if the corpus cannot be read or sets cannot be written
 */
private void generateSets(Path input, Path train, Path test, boolean useOnlyTitle, boolean useRoots) throws IOException {
  TurkishMorphology morphology = TurkishMorphology.createWithDefaults();
  TurkishSentenceAnalyzer analyzer =
      new TurkishSentenceAnalyzer(morphology, new Z3MarkovModelDisambiguator());
  WebCorpus corpus = new WebCorpus("category", "category");
  Log.info("Loading corpus from %s", input);
  corpus.addDocuments(WebCorpus.loadDocuments(input));
  List<String> set = new ArrayList<>(corpus.documentCount());
  TurkishTokenizer lexer = TurkishTokenizer.DEFAULT;
  // First pass: count category frequencies so rare labels can be discarded.
  Histogram<String> categoryCounts = new Histogram<>();
  for (WebDocument document : corpus.getDocuments()) {
    String category = document.getCategory();
    if (!category.isEmpty()) {
      categoryCounts.add(category);
    }
  }
  Log.info("All category count = %d", categoryCounts.size());
  categoryCounts.removeSmaller(20);
  Log.info("Reduced label count = %d", categoryCounts.size());
  Log.info("Extracting data from %d documents ", corpus.documentCount());
  int c = 0;
  for (WebDocument document : corpus.getDocuments()) {
    if (document.getCategory().isEmpty()) {
      continue;
    }
    if (useOnlyTitle && document.getTitle().isEmpty()) {
      continue;
    }
    String content = document.getContentAsString();
    String title = document.getTitle();
    List<Token> docTokens = useOnlyTitle ? lexer.tokenize(title) : lexer.tokenize(content);
    List<String> reduced = new ArrayList<>(docTokens.size());
    String category = document.getCategory();
    if (categoryCounts.contains(category)) {
      // fastText label format; spaces in the category become underscores.
      category = "__label__" + category.replaceAll("[ ]+", "_").toLowerCase(Turkish.LOCALE);
    } else {
      continue; // category was pruned as too rare.
    }
    for (Token token : docTokens) {
      // Drop token types that carry no lexical signal for classification.
      if (token.getType() == TurkishLexer.PercentNumeral
          || token.getType() == TurkishLexer.Number
          || token.getType() == TurkishLexer.Punctuation
          || token.getType() == TurkishLexer.RomanNumeral
          || token.getType() == TurkishLexer.Time
          || token.getType() == TurkishLexer.UnknownWord
          || token.getType() == TurkishLexer.Unknown) {
        continue;
      }
      reduced.add(token.getText());
    }
    String join = String.join(" ", reduced);
    if (useRoots) {
      // Replace each word with the last lemma of its best (disambiguated) parse.
      SentenceAnalysis analysis = analyzer.analyze(join);
      analyzer.disambiguate(analysis);
      List<String> res = new ArrayList<>();
      for (SentenceAnalysis.Entry e : analysis) {
        WordAnalysis best = e.parses.get(0);
        if (best.isUnknown()) {
          res.add(e.input); // keep unknown words as-is.
          continue;
        }
        List<String> lemmas = best.getLemmas();
        if (lemmas.isEmpty()) {
          continue;
        }
        res.add(lemmas.get(lemmas.size() - 1));
      }
      join = String.join(" ", res);
    }
    set.add("#" + document.getId() + " " + category + " "
        + join.replaceAll("[']", "").toLowerCase(Turkish.LOCALE));
    if (c++ % 1000 == 0) {
      Log.info("%d of %d processed.", c, corpus.documentCount());
    }
  }
  Log.info("Generate train and test set.");
  saveSets(train, test, new LinkedHashSet<>(set));
}
Example usage of zemberek.morphology.analysis.WordAnalysis in the zemberek-nlp project by ahmetaa.
From class UnsupervisedKeyPhraseExtractor, method collectCorpusStatisticsForLemmas:
/**
 * Collects lemma-based term and document frequency statistics over a web corpus.
 * For each document, every acceptable non-stopword parse contributes its last
 * (longest) lemma to the per-document histogram.
 *
 * @param corpus documents to process
 * @param analyzer analyzer used to obtain the best parse of each sentence
 * @param count maximum number of documents to process; non-positive means all
 * @return aggregated term/document frequencies and the processed document count
 * @throws IOException if corpus access fails
 */
static CorpusStatistics collectCorpusStatisticsForLemmas(WebCorpus corpus, TurkishSentenceAnalyzer analyzer, int count) throws IOException {
  CorpusStatistics statistics = new CorpusStatistics(1_000_000);
  int docCount = 0;
  for (WebDocument document : corpus.getDocuments()) {
    Histogram<String> docHistogram = new Histogram<>();
    List<String> sentences = extractor.fromParagraphs(document.getLines());
    for (String sentence : sentences) {
      List<WordAnalysis> analysis = analyzer.bestParse(sentence);
      for (WordAnalysis w : analysis) {
        if (!analysisAcceptable(w)) {
          continue;
        }
        String s = w.getSurfaceForm();
        if (TurkishStopWords.DEFAULT.contains(s)) {
          continue;
        }
        List<String> lemmas = w.getLemmas();
        // Guard against parses with no lemma; without it, get(size-1) throws
        // IndexOutOfBoundsException (other call sites in this file check this too).
        if (lemmas.isEmpty()) {
          continue;
        }
        docHistogram.add(lemmas.get(lemmas.size() - 1));
      }
    }
    statistics.termFrequencies.add(docHistogram);
    // Each distinct lemma of the document counts once toward document frequency.
    for (String s : docHistogram) {
      statistics.documentFrequencies.add(s);
    }
    if (docCount++ % 500 == 0) {
      Log.info("Doc count = %d", docCount);
    }
    if (count > 0 && docCount > count) {
      break;
    }
  }
  statistics.documentCount =
      count > 0 ? Math.min(count, corpus.documentCount()) : corpus.documentCount();
  return statistics;
}
Example usage of zemberek.morphology.analysis.WordAnalysis in the zemberek-nlp project by ahmetaa.
From class UnsupervisedKeyPhraseExtractor, method lemmaNgrams:
/**
 * Builds lemma n-gram histograms for orders 1..{@code order} from the given
 * paragraphs. An n-gram is rejected if any of its words has an unacceptable
 * parse, is a stopword, or has no lemma. The first occurrence index of each
 * term records its token position within the whole input.
 *
 * @param paragraphs input text, one paragraph per element
 * @return one histogram per order; index i holds (i+1)-grams
 */
private List<Histogram<Term>> lemmaNgrams(List<String> paragraphs) {
  List<Histogram<Term>> ngrams = new ArrayList<>(order + 1);
  for (int i = 0; i < order; i++) {
    ngrams.add(new Histogram<>(100));
  }
  // Running token offset of the current sentence within the whole input.
  int tokenCount = 0;
  List<String> sentences = extractor.fromParagraphs(paragraphs);
  for (String sentence : sentences) {
    List<WordAnalysis> analysis = sentenceAnalyzer.bestParse(sentence);
    for (int i = 0; i < order; i++) {
      int currentOrder = i + 1;
      // `<=` so the n-gram ending at the last token is included; the original
      // `<` silently dropped the final n-gram of every sentence.
      for (int j = 0; j <= analysis.size() - currentOrder; j++) {
        String[] words = new String[currentOrder];
        boolean fail = false;
        for (int k = 0; k < currentOrder; k++) {
          WordAnalysis a = analysis.get(j + k);
          if (!analysisAcceptable(a)) {
            fail = true;
            break;
          }
          String surface = a.getSurfaceForm();
          if (TurkishStopWords.DEFAULT.contains(surface)) {
            fail = true;
            break;
          }
          List<String> lemmas = a.getLemmas();
          // Guard against parses with no lemma (would throw on get(size-1)).
          if (lemmas.isEmpty()) {
            fail = true;
            break;
          }
          words[k] = lemmas.get(lemmas.size() - 1);
        }
        if (!fail) {
          Term term = new Term(words);
          int count = ngrams.get(i).add(term);
          if (count == 1) {
            // First sighting: remember the token position of this n-gram.
            term.setFirstOccurrenceIndex(tokenCount + j);
          }
        }
      }
    }
    // Advance the token offset once per sentence. The original incremented this
    // inside the position loop, adding analysis.size() once per n-gram position
    // per order and corrupting first-occurrence indices.
    tokenCount += analysis.size();
  }
  return ngrams;
}
Aggregations