Use of org.languagetool.languagemodel.LuceneLanguageModel in project languagetool by languagetool-org.
The class EnglishConfusionProbabilityRuleTest, method testRule:
@Test
@Ignore
public void testRule() throws IOException {
  File indexDir = new File("/data/google-ngram-index");
  if (!indexDir.exists()) {
    throw new RuntimeException("ngram data not found at " + indexDir + ", get it at http://wiki.languagetool.org/finding-errors-using-big-data");
  }
  rule = new EnglishConfusionProbabilityRule(TestTools.getEnglishMessages(), new LuceneLanguageModel(indexDir), english);
  Replacement theirThere = new Replacement("there", "their");
  assertMatch("Is their a telephone anywhere?", theirThere);
  assertMatch("I can't remember how to go their.", theirThere);
  assertMatch("Can you please tell me why their seems to be two churches in every village?", theirThere);
  assertMatch("Why do American parents praise there children?", theirThere);
  assertMatch("The British supplied there native allies with muskets, gunpowder and advice.", theirThere);
  Replacement knowNow = new Replacement("know", "now");
  assertMatch("From know on let us study in the morning.", knowNow);
  assertMatch("I am from Hiroshima, but know I live in Tokyo.", knowNow);
  assertMatch("I didn't now where it came from.", knowNow);
  assertMatch("Let me now if I need to make any changes.", knowNow);
  Replacement fourFor = new Replacement("four", "for");
  assertMatch("This gives us a minimum date four the age of Afroasiatic.", fourFor);
  assertMatch("Agassi admitted that he used and tested positive four methamphetamine in 1997.", fourFor);
  assertMatch("Alabama has for of the world's largest stadiums.", fourFor);
  assertMatch("There are no male actors and the for leading actresses dubbed themselves in the Castilian version.", fourFor);
}
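The Replacement holder and the assertMatch helper are defined elsewhere in the test class; a minimal sketch of what they might look like, reusing the rule and english fields from the test above (field names and the single-match assertion are assumptions, not the project's actual helpers):

static class Replacement {
  final String word1;
  final String word2;
  Replacement(String word1, String word2) {
    this.word1 = word1;
    this.word2 = word2;
  }
}

private void assertMatch(String sentence, Replacement replacement) throws IOException {
  JLanguageTool lt = new JLanguageTool(english);
  // The confusion rule should flag the misused word of the pair in the sentence.
  RuleMatch[] matches = rule.match(lt.getAnalyzedSentence(sentence));
  assertEquals("Expected exactly one match for: " + sentence, 1, matches.length);
}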
Use of org.languagetool.languagemodel.LuceneLanguageModel in project languagetool by languagetool-org.
The class AllConfusionRulesEvaluator, method main:
public static void main(String[] args) throws IOException {
  if (args.length < 3 || args.length > 4) {
    System.err.println("Usage: " + ConfusionRuleEvaluator.class.getSimpleName() + " <langCode> <languageModelTopDir> <wikipediaXml|tatoebaFile|dir>...");
    System.err.println(" <languageModelTopDir> is a directory with sub-directories '1grams', '2grams', and '3grams' with Lucene indexes");
    System.err.println(" <wikipediaXml|tatoebaFile|dir> either a Wikipedia XML dump, or a Tatoeba file or");
    System.err.println(" a directory with example sentences (where <word>.txt contains only the sentences for <word>).");
    System.err.println(" You can specify both a Wikipedia file and a Tatoeba file.");
    System.exit(1);
  }
  Language lang;
  if ("en".equals(args[0])) {
    lang = new ConfusionRuleEvaluator.EnglishLight();
  } else {
    lang = Languages.getLanguageForShortCode(args[0]);
  }
  LanguageModel languageModel = new LuceneLanguageModel(new File(args[1]));
  List<String> inputsFiles = new ArrayList<>();
  inputsFiles.add(args[2]);
  if (args.length >= 4) {
    inputsFiles.add(args[3]);
  }
  ConfusionRuleEvaluator eval = new ConfusionRuleEvaluator(lang, languageModel, false);
  eval.setVerboseMode(false);
  ConfusionSetLoader confusionSetLoader = new ConfusionSetLoader();
  InputStream inputStream = JLanguageTool.getDataBroker().getFromResourceDirAsStream("/en/confusion_sets.txt");
  Map<String, List<ConfusionSet>> confusionSetMap = confusionSetLoader.loadConfusionSet(inputStream);
  Set<String> done = new HashSet<>();
  int fMeasureCount = 0;
  float fMeasureTotal = 0;
  for (List<ConfusionSet> entry : confusionSetMap.values()) {
    for (ConfusionSet confusionSet : entry) {
      Set<ConfusionString> set = confusionSet.getSet();
      if (set.size() != 2) {
        System.out.println("Skipping confusion set with size != 2: " + confusionSet);
      } else {
        Iterator<ConfusionString> iterator = set.iterator();
        ConfusionString set1 = iterator.next();
        ConfusionString set2 = iterator.next();
        String word1 = set1.getString();
        String word2 = set2.getString();
        String key = word1 + " " + word2;
        if (!done.contains(key)) {
          Map<Long, ConfusionRuleEvaluator.EvalResult> evalResults = eval.run(inputsFiles, word1, word2, MAX_SENTENCES, Arrays.asList(confusionSet.getFactor()));
          ConfusionRuleEvaluator.EvalResult evalResult = evalResults.values().iterator().next();
          String summary1 = set1.getDescription() != null ? word1 + "|" + set1.getDescription() : word1;
          String summary2 = set2.getDescription() != null ? word2 + "|" + set2.getDescription() : word2;
          String start;
          if (summary1.compareTo(summary2) < 0) {
            start = summary1 + "; " + summary2 + "; " + confusionSet.getFactor();
          } else {
            start = summary2 + "; " + summary1 + "; " + confusionSet.getFactor();
          }
          String spaces = StringUtils.repeat(" ", 82 - start.length());
          System.out.println(start + spaces + "# " + evalResult.getSummary());
          double fMeasure = FMeasure.getWeightedFMeasure(evalResult.getPrecision(), evalResult.getRecall());
          //System.out.println("f-measure: " + fMeasure);
          fMeasureCount++;
          fMeasureTotal += fMeasure;
        }
        done.add(key);
      }
    }
  }
  System.out.println("Average f-measure: " + (fMeasureTotal / fMeasureCount));
}
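For illustration, a programmatic invocation of this main method might look like the following sketch; the n-gram index location and corpus file are placeholder paths, not part of the project:

// Runs the evaluator over all pairs from confusion_sets.txt; the second argument
// must contain the '1grams', '2grams', and '3grams' sub-directories.
AllConfusionRulesEvaluator.main(new String[] {
  "en",                            // langCode
  "/data/google-ngram-index/en",   // languageModelTopDir (hypothetical path)
  "/data/corpus/tatoeba-en.txt"    // Tatoeba file or directory with example sentences (hypothetical path)
});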
Use of org.languagetool.languagemodel.LuceneLanguageModel in project languagetool by languagetool-org.
The class AutomaticConfusionRuleEvaluator, method run:
private void run(List<String> lines, File indexDir) throws IOException {
  Language language = Languages.getLanguageForShortCode(LANGUAGE);
  LanguageModel lm = new LuceneLanguageModel(indexDir);
  ConfusionRuleEvaluator evaluator = new ConfusionRuleEvaluator(language, lm, CASE_SENSITIVE);
  for (String line : lines) {
    if (line.contains("#")) {
      System.out.println("Ignoring: " + line);
      continue;
    }
    String[] parts = line.split(";\\s*");
    if (parts.length != 2) {
      throw new IOException("Expected semicolon-separated input: " + line);
    }
    try {
      int i = 1;
      for (String part : parts) {
        // compare pair-wise - maybe we should compare every item with every other item?
        if (i < parts.length) {
          runOnPair(evaluator, line, removeComment(part), removeComment(parts[i]));
        }
        i++;
      }
    } catch (RuntimeException e) {
      e.printStackTrace();
    }
  }
  System.out.println("Done. Ignored items because they are already known: " + ignored);
}
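The method expects each input line to hold exactly one semicolon-separated candidate pair; lines containing "#" are skipped, as the check above shows. A hypothetical input could look like this (pairs taken from the test further above; the exact syntax handled by removeComment is not visible in this snippet):

their; there
know; now
four; for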
Use of org.languagetool.languagemodel.LuceneLanguageModel in project languagetool by languagetool-org.
The class ConfusionRuleEvaluator, method main:
public static void main(String[] args) throws IOException {
  if (args.length < 5 || args.length > 6) {
    System.err.println("Usage: " + ConfusionRuleEvaluator.class.getSimpleName() + " <token> <homophoneToken> <langCode> <languageModelTopDir> <wikipediaXml|tatoebaFile|plainTextFile|dir>...");
    System.err.println(" <languageModelTopDir> is a directory with sub-directories like 'en' which then again contain '1grams',");
    System.err.println(" '2grams', and '3grams' sub directories with Lucene indexes");
    System.err.println(" See http://wiki.languagetool.org/finding-errors-using-n-gram-data");
    System.err.println(" <wikipediaXml|tatoebaFile|plainTextFile|dir> either a Wikipedia XML dump, or a Tatoeba file, or");
    System.err.println(" a plain text file with one sentence per line, or a directory with");
    System.err.println(" example sentences (where <word>.txt contains only the sentences for <word>).");
    System.err.println(" You can specify both a Wikipedia file and a Tatoeba file.");
    System.exit(1);
  }
  long startTime = System.currentTimeMillis();
  String token = args[0];
  String homophoneToken = args[1];
  String langCode = args[2];
  Language lang;
  if ("en".equals(langCode)) {
    lang = new EnglishLight();
  } else {
    lang = Languages.getLanguageForShortCode(langCode);
  }
  LanguageModel languageModel = new LuceneLanguageModel(new File(args[3], lang.getShortCode()));
  //LanguageModel languageModel = new BerkeleyRawLanguageModel(new File("/media/Data/berkeleylm/google_books_binaries/ger.blm.gz"));
  //LanguageModel languageModel = new BerkeleyLanguageModel(new File("/media/Data/berkeleylm/google_books_binaries/ger.blm.gz"));
  List<String> inputsFiles = new ArrayList<>();
  inputsFiles.add(args[4]);
  if (args.length >= 6) {
    inputsFiles.add(args[5]);
  }
  ConfusionRuleEvaluator generator = new ConfusionRuleEvaluator(lang, languageModel, CASE_SENSITIVE);
  generator.run(inputsFiles, token, homophoneToken, MAX_SENTENCES, EVAL_FACTORS);
  long endTime = System.currentTimeMillis();
  System.out.println("\nTime: " + (endTime - startTime) + "ms");
}
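Again for illustration, a programmatic call with placeholder paths; only the argument order is taken from the usage message above:

// Evaluates the pair there/their; note that the code itself appends the 'en'
// sub-directory to the language model top dir.
ConfusionRuleEvaluator.main(new String[] {
  "there",                       // token
  "their",                       // homophoneToken
  "en",                          // langCode
  "/data/google-ngram-index",    // languageModelTopDir (hypothetical path)
  "/data/corpus/tatoeba-en.txt"  // sentence source (hypothetical path)
});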
Use of org.languagetool.languagemodel.LuceneLanguageModel in project languagetool by languagetool-org.
The class LanguageModelSanityTest, method testEnglishLanguageModelSanity:
@Test
@Ignore("Interactive use only, requires local ngram index")
public void testEnglishLanguageModelSanity() throws IOException {
  LuceneLanguageModel lm = new LuceneLanguageModel(new File(NGRAM_DIR));
  // 1gram:
  assertMatches(lm, "the");
  assertMatches(lm, "The");
  assertMatches(lm, ",");
  assertMatches(lm, "0");
  assertMatches(lm, "1");
  assertMatches(lm, "2");
  assertMatches(lm, "3");
  assertMatches(lm, "4");
  assertMatches(lm, "5");
  assertMatches(lm, "6");
  assertMatches(lm, "7");
  assertMatches(lm, "8");
  assertMatches(lm, "9");
  assertMatches(lm, ":");
  assertMatches(lm, "(");
  assertMatches(lm, ")");
  assertMatches(lm, "£");
  // 2gram:
  assertMatches(lm, "the man");
  assertMatches(lm, "The man");
  assertMatches(lm, "_START_ the");
  assertMatches(lm, "_START_ The");
  assertMatches(lm, "it _END_");
  assertMatches(lm, "it .");
  assertMatches(lm, "Also ,");
  assertMatches(lm, "is 0");
  assertMatches(lm, ": it");
  assertMatches(lm, "( it");
  assertMatches(lm, "it )");
  assertMatches(lm, "£ 5");
  // 3gram:
  assertMatches(lm, "the man who");
  assertMatches(lm, "The man who");
  assertMatches(lm, "_START_ The man");
  assertMatches(lm, "it was _END_");
  assertMatches(lm, "it was .");
  assertMatches(lm, "Also , it");
  assertMatches(lm, "it is 0");
  assertMatches(lm, ": it is");
  assertMatches(lm, "( it is");
  assertMatches(lm, "it is )");
  assertMatches(lm, "five - pound");
  assertMatches(lm, "is £ 5");
  assertMatches(lm, "it 's a");
  assertMatches(lm, "it ' s");
  // 4gram:
  assertMatches(lm, "the man who could");
  assertMatches(lm, "The man who could");
  assertMatches(lm, "five - pound note");
  assertMatches(lm, "_START_ The man who");
  assertMatches(lm, "which it was _END_");
  assertMatches(lm, "Also , it is");
  assertMatches(lm, "when it is 0");
  assertMatches(lm, "it is £ 5");
}
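The assertMatches helper is not part of this snippet. A minimal sketch of what such a check could look like, assuming it only verifies that the n-gram occurs in the index at all (the use of getCount and the non-zero threshold are assumptions, not the project's actual implementation):

private void assertMatches(LuceneLanguageModel lm, String ngram) {
  // Tokens in the index are space-separated, including _START_, _END_, and punctuation.
  long count = lm.getCount(Arrays.asList(ngram.split(" ")));
  assertTrue("Expected a non-zero count for: " + ngram, count > 0);
}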