
Example 16 with AnalyzedSentence

Use of org.languagetool.AnalyzedSentence in project languagetool by languagetool-org.

From class GoogleTokenTest, method testTokenizationWithPosTag:

@Test
public void testTokenizationWithPosTag() throws IOException {
    JLanguageTool lt = new JLanguageTool(new PosTaggingDemo());
    AnalyzedSentence analyzedSentence = lt.getAnalyzedSentence("This, isn't a test.");
    List<GoogleToken> tokens = GoogleToken.getGoogleTokens(analyzedSentence, false, new MyWordTokenizer());
    assertThat(tokens.get(0).token, is("This"));
    assertThat(tokens.get(0).posTags.toString(), is("[This/DT]"));
    assertThat(tokens.get(1).token, is(","));
    assertThat(tokens.get(1).posTags.toString(), is("[,/null]"));
    assertThat(tokens.get(2).token, is("isn"));
    assertThat(tokens.get(3).token, is("'t"));
    assertThat(tokens.get(4).token, is("a"));
    assertThat(tokens.get(5).token, is("test"));
    assertThat(tokens.get(5).posTags.toString(), is("[test/NN]"));
    assertThat(tokens.get(6).token, is("."));
}
Also used: AnalyzedSentence (org.languagetool.AnalyzedSentence), JLanguageTool (org.languagetool.JLanguageTool), Test (org.junit.Test)
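
The PosTaggingDemo language and MyWordTokenizer appear to be helpers defined inside GoogleTokenTest itself. For comparison, here is a minimal sketch of obtaining an AnalyzedSentence with a stock language module instead; the class name and example text are illustrative only:

import java.io.IOException;
import org.languagetool.AnalyzedSentence;
import org.languagetool.JLanguageTool;
import org.languagetool.language.English;

public class AnalyzedSentenceDemo {
    public static void main(String[] args) throws IOException {
        // Tokenize, POS-tag, and disambiguate the text in one call.
        JLanguageTool lt = new JLanguageTool(new English());
        AnalyzedSentence sentence = lt.getAnalyzedSentence("This, isn't a test.");
        System.out.println(sentence);
    }
}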

Example 17 with AnalyzedSentence

Use of org.languagetool.AnalyzedSentence in project languagetool by languagetool-org.

From class EnglishPartialPosTagFilter, method tag:

@Override
protected List<AnalyzedTokenReadings> tag(String token) {
    try {
        List<AnalyzedTokenReadings> tags = tagger.tag(Collections.singletonList(token));
        AnalyzedTokenReadings[] atr = tags.toArray(new AnalyzedTokenReadings[tags.size()]);
        AnalyzedSentence disambiguated = disambiguator.disambiguate(new AnalyzedSentence(atr));
        return Arrays.asList(disambiguated.getTokens());
    } catch (IOException e) {
        throw new RuntimeException("Could not tag and disambiguate '" + token + "'", e);
    }
}
Also used: AnalyzedSentence (org.languagetool.AnalyzedSentence), IOException (java.io.IOException), AnalyzedTokenReadings (org.languagetool.AnalyzedTokenReadings)
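
The method above relies on tagger and disambiguator fields that the filter owns but that are not shown here. A minimal sketch of driving the same tag-then-disambiguate sequence directly, assuming both collaborators are taken from the Language object (the real filter may wire them up differently):

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import org.languagetool.AnalyzedSentence;
import org.languagetool.AnalyzedTokenReadings;
import org.languagetool.language.English;
import org.languagetool.tagging.Tagger;
import org.languagetool.tagging.disambiguation.Disambiguator;

public class PartialTagDemo {
    public static void main(String[] args) throws IOException {
        English english = new English();
        // Assumption: collaborators obtained straight from the Language object.
        Tagger tagger = english.getTagger();
        Disambiguator disambiguator = english.getDisambiguator();
        // Same steps as tag() above: tag a single token, then disambiguate it as a one-token sentence.
        List<AnalyzedTokenReadings> tags = tagger.tag(Collections.singletonList("walks"));
        AnalyzedSentence disambiguated = disambiguator.disambiguate(
                new AnalyzedSentence(tags.toArray(new AnalyzedTokenReadings[0])));
        System.out.println(Arrays.toString(disambiguated.getTokens()));
    }
}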

Example 18 with AnalyzedSentence

Use of org.languagetool.AnalyzedSentence in project languagetool by languagetool-org.

From class EnglishChunkerTest, method testContractions:

@Test
public void testContractions() throws Exception {
    JLanguageTool langTool = new JLanguageTool(new English());
    AnalyzedSentence analyzedSentence = langTool.getAnalyzedSentence("I'll be there");
    AnalyzedTokenReadings[] tokens = analyzedSentence.getTokens();
    assertThat(tokens[1].getChunkTags().get(0), is(new ChunkTag("B-NP-singular")));
    // "'" cannot be mapped as we tokenize differently
    assertThat(tokens[2].getChunkTags().size(), is(0));
    // "ll" cannot be mapped as we tokenize differently
    assertThat(tokens[3].getChunkTags().size(), is(0));
    assertThat(tokens[5].getChunkTags().get(0), is(new ChunkTag("I-VP")));
}
Also used: English (org.languagetool.language.English), AnalyzedSentence (org.languagetool.AnalyzedSentence), JLanguageTool (org.languagetool.JLanguageTool), AnalyzedTokenReadings (org.languagetool.AnalyzedTokenReadings), Test (org.junit.Test)
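
The numeric indices in these assertions count whitespace tokens too, and index 0 is the sentence-start marker, which is why "be" ends up at position 5. When an index is unclear, dumping every token with its position and chunk tags makes the layout visible; a minimal sketch:

import java.io.IOException;
import org.languagetool.AnalyzedSentence;
import org.languagetool.AnalyzedTokenReadings;
import org.languagetool.JLanguageTool;
import org.languagetool.language.English;

public class ChunkTagDump {
    public static void main(String[] args) throws IOException {
        JLanguageTool lt = new JLanguageTool(new English());
        AnalyzedSentence sentence = lt.getAnalyzedSentence("I'll be there");
        AnalyzedTokenReadings[] tokens = sentence.getTokens();
        // Print each token with its array index and chunk tags to see
        // which positions the assertions above refer to.
        for (int i = 0; i < tokens.length; i++) {
            System.out.println(i + "  '" + tokens[i].getToken() + "'  " + tokens[i].getChunkTags());
        }
    }
}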

Example 19 with AnalyzedSentence

Use of org.languagetool.AnalyzedSentence in project languagetool by languagetool-org.

From class EnglishChunkerTest, method testAddChunkTagsSingular:

@Test
public void testAddChunkTagsSingular() throws Exception {
    EnglishChunker chunker = new EnglishChunker();
    JLanguageTool lt = new JLanguageTool(new English());
    List<AnalyzedSentence> sentences = lt.analyzeText("The abacus shows how numbers can be stored");
    List<AnalyzedTokenReadings> readingsList = Arrays.asList(sentences.get(0).getTokens());
    chunker.addChunkTags(readingsList);
    // "The abacus":
    assertThat(readingsList.get(1).getChunkTags().toString(), is("[B-NP-singular]"));
    assertThat(readingsList.get(3).getChunkTags().toString(), is("[E-NP-singular]"));
    // "numbers":
    assertThat(readingsList.get(9).getChunkTags().toString(), is("[B-NP-plural, E-NP-plural]"));
}
Also used: English (org.languagetool.language.English), AnalyzedSentence (org.languagetool.AnalyzedSentence), JLanguageTool (org.languagetool.JLanguageTool), AnalyzedTokenReadings (org.languagetool.AnalyzedTokenReadings), Test (org.junit.Test)

Example 20 with AnalyzedSentence

Use of org.languagetool.AnalyzedSentence in project languagetool by languagetool-org.

From class LanguageToolFilter, method incrementToken:

@Override
public boolean incrementToken() throws IOException {
    if (posStack.size() > 0) {
        String pop = posStack.pop();
        restoreState(current);
        termAtt.append(pop);
        posIncrAtt.setPositionIncrement(0);
        typeAtt.setType("pos");
        return true;
    }
    if (tokenIter == null || !tokenIter.hasNext()) {
        // there are no remaining tokens from the current sentence... are there more sentences?
        if (input.incrementToken()) {
            // a new sentence is available: process it.
            String sentenceStr = termAtt.toString();
            collectedInput.append(sentenceStr);
            if (sentenceStr.length() >= 255) {
                // The upstream sentence tokenizer cuts sentences off at 255 chars, so keep
                // collecting the pieces here and analyze the concatenated text later.
                // See https://github.com/languagetool-org/languagetool/issues/364
                return true;
            } else {
                sentenceStr = collectedInput.toString();
                collectedInput.setLength(0);
            }
            AnalyzedSentence sentence = languageTool.getAnalyzedSentence(sentenceStr);
            List<AnalyzedTokenReadings> tokenBuffer = Arrays.asList(sentence.getTokens());
            tokenIter = tokenBuffer.iterator();
            /*
             * It should not be possible to have a sentence with 0 words, but check just in case.
             * Returning EOS isn't the best either, but it's the behavior of the original code.
             */
            if (!tokenIter.hasNext()) {
                return false;
            }
        } else {
            // no more sentences, end of stream!
            return false;
        }
    }
    // Clear the attributes, since we are about to emit new tokens.
    clearAttributes();
    AnalyzedTokenReadings tr = tokenIter.next();
    // add POS tag for sentence start.
    if (tr.isSentenceStart()) {
        // TODO: would be needed so negated tokens can match on something (see testNegatedMatchAtSentenceStart())
        // but breaks other cases:
        //termAtt.append("SENT_START");
        typeAtt.setType("pos");
        String posTag = tr.getAnalyzedToken(0).getPOSTag();
        String lemma = tr.getAnalyzedToken(0).getLemma();
        if (toLowerCase) {
            termAtt.append(POS_PREFIX.toLowerCase()).append(posTag.toLowerCase());
            if (lemma != null) {
                termAtt.append(LEMMA_PREFIX.toLowerCase()).append(lemma.toLowerCase());
            }
        } else {
            termAtt.append(POS_PREFIX).append(posTag);
            if (lemma != null) {
                termAtt.append(LEMMA_PREFIX).append(lemma);
            }
        }
        return true;
    }
    // Skip over whitespace tokens.
    if (tr.isWhitespace()) {
        return this.incrementToken();
    }
    offsetAtt.setOffset(tr.getStartPos(), tr.getEndPos());
    for (AnalyzedToken token : tr) {
        if (token.getPOSTag() != null) {
            if (toLowerCase) {
                posStack.push(POS_PREFIX.toLowerCase() + token.getPOSTag().toLowerCase());
            } else {
                posStack.push(POS_PREFIX + token.getPOSTag());
            }
        }
        if (token.getLemma() != null) {
            if (toLowerCase) {
                posStack.push(LEMMA_PREFIX.toLowerCase() + token.getLemma().toLowerCase());
            } else {
                // chances are good this is the same for all loop iterations, store it anyway...
                posStack.push(LEMMA_PREFIX + token.getLemma());
            }
        }
    }
    current = captureState();
    if (toLowerCase) {
        termAtt.append(tr.getAnalyzedToken(0).getToken().toLowerCase());
    } else {
        termAtt.append(tr.getAnalyzedToken(0).getToken());
    }
    return true;
}
Also used: AnalyzedSentence (org.languagetool.AnalyzedSentence), AnalyzedToken (org.languagetool.AnalyzedToken), AnalyzedTokenReadings (org.languagetool.AnalyzedTokenReadings)
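
Stripped of the Lucene attribute handling, the heart of incrementToken is a walk over each token's readings that emits a prefixed term for every POS tag and lemma. Below is a minimal standalone sketch of that traversal; the prefix values are placeholders, since the filter's POS_PREFIX and LEMMA_PREFIX constants are not shown in the excerpt above:

import java.io.IOException;
import org.languagetool.AnalyzedSentence;
import org.languagetool.AnalyzedToken;
import org.languagetool.AnalyzedTokenReadings;
import org.languagetool.JLanguageTool;
import org.languagetool.language.English;

public class ReadingsWalkDemo {
    // Placeholder prefixes; the real POS_PREFIX/LEMMA_PREFIX values are not shown in the filter excerpt.
    private static final String POS_PREFIX = "_pos_";
    private static final String LEMMA_PREFIX = "_lemma_";

    public static void main(String[] args) throws IOException {
        JLanguageTool lt = new JLanguageTool(new English());
        AnalyzedSentence sentence = lt.getAnalyzedSentence("A short example sentence.");
        for (AnalyzedTokenReadings tr : sentence.getTokens()) {
            if (tr.isWhitespace()) {
                continue;  // the filter skips whitespace tokens the same way
            }
            System.out.println(tr.getToken() + " [" + tr.getStartPos() + "-" + tr.getEndPos() + "]");
            for (AnalyzedToken reading : tr) {
                if (reading.getPOSTag() != null) {
                    System.out.println("  " + POS_PREFIX + reading.getPOSTag());
                }
                if (reading.getLemma() != null) {
                    System.out.println("  " + LEMMA_PREFIX + reading.getLemma());
                }
            }
        }
    }
}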

Aggregations

AnalyzedSentence (org.languagetool.AnalyzedSentence): 40
AnalyzedTokenReadings (org.languagetool.AnalyzedTokenReadings): 21
ArrayList (java.util.ArrayList): 8
Test (org.junit.Test): 8
JLanguageTool (org.languagetool.JLanguageTool): 8
RuleMatch (org.languagetool.rules.RuleMatch): 8
Rule (org.languagetool.rules.Rule): 5
IOException (java.io.IOException): 4
DisambiguationPatternRule (org.languagetool.tagging.disambiguation.rules.DisambiguationPatternRule): 4
English (org.languagetool.language.English): 3
SpellingCheckRule (org.languagetool.rules.spelling.SpellingCheckRule): 3
AnalyzedToken (org.languagetool.AnalyzedToken): 2
Ukrainian (org.languagetool.language.Ukrainian): 2
InputStream (java.io.InputStream): 1
Document (org.apache.lucene.document.Document): 1
ConfusionSet (org.languagetool.rules.ConfusionSet): 1
CorrectExample (org.languagetool.rules.CorrectExample): 1
IncorrectExample (org.languagetool.rules.IncorrectExample): 1
BitextRule (org.languagetool.rules.bitext.BitextRule): 1
ConfusionProbabilityRule (org.languagetool.rules.ngrams.ConfusionProbabilityRule): 1