
Example 56 with MockTokenizer

Use of org.apache.lucene.analysis.MockTokenizer in project lucene-solr by apache.

From the class TestHunspellStemFilter, method testRandomStrings.

/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
    Analyzer analyzer = new Analyzer() {

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false); // whitespace tokenization, no lowercasing
            return new TokenStreamComponents(tokenizer, new HunspellStemFilter(tokenizer, dictionary));
        }
    };
    checkRandomData(random(), analyzer, 1000 * RANDOM_MULTIPLIER);
    analyzer.close();
}
Also used : MockTokenizer(org.apache.lucene.analysis.MockTokenizer) Analyzer(org.apache.lucene.analysis.Analyzer) Tokenizer(org.apache.lucene.analysis.Tokenizer) KeywordTokenizer(org.apache.lucene.analysis.core.KeywordTokenizer)
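
The Also used list above also names KeywordTokenizer. A common companion check in these Lucene analysis tests feeds an empty term through KeywordTokenizer plus the filter; the following is a minimal sketch, assuming the standard BaseTokenStreamTestCase helpers and the same dictionary field are available (the method name is illustrative):

public void testEmptyTerm() throws IOException {
    Analyzer a = new Analyzer() {

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            // KeywordTokenizer emits the whole input as a single token
            Tokenizer tokenizer = new KeywordTokenizer();
            return new TokenStreamComponents(tokenizer, new HunspellStemFilter(tokenizer, dictionary));
        }
    };
    // checkOneTerm analyzes "" and asserts that the single output term is also ""
    checkOneTerm(a, "", "");
    a.close();
}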

Example 57 with MockTokenizer

Use of org.apache.lucene.analysis.MockTokenizer in project lucene-solr by apache.

From the class TestHunspellStemFilter, method testLongestOnly.

/** simple test for longestOnly option */
public void testLongestOnly() throws IOException {
    MockTokenizer tokenizer = whitespaceMockTokenizer("lucene is awesome");
    tokenizer.setEnableChecks(true);
    HunspellStemFilter filter = new HunspellStemFilter(tokenizer, dictionary, true, true); // dedup=true, longestOnly=true
    assertTokenStreamContents(filter, new String[] { "lucene", "is", "awesome" }, new int[] { 1, 1, 1 }); // expected terms and position increments
}
Also used : MockTokenizer(org.apache.lucene.analysis.MockTokenizer)
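
The two trailing booleans in the constructor above are dedup and longestOnly. For contrast, a minimal sketch of the default two-argument form, as an illustration rather than part of the original test (dedup defaults to true and longestOnly to false, so a token may yield several stems):

    MockTokenizer tokenizer = whitespaceMockTokenizer("lucene is awesome");
    // Default form: dedup=true, longestOnly=false; extra stems for a token are
    // emitted at the same position (position increment 0).
    HunspellStemFilter filter = new HunspellStemFilter(tokenizer, dictionary);
    // assertTokenStreamContents(filter, expectedTerms, expectedPosIncrements);
    // the expected arrays depend on the Hunspell dictionary under test.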

Example 58 with MockTokenizer

Use of org.apache.lucene.analysis.MockTokenizer in project lucene-solr by apache.

From the class TestArabicNormalizationFilter, method check.

private void check(final String input, final String expected) throws IOException {
    MockTokenizer tokenStream = new MockTokenizer(MockTokenizer.WHITESPACE, false);
    tokenStream.setReader(new StringReader(input));
    ArabicNormalizationFilter filter = new ArabicNormalizationFilter(tokenStream);
    assertTokenStreamContents(filter, new String[] { expected });
}
Also used : MockTokenizer(org.apache.lucene.analysis.MockTokenizer) StringReader(java.io.StringReader)
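
For comparison with the earlier examples, the same helper can be written with the whitespaceMockTokenizer convenience method from BaseTokenStreamTestCase, which creates the MockTokenizer and sets its reader in one step; a roughly equivalent sketch:

private void check(final String input, final String expected) throws IOException {
    // whitespaceMockTokenizer is roughly new MockTokenizer(MockTokenizer.WHITESPACE, false)
    // with setReader(new StringReader(input)) already applied.
    MockTokenizer tokenStream = whitespaceMockTokenizer(input);
    ArabicNormalizationFilter filter = new ArabicNormalizationFilter(tokenStream);
    assertTokenStreamContents(filter, new String[] { expected });
}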

Example 59 with MockTokenizer

Use of org.apache.lucene.analysis.MockTokenizer in project lucene-solr by apache.

From the class TestBrazilianStemFilterFactory, method testStemming.

/**
   * Ensure the filter actually stems and normalizes text.
   */
public void testStemming() throws Exception {
    Reader reader = new StringReader("Brasília");
    Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
    tokenizer.setReader(reader);
    TokenStream stream = tokenFilterFactory("BrazilianStem").create(tokenizer);
    assertTokenStreamContents(stream, new String[] { "brasil" });
}
Also used : MockTokenizer(org.apache.lucene.analysis.MockTokenizer) TokenStream(org.apache.lucene.analysis.TokenStream) StringReader(java.io.StringReader) Reader(java.io.Reader) Tokenizer(org.apache.lucene.analysis.Tokenizer)
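
The "BrazilianStem" factory resolves to the BrazilianStemFilter in org.apache.lucene.analysis.br. A minimal sketch of the same assertion without the factory indirection, assuming the filter's single-argument constructor:

    Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
    tokenizer.setReader(new StringReader("Brasília"));
    // Construct the filter directly instead of via tokenFilterFactory("BrazilianStem")
    TokenStream stream = new BrazilianStemFilter(tokenizer);
    assertTokenStreamContents(stream, new String[] { "brasil" });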

Example 60 with MockTokenizer

Use of org.apache.lucene.analysis.MockTokenizer in project lucene-solr by apache.

From the class TestLimitTokenOffsetFilter, method test.

public void test() throws Exception {
    for (final boolean consumeAll : new boolean[] { true, false }) {
        MockTokenizer tokenizer = whitespaceMockTokenizer("A1 B2 C3 D4 E5 F6");
        tokenizer.setEnableChecks(consumeAll);
        // maxStartOffset is 3: the test would fail if the filter erroneously used endOffset instead of startOffset
        TokenStream stream = new LimitTokenOffsetFilter(tokenizer, 3, consumeAll);
        assertTokenStreamContents(stream, new String[] { "A1", "B2" });
    }
}
Also used : MockTokenizer(org.apache.lucene.analysis.MockTokenizer) TokenStream(org.apache.lucene.analysis.TokenStream)
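
The six tokens start at offsets 0, 3, 6, 9, 12 and 15, so a maxStartOffset of 3 keeps only "A1" and "B2". A sketch of the complementary case, as an illustration rather than part of the original test, where the limit is large enough for every token to pass:

    MockTokenizer tokenizer = whitespaceMockTokenizer("A1 B2 C3 D4 E5 F6");
    // With maxStartOffset >= 15 every token's start offset is within the limit.
    TokenStream stream = new LimitTokenOffsetFilter(tokenizer, 15, true);
    assertTokenStreamContents(stream, new String[] { "A1", "B2", "C3", "D4", "E5", "F6" });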

Aggregations

MockTokenizer (org.apache.lucene.analysis.MockTokenizer): 280 usages
Tokenizer (org.apache.lucene.analysis.Tokenizer): 204 usages
Analyzer (org.apache.lucene.analysis.Analyzer): 161 usages
StringReader (java.io.StringReader): 118 usages
TokenStream (org.apache.lucene.analysis.TokenStream): 116 usages
KeywordTokenizer (org.apache.lucene.analysis.core.KeywordTokenizer): 106 usages
Reader (java.io.Reader): 59 usages
MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer): 54 usages
CharArraySet (org.apache.lucene.analysis.CharArraySet): 44 usages
Directory (org.apache.lucene.store.Directory): 36 usages
Document (org.apache.lucene.document.Document): 31 usages
BytesRef (org.apache.lucene.util.BytesRef): 25 usages
SetKeywordMarkerFilter (org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter): 21 usages
TextField (org.apache.lucene.document.TextField): 20 usages
CannedTokenStream (org.apache.lucene.analysis.CannedTokenStream): 18 usages
Field (org.apache.lucene.document.Field): 17 usages
FieldType (org.apache.lucene.document.FieldType): 14 usages
StringField (org.apache.lucene.document.StringField): 11 usages
Input (org.apache.lucene.search.suggest.Input): 11 usages
InputArrayIterator (org.apache.lucene.search.suggest.InputArrayIterator): 11 usages