Search in sources :

Example 61 with MockTokenizer

use of org.apache.lucene.analysis.MockTokenizer in project lucene-solr by apache.

Source: class TestLimitTokenOffsetFilterFactory, method test.

/**
 * Checks that LimitTokenOffsetFilter drops every token whose start offset
 * exceeds 3, both when the filter consumes the remaining stream and when it
 * does not. "A1" (offset 0) and "B2" (offset 3) survive; later tokens do not.
 */
public void test() throws Exception {
    for (final boolean consumeAll : new boolean[] { true, false }) {
        // Whitespace-split source text; offsets: A1=0, B2=3, C3=6, ...
        MockTokenizer source = new MockTokenizer(MockTokenizer.WHITESPACE, false);
        source.setReader(new StringReader("A1 B2 C3 D4 E5 F6"));
        // Full consumer-contract checks only apply when the filter drains the stream.
        source.setEnableChecks(consumeAll);
        TokenStream limited = tokenFilterFactory("LimitTokenOffset",
                LimitTokenOffsetFilterFactory.MAX_START_OFFSET, "3",
                LimitTokenOffsetFilterFactory.CONSUME_ALL_TOKENS_KEY,
                Boolean.toString(consumeAll)).create(source);
        assertTokenStreamContents(limited, new String[] { "A1", "B2" });
    }
}
Also used : MockTokenizer(org.apache.lucene.analysis.MockTokenizer) TokenStream(org.apache.lucene.analysis.TokenStream) StringReader(java.io.StringReader) Reader(java.io.Reader)

Example 62 with MockTokenizer

use of org.apache.lucene.analysis.MockTokenizer in project lucene-solr by apache.

Source: class TestFingerprintFilterFactory, method test.

/**
 * Checks that FingerprintFilter collapses the whole stream into a single
 * deduplicated, sorted token joined by the configured separator:
 * "A1 B2 A1 D4 C3" -> "A1_B2_C3_D4".
 */
public void test() throws Exception {
    for (final boolean consumeAll : new boolean[] { true, false }) {
        // Input contains a duplicate ("A1") and unsorted tokens on purpose.
        MockTokenizer source = new MockTokenizer(MockTokenizer.WHITESPACE, false);
        source.setReader(new StringReader("A1 B2 A1 D4 C3"));
        // Full consumer-contract checks only apply when the filter drains the stream.
        source.setEnableChecks(consumeAll);
        TokenStream fingerprinted = tokenFilterFactory("Fingerprint",
                FingerprintFilterFactory.MAX_OUTPUT_TOKEN_SIZE_KEY, "256",
                FingerprintFilterFactory.SEPARATOR_KEY, "_").create(source);
        assertTokenStreamContents(fingerprinted, new String[] { "A1_B2_C3_D4" });
    }
}
Also used : MockTokenizer(org.apache.lucene.analysis.MockTokenizer) TokenStream(org.apache.lucene.analysis.TokenStream) StringReader(java.io.StringReader) Reader(java.io.Reader)

Example 63 with MockTokenizer

use of org.apache.lucene.analysis.MockTokenizer in project lucene-solr by apache.

the class TestHyphenatedWordsFilter method testRandomString.

/** blast some random strings through the analyzer */
/** blast some random strings through the analyzer */
public void testRandomString() throws Exception {
    // Analyzer wiring: whitespace tokenizer feeding HyphenatedWordsFilter.
    Analyzer analyzer = new Analyzer() {

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer source = new MockTokenizer(MockTokenizer.WHITESPACE, false);
            return new TokenStreamComponents(source, new HyphenatedWordsFilter(source));
        }
    };
    // Fuzz the filter with random text to catch contract violations.
    checkRandomData(random(), analyzer, 1000 * RANDOM_MULTIPLIER);
    analyzer.close();
}
Also used : MockTokenizer(org.apache.lucene.analysis.MockTokenizer) Analyzer(org.apache.lucene.analysis.Analyzer) Tokenizer(org.apache.lucene.analysis.Tokenizer) MockTokenizer(org.apache.lucene.analysis.MockTokenizer) KeywordTokenizer(org.apache.lucene.analysis.core.KeywordTokenizer)

Example 64 with MockTokenizer

use of org.apache.lucene.analysis.MockTokenizer in project lucene-solr by apache.

the class TestHyphenatedWordsFilter method testOffsets.

/**
 * Verifies that merged hyphenated words report offsets spanning the original
 * fragments, e.g. "abc-" + "def" becomes "abcdef" covering offsets 0..8.
 */
public void testOffsets() throws Exception {
    MockTokenizer source = new MockTokenizer(MockTokenizer.WHITESPACE, false);
    source.setReader(new StringReader("abc- def geh 1234- 5678-"));
    TokenStream filtered = new HyphenatedWordsFilter(source);
    assertTokenStreamContents(filtered,
            new String[] { "abcdef", "geh", "12345678-" },
            new int[] { 0, 9, 13 },   // start offsets span the merged pieces
            new int[] { 8, 12, 24 }); // end offsets
}
Also used : MockTokenizer(org.apache.lucene.analysis.MockTokenizer) TokenStream(org.apache.lucene.analysis.TokenStream) StringReader(java.io.StringReader) Tokenizer(org.apache.lucene.analysis.Tokenizer) MockTokenizer(org.apache.lucene.analysis.MockTokenizer) KeywordTokenizer(org.apache.lucene.analysis.core.KeywordTokenizer)

Example 65 with MockTokenizer

use of org.apache.lucene.analysis.MockTokenizer in project lucene-solr by apache.

the class TestHyphenatedWordsFilter method testHyphenAtEnd.

/**
   * Test that HyphenatedWordsFilter behaves correctly with a final hyphen
   */
/**
   * Test that HyphenatedWordsFilter behaves correctly with a final hyphen
   */
public void testHyphenAtEnd() throws Exception {
    // Mix of CRLF line-continuations, a blank line, a tab separator, and a
    // trailing hyphen with no following token ("ecology-").
    String input = "ecologi-\r\ncal devel-\r\n\r\nop compre-	hensive-hands-on and ecology-";
    MockTokenizer source = new MockTokenizer(MockTokenizer.WHITESPACE, false);
    source.setReader(new StringReader(input));
    TokenStream filtered = new HyphenatedWordsFilter(source);
    // A hyphen at end of input is preserved rather than merged.
    assertTokenStreamContents(filtered, new String[] { "ecological", "develop", "comprehensive-hands-on", "and", "ecology-" });
}
Also used : MockTokenizer(org.apache.lucene.analysis.MockTokenizer) TokenStream(org.apache.lucene.analysis.TokenStream) StringReader(java.io.StringReader) Tokenizer(org.apache.lucene.analysis.Tokenizer) MockTokenizer(org.apache.lucene.analysis.MockTokenizer) KeywordTokenizer(org.apache.lucene.analysis.core.KeywordTokenizer)

Aggregations

MockTokenizer (org.apache.lucene.analysis.MockTokenizer)280 Tokenizer (org.apache.lucene.analysis.Tokenizer)204 Analyzer (org.apache.lucene.analysis.Analyzer)161 StringReader (java.io.StringReader)118 TokenStream (org.apache.lucene.analysis.TokenStream)116 KeywordTokenizer (org.apache.lucene.analysis.core.KeywordTokenizer)106 Reader (java.io.Reader)59 MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer)54 CharArraySet (org.apache.lucene.analysis.CharArraySet)44 Directory (org.apache.lucene.store.Directory)36 Document (org.apache.lucene.document.Document)31 BytesRef (org.apache.lucene.util.BytesRef)25 SetKeywordMarkerFilter (org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter)21 TextField (org.apache.lucene.document.TextField)20 CannedTokenStream (org.apache.lucene.analysis.CannedTokenStream)18 Field (org.apache.lucene.document.Field)17 FieldType (org.apache.lucene.document.FieldType)14 StringField (org.apache.lucene.document.StringField)11 Input (org.apache.lucene.search.suggest.Input)11 InputArrayIterator (org.apache.lucene.search.suggest.InputArrayIterator)11