
Example 91 with TokenStream

Use of org.apache.lucene.analysis.TokenStream in project lucene-solr by apache.

From the class TestSimplePatternTokenizer, method testBasic:

public void testBasic() throws Exception {
    // get stuff between "'"
    String qpattern = "\\'([^\\']+)\\'";
    String[][] tests = {
        // pattern     input                 output
        { ":",         "boo:and:foo",        ": :" },
        { qpattern,    "aaa 'bbb' 'ccc'",    "'bbb' 'ccc'" }
    };
    for (String[] test : tests) {
        TokenStream stream = new SimplePatternTokenizer(test[0]);
        ((Tokenizer) stream).setReader(new StringReader(test[1]));
        String out = tsToString(stream);
        assertEquals("pattern: " + test[0] + " with input: " + test[1], test[2], out);
    }
}
Also used: TokenStream(org.apache.lucene.analysis.TokenStream) StringReader(java.io.StringReader) Tokenizer(org.apache.lucene.analysis.Tokenizer)
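The test relies on a private tsToString helper that is not shown in this snippet. A minimal sketch of what such a helper typically looks like, assuming the standard attribute-based consumer loop (imports from java.io and org.apache.lucene.analysis.tokenattributes):

private static String tsToString(TokenStream in) throws IOException {
    StringBuilder out = new StringBuilder();
    // CharTermAttribute exposes the text of the current token
    CharTermAttribute termAtt = in.addAttribute(CharTermAttribute.class);
    in.reset();
    while (in.incrementToken()) {
        if (out.length() > 0) {
            out.append(' ');
        }
        out.append(termAtt.toString());
    }
    in.end();
    in.close();
    return out.toString();
}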

Example 92 with TokenStream

Use of org.apache.lucene.analysis.TokenStream in project lucene-solr by apache.

From the class TestReverseStringFilter, method testFilterWithMark:

public void testFilterWithMark() throws Exception {
    // 1-4 length string
    TokenStream stream = new MockTokenizer(MockTokenizer.WHITESPACE, false);
    ((Tokenizer) stream).setReader(new StringReader("Do have a nice day"));
    ReverseStringFilter filter = new ReverseStringFilter(stream, '\u0001');
    assertTokenStreamContents(filter, new String[] { "\u0001oD", "\u0001evah", "\u0001a", "\u0001ecin", "\u0001yad" });
}
Also used: MockTokenizer(org.apache.lucene.analysis.MockTokenizer) TokenStream(org.apache.lucene.analysis.TokenStream) StringReader(java.io.StringReader) Tokenizer(org.apache.lucene.analysis.Tokenizer) KeywordTokenizer(org.apache.lucene.analysis.core.KeywordTokenizer)
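assertTokenStreamContents is a helper from Lucene's test framework and is not shown here. A hand-rolled equivalent that checks the '\u0001' mark character directly, as a minimal sketch assuming the standard TokenStream consumer contract:

Tokenizer tok = new MockTokenizer(MockTokenizer.WHITESPACE, false);
tok.setReader(new StringReader("Do have a nice day"));
TokenStream reversed = new ReverseStringFilter(tok, '\u0001');
CharTermAttribute termAtt = reversed.addAttribute(CharTermAttribute.class);
reversed.reset();
while (reversed.incrementToken()) {
    // every reversed token is prefixed with the mark character
    String term = termAtt.toString();
    assertEquals('\u0001', term.charAt(0));
}
reversed.end();
reversed.close();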

Example 93 with TokenStream

Use of org.apache.lucene.analysis.TokenStream in project lucene-solr by apache.

From the class TestDelimitedPayloadTokenFilterFactory, method testDelim:

public void testDelim() throws Exception {
    Reader reader = new StringReader("the*0.1 quick*0.1 red*0.1");
    TokenStream stream = new MockTokenizer(MockTokenizer.WHITESPACE, false);
    ((Tokenizer) stream).setReader(reader);
    stream = tokenFilterFactory("DelimitedPayload", "encoder", "float", "delimiter", "*").create(stream);
    stream.reset();
    while (stream.incrementToken()) {
        PayloadAttribute payAttr = stream.getAttribute(PayloadAttribute.class);
        assertNotNull(payAttr);
        byte[] payData = payAttr.getPayload().bytes;
        assertNotNull(payData);
        float payFloat = PayloadHelper.decodeFloat(payData);
        assertEquals(0.1f, payFloat, 0.0f);
    }
    stream.end();
    stream.close();
}
Also used: MockTokenizer(org.apache.lucene.analysis.MockTokenizer) TokenStream(org.apache.lucene.analysis.TokenStream) PayloadAttribute(org.apache.lucene.analysis.tokenattributes.PayloadAttribute) StringReader(java.io.StringReader) Reader(java.io.Reader) Tokenizer(org.apache.lucene.analysis.Tokenizer)
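tokenFilterFactory(...) is a helper from the test base class. The equivalent direct construction, as a minimal sketch assuming DelimitedPayloadTokenFilter and FloatEncoder from org.apache.lucene.analysis.payloads:

Tokenizer tok = new MockTokenizer(MockTokenizer.WHITESPACE, false);
tok.setReader(new StringReader("the*0.1 quick*0.1 red*0.1"));
// '*' is the delimiter; FloatEncoder turns the trailing "0.1" into a 4-byte payload
TokenStream payloads = new DelimitedPayloadTokenFilter(tok, '*', new FloatEncoder());
PayloadAttribute payAttr = payloads.addAttribute(PayloadAttribute.class);
payloads.reset();
while (payloads.incrementToken()) {
    float weight = PayloadHelper.decodeFloat(payAttr.getPayload().bytes);
    assertEquals(0.1f, weight, 0.0f);
}
payloads.end();
payloads.close();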

Example 94 with TokenStream

Use of org.apache.lucene.analysis.TokenStream in project lucene-solr by apache.

From the class TestDelimitedPayloadTokenFilterFactory, method testEncoder:

public void testEncoder() throws Exception {
    Reader reader = new StringReader("the|0.1 quick|0.1 red|0.1");
    TokenStream stream = new MockTokenizer(MockTokenizer.WHITESPACE, false);
    ((Tokenizer) stream).setReader(reader);
    stream = tokenFilterFactory("DelimitedPayload", "encoder", "float").create(stream);
    stream.reset();
    while (stream.incrementToken()) {
        PayloadAttribute payAttr = stream.getAttribute(PayloadAttribute.class);
        assertNotNull(payAttr);
        byte[] payData = payAttr.getPayload().bytes;
        assertNotNull(payData);
        float payFloat = PayloadHelper.decodeFloat(payData);
        assertEquals(0.1f, payFloat, 0.0f);
    }
    stream.end();
    stream.close();
}
Also used: MockTokenizer(org.apache.lucene.analysis.MockTokenizer) TokenStream(org.apache.lucene.analysis.TokenStream) PayloadAttribute(org.apache.lucene.analysis.tokenattributes.PayloadAttribute) StringReader(java.io.StringReader) Reader(java.io.Reader) Tokenizer(org.apache.lucene.analysis.Tokenizer)
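This variant exercises the default '|' delimiter. For reference, the "float" encoder stores each weight as a 4-byte IEEE float; a minimal round-trip sketch using PayloadHelper, the same class the test uses for decoding:

byte[] encoded = PayloadHelper.encodeFloat(0.1f);   // 4-byte payload
float decoded = PayloadHelper.decodeFloat(encoded); // 0.1f, as asserted above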

Example 95 with TokenStream

Use of org.apache.lucene.analysis.TokenStream in project lucene-solr by apache.

From the class TestRussianLightStemFilter, method testKeyword:

public void testKeyword() throws IOException {
    final CharArraySet exclusionSet = new CharArraySet(asSet("энергии"), false);
    Analyzer a = new Analyzer() {

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer source = new MockTokenizer(MockTokenizer.WHITESPACE, false);
            TokenStream sink = new SetKeywordMarkerFilter(source, exclusionSet);
            return new TokenStreamComponents(source, new RussianLightStemFilter(sink));
        }
    };
    checkOneTerm(a, "энергии", "энергии");
    a.close();
}
Also used: MockTokenizer(org.apache.lucene.analysis.MockTokenizer) CharArraySet(org.apache.lucene.analysis.CharArraySet) TokenStream(org.apache.lucene.analysis.TokenStream) SetKeywordMarkerFilter(org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter) Analyzer(org.apache.lucene.analysis.Analyzer) Tokenizer(org.apache.lucene.analysis.Tokenizer) KeywordTokenizer(org.apache.lucene.analysis.core.KeywordTokenizer)
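The excluded word survives unstemmed because SetKeywordMarkerFilter flags it through KeywordAttribute, which stem filters such as RussianLightStemFilter respect. A minimal sketch of that flag, assuming the same exclusionSet as above and KeywordAttribute from org.apache.lucene.analysis.tokenattributes:

Tokenizer src = new MockTokenizer(MockTokenizer.WHITESPACE, false);
src.setReader(new StringReader("энергии"));
TokenStream marked = new SetKeywordMarkerFilter(src, exclusionSet);
KeywordAttribute kwAtt = marked.addAttribute(KeywordAttribute.class);
marked.reset();
while (marked.incrementToken()) {
    // true for terms in the exclusion set; the stemmer leaves such tokens untouched
    assertTrue(kwAtt.isKeyword());
}
marked.end();
marked.close();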

Aggregations

TokenStream (org.apache.lucene.analysis.TokenStream): 849
StringReader (java.io.StringReader): 337
Tokenizer (org.apache.lucene.analysis.Tokenizer): 244
Reader (java.io.Reader): 175
CharTermAttribute (org.apache.lucene.analysis.tokenattributes.CharTermAttribute): 141
MockTokenizer (org.apache.lucene.analysis.MockTokenizer): 128
Analyzer (org.apache.lucene.analysis.Analyzer): 121
CannedTokenStream (org.apache.lucene.analysis.CannedTokenStream): 94
LowerCaseFilter (org.apache.lucene.analysis.LowerCaseFilter): 88
IOException (java.io.IOException): 86
StandardFilter (org.apache.lucene.analysis.standard.StandardFilter): 73
Term (org.apache.lucene.index.Term): 66
Document (org.apache.lucene.document.Document): 64
ArrayList (java.util.ArrayList): 59
StandardTokenizer (org.apache.lucene.analysis.standard.StandardTokenizer): 59
StopFilter (org.apache.lucene.analysis.StopFilter): 58
KeywordTokenizer (org.apache.lucene.analysis.core.KeywordTokenizer): 57
SetKeywordMarkerFilter (org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter): 53
Test (org.junit.Test): 53
OffsetAttribute (org.apache.lucene.analysis.tokenattributes.OffsetAttribute): 47