Example 6 with LowerCaseFilter

Use of org.apache.lucene.analysis.LowerCaseFilter in the lucene-solr project by Apache.

From the class RussianAnalyzer, method createComponents:

/**
   * Creates
   * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
   * used to tokenize all the text in the provided {@link Reader}.
   *
   * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
   *         built from a {@link StandardTokenizer} filtered with
   *         {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
   *         {@link SetKeywordMarkerFilter} (if a stem exclusion set is
   *         provided), and {@link SnowballFilter}
   */
@Override
protected TokenStreamComponents createComponents(String fieldName) {
    final Tokenizer source = new StandardTokenizer();
    TokenStream result = new StandardFilter(source);
    result = new LowerCaseFilter(result);
    result = new StopFilter(result, stopwords);
    if (!stemExclusionSet.isEmpty())
        result = new SetKeywordMarkerFilter(result, stemExclusionSet);
    result = new SnowballFilter(result, new org.tartarus.snowball.ext.RussianStemmer());
    return new TokenStreamComponents(source, result);
}
Also used: TokenStream (org.apache.lucene.analysis.TokenStream), StandardTokenizer (org.apache.lucene.analysis.standard.StandardTokenizer), StopFilter (org.apache.lucene.analysis.StopFilter), SetKeywordMarkerFilter (org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter), StandardFilter (org.apache.lucene.analysis.standard.StandardFilter), SnowballFilter (org.apache.lucene.analysis.snowball.SnowballFilter), Tokenizer (org.apache.lucene.analysis.Tokenizer), LowerCaseFilter (org.apache.lucene.analysis.LowerCaseFilter)
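
A minimal usage sketch (not part of the original source) showing how the chain above behaves end to end; the field name "body", the demo class name, and the sample sentence are illustrative assumptions:

import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ru.RussianAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class RussianAnalyzerDemo {
    public static void main(String[] args) throws IOException {
        try (RussianAnalyzer analyzer = new RussianAnalyzer();
             TokenStream ts = analyzer.tokenStream("body", "Быстрые коричневые лисицы")) {
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset(); // mandatory before the first incrementToken()
            while (ts.incrementToken()) {
                System.out.println(term); // lowercased, stop-filtered, Snowball-stemmed terms
            }
            ts.end();
        }
    }
}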

Example 7 with LowerCaseFilter

Use of org.apache.lucene.analysis.LowerCaseFilter in the lucene-solr project by Apache.

From the class ClassicAnalyzer, method createComponents:

@Override
protected TokenStreamComponents createComponents(final String fieldName) {
    final ClassicTokenizer src = new ClassicTokenizer();
    src.setMaxTokenLength(maxTokenLength);
    TokenStream tok = new ClassicFilter(src);
    tok = new LowerCaseFilter(tok);
    tok = new StopFilter(tok, stopwords);
    return new TokenStreamComponents(src, tok) {

        @Override
        protected void setReader(final Reader reader) {
            src.setMaxTokenLength(ClassicAnalyzer.this.maxTokenLength);
            super.setReader(reader);
        }
    };
}
Also used: TokenStream (org.apache.lucene.analysis.TokenStream), StopFilter (org.apache.lucene.analysis.StopFilter), Reader (java.io.Reader), LowerCaseFilter (org.apache.lucene.analysis.LowerCaseFilter)
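
A small sketch (assumed, not from the original page) of why the setReader override above matters: because the components are reused across calls, re-applying the limit on every new Reader is what makes a later ClassicAnalyzer.setMaxTokenLength call take effect on the next analysis. The field name "f" and sample text are assumptions:

import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.ClassicAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class ClassicAnalyzerDemo {
    public static void main(String[] args) throws IOException {
        try (ClassicAnalyzer analyzer = new ClassicAnalyzer()) {
            analyzer.setMaxTokenLength(4); // picked up via setReader on the next tokenStream() call
            try (TokenStream ts = analyzer.tokenStream("f", "Lucene in Action")) {
                CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
                ts.reset();
                while (ts.incrementToken()) {
                    // over-long tokens are chopped or skipped here, depending on the Lucene version
                    System.out.println(term);
                }
                ts.end();
            }
        }
    }
}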

Example 8 with LowerCaseFilter

Use of org.apache.lucene.analysis.LowerCaseFilter in the lucene-solr project by Apache.

From the class ThaiAnalyzer, method normalize:

@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    TokenStream result = new LowerCaseFilter(in);
    result = new DecimalDigitFilter(result);
    return result;
}
Also used: DecimalDigitFilter (org.apache.lucene.analysis.core.DecimalDigitFilter), TokenStream (org.apache.lucene.analysis.TokenStream), LowerCaseFilter (org.apache.lucene.analysis.LowerCaseFilter)
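
A sketch (an assumption, not from the original page) of how this chain is reached through the public Analyzer.normalize(String, String) entry point, which query parsers use to normalize plain terms; the field name "f" and the mixed Latin/Thai input are illustrative:

import org.apache.lucene.analysis.th.ThaiAnalyzer;
import org.apache.lucene.util.BytesRef;

public class ThaiNormalizeDemo {
    public static void main(String[] args) throws Exception {
        try (ThaiAnalyzer analyzer = new ThaiAnalyzer()) {
            // "ABC" is lowercased; Thai digits ๑๒๓ are folded to ASCII by DecimalDigitFilter
            BytesRef norm = analyzer.normalize("f", "ABC๑๒๓");
            System.out.println(norm.utf8ToString()); // expected: abc123
        }
    }
}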

Example 9 with LowerCaseFilter

Use of org.apache.lucene.analysis.LowerCaseFilter in the lucene-solr project by Apache.

From the class CJKAnalyzer, method normalize:

@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
    TokenStream result = new CJKWidthFilter(in);
    result = new LowerCaseFilter(result);
    return result;
}
Also used: TokenStream (org.apache.lucene.analysis.TokenStream), LowerCaseFilter (org.apache.lucene.analysis.LowerCaseFilter)
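
A companion sketch (assumed, not in the original) illustrating the width folding that runs before lowercasing here; the fullwidth input string is an illustrative assumption:

import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.util.BytesRef;

public class CJKNormalizeDemo {
    public static void main(String[] args) throws Exception {
        try (CJKAnalyzer analyzer = new CJKAnalyzer()) {
            // CJKWidthFilter folds fullwidth ASCII variants to basic Latin,
            // then LowerCaseFilter lowercases the result
            BytesRef norm = analyzer.normalize("f", "ＡＢＣ");
            System.out.println(norm.utf8ToString()); // expected: abc
        }
    }
}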

Example 10 with LowerCaseFilter

Use of org.apache.lucene.analysis.LowerCaseFilter in the lucene-solr project by Apache.

From the class CJKAnalyzer, method createComponents:

@Override
protected TokenStreamComponents createComponents(String fieldName) {
    final Tokenizer source = new StandardTokenizer();
    // run the width filter before bigramming; it sometimes combines characters
    TokenStream result = new CJKWidthFilter(source);
    result = new LowerCaseFilter(result);
    result = new CJKBigramFilter(result);
    return new TokenStreamComponents(source, new StopFilter(result, stopwords));
}
Also used: TokenStream (org.apache.lucene.analysis.TokenStream), StandardTokenizer (org.apache.lucene.analysis.standard.StandardTokenizer), StopFilter (org.apache.lucene.analysis.StopFilter), Tokenizer (org.apache.lucene.analysis.Tokenizer), LowerCaseFilter (org.apache.lucene.analysis.LowerCaseFilter)
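
A usage sketch (not from the original page; the field name and sample text are assumptions) showing the overlapping bigrams CJKBigramFilter produces from the single-character tokens emitted by StandardTokenizer:

import java.io.IOException;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class CJKBigramDemo {
    public static void main(String[] args) throws IOException {
        try (CJKAnalyzer analyzer = new CJKAnalyzer();
             TokenStream ts = analyzer.tokenStream("f", "東京都")) {
            CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
            ts.reset();
            while (ts.incrementToken()) {
                System.out.println(term); // expected: 東京, then 京都 (overlapping bigrams)
            }
            ts.end();
        }
    }
}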

Aggregations

LowerCaseFilter (org.apache.lucene.analysis.LowerCaseFilter): 85
TokenStream (org.apache.lucene.analysis.TokenStream): 78
StandardFilter (org.apache.lucene.analysis.standard.StandardFilter): 58
Tokenizer (org.apache.lucene.analysis.Tokenizer): 45
StopFilter (org.apache.lucene.analysis.StopFilter): 42
StandardTokenizer (org.apache.lucene.analysis.standard.StandardTokenizer): 38
SetKeywordMarkerFilter (org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter): 29
SnowballFilter (org.apache.lucene.analysis.snowball.SnowballFilter): 13
DecimalDigitFilter (org.apache.lucene.analysis.core.DecimalDigitFilter): 10
Analyzer (org.apache.lucene.analysis.Analyzer): 8
ElisionFilter (org.apache.lucene.analysis.util.ElisionFilter): 5
StringReader (java.io.StringReader): 4
HashMap (java.util.HashMap): 4
TokenFilter (org.apache.lucene.analysis.TokenFilter): 4
PerFieldAnalyzerWrapper (org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper): 4
Document (org.apache.lucene.document.Document): 4
Field (org.apache.lucene.document.Field): 4
TextField (org.apache.lucene.document.TextField): 4
IndexWriter (org.apache.lucene.index.IndexWriter): 4
IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig): 4