Usage of org.apache.lucene.analysis.standard.StandardFilter in the apache/lucene-solr project: class RussianAnalyzer, method normalize.
/**
 * Normalizes query-time text for this analyzer by running the incoming
 * stream through a {@link StandardFilter} and then lower-casing it with
 * {@link LowerCaseFilter}.
 *
 * @param fieldName the field being normalized (unused here)
 * @param in        the token stream to normalize
 * @return the filtered token stream
 */
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
  return new LowerCaseFilter(new StandardFilter(in));
}
Usage of org.apache.lucene.analysis.standard.StandardFilter in the apache/lucene-solr project: class SwedishAnalyzer, method createComponents.
/**
 * Builds the index-time analysis chain for this analyzer: a
 * {@link StandardTokenizer} whose output is passed through
 * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
 * an optional {@link SetKeywordMarkerFilter} (applied only when a stem
 * exclusion set was provided), and finally a {@link SnowballFilter}
 * driven by a Swedish stemmer.
 *
 * @param fieldName the field this chain will analyze (unused here)
 * @return the {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 *         wrapping the tokenizer and the assembled filter chain
 */
@Override
protected TokenStreamComponents createComponents(String fieldName) {
  final Tokenizer tokenizer = new StandardTokenizer();
  TokenStream chain = new StandardFilter(tokenizer);
  chain = new LowerCaseFilter(chain);
  chain = new StopFilter(chain, stopwords);
  // Keyword-mark stem exclusions so the stemmer leaves them untouched.
  if (!stemExclusionSet.isEmpty()) {
    chain = new SetKeywordMarkerFilter(chain, stemExclusionSet);
  }
  chain = new SnowballFilter(chain, new SwedishStemmer());
  return new TokenStreamComponents(tokenizer, chain);
}
Usage of org.apache.lucene.analysis.standard.StandardFilter in the apache/lucene-solr project: class PortugueseAnalyzer, method normalize.
/**
 * Query-time normalization: applies {@link StandardFilter} followed by
 * {@link LowerCaseFilter} to the given stream.
 *
 * @param fieldName the field being normalized (unused here)
 * @param in        the token stream to normalize
 * @return the filtered token stream
 */
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
  return new LowerCaseFilter(new StandardFilter(in));
}
Usage of org.apache.lucene.analysis.standard.StandardFilter in the apache/lucene-solr project: class PortugueseAnalyzer, method createComponents.
/**
 * Builds the index-time analysis chain for this analyzer: a
 * {@link StandardTokenizer} whose output is passed through
 * {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
 * an optional {@link SetKeywordMarkerFilter} (applied only when a stem
 * exclusion set was provided), and finally a
 * {@link PortugueseLightStemFilter}.
 *
 * @param fieldName the field this chain will analyze (unused here)
 * @return the {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 *         wrapping the tokenizer and the assembled filter chain
 */
@Override
protected TokenStreamComponents createComponents(String fieldName) {
  final Tokenizer tokenizer = new StandardTokenizer();
  TokenStream chain = new StandardFilter(tokenizer);
  chain = new LowerCaseFilter(chain);
  chain = new StopFilter(chain, stopwords);
  // Keyword-mark stem exclusions so the stemmer leaves them untouched.
  if (!stemExclusionSet.isEmpty()) {
    chain = new SetKeywordMarkerFilter(chain, stemExclusionSet);
  }
  chain = new PortugueseLightStemFilter(chain);
  return new TokenStreamComponents(tokenizer, chain);
}
Usage of org.apache.lucene.analysis.standard.StandardFilter in the ePADD project: class EnglishNumberAnalyzer, method createComponents.
/**
 * Builds the index-time analysis chain for this analyzer: a
 * {@link StandardNumberTokenizer} whose output is passed through
 * {@link StandardFilter}, {@link EnglishPossessiveFilter},
 * {@link LowerCaseFilter}, {@link StopFilter}, an optional
 * {@link SetKeywordMarkerFilter} (applied only when a stem exclusion
 * set was provided), and finally a {@link PorterStemFilter}.
 *
 * @param fieldName the field this chain will analyze (unused here)
 * @return the {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 *         wrapping the tokenizer and the assembled filter chain
 */
@Override
protected TokenStreamComponents createComponents(String fieldName) {
  final Tokenizer source = new StandardNumberTokenizer();
  TokenStream result = new StandardFilter(source);
  // Strip English possessives ('s) before case folding.
  result = new EnglishPossessiveFilter(result);
  result = new LowerCaseFilter(result);
  result = new StopFilter(result, this.stopwords);
  // Keyword-mark stem exclusions so the Porter stemmer leaves them untouched.
  if (!this.stemExclusionSet.isEmpty()) {
    result = new SetKeywordMarkerFilter(result, this.stemExclusionSet);
  }
  // NOTE: the (TokenStream) casts in the original were redundant; result
  // is already declared as TokenStream.
  result = new PorterStemFilter(result);
  return new TokenStreamComponents(source, result);
}
Aggregations