Use of org.apache.lucene.analysis.LowerCaseFilter in project lucene-solr by apache: class ClassicAnalyzer, method createComponents.
@Override
protected TokenStreamComponents createComponents(final String fieldName) {
  final ClassicTokenizer src = new ClassicTokenizer();
  src.setMaxTokenLength(maxTokenLength);
  TokenStream tok = new ClassicFilter(src);
  tok = new LowerCaseFilter(tok);
  tok = new StopFilter(tok, stopwords);
  return new TokenStreamComponents(src, tok) {
    @Override
    protected void setReader(final Reader reader) {
      // Re-apply the analyzer's current max token length each time the
      // reusable components are pointed at a new reader.
      src.setMaxTokenLength(ClassicAnalyzer.this.maxTokenLength);
      super.setReader(reader);
    }
  };
}
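The following is a minimal sketch, not part of the project, of what this chain produces end to end. The demo class name is hypothetical; it assumes a Lucene 7.x-era classpath where ClassicAnalyzer lives in org.apache.lucene.analysis.standard and the default English stop set is in effect.

import java.io.IOException;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.standard.ClassicAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class ClassicAnalyzerDemo {
  public static void main(String[] args) throws IOException {
    // ClassicTokenizer -> ClassicFilter -> LowerCaseFilter -> StopFilter
    try (ClassicAnalyzer analyzer = new ClassicAnalyzer();
         TokenStream ts = analyzer.tokenStream("body", "The Quick BROWN Fox")) {
      CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
      ts.reset();
      while (ts.incrementToken()) {
        System.out.println(term); // quick, brown, fox
      }
      ts.end();
    }
  }
}

Note that lowercasing runs before stop filtering, which is what lets the default (lowercase) stop set match "The".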
Use of org.apache.lucene.analysis.LowerCaseFilter in project lucene-solr by apache: class ThaiAnalyzer, method normalize.
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
  TokenStream result = new LowerCaseFilter(in);
  result = new DecimalDigitFilter(result);
  return result;
}
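A sketch of how this hook gets exercised, not from the source: since Lucene 7 the public Analyzer#normalize(String, String) runs this chain over a single term without tokenizing it, which query parsers rely on for wildcard and fuzzy terms. The demo class name is hypothetical.

import org.apache.lucene.analysis.th.ThaiAnalyzer;
import org.apache.lucene.util.BytesRef;

public class ThaiNormalizeDemo {
  public static void main(String[] args) throws Exception {
    try (ThaiAnalyzer analyzer = new ThaiAnalyzer()) {
      // LowerCaseFilter folds case; DecimalDigitFilter folds Thai digits to Latin digits.
      BytesRef norm = analyzer.normalize("body", "ABC\u0E51\u0E52"); // "ABC" + Thai digits one, two
      System.out.println(norm.utf8ToString()); // abc12
    }
  }
}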
Use of org.apache.lucene.analysis.LowerCaseFilter in project lucene-solr by apache: class CJKAnalyzer, method normalize.
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
  TokenStream result = new CJKWidthFilter(in);
  result = new LowerCaseFilter(result);
  return result;
}
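Again a hypothetical sketch, assuming the same Lucene 7+ Analyzer#normalize(String, String) entry point: width folding plus lowercasing means a fullwidth query term matches its halfwidth, lowercase indexed form.

import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.util.BytesRef;

public class CJKNormalizeDemo {
  public static void main(String[] args) throws Exception {
    try (CJKAnalyzer analyzer = new CJKAnalyzer()) {
      // CJKWidthFilter folds fullwidth ASCII variants, then LowerCaseFilter lowercases.
      BytesRef norm = analyzer.normalize("body", "ＬＵＣＥＮＥ"); // fullwidth "LUCENE"
      System.out.println(norm.utf8ToString()); // lucene
    }
  }
}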
Use of org.apache.lucene.analysis.LowerCaseFilter in project lucene-solr by apache: class CJKAnalyzer, method createComponents.
@Override
protected TokenStreamComponents createComponents(String fieldName) {
  final Tokenizer source = new StandardTokenizer();
  // Run the width filter before bigramming; it sometimes combines characters.
  TokenStream result = new CJKWidthFilter(source);
  result = new LowerCaseFilter(result);
  result = new CJKBigramFilter(result);
  return new TokenStreamComponents(source, new StopFilter(result, stopwords));
}
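A minimal sketch (hypothetical class name, my own input text) of what the full index-time chain emits for Han text: StandardTokenizer produces one token per ideograph, and CJKBigramFilter joins them into overlapping bigrams.

import java.io.IOException;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cjk.CJKAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class CJKBigramDemo {
  public static void main(String[] args) throws IOException {
    try (CJKAnalyzer analyzer = new CJKAnalyzer();
         TokenStream ts = analyzer.tokenStream("body", "北京大学")) {
      CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
      ts.reset();
      while (ts.incrementToken()) {
        System.out.println(term); // 北京, 京大, 大学
      }
      ts.end();
    }
  }
}

The default stop set here is an English one, so the CJK bigrams pass through the final StopFilter untouched.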
Use of org.apache.lucene.analysis.LowerCaseFilter in project lucene-solr by apache: class ArabicAnalyzer, method createComponents.
/**
 * Creates
 * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 * used to tokenize all the text in the provided {@link Reader}.
 *
 * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 *         built from a {@link StandardTokenizer} filtered with
 *         {@link LowerCaseFilter}, {@link DecimalDigitFilter}, {@link StopFilter},
 *         {@link ArabicNormalizationFilter}, {@link SetKeywordMarkerFilter}
 *         (if a stem exclusion set is provided), and {@link ArabicStemFilter}.
 */
@Override
protected TokenStreamComponents createComponents(String fieldName) {
  final Tokenizer source = new StandardTokenizer();
  TokenStream result = new LowerCaseFilter(source);
  result = new DecimalDigitFilter(result);
  // The order here is important: the stopword list is not normalized!
  result = new StopFilter(result, stopwords);
  // TODO maybe we should make ArabicNormalizationFilter also KeywordAttribute aware?!
  result = new ArabicNormalizationFilter(result);
  if (!stemExclusionSet.isEmpty()) {
    result = new SetKeywordMarkerFilter(result, stemExclusionSet);
  }
  return new TokenStreamComponents(source, new ArabicStemFilter(result));
}
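To round this out, a hypothetical sketch of the stem exclusion branch: terms placed in the exclusion set are keyword-marked by SetKeywordMarkerFilter, so ArabicStemFilter passes them through unchanged. The exclusion term and class name are illustrative only.

import java.io.IOException;
import java.util.Arrays;

import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.ar.ArabicAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class ArabicStemExclusionDemo {
  public static void main(String[] args) throws IOException {
    // Keyword-mark this term so the stemmer leaves it intact.
    CharArraySet exclusions = new CharArraySet(Arrays.asList("كتاب"), false);
    try (ArabicAnalyzer analyzer =
             new ArabicAnalyzer(ArabicAnalyzer.getDefaultStopSet(), exclusions);
         TokenStream ts = analyzer.tokenStream("body", "كتاب")) {
      CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
      ts.reset();
      while (ts.incrementToken()) {
        System.out.println(term); // emitted unstemmed because it is keyword-marked
      }
      ts.end();
    }
  }
}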