Use of org.apache.lucene.analysis.LowerCaseFilter in project lucene-solr by apache.
The class SoraniAnalyzer, method normalize.
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
  TokenStream result = new StandardFilter(in);
  result = new SoraniNormalizationFilter(result);
  result = new LowerCaseFilter(result);
  result = new DecimalDigitFilter(result);
  return result;
}
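This protected normalize(String, TokenStream) hook is what the public Analyzer#normalize(String, String) runs query terms through when they skip full tokenization (wildcard or fuzzy terms, for example). A minimal sketch of exercising it through that public method; the field name and sample text are illustrative, not taken from the project:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.ckb.SoraniAnalyzer;
import org.apache.lucene.util.BytesRef;

public class SoraniNormalizeSketch {
  public static void main(String[] args) {
    try (Analyzer analyzer = new SoraniAnalyzer()) {
      // The whole input is treated as a single token and pushed through the
      // StandardFilter -> SoraniNormalizationFilter -> LowerCaseFilter
      // -> DecimalDigitFilter chain shown above.
      BytesRef normalized = analyzer.normalize("body", "کوردی");
      System.out.println(normalized.utf8ToString());
    }
  }
}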
Use of org.apache.lucene.analysis.LowerCaseFilter in project lucene-solr by apache.
The class TestGermanStemFilter, method setUp.
@Override
public void setUp() throws Exception {
  super.setUp();
  analyzer = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
      Tokenizer t = new MockTokenizer(MockTokenizer.KEYWORD, false);
      return new TokenStreamComponents(t, new GermanStemFilter(new LowerCaseFilter(t)));
    }
  };
}
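A hedged sketch of how a test method could exercise the analyzer built in this setUp(), assuming it sits in the same BaseTokenStreamTestCase subclass; the input/expected pair is illustrative and not taken from the project's test data:

public void testLowercasingFeedsStemmer() throws Exception {
  // checkOneTerm (from BaseTokenStreamTestCase) analyzes the input and expects a
  // single output token; the KEYWORD MockTokenizer keeps the input as one term,
  // so LowerCaseFilter runs before GermanStemFilter sees it.
  checkOneTerm(analyzer, "Tage", "tag"); // assumes the medium stemmer strips the plural -e
}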
Use of org.apache.lucene.analysis.LowerCaseFilter in project lucene-solr by apache.
The class TestTeeSinkTokenFilter, method testMultipleSources.
public void testMultipleSources() throws Exception {
  final TeeSinkTokenFilter tee1 = new TeeSinkTokenFilter(whitespaceMockTokenizer(buffer1.toString()));
  final TokenStream source1 = new CachingTokenFilter(tee1);
  tee1.addAttribute(CheckClearAttributesAttribute.class);
  MockTokenizer tokenizer = new MockTokenizer(tee1.getAttributeFactory(), MockTokenizer.WHITESPACE, false);
  tokenizer.setReader(new StringReader(buffer2.toString()));
  final TeeSinkTokenFilter tee2 = new TeeSinkTokenFilter(tokenizer);
  final TokenStream source2 = tee2;
  assertTokenStreamContents(source1, tokens1);
  assertTokenStreamContents(source2, tokens2);
  TokenStream lowerCasing = new LowerCaseFilter(source1);
  String[] lowerCaseTokens = new String[tokens1.length];
  for (int i = 0; i < tokens1.length; i++)
    lowerCaseTokens[i] = tokens1[i].toLowerCase(Locale.ROOT);
  assertTokenStreamContents(lowerCasing, lowerCaseTokens);
}
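Outside of the test, the usual TeeSinkTokenFilter pattern is to tokenize once and let a sink replay the captured tokens for a second consumer; LowerCaseFilter can wrap either the tee or the sink. A minimal sketch under that assumption (input text and class name are illustrative):

import java.io.StringReader;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.sinks.TeeSinkTokenFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class TeeSinkSketch {
  public static void main(String[] args) throws Exception {
    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
    tokenizer.setReader(new StringReader("One Pass Two Consumers"));
    TeeSinkTokenFilter tee = new TeeSinkTokenFilter(tokenizer);
    TokenStream sink = new LowerCaseFilter(tee.newSinkTokenStream());
    consume(tee);  // first pass tokenizes the input and caches each token state
    consume(sink); // second consumer replays the cached tokens, lowercased
  }

  static void consume(TokenStream ts) throws Exception {
    CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      System.out.println(term.toString());
    }
    ts.end();
    ts.close();
  }
}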
Use of org.apache.lucene.analysis.LowerCaseFilter in project lucene-solr by apache.
The class FrenchAnalyzer, method createComponents.
/**
 * Creates
 * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 * used to tokenize all the text in the provided {@link Reader}.
 *
 * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 *         built from a {@link StandardTokenizer} filtered with
 *         {@link StandardFilter}, {@link ElisionFilter},
 *         {@link LowerCaseFilter}, {@link StopFilter},
 *         {@link SetKeywordMarkerFilter} if a stem exclusion set is
 *         provided, and {@link FrenchLightStemFilter}
 */
@Override
protected TokenStreamComponents createComponents(String fieldName) {
  final Tokenizer source = new StandardTokenizer();
  TokenStream result = new StandardFilter(source);
  result = new ElisionFilter(result, DEFAULT_ARTICLES);
  result = new LowerCaseFilter(result);
  result = new StopFilter(result, stopwords);
  if (!excltable.isEmpty())
    result = new SetKeywordMarkerFilter(result, excltable);
  result = new FrenchLightStemFilter(result);
  return new TokenStreamComponents(source, result);
}
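A minimal sketch of running text through the chain assembled above; the field name and sample sentence are illustrative, not from the project:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.fr.FrenchAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class FrenchAnalyzerSketch {
  public static void main(String[] args) throws Exception {
    try (Analyzer analyzer = new FrenchAnalyzer();
         TokenStream ts = analyzer.tokenStream("body", "L'élève étudie les mathématiques")) {
      CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
      ts.reset();
      while (ts.incrementToken()) {
        // Each emitted term has been elided, lowercased, stop-filtered and
        // light-stemmed by the components built in createComponents.
        System.out.println(term.toString());
      }
      ts.end();
    }
  }
}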
Use of org.apache.lucene.analysis.LowerCaseFilter in project lucene-solr by apache.
The class FrenchAnalyzer, method normalize.
@Override
protected TokenStream normalize(String fieldName, TokenStream in) {
  TokenStream result = new StandardFilter(in);
  result = new ElisionFilter(result, DEFAULT_ARTICLES);
  result = new LowerCaseFilter(result);
  return result;
}
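As with the Sorani example above, this chain backs Analyzer#normalize(String, String); note that it applies elision and lowercasing but no stop-word removal or stemming. A short sketch; the sample term and the expected normalized form are assumptions, not taken from the project:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.fr.FrenchAnalyzer;
import org.apache.lucene.util.BytesRef;

public class FrenchNormalizeSketch {
  public static void main(String[] args) {
    try (Analyzer analyzer = new FrenchAnalyzer()) {
      // Assumed behavior: ElisionFilter strips the elidable article "L'" and
      // LowerCaseFilter lowercases the rest, so this should print "été".
      BytesRef normalized = analyzer.normalize("body", "L'Été");
      System.out.println(normalized.utf8ToString());
    }
  }
}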