Use of org.apache.lucene.analysis.standard.StandardTokenizer in project lucene-arabic-analyzer by msarhan.
The class ArabicRootExtractorAnalyzer, method createComponents.
/**
 * Creates the {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} used to tokenize
 * all the text in the provided {@link Reader}.
 *
 * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} built from a
 * {@link StandardTokenizer} filtered with {@link LowerCaseFilter}, {@link DecimalDigitFilter},
 * {@link StopFilter}, {@link ArabicNormalizationFilter}, {@link SetKeywordMarkerFilter} (only if a
 * stem exclusion set is provided) and {@link ArabicRootExtractorStemFilter}
 */
@Override
protected TokenStreamComponents createComponents(String fieldName) {
    final Tokenizer source = new StandardTokenizer();
    TokenStream result = new org.apache.lucene.analysis.LowerCaseFilter(source);
    result = new DecimalDigitFilter(result);
    // The order here is important: the stopword list is not normalized!
    result = new org.apache.lucene.analysis.StopFilter(result, stopwords);
    // TODO maybe we should make ArabicNormalizationFilter also KeywordAttribute aware?!
    result = new ArabicNormalizationFilter(result);
    if (!stemExclusionSet.isEmpty()) {
        // Mark excluded terms so the stemmer leaves them untouched.
        result = new SetKeywordMarkerFilter(result, stemExclusionSet);
    }
    return new TokenStreamComponents(source, new ArabicRootExtractorStemFilter(result));
}
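
To see the chain end to end, the analyzer can be exercised through Analyzer.tokenStream, the standard Lucene consumption pattern. A minimal sketch; the field name "body" is arbitrary, and the no-arg ArabicRootExtractorAnalyzer constructor in the usage comment is an assumption:

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import java.io.IOException;

static void printRoots(Analyzer analyzer, String text) throws IOException {
    // e.g. printRoots(new ArabicRootExtractorAnalyzer(), arabicText); constructor assumed
    try (TokenStream stream = analyzer.tokenStream("body", text)) {
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            System.out.println(term); // one extracted root per input word
        }
        stream.end();
    }
}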
Use of org.apache.lucene.analysis.standard.StandardTokenizer in project epadd by ePADD.
The class EnglishNumberAnalyzer, method createComponents.
/**
 * Creates the {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} used to tokenize
 * all the text in the reader that is set after this analyzer is instantiated.
 *
 * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents} built from a
 * {@link StandardNumberTokenizer} filtered with {@link StandardFilter}, {@link EnglishPossessiveFilter},
 * {@link LowerCaseFilter}, {@link StopFilter}, {@link SetKeywordMarkerFilter} (if a stem exclusion
 * set is provided) and {@link PorterStemFilter}
 */
@Override
protected TokenStreamComponents createComponents(String fieldName) {
    final Tokenizer source = new StandardNumberTokenizer();
    TokenStream result = new StandardFilter(source);
    // Strip trailing possessive 's from tokens.
    result = new EnglishPossessiveFilter(result);
    // Lowercase all tokens.
    result = new LowerCaseFilter(result);
    // Drop stop words.
    result = new StopFilter(result, this.stopwords);
    if (!this.stemExclusionSet.isEmpty()) {
        // Mark excluded terms so the stemmer leaves them untouched.
        result = new SetKeywordMarkerFilter(result, this.stemExclusionSet);
    }
    result = new PorterStemFilter(result);
    return new TokenStreamComponents(source, result);
}
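
The SetKeywordMarkerFilter/PorterStemFilter interaction is the subtle part: marked terms carry KeywordAttribute, so the stemmer skips them. A standalone sketch using stock Lucene classes (not ePADD's StandardNumberTokenizer; the sample words are only illustrative):

import org.apache.lucene.analysis.*;
import org.apache.lucene.analysis.en.PorterStemFilter;
import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import java.io.StringReader;
import java.util.Arrays;

public class KeywordMarkerDemo {
    public static void main(String[] args) throws Exception {
        Tokenizer source = new StandardTokenizer();
        source.setReader(new StringReader("running openings"));
        CharArraySet keep = new CharArraySet(Arrays.asList("openings"), true);
        TokenStream ts = new PorterStemFilter(new SetKeywordMarkerFilter(new LowerCaseFilter(source), keep));
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            System.out.println(term); // prints "run", then "openings" (kept unstemmed)
        }
        ts.end();
        ts.close();
    }
}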
Use of org.apache.lucene.analysis.standard.StandardTokenizer in project vertigo by KleeGroup.
The class DefaultAnalyzer, method createComponents.
/**
 * Creates a TokenStream which tokenizes all the text in the provided Reader.
 *
 * @return A TokenStream built from a StandardTokenizer filtered with
 * ElisionFilter, StopFilter, ASCIIFoldingFilter and LowerCaseFilter
 */
@Override
protected TokenStreamComponents createComponents(final String fieldName) {
    /* initialize the tokenizer */
    final Tokenizer source = new StandardTokenizer();
    // -----
    /* strip elisions (l', d', ...) */
    final CharArraySet elisionSet = new CharArraySet(Arrays.asList(LuceneConstants.ELISION_ARTICLES), true);
    TokenStream filter = new ElisionFilter(source, elisionSet);
    /* remove stop words (articles, adjectives) */
    filter = new StopFilter(filter, stopWords);
    /* strip accents */
    filter = new ASCIIFoldingFilter(filter);
    /* lowercase */
    filter = new LowerCaseFilter(filter);
    return new TokenStreamComponents(source, filter);
}
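
The ElisionFilter step is what turns "l'avion" into "avion" before stop words are removed. A small sketch; the two-entry article set stands in for LuceneConstants.ELISION_ARTICLES, which is not shown here:

import org.apache.lucene.analysis.*;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.util.ElisionFilter;
import java.io.StringReader;
import java.util.Arrays;

public class ElisionDemo {
    public static void main(String[] args) throws Exception {
        Tokenizer source = new StandardTokenizer();
        source.setReader(new StringReader("L'avion d'essai"));
        CharArraySet articles = new CharArraySet(Arrays.asList("l", "d"), true);
        TokenStream ts = new LowerCaseFilter(new ElisionFilter(source, articles));
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            System.out.println(term); // prints "avion", then "essai"
        }
        ts.end();
        ts.close();
    }
}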
Use of org.apache.lucene.analysis.standard.StandardTokenizer in project crate by crate.
The class StandardHtmlStripAnalyzer, method createComponents.
@Override
protected TokenStreamComponents createComponents(final String fieldName) {
    final Tokenizer src = new StandardTokenizer();
    TokenStream tok = new LowerCaseFilter(src);
    // Only add a StopFilter when there are stop words to apply.
    if (!stopwords.isEmpty()) {
        tok = new StopFilter(tok, stopwords);
    }
    return new TokenStreamComponents(src, tok);
}
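
The stopwords field is a CharArraySet; one common way to build it, sketched with placeholder words (StopFilter.makeStopSet is stock Lucene):

import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.StopFilter;

// Placeholder stop words; an empty set makes the analyzer skip the StopFilter branch above.
CharArraySet stopwords = StopFilter.makeStopSet("the", "a", "of");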
Use of org.apache.lucene.analysis.standard.StandardTokenizer in project nutch by apache.
The class LuceneTokenizer, method createNGramTokenStream.
private TokenStream createNGramTokenStream(String content, int mingram, int maxgram) {
    Tokenizer tokenizer = new StandardTokenizer();
    tokenizer.setReader(new StringReader(content));
    tokenStream = new LowerCaseFilter(tokenizer);
    // Apply the configured stemmer to the lowercased stream.
    tokenStream = applyStemmer(stemFilterType);
    // Emit word n-grams (shingles) of mingram..maxgram tokens.
    ShingleFilter shingleFilter = new ShingleFilter(tokenStream, mingram, maxgram);
    shingleFilter.setOutputUnigrams(false);
    tokenStream = shingleFilter;
    return tokenStream;
}
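
What the shingle step produces is easiest to see on a concrete input. A standalone sketch of the same ShingleFilter configuration, without Nutch's stemmer step; the sample sentence is only illustrative:

import org.apache.lucene.analysis.*;
import org.apache.lucene.analysis.shingle.ShingleFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import java.io.StringReader;

public class ShingleDemo {
    public static void main(String[] args) throws Exception {
        Tokenizer tokenizer = new StandardTokenizer();
        tokenizer.setReader(new StringReader("quick brown fox jumps"));
        ShingleFilter shingles = new ShingleFilter(new LowerCaseFilter(tokenizer), 2, 3);
        shingles.setOutputUnigrams(false); // drop single-word tokens, keep only 2- and 3-grams
        CharTermAttribute term = shingles.addAttribute(CharTermAttribute.class);
        shingles.reset();
        while (shingles.incrementToken()) {
            System.out.println(term); // "quick brown", "quick brown fox", "brown fox", ...
        }
        shingles.end();
        shingles.close();
    }
}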