Usage of org.apache.lucene.analysis.standard.StandardTokenizer in the project elasticsearch by elastic:
class CJKFilterFactoryTests, method testDefault.
/**
 * Verifies that the "cjk_bigram" token filter loaded from the test
 * configuration emits overlapping CJK bigrams for a Japanese sentence.
 */
public void testDefault() throws IOException {
    ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
    TokenFilterFactory tokenFilter = analysis.tokenFilter.get("cjk_bigram");
    String text = "多くの学生が試験に落ちた。";
    // Each adjacent pair of CJK characters becomes one bigram token.
    String[] expectedBigrams = new String[] { "多く", "くの", "の学", "学生", "生が", "が試", "試験", "験に", "に落", "落ち", "ちた" };
    Tokenizer standardTokenizer = new StandardTokenizer();
    standardTokenizer.setReader(new StringReader(text));
    assertTokenStreamContents(tokenFilter.create(standardTokenizer), expectedBigrams);
}
Usage of org.apache.lucene.analysis.standard.StandardTokenizer in the project elasticsearch by elastic:
class MockRepeatAnalyzer, method createComponents.
/**
 * Builds this analyzer's chain: a StandardTokenizer whose output is wrapped
 * in a MockRepeatFilter.
 */
@Override
protected TokenStreamComponents createComponents(String fieldName) {
    Tokenizer source = new StandardTokenizer();
    return new TokenStreamComponents(source, new MockRepeatFilter(source));
}
Usage of org.apache.lucene.analysis.standard.StandardTokenizer in the project libresonic by Libresonic:
class SearchService, method analyzeQuery.
/**
 * Tokenizes and ASCII-folds the user query, turning every term into a
 * prefix-query fragment ("term* ").
 *
 * @param query the raw search query text
 * @return the folded terms, each followed by "* " (the trailing space after
 *         the last term is kept for backward compatibility with callers)
 * @throws IOException if tokenization fails
 */
private String analyzeQuery(String query) throws IOException {
    StringBuilder result = new StringBuilder();
    ASCIIFoldingFilter filter = new ASCIIFoldingFilter(new StandardTokenizer(LUCENE_VERSION, new StringReader(query)));
    // FIX: the token stream was previously never closed, leaking the stream
    // and its underlying reader on every query. Close it in a finally block.
    try {
        TermAttribute termAttribute = filter.getAttribute(TermAttribute.class);
        while (filter.incrementToken()) {
            result.append(termAttribute.term()).append("* ");
        }
    } finally {
        filter.close();
    }
    return result.toString();
}
Usage of org.apache.lucene.analysis.standard.StandardTokenizer in the project lucene-solr by apache:
class UkrainianMorfologikAnalyzer, method createComponents.
/**
 * Creates the
 * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 * used to tokenize all the text in the provided {@link Reader}.
 *
 * @return {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
 *         built from a {@link StandardTokenizer} filtered with
 *         {@link StandardFilter}, {@link LowerCaseFilter}, {@link StopFilter},
 *         a {@link SetKeywordMarkerFilter} when a stem exclusion set is
 *         provided, and finally a {@link MorfologikFilter} backed by the
 *         Ukrainian dictionary.
 */
@Override
protected TokenStreamComponents createComponents(String fieldName) {
    Tokenizer tokenizer = new StandardTokenizer();
    TokenStream chain = new StandardFilter(tokenizer);
    chain = new LowerCaseFilter(chain);
    chain = new StopFilter(chain, stopwords);
    if (!stemExclusionSet.isEmpty()) {
        // Protect stem-exclusion terms from being altered by the stemmer.
        chain = new SetKeywordMarkerFilter(chain, stemExclusionSet);
    }
    chain = new MorfologikFilter(chain, getDictionary());
    return new TokenStreamComponents(tokenizer, chain);
}
Usage of org.apache.lucene.analysis.standard.StandardTokenizer in the project lucene-solr by apache:
class TestTypeTokenFilter, method testTypeFilterWhitelist.
/**
 * Checks that TypeTokenFilter in whitelist mode keeps only tokens whose
 * type is in the given set — here the numeric tokens "121" and "123".
 */
public void testTypeFilterWhitelist() throws IOException {
    Set<String> keepTypes = Collections.singleton("<NUM>");
    StandardTokenizer tokenizer = new StandardTokenizer(newAttributeFactory());
    tokenizer.setReader(new StringReader("121 is palindrome, while 123 is not"));
    TokenStream whitelisted = new TypeTokenFilter(tokenizer, keepTypes, true);
    assertTokenStreamContents(whitelisted, new String[] { "121", "123" });
}
Aggregations