Use of org.apache.lucene.analysis.core.KeywordTokenizer in the Apache lucene-solr project: class TestCodepointCountFilter, method testRandomStrings.
public void testRandomStrings() throws IOException {
  for (int i = 0; i < 10000; i++) {
    String text = TestUtil.randomUnicodeString(random(), 100);
    int min = TestUtil.nextInt(random(), 0, 100);
    int max = TestUtil.nextInt(random(), 0, 100);
    int count = text.codePointCount(0, text.length());
    if (min > max) {
      int temp = min;
      min = max;
      max = temp;
    }
    boolean expected = count >= min && count <= max;
    TokenStream stream = new KeywordTokenizer();
    ((Tokenizer) stream).setReader(new StringReader(text));
    stream = new CodepointCountFilter(stream, min, max);
    stream.reset();
    assertEquals(expected, stream.incrementToken());
    stream.end();
    stream.close();
  }
}
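The test above relies on KeywordTokenizer emitting the entire input as a single token, so CodepointCountFilter either passes that one token through or drops it, depending on whether its codepoint count falls within [min, max]. A minimal standalone sketch of the same pattern, with a fixed input and bounds chosen purely for illustration (not taken from the test), might look like this:

import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.miscellaneous.CodepointCountFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class CodepointCountExample {
  public static void main(String[] args) throws IOException {
    String text = "hello";                        // 5 codepoints (illustrative input)
    Tokenizer tokenizer = new KeywordTokenizer(); // emits the whole input as one token
    tokenizer.setReader(new StringReader(text));
    // keep only tokens whose codepoint count is between 1 and 10 (illustrative bounds)
    TokenStream stream = new CodepointCountFilter(tokenizer, 1, 10);
    CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
    stream.reset();
    while (stream.incrementToken()) {
      System.out.println("kept token: " + term); // prints "hello", since 1 <= 5 <= 10
    }
    stream.end();
    stream.close();
  }
}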
Use of org.apache.lucene.analysis.core.KeywordTokenizer in the Apache lucene-solr project: class TestLengthFilter, method testEmptyTerm.
public void testEmptyTerm() throws IOException {
  Analyzer a = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
      Tokenizer tokenizer = new KeywordTokenizer();
      return new TokenStreamComponents(tokenizer, new LengthFilter(tokenizer, 0, 5));
    }
  };
  checkOneTerm(a, "", "");
  a.close();
}
Use of org.apache.lucene.analysis.core.KeywordTokenizer in the Apache lucene-solr project: class TestSerbianNormalizationRegularFilter, method testEmptyTerm.
public void testEmptyTerm() throws IOException {
  Analyzer a = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
      Tokenizer tokenizer = new KeywordTokenizer();
      return new TokenStreamComponents(tokenizer, new SerbianNormalizationRegularFilter(tokenizer));
    }
  };
  checkOneTerm(a, "", "");
  a.close();
}
Use of org.apache.lucene.analysis.core.KeywordTokenizer in the Apache lucene-solr project: class TestCompoundWordTokenFilter, method testEmptyTerm.
public void testEmptyTerm() throws Exception {
  final CharArraySet dict = makeDictionary("a", "e", "i", "o", "u", "y", "bc", "def");
  Analyzer a = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
      Tokenizer tokenizer = new KeywordTokenizer();
      return new TokenStreamComponents(tokenizer, new DictionaryCompoundWordTokenFilter(tokenizer, dict));
    }
  };
  checkOneTerm(a, "", "");
  a.close();
  InputSource is = new InputSource(getClass().getResource("da_UTF8.xml").toExternalForm());
  final HyphenationTree hyphenator = HyphenationCompoundWordTokenFilter.getHyphenationTree(is);
  Analyzer b = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
      Tokenizer tokenizer = new KeywordTokenizer();
      TokenFilter filter = new HyphenationCompoundWordTokenFilter(tokenizer, hyphenator);
      return new TokenStreamComponents(tokenizer, filter);
    }
  };
  checkOneTerm(b, "", "");
  b.close();
}
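The empty-term test only verifies that the compound filters tolerate a zero-length token from KeywordTokenizer. To illustrate what DictionaryCompoundWordTokenFilter does with a non-empty compound, here is a hedged sketch using a made-up dictionary and input (the words "soft", "ball", and "softball" are illustrative assumptions; by default the filter keeps the original token and adds any dictionary subwords it finds, subject to its default minimum word and subword sizes):

import java.io.IOException;
import java.io.StringReader;
import java.util.Arrays;
// CharArraySet lives in org.apache.lucene.analysis in recent Lucene versions
// (org.apache.lucene.analysis.util in older ones)
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.compound.DictionaryCompoundWordTokenFilter;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class CompoundExample {
  public static void main(String[] args) throws IOException {
    CharArraySet dict = new CharArraySet(Arrays.asList("soft", "ball"), true); // ignoreCase = true
    Tokenizer tokenizer = new KeywordTokenizer();
    tokenizer.setReader(new StringReader("softball"));
    TokenStream stream = new DictionaryCompoundWordTokenFilter(tokenizer, dict);
    CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
    stream.reset();
    while (stream.incrementToken()) {
      // expected: "softball" first, then the subwords "soft" and "ball" at the same position
      System.out.println(term.toString());
    }
    stream.end();
    stream.close();
  }
}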
Use of org.apache.lucene.analysis.core.KeywordTokenizer in the Apache lucene-solr project: class TestCJKWidthFilter, method testEmptyTerm.
public void testEmptyTerm() throws IOException {
  Analyzer a = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
      Tokenizer tokenizer = new KeywordTokenizer();
      return new TokenStreamComponents(tokenizer, new CJKWidthFilter(tokenizer));
    }
  };
  checkOneTerm(a, "", "");
  a.close();
}
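For reference, CJKWidthFilter normalizes character widths: fullwidth ASCII variants are folded to their halfwidth forms, and halfwidth katakana variants to fullwidth. A minimal sketch of the same tokenizer-plus-filter pattern, with an illustrative fullwidth input (not taken from the test):

import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.cjk.CJKWidthFilter;
import org.apache.lucene.analysis.core.KeywordTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class CJKWidthExample {
  public static void main(String[] args) throws IOException {
    Tokenizer tokenizer = new KeywordTokenizer();
    tokenizer.setReader(new StringReader("Ｌｕｃｅｎｅ")); // fullwidth ASCII letters
    TokenStream stream = new CJKWidthFilter(tokenizer);
    CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
    stream.reset();
    while (stream.incrementToken()) {
      System.out.println(term.toString()); // expected: "Lucene" in halfwidth characters
    }
    stream.end();
    stream.close();
  }
}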