Example usage of org.apache.lucene.analysis.CharArraySet from the Apache lucene-solr project: method testKeyword of class TestSwedishLightStemFilter.
/**
 * Verifies that a term listed in the keyword exclusion set passes through the
 * Swedish light stemmer unchanged.
 */
public void testKeyword() throws IOException {
  final CharArraySet exclusionSet = new CharArraySet(asSet("jaktkarlens"), false);
  Analyzer analyzer = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
      Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
      // Mark excluded terms as keywords so the stemmer leaves them untouched.
      TokenStream marked = new SetKeywordMarkerFilter(tokenizer, exclusionSet);
      return new TokenStreamComponents(tokenizer, new SwedishLightStemFilter(marked));
    }
  };
  // The excluded term must survive analysis verbatim.
  checkOneTerm(analyzer, "jaktkarlens", "jaktkarlens");
  analyzer.close();
}
Example usage of org.apache.lucene.analysis.CharArraySet from the Apache lucene-solr project: method testAltFillerToken of class ShingleAnalyzerWrapperTest.
/**
 * Tests ShingleAnalyzerWrapper's filler-token handling over a delegate that
 * removes the stopword "into": a custom filler ("--") substituted at removed
 * positions, a null filler, and an empty-string filler.
 */
public void testAltFillerToken() throws Exception {
  // Custom filler "--" appears in shingles where the stopword was removed
  // (unigrams are also emitted for this first wrapper).
  ShingleAnalyzerWrapper analyzer = new ShingleAnalyzerWrapper(
      newStopWordDelegate(),
      ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE,
      ShingleFilter.DEFAULT_TOKEN_SEPARATOR, true, false, "--");
  assertAnalyzesTo(analyzer, "please divide into shingles", new String[] { "please", "please divide", "divide", "divide --", "-- shingles", "shingles" }, new int[] { 0, 0, 7, 7, 19, 19 }, new int[] { 6, 13, 13, 19, 27, 27 }, new int[] { 1, 0, 1, 0, 1, 1 });
  analyzer.close();

  // Null filler: the removed position contributes nothing but the separator.
  analyzer = new ShingleAnalyzerWrapper(
      newStopWordDelegate(),
      ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE,
      ShingleFilter.DEFAULT_TOKEN_SEPARATOR, false, false, null);
  assertAnalyzesTo(analyzer, "please divide into shingles", new String[] { "please divide", "divide ", " shingles" }, new int[] { 0, 7, 19 }, new int[] { 13, 19, 27 }, new int[] { 1, 1, 1 });
  analyzer.close();

  // Empty-string filler: behaves the same as a null filler.
  analyzer = new ShingleAnalyzerWrapper(
      newStopWordDelegate(),
      ShingleFilter.DEFAULT_MIN_SHINGLE_SIZE, ShingleFilter.DEFAULT_MAX_SHINGLE_SIZE,
      ShingleFilter.DEFAULT_TOKEN_SEPARATOR, false, false, "");
  assertAnalyzesTo(analyzer, "please divide into shingles", new String[] { "please divide", "divide ", " shingles" }, new int[] { 0, 7, 19 }, new int[] { 13, 19, 27 }, new int[] { 1, 1, 1 });
  analyzer.close();
}

/**
 * Builds a whitespace-tokenizing delegate analyzer that removes the stopword
 * "into". Extracted to avoid repeating the identical anonymous class three
 * times (a fresh instance is needed per wrapper since each is closed).
 */
private Analyzer newStopWordDelegate() {
  return new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
      CharArraySet stopSet = StopFilter.makeStopSet("into");
      Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
      TokenFilter filter = new StopFilter(tokenizer, stopSet);
      return new TokenStreamComponents(tokenizer, filter);
    }
  };
}
Example usage of org.apache.lucene.analysis.CharArraySet from the Apache lucene-solr project: method testWithStemExclusionSet of class TestRussianAnalyzer.
/**
 * Verifies that a term placed in the stem-exclusion set bypasses Russian
 * stemming while all other terms are stemmed normally.
 */
public void testWithStemExclusionSet() throws Exception {
  CharArraySet exclusions = new CharArraySet(1, true);
  exclusions.add("представление");
  Analyzer analyzer = new RussianAnalyzer(RussianAnalyzer.getDefaultStopSet(), exclusions);
  // Every token is stemmed except the excluded "представление".
  assertAnalyzesTo(analyzer, "Вместе с тем о силе электромагнитной энергии имели представление еще",
      new String[] { "вмест", "сил", "электромагнитн", "энерг", "имел", "представление" });
  analyzer.close();
}
Example usage of org.apache.lucene.analysis.CharArraySet from the Apache lucene-solr project: method testKeywordAttrTokens of class TestMorfologikAnalyzer.
/**
 * Verifies that a term marked as a keyword via SetKeywordMarkerFilter
 * ("liście") is not expanded by the MorfologikFilter, while other terms
 * still receive their full set of lemmas.
 */
public final void testKeywordAttrTokens() throws IOException {
Analyzer a = new MorfologikAnalyzer() {
@Override
protected TokenStreamComponents createComponents(String field) {
// Keyword set is case-sensitive (ignoreCase = false).
final CharArraySet keywords = new CharArraySet(1, false);
keywords.add("liście");
final Tokenizer src = new StandardTokenizer();
TokenStream result = new StandardFilter(src);
// Keyword marking must precede MorfologikFilter so it can skip marked terms.
result = new SetKeywordMarkerFilter(result, keywords);
result = new MorfologikFilter(result);
return new TokenStreamComponents(src, result);
}
};
// "liście" stays as-is; "danych" expands to four stacked lemmas
// (position increments 1,1,0,0,0 — the last three share one position).
assertAnalyzesTo(a, "liście danych", new String[] { "liście", "dany", "dana", "dane", "dać" }, new int[] { 0, 7, 7, 7, 7 }, new int[] { 6, 13, 13, 13, 13 }, new int[] { 1, 1, 0, 0, 0 });
a.close();
}
Example usage of org.apache.lucene.analysis.CharArraySet from the Apache lucene-solr project: method makeStopSet of class StopFilter.
/**
 * Builds a {@link CharArraySet} of stopwords from the supplied list.
 *
 * @param stopWords a List of Strings or char[] or any other toString()-able list representing the stopwords
 * @param ignoreCase if true, all words are lower cased first
 * @return a Set ({@link CharArraySet}) containing the words
 */
public static CharArraySet makeStopSet(List<?> stopWords, boolean ignoreCase) {
  // Pre-size to the list length; addAll handles the element conversion.
  final CharArraySet result = new CharArraySet(stopWords.size(), ignoreCase);
  result.addAll(stopWords);
  return result;
}
Aggregations