Use of org.apache.lucene.analysis.CharArraySet in project lucene-solr by apache.
The class StopFilter, method makeStopSet.
/**
* Creates a stopword set from the given stopword array.
*
* @param stopWords An array of stopwords
* @param ignoreCase If true, all words are lower cased first.
* @return a Set containing the words
*/
public static CharArraySet makeStopSet(String[] stopWords, boolean ignoreCase) {
  CharArraySet stopSet = new CharArraySet(stopWords.length, ignoreCase);
  stopSet.addAll(Arrays.asList(stopWords));
  return stopSet;
}
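A minimal usage sketch (not from the project; the StopFilter import location and the sketch class name are assumptions for illustration): build a case-insensitive stop set and query membership.

import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.StopFilter; // assumed location; older releases use org.apache.lucene.analysis.core.StopFilter

public class MakeStopSetSketch { // illustrative class name
  public static void main(String[] args) {
    // ignoreCase = true lower-cases the entries, so lookups match regardless of case
    CharArraySet stopSet = StopFilter.makeStopSet(new String[] { "The", "AND", "of" }, true);
    System.out.println(stopSet.contains("the")); // true
    System.out.println(stopSet.contains("OF"));  // true
    System.out.println(stopSet.contains("cat")); // false
  }
}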
Use of org.apache.lucene.analysis.CharArraySet in project lucene-solr by apache.
The class TestSuggestStopFilter, method testEndIsStopWord.
public void testEndIsStopWord() throws Exception {
  CharArraySet stopWords = StopFilter.makeStopSet("to");
  Tokenizer stream = new MockTokenizer();
  stream.setReader(new StringReader("go to "));
  TokenStream filter = new SuggestStopFilter(stream, stopWords);
  assertTokenStreamContents(filter, new String[] { "go" }, new int[] { 0 }, new int[] { 2 }, null, new int[] { 1 }, null, 6, new boolean[] { false }, true);
}
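A minimal stand-alone sketch (not part of the test suite; the package locations, tokenizer choice, and class name are assumptions) showing the contrast with the case above: when the input ends without a separator, SuggestStopFilter keeps the final stop word and flags it as a keyword so a suggester can still complete it.

import java.io.StringReader;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.KeywordAttribute;
import org.apache.lucene.search.suggest.analyzing.SuggestStopFilter;

public class SuggestStopFilterSketch { // illustrative class name
  public static void main(String[] args) throws Exception {
    CharArraySet stopWords = StopFilter.makeStopSet("to");
    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
    tokenizer.setReader(new StringReader("go to")); // no trailing separator
    try (TokenStream filter = new SuggestStopFilter(tokenizer, stopWords)) {
      CharTermAttribute term = filter.addAttribute(CharTermAttribute.class);
      KeywordAttribute keyword = filter.addAttribute(KeywordAttribute.class);
      filter.reset();
      while (filter.incrementToken()) {
        // the trailing "to" is kept and flagged as a keyword because the user may
        // still be typing it; with a trailing space (as in the test above) it is dropped
        System.out.println(term + " keyword=" + keyword.isKeyword());
      }
      filter.end();
    }
  }
}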
Use of org.apache.lucene.analysis.CharArraySet in project lucene-solr by apache.
The class TestSuggestStopFilter, method testMultipleStopWordsEnd.
public void testMultipleStopWordsEnd() throws Exception {
  CharArraySet stopWords = StopFilter.makeStopSet("to", "the", "a");
  Tokenizer stream = new MockTokenizer();
  stream.setReader(new StringReader("go to a the"));
  TokenStream filter = new SuggestStopFilter(stream, stopWords);
  assertTokenStreamContents(filter, new String[] { "go", "the" }, new int[] { 0, 8 }, new int[] { 2, 11 }, null, new int[] { 1, 3 }, null, 11, new boolean[] { false, true }, true);
}
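Note the keyword flags in the assertion: the interior stop words "to" and "a" are dropped, but the trailing "the" survives with its keyword attribute set (the boolean[] { false, true } argument), because the query ends without a trailing separator and SuggestStopFilter preserves a final stop word that may still be in the middle of being typed. Compare with testEndIsStopWord above, where the trailing space after "to" lets the filter drop it.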
Use of org.apache.lucene.analysis.CharArraySet in project lucene-solr by apache.
The class TestStopAnalyzer, method testStopListPositions.
public void testStopListPositions() throws IOException {
  CharArraySet stopWordsSet = new CharArraySet(asSet("good", "test", "analyzer"), false);
  StopAnalyzer newStop = new StopAnalyzer(stopWordsSet);
  String s = "This is a good test of the english stop analyzer with positions";
  int[] expectedIncr = { 1, 1, 1, 3, 1, 1, 1, 2, 1 };
  try (TokenStream stream = newStop.tokenStream("test", s)) {
    assertNotNull(stream);
    int i = 0;
    CharTermAttribute termAtt = stream.getAttribute(CharTermAttribute.class);
    PositionIncrementAttribute posIncrAtt = stream.addAttribute(PositionIncrementAttribute.class);
    stream.reset();
    while (stream.incrementToken()) {
      String text = termAtt.toString();
      assertFalse(stopWordsSet.contains(text));
      assertEquals(expectedIncr[i++], posIncrAtt.getPositionIncrement());
    }
    stream.end();
  }
  newStop.close();
}
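A minimal stand-alone sketch (not from the test suite; package locations, the stop-word list, and the class name are assumptions) showing how the increments asserted above translate into absolute positions: each removed stop word bumps the increment of the next surviving token.

import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.StopFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PositionIncrementAttribute;

public class StopPositionsSketch { // illustrative class name
  public static void main(String[] args) throws IOException {
    CharArraySet stopWords = StopFilter.makeStopSet(new String[] { "is", "a", "of", "the", "with" }, true);
    WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
    tokenizer.setReader(new StringReader("this is a good test of the english stop analyzer with positions"));
    try (TokenStream stream = new StopFilter(tokenizer, stopWords)) {
      CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
      PositionIncrementAttribute posIncr = stream.addAttribute(PositionIncrementAttribute.class);
      stream.reset();
      int position = -1;
      while (stream.incrementToken()) {
        // accumulating the increments recovers each surviving token's original position
        position += posIncr.getPositionIncrement();
        System.out.println(term + " @ " + position);
      }
      stream.end();
    }
  }
}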
Use of org.apache.lucene.analysis.CharArraySet in project lucene-solr by apache.
The class TestLithuanianAnalyzer, method testStemExclusion.
/** Test stemmer exceptions */
public void testStemExclusion() throws IOException {
  CharArraySet set = new CharArraySet(1, true);
  set.add("vaikų");
  Analyzer a = new LithuanianAnalyzer(CharArraySet.EMPTY_SET, set);
  assertAnalyzesTo(a, "vaikų", new String[] { "vaikų" });
}
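A minimal stand-alone sketch (not part of the project's tests; the class name is an assumption) showing the same stem-exclusion idea outside the test framework: words in the exclusion set bypass the Lithuanian stemmer and come through unchanged, as the test above asserts.

import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.lt.LithuanianAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class StemExclusionSketch { // illustrative class name
  public static void main(String[] args) throws IOException {
    CharArraySet exclusions = new CharArraySet(1, true);
    exclusions.add("vaikų");
    try (Analyzer analyzer = new LithuanianAnalyzer(CharArraySet.EMPTY_SET, exclusions);
         TokenStream stream = analyzer.tokenStream("f", "vaikų")) {
      CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
      stream.reset();
      while (stream.incrementToken()) {
        System.out.println(term); // prints "vaikų" unstemmed, per the test above
      }
      stream.end();
    }
  }
}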