Use of org.apache.lucene.analysis.wikipedia.WikipediaTokenizer in project lucene-solr by apache.
From class WikipediaTokenizerTest, method testSimple.
public void testSimple() throws Exception {
  String text = "This is a [[Category:foo]]";
  WikipediaTokenizer tf = new WikipediaTokenizer(newAttributeFactory(), WikipediaTokenizer.TOKENS_ONLY, Collections.<String>emptySet());
  tf.setReader(new StringReader(text));
  assertTokenStreamContents(tf,
      new String[] { "This", "is", "a", "foo" }, // terms
      new int[] { 0, 5, 8, 21 },  // start offsets
      new int[] { 4, 7, 9, 24 },  // end offsets
      new String[] { "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", CATEGORY }, // token types
      new int[] { 1, 1, 1, 1 },   // position increments
      text.length());             // final offset
}
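For readers unfamiliar with BaseTokenStreamTestCase, the assertion above can be unpacked into a plain token-stream loop. A minimal, self-contained sketch (the WikipediaTokenizerDemo class name is made up for illustration; it assumes lucene-analyzers-common on the classpath):

import java.io.StringReader;
import java.util.Collections;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.analysis.wikipedia.WikipediaTokenizer;

public class WikipediaTokenizerDemo {
  public static void main(String[] args) throws Exception {
    WikipediaTokenizer tf = new WikipediaTokenizer(WikipediaTokenizer.TOKENS_ONLY, Collections.<String>emptySet());
    tf.setReader(new StringReader("This is a [[Category:foo]]"));
    // Attribute references are updated in place by each incrementToken() call.
    CharTermAttribute term = tf.addAttribute(CharTermAttribute.class);
    OffsetAttribute offset = tf.addAttribute(OffsetAttribute.class);
    TypeAttribute type = tf.addAttribute(TypeAttribute.class);
    tf.reset();
    while (tf.incrementToken()) {
      // prints e.g.: foo [21,24) category -- for the [[Category:foo]] markup
      System.out.println(term + " [" + offset.startOffset() + "," + offset.endOffset() + ") " + type.type());
    }
    tf.end();
    tf.close();
  }
}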
Use of org.apache.lucene.analysis.wikipedia.WikipediaTokenizer in project lucene-solr by apache.
From class WikipediaTokenizerTest, method testLinkPhrases.
public void testLinkPhrases() throws Exception {
  WikipediaTokenizer tf = new WikipediaTokenizer(newAttributeFactory(), WikipediaTokenizer.TOKENS_ONLY, Collections.<String>emptySet());
  tf.setReader(new StringReader(LINK_PHRASES));
  checkLinkPhrases(tf);
}
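The LINK_PHRASES constant and checkLinkPhrases helper are defined elsewhere in WikipediaTokenizerTest and are not part of this listing. Purely as an illustration of their shape (the input string and expected arrays below are hypothetical, not the actual fixture), such a helper pins down terms, offsets, and position increments for link markup:

// Hypothetical fixture; the real LINK_PHRASES constant differs.
static final String LINK_PHRASES = "click [[link here again]]";

// Sketch of a checkLinkPhrases-style helper: terms inside [[...]] link
// markup are emitted individually, with offsets into the original text.
private void checkLinkPhrases(WikipediaTokenizer tf) throws IOException {
  assertTokenStreamContents(tf,
      new String[] { "click", "link", "here", "again" }, // terms
      new int[] { 0, 8, 13, 18 },  // start offsets
      new int[] { 5, 12, 17, 23 }, // end offsets
      new int[] { 1, 1, 1, 1 });   // position increments
}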
Use of org.apache.lucene.analysis.wikipedia.WikipediaTokenizer in project lucene-solr by apache.
From class WikipediaTokenizerTest, method testBoth.
public void testBoth() throws Exception {
  Set<String> untoks = new HashSet<>();
  untoks.add(WikipediaTokenizer.CATEGORY);
  untoks.add(WikipediaTokenizer.ITALICS);
  // note the runs of spaces inside the last category: they account for the expected offsets below
  String test = "[[Category:a b c d]] [[Category:e f g]] [[link here]] [[link there]] ''italics here'' something ''more italics'' [[Category:h   i   j]]";
  // should output all the individual tokens plus the untokenized (whole-phrase) tokens as well
  WikipediaTokenizer tf = new WikipediaTokenizer(newAttributeFactory(), WikipediaTokenizer.BOTH, untoks);
  tf.setReader(new StringReader(test));
  assertTokenStreamContents(tf,
      new String[] { "a b c d", "a", "b", "c", "d", "e f g", "e", "f", "g", "link", "here", "link", "there", "italics here", "italics", "here", "something", "more italics", "more", "italics", "h   i   j", "h", "i", "j" }, // terms
      new int[] { 11, 11, 13, 15, 17, 32, 32, 34, 36, 42, 47, 56, 61, 71, 71, 79, 86, 98, 98, 103, 124, 124, 128, 132 }, // start offsets
      new int[] { 18, 12, 14, 16, 18, 37, 33, 35, 37, 46, 51, 60, 66, 83, 78, 83, 95, 110, 102, 110, 133, 125, 129, 133 }, // end offsets
      new int[] { 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1 }); // position increments (0 = same position as the preceding phrase token)
  // now check the flags; TODO: add a way to check flags from BaseTokenStreamTestCase?
  tf = new WikipediaTokenizer(newAttributeFactory(), WikipediaTokenizer.BOTH, untoks);
  tf.setReader(new StringReader(test));
  int[] expectedFlags = new int[] { UNTOKENIZED_TOKEN_FLAG, 0, 0, 0, 0, UNTOKENIZED_TOKEN_FLAG, 0, 0, 0, 0, 0, 0, 0, UNTOKENIZED_TOKEN_FLAG, 0, 0, 0, UNTOKENIZED_TOKEN_FLAG, 0, 0, UNTOKENIZED_TOKEN_FLAG, 0, 0, 0 };
  FlagsAttribute flagsAtt = tf.addAttribute(FlagsAttribute.class);
  tf.reset();
  for (int i = 0; i < expectedFlags.length; i++) {
    assertTrue(tf.incrementToken());
    assertEquals("flags " + i, expectedFlags[i], flagsAtt.getFlags());
  }
  assertFalse(tf.incrementToken());
  tf.close();
}
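The flags loop relies on WikipediaTokenizer.UNTOKENIZED_TOKEN_FLAG, which the tokenizer sets on the whole-phrase tokens it emits in BOTH mode alongside the individual terms. A downstream filter can key off that flag; a minimal sketch (KeepOnlyPhrasesFilter is an illustrative name, not a Lucene class):

import java.io.IOException;
import org.apache.lucene.analysis.FilteringTokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.FlagsAttribute;
import org.apache.lucene.analysis.wikipedia.WikipediaTokenizer;

// Keeps only tokens carrying UNTOKENIZED_TOKEN_FLAG, i.e. the whole-phrase
// tokens; FilteringTokenFilter handles position-increment bookkeeping.
final class KeepOnlyPhrasesFilter extends FilteringTokenFilter {
  private final FlagsAttribute flagsAtt = addAttribute(FlagsAttribute.class);

  KeepOnlyPhrasesFilter(TokenStream in) {
    super(in);
  }

  @Override
  protected boolean accept() throws IOException {
    return (flagsAtt.getFlags() & WikipediaTokenizer.UNTOKENIZED_TOKEN_FLAG) != 0;
  }
}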
Use of org.apache.lucene.analysis.wikipedia.WikipediaTokenizer in project lucene-solr by apache.
From class WikipediaTokenizerTest, method testRandomHugeStrings.
/** blast some random large strings through the analyzer */
public void testRandomHugeStrings() throws Exception {
  Random random = random();
  Analyzer a = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
      Tokenizer tokenizer = new WikipediaTokenizer(newAttributeFactory(), WikipediaTokenizer.TOKENS_ONLY, Collections.<String>emptySet());
      return new TokenStreamComponents(tokenizer, tokenizer);
    }
  };
  // TODO: properly support positionLengthAttribute
  checkRandomData(random, a, 100 * RANDOM_MULTIPLIER, 8192, false, false);
  a.close();
}
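checkRandomData comes from BaseTokenStreamTestCase. In the overload used here, the arguments after the analyzer appear to be the iteration count, the maximum word length (8192, hence "huge"), a flag restricting the generator to simple strings, and a flag enabling graph-offset verification; the trailing false skips the graph checks that a properly supported positionLengthAttribute would require, per the TODO.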
Use of org.apache.lucene.analysis.wikipedia.WikipediaTokenizer in project lucene-solr by apache.
From class WikipediaTokenizerTest, method testRandomStrings.
/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
  Analyzer a = new Analyzer() {
    @Override
    protected TokenStreamComponents createComponents(String fieldName) {
      Tokenizer tokenizer = new WikipediaTokenizer(newAttributeFactory(), WikipediaTokenizer.TOKENS_ONLY, Collections.<String>emptySet());
      return new TokenStreamComponents(tokenizer, tokenizer);
    }
  };
  // TODO: properly support positionLengthAttribute
  checkRandomData(random(), a, 1000 * RANDOM_MULTIPLIER, 20, false, false);
  a.close();
}
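In both random tests, TokenStreamComponents uses the tokenizer as its own sink. In a production analyzer the second constructor argument is usually the end of a filter chain; a minimal sketch, assuming the stock LowerCaseFilter (in the org.apache.lucene.analysis.core package of the analyzers-common module in this era; it later moved to core):

import java.util.Collections;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.wikipedia.WikipediaTokenizer;

// Sketch: WikipediaTokenizer feeding a downstream filter rather than
// being returned as its own sink.
Analyzer wikiAnalyzer = new Analyzer() {
  @Override
  protected TokenStreamComponents createComponents(String fieldName) {
    Tokenizer tokenizer = new WikipediaTokenizer(WikipediaTokenizer.TOKENS_ONLY, Collections.<String>emptySet());
    TokenStream chain = new LowerCaseFilter(tokenizer);
    return new TokenStreamComponents(tokenizer, chain);
  }
};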