
Example 1 with WikipediaTokenizer

Use of org.apache.lucene.analysis.wikipedia.WikipediaTokenizer in project lucene-solr by apache.

From class WikipediaTokenizerTest, method testSimple.

public void testSimple() throws Exception {
    String text = "This is a [[Category:foo]]";
    WikipediaTokenizer tf = new WikipediaTokenizer(newAttributeFactory(), WikipediaTokenizer.TOKENS_ONLY, Collections.<String>emptySet());
    tf.setReader(new StringReader(text));
    assertTokenStreamContents(tf,
        new String[] { "This", "is", "a", "foo" },                            // terms
        new int[] { 0, 5, 8, 21 },                                            // start offsets
        new int[] { 4, 7, 9, 24 },                                            // end offsets
        new String[] { "<ALPHANUM>", "<ALPHANUM>", "<ALPHANUM>", CATEGORY },  // token types
        new int[] { 1, 1, 1, 1 },                                             // position increments
        text.length());                                                       // final offset
}
Also used : WikipediaTokenizer(org.apache.lucene.analysis.wikipedia.WikipediaTokenizer) StringReader(java.io.StringReader)
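
CATEGORY in the assertion above is the WikipediaTokenizer.CATEGORY token type, statically imported by the test: the [[Category:foo]] markup is stripped and only the category name is emitted, typed as CATEGORY. As a rough illustration of consuming that output outside the test harness, here is a minimal standalone sketch; it is not from the project, the class name PrintWikipediaTokens is hypothetical, and it drops the test-only newAttributeFactory() helper in favor of the plain two-argument constructor.

import java.io.StringReader;
import java.util.Collections;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.apache.lucene.analysis.wikipedia.WikipediaTokenizer;

// Hypothetical standalone driver, not part of WikipediaTokenizerTest.
public class PrintWikipediaTokens {
    public static void main(String[] args) throws Exception {
        WikipediaTokenizer tokenizer =
            new WikipediaTokenizer(WikipediaTokenizer.TOKENS_ONLY, Collections.<String>emptySet());
        tokenizer.setReader(new StringReader("This is a [[Category:foo]]"));
        CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
        TypeAttribute type = tokenizer.addAttribute(TypeAttribute.class);
        tokenizer.reset();
        while (tokenizer.incrementToken()) {
            // Prints each term with its token type; the plain words come out as
            // <ALPHANUM>, "foo" with the WikipediaTokenizer.CATEGORY type.
            System.out.println(term.toString() + "  " + type.type());
        }
        tokenizer.end();
        tokenizer.close();
    }
}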

Example 2 with WikipediaTokenizer

Use of org.apache.lucene.analysis.wikipedia.WikipediaTokenizer in project lucene-solr by apache.

From class WikipediaTokenizerTest, method testLinkPhrases.

public void testLinkPhrases() throws Exception {
    WikipediaTokenizer tf = new WikipediaTokenizer(newAttributeFactory(), WikipediaTokenizer.TOKENS_ONLY, Collections.<String>emptySet());
    tf.setReader(new StringReader(LINK_PHRASES));
    checkLinkPhrases(tf);
}
Also used : WikipediaTokenizer(org.apache.lucene.analysis.wikipedia.WikipediaTokenizer) StringReader(java.io.StringReader)

Example 3 with WikipediaTokenizer

Use of org.apache.lucene.analysis.wikipedia.WikipediaTokenizer in project lucene-solr by apache.

From class WikipediaTokenizerTest, method testBoth.

public void testBoth() throws Exception {
    Set<String> untoks = new HashSet<>();
    untoks.add(WikipediaTokenizer.CATEGORY);
    untoks.add(WikipediaTokenizer.ITALICS);
    String test = "[[Category:a b c d]] [[Category:e f g]] [[link here]] [[link there]] ''italics here'' something ''more italics'' [[Category:h   i   j]]";
    //should output all the individual tokens plus the untokenized tokens as well; the untokenized phrases carry UNTOKENIZED_TOKEN_FLAG (checked below)
    WikipediaTokenizer tf = new WikipediaTokenizer(newAttributeFactory(), WikipediaTokenizer.BOTH, untoks);
    tf.setReader(new StringReader(test));
    assertTokenStreamContents(tf,
        // terms
        new String[] { "a b c d", "a", "b", "c", "d", "e f g", "e", "f", "g", "link", "here", "link", "there", "italics here", "italics", "here", "something", "more italics", "more", "italics", "h   i   j", "h", "i", "j" },
        // start offsets
        new int[] { 11, 11, 13, 15, 17, 32, 32, 34, 36, 42, 47, 56, 61, 71, 71, 79, 86, 98, 98, 103, 124, 124, 128, 132 },
        // end offsets
        new int[] { 18, 12, 14, 16, 18, 37, 33, 35, 37, 46, 51, 60, 66, 83, 78, 83, 95, 110, 102, 110, 133, 125, 129, 133 },
        // position increments
        new int[] { 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1 });
    // now check the flags, TODO: add way to check flags from BaseTokenStreamTestCase?
    tf = new WikipediaTokenizer(newAttributeFactory(), WikipediaTokenizer.BOTH, untoks);
    tf.setReader(new StringReader(test));
    int[] expectedFlags = new int[] { UNTOKENIZED_TOKEN_FLAG, 0, 0, 0, 0, UNTOKENIZED_TOKEN_FLAG, 0, 0, 0, 0, 0, 0, 0, UNTOKENIZED_TOKEN_FLAG, 0, 0, 0, UNTOKENIZED_TOKEN_FLAG, 0, 0, UNTOKENIZED_TOKEN_FLAG, 0, 0, 0 };
    FlagsAttribute flagsAtt = tf.addAttribute(FlagsAttribute.class);
    tf.reset();
    for (int i = 0; i < expectedFlags.length; i++) {
        assertTrue(tf.incrementToken());
        assertEquals("flags " + i, expectedFlags[i], flagsAtt.getFlags());
    }
    assertFalse(tf.incrementToken());
    tf.close();
}
Also used : WikipediaTokenizer(org.apache.lucene.analysis.wikipedia.WikipediaTokenizer) FlagsAttribute(org.apache.lucene.analysis.tokenattributes.FlagsAttribute) StringReader(java.io.StringReader) HashSet(java.util.HashSet)
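
The manual loop above reads the FlagsAttribute directly because, as the TODO notes, assertTokenStreamContents has no flags check. A minimal sketch of the same idea outside the assertion style, pairing each term with whether it carries WikipediaTokenizer.UNTOKENIZED_TOKEN_FLAG; it is not part of the test, reuses the untoks set and test string from testBoth, and assumes the usual CharTermAttribute import from org.apache.lucene.analysis.tokenattributes.

    // Sketch only, not part of the test: print terms and mark the untokenized phrases.
    WikipediaTokenizer tf = new WikipediaTokenizer(newAttributeFactory(), WikipediaTokenizer.BOTH, untoks);
    tf.setReader(new StringReader(test));
    CharTermAttribute termAtt = tf.addAttribute(CharTermAttribute.class);
    FlagsAttribute flagsAtt = tf.addAttribute(FlagsAttribute.class);
    tf.reset();
    while (tf.incrementToken()) {
        boolean untokenized = (flagsAtt.getFlags() & WikipediaTokenizer.UNTOKENIZED_TOKEN_FLAG) != 0;
        System.out.println((untokenized ? "[phrase] " : "         ") + termAtt);
    }
    tf.end();
    tf.close();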

Example 4 with WikipediaTokenizer

Use of org.apache.lucene.analysis.wikipedia.WikipediaTokenizer in project lucene-solr by apache.

From class WikipediaTokenizerTest, method testRandomHugeStrings.

/** blast some random large strings through the analyzer */
public void testRandomHugeStrings() throws Exception {
    Random random = random();
    Analyzer a = new Analyzer() {

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer tokenizer = new WikipediaTokenizer(newAttributeFactory(), WikipediaTokenizer.TOKENS_ONLY, Collections.<String>emptySet());
            return new TokenStreamComponents(tokenizer, tokenizer);
        }
    };
    // TODO: properly support positionLengthAttribute
    checkRandomData(random, a, 100 * RANDOM_MULTIPLIER, 8192, false, false);
    a.close();
}
Also used : WikipediaTokenizer(org.apache.lucene.analysis.wikipedia.WikipediaTokenizer) Random(java.util.Random) Analyzer(org.apache.lucene.analysis.Analyzer) Tokenizer(org.apache.lucene.analysis.Tokenizer)

Example 5 with WikipediaTokenizer

Use of org.apache.lucene.analysis.wikipedia.WikipediaTokenizer in project lucene-solr by apache.

From class WikipediaTokenizerTest, method testRandomStrings.

/** blast some random strings through the analyzer */
public void testRandomStrings() throws Exception {
    Analyzer a = new Analyzer() {

        @Override
        protected TokenStreamComponents createComponents(String fieldName) {
            Tokenizer tokenizer = new WikipediaTokenizer(newAttributeFactory(), WikipediaTokenizer.TOKENS_ONLY, Collections.<String>emptySet());
            return new TokenStreamComponents(tokenizer, tokenizer);
        }
    };
    // TODO: properly support positionLengthAttribute
    checkRandomData(random(), a, 1000 * RANDOM_MULTIPLIER, 20, false, false);
    a.close();
}
Also used : WikipediaTokenizer(org.apache.lucene.analysis.wikipedia.WikipediaTokenizer) Analyzer(org.apache.lucene.analysis.Analyzer) Tokenizer(org.apache.lucene.analysis.Tokenizer)
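
Outside the random-data harness, the same anonymous Analyzer can be exercised directly through Analyzer.tokenStream. A minimal sketch, not part of the test, that would run before a.close(); the field name "body", the sample text, and the TokenStream/CharTermAttribute imports are assumptions for illustration.

    // Sketch only: feed one fixed string through the Analyzer built above.
    try (TokenStream ts = a.tokenStream("body", "See the [[Main Page]] and [[Category:Help]]")) {
        CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            System.out.println(termAtt.toString());
        }
        ts.end();
    }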

Aggregations

WikipediaTokenizer (org.apache.lucene.analysis.wikipedia.WikipediaTokenizer): 9
StringReader (java.io.StringReader): 6
Analyzer (org.apache.lucene.analysis.Analyzer): 3
Tokenizer (org.apache.lucene.analysis.Tokenizer): 3
HashSet (java.util.HashSet): 2
Random (java.util.Random): 1
CharArraySet (org.apache.lucene.analysis.CharArraySet): 1
MockTokenizer (org.apache.lucene.analysis.MockTokenizer): 1
TokenStream (org.apache.lucene.analysis.TokenStream): 1
WordDelimiterFilter (org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter): 1
EdgeNGramTokenizer (org.apache.lucene.analysis.ngram.EdgeNGramTokenizer): 1
FlagsAttribute (org.apache.lucene.analysis.tokenattributes.FlagsAttribute): 1