Search in sources:

Example 21 with CharFilter

Use of org.apache.lucene.analysis.CharFilter in the Apache lucene-solr project.

From class TestPatternReplaceCharFilter, method testReplaceByEmpty.

// Offset ruler: 012345678
// Input text:   aa bb cc
public void testReplaceByEmpty() throws IOException {
    // The pattern matches the whole input, so replacing with "" removes everything.
    final String input = "aa bb cc";
    CharFilter filtered = new PatternReplaceCharFilter(pattern("(aa)\\s+(bb)\\s+(cc)"), "", new StringReader(input));
    TokenStream tokens = whitespaceMockTokenizer(filtered);
    // Expect an empty token stream: no characters survive the replacement.
    assertTokenStreamContents(tokens, new String[] {});
}
Also used : TokenStream(org.apache.lucene.analysis.TokenStream) CharFilter(org.apache.lucene.analysis.CharFilter) StringReader(java.io.StringReader)

Example 22 with CharFilter

Use of org.apache.lucene.analysis.CharFilter in the Apache lucene-solr project.

From class TestPatternReplaceCharFilter, method test1block1matchLonger.

// Offset ruler:      11111
//            012345678901234
// Input:     aa bb cc dd
// After rep: aa##bb###cc dd
public void test1block1matchLonger() throws IOException {
    final String input = "aa bb cc dd";
    // The replacement "$1##$2###$3" is longer than the matched text, so offsets
    // of following tokens must still be corrected back to the original input.
    CharFilter filtered = new PatternReplaceCharFilter(pattern("(aa)\\s+(bb)\\s+(cc)"), "$1##$2###$3", new StringReader(input));
    TokenStream tokens = whitespaceMockTokenizer(filtered);
    // Tokens, start offsets, end offsets, and final offset all refer to the ORIGINAL input.
    assertTokenStreamContents(tokens, new String[] { "aa##bb###cc", "dd" }, new int[] { 0, 9 }, new int[] { 8, 11 }, input.length());
}
Also used : TokenStream(org.apache.lucene.analysis.TokenStream) CharFilter(org.apache.lucene.analysis.CharFilter) StringReader(java.io.StringReader)

Example 23 with CharFilter

Use of org.apache.lucene.analysis.CharFilter in the Apache lucene-solr project.

From class TestICUNormalizer2CharFilter, method testTokenStream.

public void testTokenStream() throws IOException {
    // Compatibility characters plus base characters with a combining voiced
    // sound mark (dakuten); NFKC composition should fold them.
    // (NOTE(review): the original comment's "'<<'" looks like an extraction
    // artifact for the combining mark — confirm against upstream source.)
    String source = "℃ № ㈱ ㌘ ザ ゾ ㌰゙";
    CharFilter normalized = new ICUNormalizer2CharFilter(new StringReader(source), Normalizer2.getInstance(null, "nfkc", Normalizer2.Mode.COMPOSE));
    Tokenizer tokenizer = new MockTokenizer(MockTokenizer.WHITESPACE, false);
    tokenizer.setReader(normalized);
    // Offsets must map back through the char filter to positions in the original input.
    assertTokenStreamContents(tokenizer, new String[] { "°C", "No", "(株)", "グラム", "ザ", "ゾ", "ピゴ" }, new int[] { 0, 2, 4, 6, 8, 11, 14 }, new int[] { 1, 3, 5, 7, 10, 13, 16 }, source.length());
}
Also used : MockTokenizer(org.apache.lucene.analysis.MockTokenizer) CharFilter(org.apache.lucene.analysis.CharFilter) StringReader(java.io.StringReader) Tokenizer(org.apache.lucene.analysis.Tokenizer) MockTokenizer(org.apache.lucene.analysis.MockTokenizer) NGramTokenizer(org.apache.lucene.analysis.ngram.NGramTokenizer)

Example 24 with CharFilter

Use of org.apache.lucene.analysis.CharFilter in the Apache lucene-solr project.

From class TestICUNormalizer2CharFilter, method testTokenStream2.

public void testTokenStream2() throws IOException {
    // Compatibility characters and combining voiced sound marks, no separators;
    // nfkc_cf additionally case-folds (so "℃" yields lowercase "c").
    String source = "㌰゙5℃№㈱㌘ザゾ";
    CharFilter normalized = new ICUNormalizer2CharFilter(new StringReader(source), Normalizer2.getInstance(null, "nfkc_cf", Normalizer2.Mode.COMPOSE));
    Tokenizer tokenizer = new NGramTokenizer(newAttributeFactory(), 1, 1);
    tokenizer.setReader(normalized);
    // 1-gram tokenization over the normalized text; several output chars map back
    // to the same original offset because one input char expands to many.
    assertTokenStreamContents(tokenizer, new String[] { "ピ", "ゴ", "5", "°", "c", "n", "o", "(", "株", ")", "グ", "ラ", "ム", "ザ", "ゾ" }, new int[] { 0, 1, 2, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 9 }, new int[] { 1, 2, 3, 3, 4, 4, 5, 5, 5, 6, 6, 6, 7, 9, 11 }, source.length());
}
Also used : CharFilter(org.apache.lucene.analysis.CharFilter) StringReader(java.io.StringReader) Tokenizer(org.apache.lucene.analysis.Tokenizer) MockTokenizer(org.apache.lucene.analysis.MockTokenizer) NGramTokenizer(org.apache.lucene.analysis.ngram.NGramTokenizer) NGramTokenizer(org.apache.lucene.analysis.ngram.NGramTokenizer)

Example 25 with CharFilter

Use of org.apache.lucene.analysis.CharFilter in the Apache lucene-solr project.

From class TestICUNormalizer2CharFilter, method testNormalization.

public void testNormalization() throws IOException {
    // A stress input mixing compatibility chars, combining marks, and several
    // scripts, chosen to exercise the filter's internal buffering.
    String input = "ʰ㌰゙5℃№㈱㌘,バッファーの正規化のテスト.㋐㋑㋒㋓㋔カキクケコザジズゼゾg̈각/각நிเกषिchkʷक्षि";
    Normalizer2 normalizer = Normalizer2.getInstance(null, "nfkc_cf", Normalizer2.Mode.COMPOSE);
    String expected = normalizer.normalize(input);
    CharFilter reader = new ICUNormalizer2CharFilter(new StringReader(input), normalizer);
    StringBuilder actual = new StringBuilder();
    // Deliberately small buffer so reads land mid-normalization-boundary.
    char[] chunk = new char[10];
    for (int read; (read = reader.read(chunk)) != -1; ) {
        actual.append(chunk, 0, read);
        // Invariant after every chunk: the output so far equals the normalization
        // of the input prefix that correctOffset maps the output length back to.
        assertEquals(actual.toString(), normalizer.normalize(input.substring(0, reader.correctOffset(actual.length()))));
    }
    assertEquals(expected, actual.toString());
}
Also used : Normalizer2(com.ibm.icu.text.Normalizer2) CharFilter(org.apache.lucene.analysis.CharFilter) StringReader(java.io.StringReader)

Aggregations

CharFilter (org.apache.lucene.analysis.CharFilter)41 StringReader (java.io.StringReader)40 TokenStream (org.apache.lucene.analysis.TokenStream)26 Tokenizer (org.apache.lucene.analysis.Tokenizer)10 MockTokenizer (org.apache.lucene.analysis.MockTokenizer)7 MappingCharFilter (org.apache.lucene.analysis.charfilter.MappingCharFilter)4 Normalizer2 (com.ibm.icu.text.Normalizer2)3 ArrayList (java.util.ArrayList)3 NormalizeCharMap (org.apache.lucene.analysis.charfilter.NormalizeCharMap)3 NGramTokenizer (org.apache.lucene.analysis.ngram.NGramTokenizer)3 HashMap (java.util.HashMap)2 Settings (org.elasticsearch.common.settings.Settings)2 Index (org.elasticsearch.index.Index)2 AnalysisICUPlugin (org.elasticsearch.plugin.analysis.icu.AnalysisICUPlugin)2 IOException (java.io.IOException)1 MockCharFilter (org.apache.lucene.analysis.MockCharFilter)1