
Example 21 with Tokenizer

Use of org.apache.lucene.analysis.Tokenizer in project elasticsearch by elastic.

From class NGramTokenizerFactoryTests, method testPreTokenization.

public void testPreTokenization() throws IOException {
    // Make sure that pre-tokenization works correctly, even when token_chars include supplementary characters
    final Index index = new Index("test", "_na_");
    final String name = "ngr";
    final Settings indexSettings = newAnalysisSettingsBuilder().build();
    Settings settings = newAnalysisSettingsBuilder()
        .put("min_gram", 2)
        .put("max_gram", 3)
        .put("token_chars", "letter,digit")
        .build();
    Tokenizer tokenizer = new NGramTokenizerFactory(
        IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create();
    tokenizer.setReader(new StringReader("Åbc déf g𐐀f "));
    assertTokenStreamContents(tokenizer, new String[] { "Åb", "Åbc", "bc", "dé", "déf", "éf", "g𐐀", "g𐐀f", "𐐀f" });
    settings = newAnalysisSettingsBuilder()
        .put("min_gram", 2)
        .put("max_gram", 3)
        .put("token_chars", "letter,digit,punctuation,whitespace,symbol")
        .build();
    tokenizer = new NGramTokenizerFactory(
        IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create();
    tokenizer.setReader(new StringReader(" a!$ 9"));
    assertTokenStreamContents(tokenizer, new String[] { " a", " a!", "a!", "a!$", "!$", "!$ ", "$ ", "$ 9", " 9" });
}
Also used: java.io.StringReader, org.elasticsearch.index.Index, org.apache.lucene.analysis.Tokenizer, org.apache.lucene.analysis.MockTokenizer, org.elasticsearch.common.settings.Settings, org.elasticsearch.index.IndexSettings
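
For readers who want to try the same configuration outside the Elasticsearch factory, here is a minimal standalone sketch using Lucene's NGramTokenizer directly, with min_gram=2, max_gram=3 and token characters restricted to letters and digits by overriding isTokenChar. The class name, the sample input, and the token-printing loop are illustrative only, not part of the test above.

import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.ngram.NGramTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class NGramTokenizerSketch {
    public static void main(String[] args) throws IOException {
        // min_gram=2, max_gram=3; only letters and digits count as token characters,
        // mirroring token_chars=letter,digit in the test settings above
        Tokenizer tokenizer = new NGramTokenizer(2, 3) {
            @Override
            protected boolean isTokenChar(int chr) {
                return Character.isLetter(chr) || Character.isDigit(chr);
            }
        };
        tokenizer.setReader(new StringReader("Åbc déf"));
        CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
        tokenizer.reset();
        while (tokenizer.incrementToken()) {
            // prints each 2- and 3-gram on its own line
            System.out.println(term.toString());
        }
        tokenizer.end();
        tokenizer.close();
    }
}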

Example 22 with Tokenizer

Use of org.apache.lucene.analysis.Tokenizer in project elasticsearch by elastic.

From class BaseWordDelimiterTokenFilterFactoryTestCase, method testSplitOnCaseChange.

public void testSplitOnCaseChange() throws IOException {
    ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(
        Settings.builder()
            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
            .put("index.analysis.filter.my_word_delimiter.type", type)
            .put("index.analysis.filter.my_word_delimiter.split_on_case_change", "false")
            .build());
    TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
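    // With split_on_case_change disabled, "PowerShot" is not split at the case change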
    String source = "PowerShot";
    String[] expected = new String[] { "PowerShot" };
    Tokenizer tokenizer = new WhitespaceTokenizer();
    tokenizer.setReader(new StringReader(source));
    assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
}
Also used: org.apache.lucene.analysis.core.WhitespaceTokenizer, org.elasticsearch.test.ESTestCase, java.io.StringReader, org.apache.lucene.analysis.Tokenizer

Example 23 with Tokenizer

Use of org.apache.lucene.analysis.Tokenizer in project elasticsearch by elastic.

From class BaseWordDelimiterTokenFilterFactoryTestCase, method testCatenateAll.

public void testCatenateAll() throws IOException {
    ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(
        Settings.builder()
            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
            .put("index.analysis.filter.my_word_delimiter.type", type)
            .put("index.analysis.filter.my_word_delimiter.generate_word_parts", "false")
            .put("index.analysis.filter.my_word_delimiter.generate_number_parts", "false")
            .put("index.analysis.filter.my_word_delimiter.catenate_all", "true")
            .build());
    TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
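    // With catenate_all on and word/number part generation off, each multi-part token
    // yields only its catenated form (e.g. "500-42" -> "50042", "wi-fi" -> "wifi")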
    String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
    String[] expected = new String[] { "PowerShot", "50042", "wifi", "wifi4000", "j2se", "ONeil" };
    Tokenizer tokenizer = new WhitespaceTokenizer();
    tokenizer.setReader(new StringReader(source));
    assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
}
Also used: org.apache.lucene.analysis.core.WhitespaceTokenizer, org.elasticsearch.test.ESTestCase, java.io.StringReader, org.apache.lucene.analysis.Tokenizer
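
The word delimiter examples in this section all configure the Elasticsearch "word_delimiter" filter through index settings. As a rough standalone sketch, assuming the underlying Lucene WordDelimiterFilter that this factory wraps in this version, the catenate_all case above roughly corresponds to the flag combination below; the class name, the exact flag set, the sample input, and the printing loop are illustrative only.

import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.miscellaneous.WordDelimiterFilter;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class CatenateAllSketch {
    public static void main(String[] args) throws IOException {
        // generate_word_parts/generate_number_parts left off, catenate_all on;
        // the remaining flags approximate the factory's defaults
        int flags = WordDelimiterFilter.CATENATE_ALL
                | WordDelimiterFilter.SPLIT_ON_CASE_CHANGE
                | WordDelimiterFilter.SPLIT_ON_NUMERICS
                | WordDelimiterFilter.STEM_ENGLISH_POSSESSIVE;
        Tokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader("PowerShot 500-42 wi-fi-4000 O'Neil's"));
        TokenStream stream = new WordDelimiterFilter(tokenizer, flags, null);
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            // expected to print catenated forms such as PowerShot, 50042, wifi4000, ONeil
            System.out.println(term.toString());
        }
        stream.end();
        stream.close();
    }
}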

Example 24 with Tokenizer

Use of org.apache.lucene.analysis.Tokenizer in project elasticsearch by elastic.

From class BaseWordDelimiterTokenFilterFactoryTestCase, method testStemEnglishPossessive.

public void testStemEnglishPossessive() throws IOException {
    ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromSettings(
        Settings.builder()
            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toString())
            .put("index.analysis.filter.my_word_delimiter.type", type)
            .put("index.analysis.filter.my_word_delimiter.stem_english_possessive", "false")
            .build());
    TokenFilterFactory tokenFilter = analysis.tokenFilter.get("my_word_delimiter");
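    // With stem_english_possessive disabled, the possessive 's is split off as its own
    // "s" token instead of being removed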
    String source = "PowerShot 500-42 wi-fi wi-fi-4000 j2se O'Neil's";
    String[] expected = new String[] { "Power", "Shot", "500", "42", "wi", "fi", "wi", "fi", "4000", "j", "2", "se", "O", "Neil", "s" };
    Tokenizer tokenizer = new WhitespaceTokenizer();
    tokenizer.setReader(new StringReader(source));
    assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
}
Also used: org.apache.lucene.analysis.core.WhitespaceTokenizer, org.elasticsearch.test.ESTestCase, java.io.StringReader, org.apache.lucene.analysis.Tokenizer

Example 25 with Tokenizer

Use of org.apache.lucene.analysis.Tokenizer in project elasticsearch by elastic.

From class CJKFilterFactoryTests, method testHanOnly.

public void testHanOnly() throws IOException {
    ESTestCase.TestAnalysis analysis = AnalysisTestsHelper.createTestAnalysisFromClassPath(createTempDir(), RESOURCE);
    TokenFilterFactory tokenFilter = analysis.tokenFilter.get("cjk_han_only");
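    // With the cjk_han_only filter, only Han characters are combined into bigrams
    // (学生, 試験); the kana characters pass through as single-character tokens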
    String source = "多くの学生が試験に落ちた。";
    String[] expected = new String[] { "多", "く", "の", "学生", "が", "試験", "に", "落", "ち", "た" };
    Tokenizer tokenizer = new StandardTokenizer();
    tokenizer.setReader(new StringReader(source));
    assertTokenStreamContents(tokenFilter.create(tokenizer), expected);
}
Also used: org.elasticsearch.test.ESTestCase, org.apache.lucene.analysis.standard.StandardTokenizer, java.io.StringReader, org.apache.lucene.analysis.Tokenizer
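
For comparison, a minimal standalone sketch of the same behavior, assuming Lucene's CJKBigramFilter (which backs the cjk_bigram filter type) restricted to the Han script; the class name, the sample input, and the printing loop are illustrative only.

import java.io.IOException;
import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.cjk.CJKBigramFilter;
import org.apache.lucene.analysis.standard.StandardTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class HanOnlyBigramSketch {
    public static void main(String[] args) throws IOException {
        Tokenizer tokenizer = new StandardTokenizer();
        tokenizer.setReader(new StringReader("多くの学生が試験に落ちた。"));
        // Only Han characters are joined into bigrams; other scripts pass through unchanged
        TokenStream stream = new CJKBigramFilter(tokenizer, CJKBigramFilter.HAN);
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            System.out.println(term.toString());
        }
        stream.end();
        stream.close();
    }
}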

Aggregations

Tokenizer (org.apache.lucene.analysis.Tokenizer): 611
MockTokenizer (org.apache.lucene.analysis.MockTokenizer): 288
Analyzer (org.apache.lucene.analysis.Analyzer): 269
StringReader (java.io.StringReader): 264
TokenStream (org.apache.lucene.analysis.TokenStream): 245
KeywordTokenizer (org.apache.lucene.analysis.core.KeywordTokenizer): 216
Reader (java.io.Reader): 91
WhitespaceTokenizer (org.apache.lucene.analysis.core.WhitespaceTokenizer): 77
StandardTokenizer (org.apache.lucene.analysis.standard.StandardTokenizer): 73
StopFilter (org.apache.lucene.analysis.StopFilter): 56
SetKeywordMarkerFilter (org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter): 55
LowerCaseFilter (org.apache.lucene.analysis.LowerCaseFilter): 51
MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer): 47
CharArraySet (org.apache.lucene.analysis.CharArraySet): 44
StandardFilter (org.apache.lucene.analysis.standard.StandardFilter): 37
CharTermAttribute (org.apache.lucene.analysis.tokenattributes.CharTermAttribute): 35
ESTestCase (org.elasticsearch.test.ESTestCase): 30
HashMap (java.util.HashMap): 24
TokenFilter (org.apache.lucene.analysis.TokenFilter): 24
Random (java.util.Random): 20