Search in sources :

Example 91 with Tokenizer

use of org.apache.lucene.analysis.Tokenizer in project lucene-solr by apache.

From the class UkrainianMorfologikAnalyzer, the method createComponents:

/**
   * Builds the
   * {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
   * used to tokenize all of the text supplied via the {@link Reader}.
   *
   * @return A
   *         {@link org.apache.lucene.analysis.Analyzer.TokenStreamComponents}
   *         built from a {@link StandardTokenizer} filtered with
   *         {@link StandardFilter}, {@link LowerCaseFilter},
   *         {@link StopFilter}, {@link SetKeywordMarkerFilter} (only when a
   *         stem-exclusion set is provided) and {@link MorfologikFilter}
   *         backed by the Ukrainian dictionary.
   */
@Override
protected TokenStreamComponents createComponents(String fieldName) {
    final Tokenizer tokenizer = new StandardTokenizer();
    // Normalize case and remove stopwords before stemming.
    TokenStream stream = new StopFilter(
            new LowerCaseFilter(new StandardFilter(tokenizer)), stopwords);
    if (!stemExclusionSet.isEmpty()) {
        // Mark excluded terms as keywords so the stemmer leaves them intact.
        stream = new SetKeywordMarkerFilter(stream, stemExclusionSet);
    }
    stream = new MorfologikFilter(stream, getDictionary());
    return new TokenStreamComponents(tokenizer, stream);
}
Also used : TokenStream(org.apache.lucene.analysis.TokenStream) MorfologikFilter(org.apache.lucene.analysis.morfologik.MorfologikFilter) StandardTokenizer(org.apache.lucene.analysis.standard.StandardTokenizer) StopFilter(org.apache.lucene.analysis.StopFilter) SetKeywordMarkerFilter(org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter) StandardFilter(org.apache.lucene.analysis.standard.StandardFilter) Tokenizer(org.apache.lucene.analysis.Tokenizer) StandardTokenizer(org.apache.lucene.analysis.standard.StandardTokenizer) LowerCaseFilter(org.apache.lucene.analysis.LowerCaseFilter)

Example 92 with Tokenizer

use of org.apache.lucene.analysis.Tokenizer in project lucene-solr by apache.

From the class TestJapaneseTokenizerFactory, the method testMode:

/**
   * Verifies the "mode" parameter: with mode=normal the tokenizer keeps a
   * long compound term as a single token instead of splitting it.
   */
public void testMode() throws IOException {
    Map<String, String> args = new HashMap<>();
    args.put("mode", "normal");
    final JapaneseTokenizerFactory factory = new JapaneseTokenizerFactory(args);
    factory.inform(new StringMockResourceLoader(""));
    final TokenStream stream = factory.create(newAttributeFactory());
    ((Tokenizer) stream).setReader(new StringReader("シニアソフトウェアエンジニア"));
    // Normal mode: the whole compound survives as one token.
    assertTokenStreamContents(stream, new String[] { "シニアソフトウェアエンジニア" });
}
Also used : TokenStream(org.apache.lucene.analysis.TokenStream) HashMap(java.util.HashMap) StringReader(java.io.StringReader) Tokenizer(org.apache.lucene.analysis.Tokenizer)

Example 93 with Tokenizer

use of org.apache.lucene.analysis.Tokenizer in project lucene-solr by apache.

From the class TestJapaneseTokenizerFactory, the method testSimple:

/**
   * Basic sanity check: a factory built with no arguments tokenizes a short
   * Japanese sentence into the expected terms with correct offsets.
   */
public void testSimple() throws IOException {
    final JapaneseTokenizerFactory factory =
            new JapaneseTokenizerFactory(new HashMap<String, String>());
    factory.inform(new StringMockResourceLoader(""));
    final TokenStream stream = factory.create(newAttributeFactory());
    ((Tokenizer) stream).setReader(new StringReader("これは本ではない"));
    // Terms plus their start/end character offsets.
    assertTokenStreamContents(stream,
            new String[] { "これ", "は", "本", "で", "は", "ない" },
            new int[] { 0, 2, 3, 4, 5, 6 },
            new int[] { 2, 3, 4, 5, 6, 8 });
}
Also used : TokenStream(org.apache.lucene.analysis.TokenStream) StringReader(java.io.StringReader) Tokenizer(org.apache.lucene.analysis.Tokenizer)

Example 94 with Tokenizer

use of org.apache.lucene.analysis.Tokenizer in project lucene-solr by apache.

From the class TestJapaneseTokenizerFactory, the method testDefaults:

/**
   * Verifies that search mode is the default: the compound is decompounded
   * into its parts while the original long token is also emitted.
   */
public void testDefaults() throws IOException {
    final JapaneseTokenizerFactory factory =
            new JapaneseTokenizerFactory(new HashMap<String, String>());
    factory.inform(new StringMockResourceLoader(""));
    final TokenStream stream = factory.create(newAttributeFactory());
    ((Tokenizer) stream).setReader(new StringReader("シニアソフトウェアエンジニア"));
    // Search mode emits both the compound and its decompounded segments.
    assertTokenStreamContents(stream,
            new String[] { "シニア", "シニアソフトウェアエンジニア", "ソフトウェア", "エンジニア" });
}
Also used : TokenStream(org.apache.lucene.analysis.TokenStream) StringReader(java.io.StringReader) Tokenizer(org.apache.lucene.analysis.Tokenizer)

Example 95 with Tokenizer

use of org.apache.lucene.analysis.Tokenizer in project lucene-solr by apache.

From the class AnalyzerFactory, the method create:

/**
 * Builds a new {@link Analyzer} wired together from this factory's configured
 * char filter factories, tokenizer factory and token filter factories.
 */
public Analyzer create() {
    return new Analyzer() {

        // Snapshot the factory's gap settings; null means "use Analyzer defaults".
        private final Integer positionIncrementGap = AnalyzerFactory.this.positionIncrementGap;

        private final Integer offsetGap = AnalyzerFactory.this.offsetGap;

        @Override
        public Reader initReader(String fieldName, Reader reader) {
            // No char filters configured: hand the reader back untouched.
            if (charFilterFactories == null || charFilterFactories.size() == 0) {
                return reader;
            }
            Reader wrapped = reader;
            for (CharFilterFactory charFilterFactory : charFilterFactories) {
                wrapped = charFilterFactory.create(wrapped);
            }
            return wrapped;
        }

        @Override
        protected Analyzer.TokenStreamComponents createComponents(String fieldName) {
            final Tokenizer tokenizer = tokenizerFactory.create();
            // Thread the stream through each configured token filter in order.
            TokenStream stream = tokenizer;
            for (TokenFilterFactory tokenFilterFactory : tokenFilterFactories) {
                stream = tokenFilterFactory.create(stream);
            }
            return new TokenStreamComponents(tokenizer, stream);
        }

        @Override
        public int getPositionIncrementGap(String fieldName) {
            return positionIncrementGap != null
                    ? positionIncrementGap
                    : super.getPositionIncrementGap(fieldName);
        }

        @Override
        public int getOffsetGap(String fieldName) {
            return offsetGap != null ? offsetGap : super.getOffsetGap(fieldName);
        }
    };
}
Also used : TokenStream(org.apache.lucene.analysis.TokenStream) CharFilterFactory(org.apache.lucene.analysis.util.CharFilterFactory) Reader(java.io.Reader) Analyzer(org.apache.lucene.analysis.Analyzer) Tokenizer(org.apache.lucene.analysis.Tokenizer) TokenFilterFactory(org.apache.lucene.analysis.util.TokenFilterFactory)

Aggregations

Tokenizer (org.apache.lucene.analysis.Tokenizer)573 MockTokenizer (org.apache.lucene.analysis.MockTokenizer)286 Analyzer (org.apache.lucene.analysis.Analyzer)265 StringReader (java.io.StringReader)249 TokenStream (org.apache.lucene.analysis.TokenStream)227 KeywordTokenizer (org.apache.lucene.analysis.core.KeywordTokenizer)216 Reader (java.io.Reader)91 WhitespaceTokenizer (org.apache.lucene.analysis.core.WhitespaceTokenizer)67 StandardTokenizer (org.apache.lucene.analysis.standard.StandardTokenizer)63 SetKeywordMarkerFilter (org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter)52 StopFilter (org.apache.lucene.analysis.StopFilter)48 MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer)47 LowerCaseFilter (org.apache.lucene.analysis.LowerCaseFilter)45 CharArraySet (org.apache.lucene.analysis.CharArraySet)43 StandardFilter (org.apache.lucene.analysis.standard.StandardFilter)36 ESTestCase (org.elasticsearch.test.ESTestCase)30 CharTermAttribute (org.apache.lucene.analysis.tokenattributes.CharTermAttribute)26 HashMap (java.util.HashMap)23 Random (java.util.Random)20 TokenFilter (org.apache.lucene.analysis.TokenFilter)19