
Example 11 with TokenizerChain

Use of org.apache.solr.analysis.TokenizerChain in project lucene-solr by apache.

In class SolrStopwordsCarrot2LexicalDataFactory, method getSolrStopWordsForField:

/**
   * Obtains stop words for a field from the associated
   * {@link StopFilterFactory}, if any.
   */
private List<CharArraySet> getSolrStopWordsForField(String fieldName) {
    // Carrot2 ensures that instances of this class
    // are not used by multiple threads at a time.
    synchronized (solrStopWords) {
        if (!solrStopWords.containsKey(fieldName)) {
            solrStopWords.put(fieldName, new ArrayList<>());
            IndexSchema schema = core.getLatestSchema();
            final Analyzer fieldAnalyzer = schema.getFieldType(fieldName).getIndexAnalyzer();
            if (fieldAnalyzer instanceof TokenizerChain) {
                final TokenFilterFactory[] filterFactories = ((TokenizerChain) fieldAnalyzer).getTokenFilterFactories();
                for (TokenFilterFactory factory : filterFactories) {
                    if (factory instanceof StopFilterFactory) {
                        // StopFilterFactory holds the stop words in a CharArraySet
                        CharArraySet stopWords = ((StopFilterFactory) factory).getStopWords();
                        solrStopWords.get(fieldName).add(stopWords);
                    }
                    if (factory instanceof CommonGramsFilterFactory) {
                        CharArraySet commonWords = ((CommonGramsFilterFactory) factory).getCommonWords();
                        solrStopWords.get(fieldName).add(commonWords);
                    }
                }
            }
        }
        return solrStopWords.get(fieldName);
    }
}
Also used : CharArraySet(org.apache.lucene.analysis.CharArraySet) StopFilterFactory(org.apache.lucene.analysis.core.StopFilterFactory) TokenizerChain(org.apache.solr.analysis.TokenizerChain) CommonGramsFilterFactory(org.apache.lucene.analysis.commongrams.CommonGramsFilterFactory) IndexSchema(org.apache.solr.schema.IndexSchema) Analyzer(org.apache.lucene.analysis.Analyzer) TokenFilterFactory(org.apache.lucene.analysis.util.TokenFilterFactory)
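
The same factory-walking pattern can be exercised without a SolrCore. Below is a minimal, self-contained sketch, assuming a hand-built TokenizerChain instead of one taken from the schema; the class name StopWordInspectionSketch is made up for illustration, and since no "words" file is configured, StopFilterFactory falls back to its built-in English stop set when inform() is called.

import java.util.HashMap;
import java.util.Map;

import org.apache.lucene.analysis.CharArraySet;
import org.apache.lucene.analysis.core.StopFilterFactory;
import org.apache.lucene.analysis.core.WhitespaceTokenizerFactory;
import org.apache.lucene.analysis.util.ClasspathResourceLoader;
import org.apache.lucene.analysis.util.TokenFilterFactory;
import org.apache.solr.analysis.TokenizerChain;

public class StopWordInspectionSketch {

    public static void main(String[] args) throws Exception {
        // Build a StopFilterFactory by hand; in Solr this would be declared in schema.xml.
        Map<String, String> stopArgs = new HashMap<>();
        stopArgs.put("luceneMatchVersion", "7.0.0");
        stopArgs.put("ignoreCase", "true");
        StopFilterFactory stopFactory = new StopFilterFactory(stopArgs);
        // StopFilterFactory is ResourceLoaderAware: inform() loads the stop set
        // (the built-in English set here, since no "words" file was given).
        stopFactory.inform(new ClasspathResourceLoader(StopWordInspectionSketch.class));

        Map<String, String> tokArgs = new HashMap<>();
        tokArgs.put("luceneMatchVersion", "7.0.0");
        TokenizerChain chain = new TokenizerChain(
                new WhitespaceTokenizerFactory(tokArgs),
                new TokenFilterFactory[] { stopFactory });

        // Same pattern as getSolrStopWordsForField: walk the token filter
        // factories and pull the stop words out of any StopFilterFactory.
        for (TokenFilterFactory factory : chain.getTokenFilterFactories()) {
            if (factory instanceof StopFilterFactory) {
                CharArraySet stopWords = ((StopFilterFactory) factory).getStopWords();
                System.out.println("stop words for this chain: " + stopWords.size());
            }
        }
    }
}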

Example 12 with TokenizerChain

Use of org.apache.solr.analysis.TokenizerChain in project lucene-solr by apache.

In class SolrQueryParserBase, method getReversedWildcardFilterFactory:

protected ReversedWildcardFilterFactory getReversedWildcardFilterFactory(FieldType fieldType) {
    if (leadingWildcards == null)
        leadingWildcards = new HashMap<>();
    ReversedWildcardFilterFactory fac = leadingWildcards.get(fieldType);
    if (fac != null || leadingWildcards.containsKey(fieldType)) {
        return fac;
    }
    Analyzer a = fieldType.getIndexAnalyzer();
    if (a instanceof TokenizerChain) {
        // examine the indexing analysis chain to see whether it supports leading wildcards
        TokenizerChain tc = (TokenizerChain) a;
        TokenFilterFactory[] factories = tc.getTokenFilterFactories();
        for (TokenFilterFactory factory : factories) {
            if (factory instanceof ReversedWildcardFilterFactory) {
                fac = (ReversedWildcardFilterFactory) factory;
                break;
            }
        }
    }
    leadingWildcards.put(fieldType, fac);
    return fac;
}
Also used : TokenizerChain(org.apache.solr.analysis.TokenizerChain) HashMap(java.util.HashMap) ReversedWildcardFilterFactory(org.apache.solr.analysis.ReversedWildcardFilterFactory) Analyzer(org.apache.lucene.analysis.Analyzer) TokenFilterFactory(org.apache.lucene.analysis.util.TokenFilterFactory)
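
Note that a null result is cached as well: the containsKey check lets the method return early for field types already known to lack a ReversedWildcardFilterFactory. A hedged usage sketch follows, assuming it lives in a SolrQueryParserBase subclass; the method name allowsLeadingWildcard is hypothetical.

    // Hypothetical helper, not part of SolrQueryParserBase itself: decide whether a
    // leading-wildcard query such as *foo should be accepted for this field type.
    protected boolean allowsLeadingWildcard(FieldType fieldType) {
        ReversedWildcardFilterFactory fac = getReversedWildcardFilterFactory(fieldType);
        // A non-null factory means reversed forms of the terms were indexed, so a
        // leading wildcard can be answered from the reversed terms instead of
        // scanning the whole term dictionary.
        return fac != null;
    }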

Example 13 with TokenizerChain

Use of org.apache.solr.analysis.TokenizerChain in project lucene-solr by apache.

In class FieldTypePluginLoader, method constructMultiTermAnalyzer:

// The point here is that, if no multiterm analyzer was specified in the schema file, do one of two things:
// 1> If legacyMultiTerm == false, assemble a new analyzer composed of all of the charfilters,
//    the lowercase filters and the asciifolding filter.
// 2> If legacyMultiTerm == true, just construct the analyzer from a KeywordTokenizer, which mimics
//    the old behavior for users who have explicitly requested it (legacyMultiTerm="true").
private Analyzer constructMultiTermAnalyzer(Analyzer queryAnalyzer) {
    if (queryAnalyzer == null)
        return null;
    if (!(queryAnalyzer instanceof TokenizerChain)) {
        return new KeywordAnalyzer();
    }
    TokenizerChain tc = (TokenizerChain) queryAnalyzer;
    MultiTermChainBuilder builder = new MultiTermChainBuilder();
    CharFilterFactory[] charFactories = tc.getCharFilterFactories();
    for (CharFilterFactory fact : charFactories) {
        builder.add(fact);
    }
    builder.add(tc.getTokenizerFactory());
    for (TokenFilterFactory fact : tc.getTokenFilterFactories()) {
        builder.add(fact);
    }
    return builder.build();
}
Also used : KeywordAnalyzer(org.apache.lucene.analysis.core.KeywordAnalyzer) TokenizerChain(org.apache.solr.analysis.TokenizerChain) CharFilterFactory(org.apache.lucene.analysis.util.CharFilterFactory) TokenFilterFactory(org.apache.lucene.analysis.util.TokenFilterFactory)
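
MultiTermChainBuilder is a private helper of FieldTypePluginLoader, so it cannot be reused directly. The static method below is a rough, hypothetical approximation of what it does with the pieces pulled out of the TokenizerChain above: keep only components that implement MultiTermAwareComponent, swap each for its multi-term variant, and fall back to a KeywordTokenizer when the tokenizer itself is not multi-term aware. Imports for the java.util collections, org.apache.lucene.analysis.util.MultiTermAwareComponent, KeywordTokenizerFactory, TokenizerFactory and Analyzer are assumed alongside the types already listed above.

    // Hypothetical sketch, not the actual MultiTermChainBuilder: rebuild a chain
    // that keeps only the multi-term-aware components, in their multi-term form.
    static Analyzer rebuildForMultiTerm(TokenizerChain tc) {
        List<CharFilterFactory> charFilters = new ArrayList<>();
        for (CharFilterFactory fact : tc.getCharFilterFactories()) {
            if (fact instanceof MultiTermAwareComponent) {
                charFilters.add((CharFilterFactory) ((MultiTermAwareComponent) fact).getMultiTermComponent());
            }
        }
        TokenizerFactory tokenizer = tc.getTokenizerFactory();
        if (tokenizer instanceof MultiTermAwareComponent) {
            tokenizer = (TokenizerFactory) ((MultiTermAwareComponent) tokenizer).getMultiTermComponent();
        } else {
            // Not multi-term aware: treat the whole input as a single token.
            tokenizer = new KeywordTokenizerFactory(new HashMap<>());
        }
        List<TokenFilterFactory> filters = new ArrayList<>();
        for (TokenFilterFactory fact : tc.getTokenFilterFactories()) {
            if (fact instanceof MultiTermAwareComponent) {
                filters.add((TokenFilterFactory) ((MultiTermAwareComponent) fact).getMultiTermComponent());
            }
        }
        return new TokenizerChain(
                charFilters.toArray(new CharFilterFactory[0]),
                tokenizer,
                filters.toArray(new TokenFilterFactory[0]));
    }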

Example 14 with TokenizerChain

Use of org.apache.solr.analysis.TokenizerChain in project lucene-solr by apache.

In class PayloadUtils, method getPayloadEncoder:

public static String getPayloadEncoder(FieldType fieldType) {
    // TODO: support custom payload encoding fields too somehow - maybe someone has a custom component that encodes payloads as floats
    String encoder = null;
    Analyzer a = fieldType.getIndexAnalyzer();
    if (a instanceof TokenizerChain) {
        // examine the indexing analysis chain for DelimitedPayloadTokenFilterFactory or NumericPayloadTokenFilterFactory
        TokenizerChain tc = (TokenizerChain) a;
        TokenFilterFactory[] factories = tc.getTokenFilterFactories();
        for (TokenFilterFactory factory : factories) {
            if (factory instanceof DelimitedPayloadTokenFilterFactory) {
                encoder = factory.getOriginalArgs().get(DelimitedPayloadTokenFilterFactory.ENCODER_ATTR);
                break;
            }
            if (factory instanceof NumericPayloadTokenFilterFactory) {
                // encodes using `PayloadHelper.encodeFloat(payload)`
                encoder = "float";
                break;
            }
        }
    }
    return encoder;
}
Also used : TokenizerChain(org.apache.solr.analysis.TokenizerChain) DelimitedPayloadTokenFilterFactory(org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilterFactory) Analyzer(org.apache.lucene.analysis.Analyzer) NumericPayloadTokenFilterFactory(org.apache.lucene.analysis.payloads.NumericPayloadTokenFilterFactory) TokenFilterFactory(org.apache.lucene.analysis.util.TokenFilterFactory)
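
A hedged sketch of how the returned string might be interpreted when reading payloads back, assuming a fieldType and a BytesRef payload obtained from a postings enum in surrounding code; the variable names are illustrative. PayloadHelper (org.apache.lucene.analysis.payloads.PayloadHelper) is the Lucene helper behind the "float" and "integer" encodings.

    String encoderName = PayloadUtils.getPayloadEncoder(fieldType);
    if ("float".equals(encoderName)) {
        // Payloads were written with PayloadHelper.encodeFloat(...), e.g. by
        // NumericPayloadTokenFilter or DelimitedPayloadTokenFilter with encoder="float".
        float boost = PayloadHelper.decodeFloat(payload.bytes, payload.offset);
        System.out.println("decoded float payload: " + boost);
    } else if ("integer".equals(encoderName)) {
        int value = PayloadHelper.decodeInt(payload.bytes, payload.offset);
        System.out.println("decoded integer payload: " + value);
    } else {
        // null or "identity": the raw bytes are the payload, no numeric decoding applies.
    }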

Aggregations

TokenizerChain (org.apache.solr.analysis.TokenizerChain): 14 usages
TokenFilterFactory (org.apache.lucene.analysis.util.TokenFilterFactory): 12 usages
Analyzer (org.apache.lucene.analysis.Analyzer): 10 usages
CharFilterFactory (org.apache.lucene.analysis.util.CharFilterFactory): 5 usages
TokenizerFactory (org.apache.lucene.analysis.util.TokenizerFactory): 5 usages
ArrayList (java.util.ArrayList): 4 usages
Test (org.junit.Test): 4 usages
KeywordTokenizerFactory (org.apache.lucene.analysis.core.KeywordTokenizerFactory): 3 usages
LowerCaseFilterFactory (org.apache.lucene.analysis.core.LowerCaseFilterFactory): 3 usages
IOException (java.io.IOException): 2 usages
HashMap (java.util.HashMap): 2 usages
List (java.util.List): 2 usages
TokenStream (org.apache.lucene.analysis.TokenStream): 2 usages
Tokenizer (org.apache.lucene.analysis.Tokenizer): 2 usages
KeywordAnalyzer (org.apache.lucene.analysis.core.KeywordAnalyzer): 2 usages
ASCIIFoldingFilterFactory (org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilterFactory): 2 usages
SolrException (org.apache.solr.common.SolrException): 2 usages
NamedList (org.apache.solr.common.util.NamedList): 2 usages
SimpleOrderedMap (org.apache.solr.common.util.SimpleOrderedMap): 2 usages
Closeable (java.io.Closeable): 1 usage