Search in sources:

Example 31 with TokenFilterFactory

Use of org.apache.lucene.analysis.util.TokenFilterFactory in the project lucene-solr by Apache.

From the class FieldAnalysisRequestHandlerTest, method testCustomAttribute.

// See SOLR-8460: custom token attributes must survive through the analysis response.
@Test
public void testCustomAttribute() throws Exception {
    FieldAnalysisRequest request = new FieldAnalysisRequest();
    request.addFieldType("skutype1");
    request.setFieldValue("hi, 3456-12 a Test");
    request.setShowMatch(false);

    // Factories that plug the custom tokenizer/filter pair into an analysis chain.
    TokenizerFactory tokenizerFactory = new TokenizerFactory(Collections.emptyMap()) {

        @Override
        public Tokenizer create(AttributeFactory factory) {
            return new CustomTokenizer(factory);
        }
    };
    TokenFilterFactory tokenFilterFactory = new TokenFilterFactory(Collections.emptyMap()) {

        @Override
        public TokenStream create(TokenStream input) {
            return new CustomTokenFilter(input);
        }
    };
    Analyzer analyzer = new TokenizerChain(tokenizerFactory, new TokenFilterFactory[] { tokenFilterFactory });

    FieldType fieldType = new TextField();
    fieldType.setIndexAnalyzer(analyzer);
    NamedList<NamedList> result = handler.analyzeValues(request, fieldType, "fieldNameUnused");
    // just test that we see "900" in the flags attribute here
    List<NamedList> tokenInfoList = (List<NamedList>) result.findRecursive("index", CustomTokenFilter.class.getName());
    // '1' from CustomTokenFilter plus 900 from CustomFlagsAttributeImpl.
    assertEquals(901, tokenInfoList.get(0).get("org.apache.lucene.analysis.tokenattributes.FlagsAttribute#flags"));
}
Also used : TokenStream(org.apache.lucene.analysis.TokenStream) TokenizerFactory(org.apache.lucene.analysis.util.TokenizerFactory) NamedList(org.apache.solr.common.util.NamedList) AttributeFactory(org.apache.lucene.util.AttributeFactory) Analyzer(org.apache.lucene.analysis.Analyzer) TokenFilterFactory(org.apache.lucene.analysis.util.TokenFilterFactory) FieldType(org.apache.solr.schema.FieldType) TokenizerChain(org.apache.solr.analysis.TokenizerChain) TextField(org.apache.solr.schema.TextField) ArrayList(java.util.ArrayList) NamedList(org.apache.solr.common.util.NamedList) List(java.util.List) FieldAnalysisRequest(org.apache.solr.client.solrj.request.FieldAnalysisRequest) Tokenizer(org.apache.lucene.analysis.Tokenizer) MockTokenizer(org.apache.lucene.analysis.MockTokenizer) WhitespaceTokenizer(org.apache.lucene.analysis.core.WhitespaceTokenizer) Test(org.junit.Test)

Example 32 with TokenFilterFactory

use of org.apache.lucene.analysis.util.TokenFilterFactory in project lucene-solr by apache.

From the class MultiTermTest, method testQueryCopiedToMulti.

// Verifies the multiterm analyzer derived from the query analyzer: it should keep the
// char filters and lowercase filter but use a keyword tokenizer (so wildcard terms
// are not split). NOTE(review): expectations inferred from the test name and asserts;
// confirm against the "content_charfilter" field definition in the test schema.
@Test
public void testQueryCopiedToMulti() {
    SchemaField field = h.getCore().getLatestSchema().getField("content_charfilter");
    Analyzer analyzer = ((TextField) field.getType()).getMultiTermAnalyzer();
    assertTrue(analyzer instanceof TokenizerChain);
    // Reuse the cast instead of re-casting for each assertion.
    TokenizerChain tc = (TokenizerChain) analyzer;
    assertTrue(tc.getTokenizerFactory() instanceof KeywordTokenizerFactory);
    for (TokenFilterFactory factory : tc.getTokenFilterFactories()) {
        assertTrue(factory instanceof LowerCaseFilterFactory);
    }
    // assertEquals reports expected vs. actual on failure, unlike assertTrue(x == 1).
    assertEquals(1, tc.getCharFilterFactories().length);
    assertTrue(tc.getCharFilterFactories()[0] instanceof MappingCharFilterFactory);
}
Also used : TokenizerChain(org.apache.solr.analysis.TokenizerChain) LowerCaseFilterFactory(org.apache.lucene.analysis.core.LowerCaseFilterFactory) MappingCharFilterFactory(org.apache.lucene.analysis.charfilter.MappingCharFilterFactory) Analyzer(org.apache.lucene.analysis.Analyzer) TokenFilterFactory(org.apache.lucene.analysis.util.TokenFilterFactory) KeywordTokenizerFactory(org.apache.lucene.analysis.core.KeywordTokenizerFactory) Test(org.junit.Test)

Example 33 with TokenFilterFactory

use of org.apache.lucene.analysis.util.TokenFilterFactory in project lucene-solr by apache.

From the class SolrStopwordsCarrot2LexicalDataFactory, method getSolrStopWordsForField.

/**
   * Obtains stop words for a field from the associated
   * {@link StopFilterFactory} and {@link CommonGramsFilterFactory}, if any.
   *
   * @param fieldName schema field whose index analysis chain is inspected
   * @return cached list of stop/common word sets for the field (possibly empty, never null)
   */
private List<CharArraySet> getSolrStopWordsForField(String fieldName) {
    // The cache map doubles as its own lock; population happens at most once per field.
    synchronized (solrStopWords) {
        // Values are never null (we always put a fresh list), so a null get()
        // means "not cached yet". One lookup replaces the previous
        // containsKey/put/get/get sequence.
        List<CharArraySet> wordSets = solrStopWords.get(fieldName);
        if (wordSets == null) {
            wordSets = new ArrayList<>();
            solrStopWords.put(fieldName, wordSets);
            IndexSchema schema = core.getLatestSchema();
            final Analyzer fieldAnalyzer = schema.getFieldType(fieldName).getIndexAnalyzer();
            if (fieldAnalyzer instanceof TokenizerChain) {
                final TokenFilterFactory[] filterFactories = ((TokenizerChain) fieldAnalyzer).getTokenFilterFactories();
                for (TokenFilterFactory factory : filterFactories) {
                    if (factory instanceof StopFilterFactory) {
                        // StopFilterFactory holds the stop words in a CharArraySet
                        wordSets.add(((StopFilterFactory) factory).getStopWords());
                    }
                    if (factory instanceof CommonGramsFilterFactory) {
                        wordSets.add(((CommonGramsFilterFactory) factory).getCommonWords());
                    }
                }
            }
        }
        return wordSets;
    }
}
Also used : CharArraySet(org.apache.lucene.analysis.CharArraySet) StopFilterFactory(org.apache.lucene.analysis.core.StopFilterFactory) TokenizerChain(org.apache.solr.analysis.TokenizerChain) CommonGramsFilterFactory(org.apache.lucene.analysis.commongrams.CommonGramsFilterFactory) IndexSchema(org.apache.solr.schema.IndexSchema) Analyzer(org.apache.lucene.analysis.Analyzer) TokenFilterFactory(org.apache.lucene.analysis.util.TokenFilterFactory)

Example 34 with TokenFilterFactory

use of org.apache.lucene.analysis.util.TokenFilterFactory in project lucene-solr by apache.

From the class SolrQueryParserBase, method getReversedWildcardFilterFactory.

/**
 * Finds (and caches per field type) the {@link ReversedWildcardFilterFactory}
 * in the field type's index analysis chain, or null when the chain has none.
 */
protected ReversedWildcardFilterFactory getReversedWildcardFilterFactory(FieldType fieldType) {
    if (leadingWildcards == null) {
        leadingWildcards = new HashMap<>();
    }
    // A cached null is a valid answer ("no such filter"), so the presence of the
    // key — not just a non-null value — short-circuits the scan.
    if (leadingWildcards.containsKey(fieldType)) {
        return leadingWildcards.get(fieldType);
    }
    ReversedWildcardFilterFactory found = null;
    Analyzer indexAnalyzer = fieldType.getIndexAnalyzer();
    if (indexAnalyzer instanceof TokenizerChain) {
        // examine the indexing analysis chain if it supports leading wildcards
        for (TokenFilterFactory factory : ((TokenizerChain) indexAnalyzer).getTokenFilterFactories()) {
            if (factory instanceof ReversedWildcardFilterFactory) {
                found = (ReversedWildcardFilterFactory) factory;
                break;
            }
        }
    }
    leadingWildcards.put(fieldType, found);
    return found;
}
Also used : TokenizerChain(org.apache.solr.analysis.TokenizerChain) HashMap(java.util.HashMap) ReversedWildcardFilterFactory(org.apache.solr.analysis.ReversedWildcardFilterFactory) Analyzer(org.apache.lucene.analysis.Analyzer) TokenFilterFactory(org.apache.lucene.analysis.util.TokenFilterFactory)

Example 35 with TokenFilterFactory

use of org.apache.lucene.analysis.util.TokenFilterFactory in project lucene-solr by apache.

From the class FieldTypePluginLoader, method constructMultiTermAnalyzer.

// If no multiterm analyzer was specified in the schema file, one of two things happens:
// 1> Normally, assemble a new analyzer composed of all of the charfilters,
//    lowercase filters and asciifoldingfilter from the query analyzer's chain.
// 2> If the query analyzer is not a TokenizerChain (or legacyMultiTerm behavior is
//    requested), just construct the analyzer from a KeywordTokenizer, mimicking
//    the old behavior (legacyMultiTerm="true").
private Analyzer constructMultiTermAnalyzer(Analyzer queryAnalyzer) {
    if (queryAnalyzer == null) {
        return null;
    }
    // Analyzers that are not TokenizerChains cannot be decomposed; fall back
    // to plain keyword behavior.
    if (!(queryAnalyzer instanceof TokenizerChain)) {
        return new KeywordAnalyzer();
    }
    TokenizerChain chain = (TokenizerChain) queryAnalyzer;
    MultiTermChainBuilder builder = new MultiTermChainBuilder();
    // Feed the builder in chain order: char filters, tokenizer, then token filters.
    for (CharFilterFactory charFactory : chain.getCharFilterFactories()) {
        builder.add(charFactory);
    }
    builder.add(chain.getTokenizerFactory());
    for (TokenFilterFactory filterFactory : chain.getTokenFilterFactories()) {
        builder.add(filterFactory);
    }
    return builder.build();
}
Also used : KeywordAnalyzer(org.apache.lucene.analysis.core.KeywordAnalyzer) TokenizerChain(org.apache.solr.analysis.TokenizerChain) CharFilterFactory(org.apache.lucene.analysis.util.CharFilterFactory) TokenFilterFactory(org.apache.lucene.analysis.util.TokenFilterFactory)

Aggregations

TokenFilterFactory (org.apache.lucene.analysis.util.TokenFilterFactory)40 CharFilterFactory (org.apache.lucene.analysis.util.CharFilterFactory)16 Analyzer (org.apache.lucene.analysis.Analyzer)12 TokenizerChain (org.apache.solr.analysis.TokenizerChain)12 TokenizerFactory (org.apache.lucene.analysis.util.TokenizerFactory)11 TokenStream (org.apache.lucene.analysis.TokenStream)10 ArrayList (java.util.ArrayList)7 HashMap (java.util.HashMap)7 Tokenizer (org.apache.lucene.analysis.Tokenizer)6 MultiTermAwareComponent (org.apache.lucene.analysis.util.MultiTermAwareComponent)5 IOException (java.io.IOException)4 StringReader (java.io.StringReader)4 Test (org.junit.Test)4 Reader (java.io.Reader)3 Map (java.util.Map)3 CannedTokenStream (org.apache.lucene.analysis.CannedTokenStream)3 KeywordTokenizerFactory (org.apache.lucene.analysis.core.KeywordTokenizerFactory)3 ResourceLoaderAware (org.apache.lucene.analysis.util.ResourceLoaderAware)3 SolrException (org.apache.solr.common.SolrException)3 JsonElement (com.google.gson.JsonElement)2