Search in sources :

Example 6 with TokenizerChain

use of org.apache.solr.analysis.TokenizerChain in project lucene-solr by apache.

In the class SolrSuggester, the method init.

/**
   * Uses the <code>config</code> and the <code>core</code> to initialize the underlying 
   * Lucene suggester.
   *
   * @param config suggester configuration; read keys: NAME, LOCATION, LOOKUP_IMPL,
   *               DICTIONARY_IMPL, STORE_DIR (all optional, with fallbacks below)
   * @param core   the SolrCore used for resource loading, data-dir resolution and close hooks
   * @return the (possibly defaulted) suggester name
   * */
public String init(NamedList<?> config, SolrCore core) {
    LOG.info("init: " + config);
    // read the config; every key is optional
    name = config.get(NAME) != null ? (String) config.get(NAME) : DEFAULT_DICT_NAME;
    sourceLocation = (String) config.get(LOCATION);
    lookupImpl = (String) config.get(LOOKUP_IMPL);
    dictionaryImpl = (String) config.get(DICTIONARY_IMPL);
    String store = (String) config.get(STORE_DIR);
    if (lookupImpl == null) {
        lookupImpl = LookupFactory.DEFAULT_FILE_BASED_DICT;
        LOG.info("No " + LOOKUP_IMPL + " parameter was provided falling back to " + lookupImpl);
    }
    // Collections.emptyMap() is the same (immutable, empty) map as the raw EMPTY_MAP
    // constant but properly typed, avoiding an unchecked-conversion warning.
    contextFilterQueryAnalyzer = new TokenizerChain(new StandardTokenizerFactory(Collections.emptyMap()), null);
    // initialize appropriate lookup instance
    factory = core.getResourceLoader().newInstance(lookupImpl, LookupFactory.class);
    lookup = factory.create(config, core);
    // instanceof is already false for null, so no separate null check is needed
    if (lookup instanceof Closeable) {
        core.addCloseHook(new CloseHook() {

            @Override
            public void preClose(SolrCore core) {
                try {
                    ((Closeable) lookup).close();
                } catch (IOException e) {
                    LOG.warn("Could not close the suggester lookup.", e);
                }
            }

            @Override
            public void postClose(SolrCore core) {
            }
        });
    }
    // if store directory is provided make it or load up the lookup with its content
    if (store != null && !store.isEmpty()) {
        storeDir = new File(store);
        if (!storeDir.isAbsolute()) {
            // relative store paths are resolved against the core's data directory
            storeDir = new File(core.getDataDir() + File.separator + storeDir);
        }
        if (!storeDir.exists()) {
            // mkdirs() may also return false if another thread created the directory
            // concurrently, so re-check isDirectory() before warning
            if (!storeDir.mkdirs() && !storeDir.isDirectory()) {
                LOG.warn("Could not create the store directory " + storeDir);
            }
        } else if (getStoreFile().exists()) {
            if (LOG.isDebugEnabled()) {
                LOG.debug("attempt reload of the stored lookup from file " + getStoreFile());
            }
            // try-with-resources guarantees the stream is closed even if load() throws;
            // a second close by load() itself is a harmless no-op on FileInputStream
            try (FileInputStream is = new FileInputStream(getStoreFile())) {
                lookup.load(is);
            } catch (IOException e) {
                LOG.warn("Loading stored lookup data failed, possibly not cached yet");
            }
        }
    }
    // dictionary configuration: default depends on whether a file source was given
    if (dictionaryImpl == null) {
        dictionaryImpl = (sourceLocation == null) ? DictionaryFactory.DEFAULT_INDEX_BASED_DICT : DictionaryFactory.DEFAULT_FILE_BASED_DICT;
        LOG.info("No " + DICTIONARY_IMPL + " parameter was provided falling back to " + dictionaryImpl);
    }
    dictionaryFactory = core.getResourceLoader().newInstance(dictionaryImpl, DictionaryFactory.class);
    dictionaryFactory.setParams(config);
    LOG.info("Dictionary loaded with params: " + config);
    return name;
}
Also used : CloseHook(org.apache.solr.core.CloseHook) TokenizerChain(org.apache.solr.analysis.TokenizerChain) SolrCore(org.apache.solr.core.SolrCore) Closeable(java.io.Closeable) StandardTokenizerFactory(org.apache.lucene.analysis.standard.StandardTokenizerFactory) IOException(java.io.IOException) File(java.io.File) FileInputStream(java.io.FileInputStream)

Example 7 with TokenizerChain

use of org.apache.solr.analysis.TokenizerChain in project lucene-solr by apache.

In the class MultiTermTest, the method testMultiFound.

@Test
public void testMultiFound() {
    // The multi-term analyzer of content_multi must be a whitespace-tokenized chain
    // whose filters are limited to ASCII-folding and lower-casing.
    SchemaField field = h.getCore().getLatestSchema().getField("content_multi");
    Analyzer analyzer = ((TextField) field.getType()).getMultiTermAnalyzer();
    assertTrue(analyzer instanceof TokenizerChain);
    assertTrue(((TokenizerChain) analyzer).getTokenizerFactory() instanceof WhitespaceTokenizerFactory);
    TokenizerChain tc = (TokenizerChain) analyzer;
    for (TokenFilterFactory factory : tc.getTokenFilterFactories()) {
        assertTrue((factory instanceof ASCIIFoldingFilterFactory) || (factory instanceof LowerCaseFilterFactory));
    }
    // The index analyzer is also a whitespace-tokenized chain, but with trim instead of lower-case.
    analyzer = field.getType().getIndexAnalyzer();
    assertTrue(analyzer instanceof TokenizerChain);
    assertTrue(((TokenizerChain) analyzer).getTokenizerFactory() instanceof WhitespaceTokenizerFactory);
    tc = (TokenizerChain) analyzer;
    for (TokenFilterFactory factory : tc.getTokenFilterFactories()) {
        assertTrue((factory instanceof ASCIIFoldingFilterFactory) || (factory instanceof TrimFilterFactory));
    }
    // assertEquals reports the actual length on failure, unlike assertTrue on a comparison
    assertEquals(0, tc.getCharFilterFactories().length);
}
Also used : TokenizerChain(org.apache.solr.analysis.TokenizerChain) ASCIIFoldingFilterFactory(org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilterFactory) LowerCaseFilterFactory(org.apache.lucene.analysis.core.LowerCaseFilterFactory) TrimFilterFactory(org.apache.lucene.analysis.miscellaneous.TrimFilterFactory) Analyzer(org.apache.lucene.analysis.Analyzer) WhitespaceTokenizerFactory(org.apache.lucene.analysis.core.WhitespaceTokenizerFactory) TokenFilterFactory(org.apache.lucene.analysis.util.TokenFilterFactory) Test(org.junit.Test)

Example 8 with TokenizerChain

use of org.apache.solr.analysis.TokenizerChain in project lucene-solr by apache.

In the class MultiTermTest, the method testDefaultCopiedToMulti.

@Test
public void testDefaultCopiedToMulti() {
    // When no explicit multi-term analyzer is configured, the derived one must use
    // a keyword tokenizer with only ASCII-folding and lower-casing filters.
    SchemaField field = h.getCore().getLatestSchema().getField("content_ws");
    Analyzer analyzer = ((TextField) field.getType()).getMultiTermAnalyzer();
    assertTrue(analyzer instanceof TokenizerChain);
    assertTrue(((TokenizerChain) analyzer).getTokenizerFactory() instanceof KeywordTokenizerFactory);
    TokenizerChain tc = (TokenizerChain) analyzer;
    for (TokenFilterFactory factory : tc.getTokenFilterFactories()) {
        assertTrue((factory instanceof ASCIIFoldingFilterFactory) || (factory instanceof LowerCaseFilterFactory));
    }
    // assertEquals reports the actual length on failure, unlike assertTrue on a comparison
    assertEquals(0, tc.getCharFilterFactories().length);
}
Also used : TokenizerChain(org.apache.solr.analysis.TokenizerChain) ASCIIFoldingFilterFactory(org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilterFactory) LowerCaseFilterFactory(org.apache.lucene.analysis.core.LowerCaseFilterFactory) Analyzer(org.apache.lucene.analysis.Analyzer) TokenFilterFactory(org.apache.lucene.analysis.util.TokenFilterFactory) KeywordTokenizerFactory(org.apache.lucene.analysis.core.KeywordTokenizerFactory) Test(org.junit.Test)

Example 9 with TokenizerChain

use of org.apache.solr.analysis.TokenizerChain in project lucene-solr by apache.

In the class FieldAnalysisRequestHandlerTest, the method testCustomAttribute.

//See SOLR-8460
@Test
public void testCustomAttribute() throws Exception {
    // Build an analysis request for a single value against the skutype1 field type.
    FieldAnalysisRequest request = new FieldAnalysisRequest();
    request.addFieldType("skutype1");
    request.setFieldValue("hi, 3456-12 a Test");
    request.setShowMatch(false);
    FieldType fieldType = new TextField();
    // Ad-hoc analyzer: an anonymous tokenizer factory producing CustomTokenizer,
    // followed by a single anonymous filter factory producing CustomTokenFilter.
    Analyzer analyzer = new TokenizerChain(new TokenizerFactory(Collections.emptyMap()) {

        @Override
        public Tokenizer create(AttributeFactory factory) {
            return new CustomTokenizer(factory);
        }
    }, new TokenFilterFactory[] { new TokenFilterFactory(Collections.emptyMap()) {

        @Override
        public TokenStream create(TokenStream input) {
            return new CustomTokenFilter(input);
        }
    } });
    fieldType.setIndexAnalyzer(analyzer);
    // Run the handler directly; the field name is unused because the field type is passed in.
    NamedList<NamedList> result = handler.analyzeValues(request, fieldType, "fieldNameUnused");
    // just test that we see "900" in the flags attribute here
    List<NamedList> tokenInfoList = (List<NamedList>) result.findRecursive("index", CustomTokenFilter.class.getName());
    // '1' from CustomTokenFilter plus 900 from CustomFlagsAttributeImpl.
    assertEquals(901, tokenInfoList.get(0).get("org.apache.lucene.analysis.tokenattributes.FlagsAttribute#flags"));
}
Also used : TokenStream(org.apache.lucene.analysis.TokenStream) TokenizerFactory(org.apache.lucene.analysis.util.TokenizerFactory) NamedList(org.apache.solr.common.util.NamedList) AttributeFactory(org.apache.lucene.util.AttributeFactory) Analyzer(org.apache.lucene.analysis.Analyzer) TokenFilterFactory(org.apache.lucene.analysis.util.TokenFilterFactory) FieldType(org.apache.solr.schema.FieldType) TokenizerChain(org.apache.solr.analysis.TokenizerChain) TextField(org.apache.solr.schema.TextField) ArrayList(java.util.ArrayList) NamedList(org.apache.solr.common.util.NamedList) List(java.util.List) FieldAnalysisRequest(org.apache.solr.client.solrj.request.FieldAnalysisRequest) Tokenizer(org.apache.lucene.analysis.Tokenizer) MockTokenizer(org.apache.lucene.analysis.MockTokenizer) WhitespaceTokenizer(org.apache.lucene.analysis.core.WhitespaceTokenizer) Test(org.junit.Test)

Example 10 with TokenizerChain

use of org.apache.solr.analysis.TokenizerChain in project lucene-solr by apache.

In the class MultiTermTest, the method testQueryCopiedToMulti.

@Test
public void testQueryCopiedToMulti() {
    // The multi-term analyzer of content_charfilter must keyword-tokenize, lower-case,
    // and keep exactly one char filter: the mapping char filter from the query analyzer.
    SchemaField field = h.getCore().getLatestSchema().getField("content_charfilter");
    Analyzer analyzer = ((TextField) field.getType()).getMultiTermAnalyzer();
    assertTrue(analyzer instanceof TokenizerChain);
    assertTrue(((TokenizerChain) analyzer).getTokenizerFactory() instanceof KeywordTokenizerFactory);
    TokenizerChain tc = (TokenizerChain) analyzer;
    for (TokenFilterFactory factory : tc.getTokenFilterFactories()) {
        assertTrue(factory instanceof LowerCaseFilterFactory);
    }
    // assertEquals reports the actual length on failure, unlike assertTrue on a comparison
    assertEquals(1, tc.getCharFilterFactories().length);
    assertTrue(tc.getCharFilterFactories()[0] instanceof MappingCharFilterFactory);
}
Also used : TokenizerChain(org.apache.solr.analysis.TokenizerChain) LowerCaseFilterFactory(org.apache.lucene.analysis.core.LowerCaseFilterFactory) MappingCharFilterFactory(org.apache.lucene.analysis.charfilter.MappingCharFilterFactory) Analyzer(org.apache.lucene.analysis.Analyzer) TokenFilterFactory(org.apache.lucene.analysis.util.TokenFilterFactory) KeywordTokenizerFactory(org.apache.lucene.analysis.core.KeywordTokenizerFactory) Test(org.junit.Test)

Aggregations

TokenizerChain (org.apache.solr.analysis.TokenizerChain)14 TokenFilterFactory (org.apache.lucene.analysis.util.TokenFilterFactory)12 Analyzer (org.apache.lucene.analysis.Analyzer)10 CharFilterFactory (org.apache.lucene.analysis.util.CharFilterFactory)5 TokenizerFactory (org.apache.lucene.analysis.util.TokenizerFactory)5 ArrayList (java.util.ArrayList)4 Test (org.junit.Test)4 KeywordTokenizerFactory (org.apache.lucene.analysis.core.KeywordTokenizerFactory)3 LowerCaseFilterFactory (org.apache.lucene.analysis.core.LowerCaseFilterFactory)3 IOException (java.io.IOException)2 HashMap (java.util.HashMap)2 List (java.util.List)2 TokenStream (org.apache.lucene.analysis.TokenStream)2 Tokenizer (org.apache.lucene.analysis.Tokenizer)2 KeywordAnalyzer (org.apache.lucene.analysis.core.KeywordAnalyzer)2 ASCIIFoldingFilterFactory (org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilterFactory)2 SolrException (org.apache.solr.common.SolrException)2 NamedList (org.apache.solr.common.util.NamedList)2 SimpleOrderedMap (org.apache.solr.common.util.SimpleOrderedMap)2 Closeable (java.io.Closeable)1