
Example 1 with TokenizerFactory

Use of org.elasticsearch.index.analysis.TokenizerFactory in the elasticsearch project by elastic.

From the class TransportAnalyzeAction, the method parseTokenizerFactory:

private static TokenizerFactory parseTokenizerFactory(AnalyzeRequest request, IndexAnalyzers indexAnalyzers, AnalysisRegistry analysisRegistry, Environment environment) throws IOException {
    TokenizerFactory tokenizerFactory;
    final AnalyzeRequest.NameOrDefinition tokenizer = request.tokenizer();
    // the tokenizer was supplied as an inline definition; parse it as anonymous settings
    if (tokenizer.definition != null) {
        Settings settings = getAnonymousSettings(tokenizer.definition);
        String tokenizerTypeName = settings.get("type");
        if (tokenizerTypeName == null) {
            throw new IllegalArgumentException("Missing [type] setting for anonymous tokenizer: " + tokenizer.definition);
        }
        AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(tokenizerTypeName);
        if (tokenizerFactoryFactory == null) {
            throw new IllegalArgumentException("failed to find global tokenizer under [" + tokenizerTypeName + "]");
        }
        // anonymous tokenizers get the placeholder name "_anonymous_tokenizer"
        tokenizerFactory = tokenizerFactoryFactory.get(getNaIndexSettings(settings), environment, "_anonymous_tokenizer", settings);
    } else {
        // a named tokenizer was requested; resolve it by name
        AnalysisModule.AnalysisProvider<TokenizerFactory> tokenizerFactoryFactory;
        if (indexAnalyzers == null) {
            // the request names no index, so only global (built-in) tokenizers are available
            tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(tokenizer.name);
            if (tokenizerFactoryFactory == null) {
                throw new IllegalArgumentException("failed to find global tokenizer under [" + tokenizer.name + "]");
            }
            tokenizerFactory = tokenizerFactoryFactory.get(environment, tokenizer.name);
        } else {
            // the request targets an index: resolve against the index's analysis settings
            tokenizerFactoryFactory = analysisRegistry.getTokenizerProvider(tokenizer.name, indexAnalyzers.getIndexSettings());
            if (tokenizerFactoryFactory == null) {
                throw new IllegalArgumentException("failed to find tokenizer under [" + tokenizer.name + "]");
            }
            tokenizerFactory = tokenizerFactoryFactory.get(indexAnalyzers.getIndexSettings(), environment, tokenizer.name, AnalysisRegistry.getSettingsFromIndexSettings(indexAnalyzers.getIndexSettings(), AnalysisRegistry.INDEX_ANALYSIS_TOKENIZER + "." + tokenizer.name));
        }
    }
    return tokenizerFactory;
}
Also used: TokenizerFactory (org.elasticsearch.index.analysis.TokenizerFactory), AnalysisModule (org.elasticsearch.indices.analysis.AnalysisModule), Settings (org.elasticsearch.common.settings.Settings), IndexSettings (org.elasticsearch.index.IndexSettings)
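
Both branches of parseTokenizerFactory are reachable from the client side. The sketch below builds the two corresponding AnalyzeRequest variants: a named tokenizer, and an inline (anonymous) definition whose [type] setting selects the provider. It assumes the 5.x AnalyzeRequest builder methods tokenizer(String) and tokenizer(Map); the index name and ngram settings are purely illustrative.

import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;

public class AnalyzeRequestSketch {
    public static void main(String[] args) {
        // named tokenizer: resolved by name against the index's analysis settings,
        // or against the global registry when the request names no index
        AnalyzeRequest named = new AnalyzeRequest("my_index")
                .text("Some text to analyze")
                .tokenizer("standard");

        // anonymous tokenizer: an inline definition; parseTokenizerFactory reads its
        // [type] setting and registers the factory as "_anonymous_tokenizer"
        Map<String, Object> definition = new HashMap<>();
        definition.put("type", "ngram");
        definition.put("min_gram", 2);
        definition.put("max_gram", 3);
        AnalyzeRequest anonymous = new AnalyzeRequest()
                .text("Some text to analyze")
                .tokenizer(definition);

        // leaving the "type" key out of the definition map would produce the
        // "Missing [type] setting for anonymous tokenizer" error thrown above
    }
}

With no index on the request, indexAnalyzers arrives as null on the transport side and the lookup falls back to the global registry, which is exactly the first named branch in the method above.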

Example 2 with TokenizerFactory

Use of org.elasticsearch.index.analysis.TokenizerFactory in the elasticsearch project by elastic.

From the class TransportAnalyzeAction, the method detailAnalyze:

private static DetailAnalyzeResponse detailAnalyze(AnalyzeRequest request, Analyzer analyzer, String field) {
    DetailAnalyzeResponse detailResponse;
    final Set<String> includeAttributes = new HashSet<>();
    if (request.attributes() != null) {
        for (String attribute : request.attributes()) {
            includeAttributes.add(attribute.toLowerCase(Locale.ROOT));
        }
    }
    CustomAnalyzer customAnalyzer = null;
    if (analyzer instanceof CustomAnalyzer) {
        customAnalyzer = (CustomAnalyzer) analyzer;
    } else if (analyzer instanceof NamedAnalyzer && ((NamedAnalyzer) analyzer).analyzer() instanceof CustomAnalyzer) {
        customAnalyzer = (CustomAnalyzer) ((NamedAnalyzer) analyzer).analyzer();
    }
    if (customAnalyzer != null) {
        // decompose the custom analyzer into its char filters, tokenizer and token filters
        CharFilterFactory[] charFilterFactories = customAnalyzer.charFilters();
        TokenizerFactory tokenizerFactory = customAnalyzer.tokenizerFactory();
        TokenFilterFactory[] tokenFilterFactories = customAnalyzer.tokenFilters();
        String[][] charFiltersTexts = new String[charFilterFactories != null ? charFilterFactories.length : 0][request.text().length];
        TokenListCreator[] tokenFiltersTokenListCreator = new TokenListCreator[tokenFilterFactories != null ? tokenFilterFactories.length : 0];
        TokenListCreator tokenizerTokenListCreator = new TokenListCreator();
        for (int textIndex = 0; textIndex < request.text().length; textIndex++) {
            String charFilteredSource = request.text()[textIndex];
            Reader reader = new FastStringReader(charFilteredSource);
            if (charFilterFactories != null) {
                for (int charFilterIndex = 0; charFilterIndex < charFilterFactories.length; charFilterIndex++) {
                    reader = charFilterFactories[charFilterIndex].create(reader);
                    // run the same char filter over a fresh reader so its output can be
                    // captured for the response without consuming the main chain
                    Reader readerForWriteOut = new FastStringReader(charFilteredSource);
                    readerForWriteOut = charFilterFactories[charFilterIndex].create(readerForWriteOut);
                    charFilteredSource = writeCharStream(readerForWriteOut);
                    charFiltersTexts[charFilterIndex][textIndex] = charFilteredSource;
                }
            }
            // analyze the output of the tokenizer on its own
            Tokenizer tokenizer = tokenizerFactory.create();
            tokenizer.setReader(reader);
            tokenizerTokenListCreator.analyze(tokenizer, customAnalyzer, field, includeAttributes);
            // analyze the stream as it looks after each token filter in the chain
            if (tokenFilterFactories != null) {
                for (int tokenFilterIndex = 0; tokenFilterIndex < tokenFilterFactories.length; tokenFilterIndex++) {
                    if (tokenFiltersTokenListCreator[tokenFilterIndex] == null) {
                        tokenFiltersTokenListCreator[tokenFilterIndex] = new TokenListCreator();
                    }
                    TokenStream stream = createStackedTokenStream(request.text()[textIndex], charFilterFactories, tokenizerFactory, tokenFilterFactories, tokenFilterIndex + 1);
                    tokenFiltersTokenListCreator[tokenFilterIndex].analyze(stream, customAnalyzer, field, includeAttributes);
                }
            }
        }
        DetailAnalyzeResponse.CharFilteredText[] charFilteredLists = new DetailAnalyzeResponse.CharFilteredText[charFiltersTexts.length];
        if (charFilterFactories != null) {
            for (int charFilterIndex = 0; charFilterIndex < charFiltersTexts.length; charFilterIndex++) {
                charFilteredLists[charFilterIndex] = new DetailAnalyzeResponse.CharFilteredText(charFilterFactories[charFilterIndex].name(), charFiltersTexts[charFilterIndex]);
            }
        }
        DetailAnalyzeResponse.AnalyzeTokenList[] tokenFilterLists = new DetailAnalyzeResponse.AnalyzeTokenList[tokenFiltersTokenListCreator.length];
        if (tokenFilterFactories != null) {
            for (int tokenFilterIndex = 0; tokenFilterIndex < tokenFiltersTokenListCreator.length; tokenFilterIndex++) {
                tokenFilterLists[tokenFilterIndex] = new DetailAnalyzeResponse.AnalyzeTokenList(tokenFilterFactories[tokenFilterIndex].name(), tokenFiltersTokenListCreator[tokenFilterIndex].getArrayTokens());
            }
        }
        detailResponse = new DetailAnalyzeResponse(charFilteredLists, new DetailAnalyzeResponse.AnalyzeTokenList(tokenizerFactory.name(), tokenizerTokenListCreator.getArrayTokens()), tokenFilterLists);
    } else {
        String name;
        if (analyzer instanceof NamedAnalyzer) {
            name = ((NamedAnalyzer) analyzer).name();
        } else {
            name = analyzer.getClass().getName();
        }
        TokenListCreator tokenListCreator = new TokenListCreator();
        for (String text : request.text()) {
            tokenListCreator.analyze(analyzer.tokenStream(field, text), analyzer, field, includeAttributes);
        }
        detailResponse = new DetailAnalyzeResponse(new DetailAnalyzeResponse.AnalyzeTokenList(name, tokenListCreator.getArrayTokens()));
    }
    return detailResponse;
}
Also used: TokenStream (org.apache.lucene.analysis.TokenStream), FastStringReader (org.elasticsearch.common.io.FastStringReader), NamedAnalyzer (org.elasticsearch.index.analysis.NamedAnalyzer), Reader (java.io.Reader), Tokenizer (org.apache.lucene.analysis.Tokenizer), HashSet (java.util.HashSet), TokenizerFactory (org.elasticsearch.index.analysis.TokenizerFactory), CharFilterFactory (org.elasticsearch.index.analysis.CharFilterFactory), TokenFilterFactory (org.elasticsearch.index.analysis.TokenFilterFactory), CustomAnalyzer (org.elasticsearch.index.analysis.CustomAnalyzer)
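
Stage-by-stage analysis like the above is ordinary Lucene plumbing: set a Reader on a Tokenizer, wrap one TokenFilter per stage around it, and walk the resulting TokenStream through its attributes, which is roughly what TokenListCreator does when it consumes a stream. A minimal, self-contained sketch with stock Lucene classes; the core package locations assume the Lucene 5/6 line that Elasticsearch 5.x bundles.

import java.io.IOException;
import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.core.LowerCaseFilter;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;

public class StackedTokenStreamDemo {
    public static void main(String[] args) throws IOException {
        // tokenizer stage: the stream tokenizerTokenListCreator.analyze(...) inspects
        Tokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader("Quick Brown FOX"));

        // token filter stage: one more wrapper per filter in the chain
        TokenStream stream = new LowerCaseFilter(tokenizer);

        // attribute-based iteration over the final stream
        CharTermAttribute term = stream.addAttribute(CharTermAttribute.class);
        OffsetAttribute offset = stream.addAttribute(OffsetAttribute.class);
        stream.reset();
        while (stream.incrementToken()) {
            System.out.println(term.toString() + " [" + offset.startOffset() + "," + offset.endOffset() + "]");
        }
        stream.end();
        stream.close();
    }
}

Each entry of tokenFiltersTokenListCreator corresponds to one such stack: createStackedTokenStream rebuilds the chain truncated after tokenFilterIndex + 1 filters, so the response can show the token stream as it looks after every individual filter.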

Aggregations

TokenizerFactory (org.elasticsearch.index.analysis.TokenizerFactory): 2
Reader (java.io.Reader): 1
HashSet (java.util.HashSet): 1
TokenStream (org.apache.lucene.analysis.TokenStream): 1
Tokenizer (org.apache.lucene.analysis.Tokenizer): 1
FastStringReader (org.elasticsearch.common.io.FastStringReader): 1
Settings (org.elasticsearch.common.settings.Settings): 1
IndexSettings (org.elasticsearch.index.IndexSettings): 1
CharFilterFactory (org.elasticsearch.index.analysis.CharFilterFactory): 1
CustomAnalyzer (org.elasticsearch.index.analysis.CustomAnalyzer): 1
NamedAnalyzer (org.elasticsearch.index.analysis.NamedAnalyzer): 1
TokenFilterFactory (org.elasticsearch.index.analysis.TokenFilterFactory): 1
AnalysisModule (org.elasticsearch.indices.analysis.AnalysisModule): 1