Example 1 with TokenizerFactory

Use of org.opensearch.index.analysis.TokenizerFactory in project OpenSearch by opensearch-project.

From the class ScriptedConditionTokenFilterFactory, method getChainAwareTokenFilterFactory:

@Override
public TokenFilterFactory getChainAwareTokenFilterFactory(TokenizerFactory tokenizer, List<CharFilterFactory> charFilters, List<TokenFilterFactory> previousTokenFilters, Function<String, TokenFilterFactory> allFilters) {
    List<TokenFilterFactory> filters = new ArrayList<>();
    List<TokenFilterFactory> existingChain = new ArrayList<>(previousTokenFilters);
    for (String filter : filterNames) {
        TokenFilterFactory tff = allFilters.apply(filter);
        if (tff == null) {
            throw new IllegalArgumentException("ScriptedConditionTokenFilter [" + name() + "] refers to undefined token filter [" + filter + "]");
        }
        tff = tff.getChainAwareTokenFilterFactory(tokenizer, charFilters, existingChain, allFilters);
        filters.add(tff);
        existingChain.add(tff);
    }
    return new TokenFilterFactory() {

        @Override
        public String name() {
            return ScriptedConditionTokenFilterFactory.this.name();
        }

        @Override
        public TokenStream create(TokenStream tokenStream) {
            Function<TokenStream, TokenStream> filter = in -> {
                for (TokenFilterFactory tff : filters) {
                    in = tff.create(in);
                }
                return in;
            };
            return new ScriptedConditionTokenFilter(tokenStream, filter, factory.newInstance());
        }
    };
}
Also used : ScriptService(org.opensearch.script.ScriptService) TokenizerFactory(org.opensearch.index.analysis.TokenizerFactory) TokenStream(org.apache.lucene.analysis.TokenStream) AbstractTokenFilterFactory(org.opensearch.index.analysis.AbstractTokenFilterFactory) Script(org.opensearch.script.Script) TokenFilterFactory(org.opensearch.index.analysis.TokenFilterFactory) Settings(org.opensearch.common.settings.Settings) IOException(java.io.IOException) Function(java.util.function.Function) ConditionalTokenFilter(org.apache.lucene.analysis.miscellaneous.ConditionalTokenFilter) ArrayList(java.util.ArrayList) ScriptType(org.opensearch.script.ScriptType) List(java.util.List) CharFilterFactory(org.opensearch.index.analysis.CharFilterFactory) IndexSettings(org.opensearch.index.IndexSettings)
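
The pattern worth noting in create() is that the resolved filter factories are applied as a simple fold over Function<TokenStream, TokenStream>. Below is a minimal, self-contained sketch of that same fold in plain Lucene, without any OpenSearch classes; WhitespaceTokenizer and LowerCaseFilter are illustrative stand-ins (the LowerCaseFilter package has moved between Lucene versions), not the filters the factory above actually resolves.

import java.io.StringReader;
import java.util.List;
import java.util.function.Function;

import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class FilterChainSketch {
    public static void main(String[] args) throws Exception {
        // Each entry plays the role of one TokenFilterFactory.create(...) call.
        List<Function<TokenStream, TokenStream>> chain = List.of(LowerCaseFilter::new);

        WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new StringReader("Hello OpenSearch"));

        TokenStream ts = tokenizer;
        for (Function<TokenStream, TokenStream> f : chain) {
            ts = f.apply(ts); // the same fold the create() lambda performs above
        }

        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            System.out.println(term); // hello, opensearch
        }
        ts.end();
        ts.close();
    }
}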

Example 2 with TokenizerFactory

Use of org.opensearch.index.analysis.TokenizerFactory in project OpenSearch by opensearch-project.

From the class AnalysisModuleTests, method testPluginPreConfiguredCharFilters:

/**
 * Tests that plugins can register pre-configured char filters that vary in behavior with the OpenSearch version, with the Lucene
 * version, or that do not vary with version at all.
 */
public void testPluginPreConfiguredCharFilters() throws IOException {
    boolean noVersionSupportsMultiTerm = randomBoolean();
    boolean luceneVersionSupportsMultiTerm = randomBoolean();
    boolean opensearchVersionSupportsMultiTerm = randomBoolean();
    AnalysisRegistry registry = new AnalysisModule(TestEnvironment.newEnvironment(emptyNodeSettings), singletonList(new AnalysisPlugin() {

        @Override
        public List<PreConfiguredCharFilter> getPreConfiguredCharFilters() {
            return Arrays.asList(
                PreConfiguredCharFilter.singleton(
                    "no_version",
                    noVersionSupportsMultiTerm,
                    tokenStream -> new AppendCharFilter(tokenStream, "no_version")
                ),
                PreConfiguredCharFilter.luceneVersion(
                    "lucene_version",
                    luceneVersionSupportsMultiTerm,
                    (tokenStream, luceneVersion) -> new AppendCharFilter(tokenStream, luceneVersion.toString())
                ),
                PreConfiguredCharFilter.openSearchVersion(
                    "opensearch_version",
                    opensearchVersionSupportsMultiTerm,
                    (tokenStream, esVersion) -> new AppendCharFilter(tokenStream, esVersion.toString())
                )
            );
        }

        @Override
        public Map<String, AnalysisProvider<TokenizerFactory>> getTokenizers() {
            // Need mock keyword tokenizer here, because alpha / beta versions are broken up by the dash.
            return singletonMap("keyword", (indexSettings, environment, name, settings) -> TokenizerFactory.newFactory(name, () -> new MockTokenizer(MockTokenizer.KEYWORD, false)));
        }
    })).getAnalysisRegistry();
    Version version = VersionUtils.randomVersion(random());
    IndexAnalyzers analyzers = getIndexAnalyzers(
        registry,
        Settings.builder()
            .put("index.analysis.analyzer.no_version.tokenizer", "keyword")
            .put("index.analysis.analyzer.no_version.char_filter", "no_version")
            .put("index.analysis.analyzer.lucene_version.tokenizer", "keyword")
            .put("index.analysis.analyzer.lucene_version.char_filter", "lucene_version")
            .put("index.analysis.analyzer.opensearch_version.tokenizer", "keyword")
            .put("index.analysis.analyzer.opensearch_version.char_filter", "opensearch_version")
            .put(IndexMetadata.SETTING_VERSION_CREATED, version)
            .build()
    );
    assertTokenStreamContents(analyzers.get("no_version").tokenStream("", "test"), new String[] { "testno_version" });
    assertTokenStreamContents(analyzers.get("lucene_version").tokenStream("", "test"), new String[] { "test" + version.luceneVersion });
    assertTokenStreamContents(analyzers.get("opensearch_version").tokenStream("", "test"), new String[] { "test" + version });
    assertEquals("test" + (noVersionSupportsMultiTerm ? "no_version" : ""), analyzers.get("no_version").normalize("", "test").utf8ToString());
    assertEquals("test" + (luceneVersionSupportsMultiTerm ? version.luceneVersion.toString() : ""), analyzers.get("lucene_version").normalize("", "test").utf8ToString());
    assertEquals("test" + (opensearchVersionSupportsMultiTerm ? version.toString() : ""), analyzers.get("opensearch_version").normalize("", "test").utf8ToString());
}
Also used : Arrays(java.util.Arrays) Matchers.either(org.hamcrest.Matchers.either) Version(org.opensearch.Version) StopTokenFilterFactory(org.opensearch.index.analysis.StopTokenFilterFactory) Collections.singletonList(java.util.Collections.singletonList) AnalysisRegistry(org.opensearch.index.analysis.AnalysisRegistry) Directory(org.apache.lucene.store.Directory) Map(java.util.Map) PreConfiguredTokenizer(org.opensearch.index.analysis.PreConfiguredTokenizer) CustomAnalyzer(org.opensearch.index.analysis.CustomAnalyzer) Path(java.nio.file.Path) PreConfiguredTokenFilter(org.opensearch.index.analysis.PreConfiguredTokenFilter) OpenSearchTestCase(org.opensearch.test.OpenSearchTestCase) Set(java.util.Set) Settings(org.opensearch.common.settings.Settings) Reader(java.io.Reader) StandardCharsets(java.nio.charset.StandardCharsets) UncheckedIOException(java.io.UncheckedIOException) Matchers.instanceOf(org.hamcrest.Matchers.instanceOf) List(java.util.List) MatcherAssert(org.hamcrest.MatcherAssert) Matchers.equalTo(org.hamcrest.Matchers.equalTo) IndexSettings(org.opensearch.index.IndexSettings) TokenFilter(org.apache.lucene.analysis.TokenFilter) BaseTokenStreamTestCase.assertTokenStreamContents(org.apache.lucene.analysis.BaseTokenStreamTestCase.assertTokenStreamContents) XContentType(org.opensearch.common.xcontent.XContentType) Dictionary(org.apache.lucene.analysis.hunspell.Dictionary) IndexAnalyzers(org.opensearch.index.analysis.IndexAnalyzers) MyFilterTokenFilterFactory(org.opensearch.index.analysis.MyFilterTokenFilterFactory) IndexSettingsModule(org.opensearch.test.IndexSettingsModule) TestEnvironment(org.opensearch.env.TestEnvironment) TokenizerFactory(org.opensearch.index.analysis.TokenizerFactory) Tokenizer(org.apache.lucene.analysis.Tokenizer) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) MockTokenizer(org.apache.lucene.analysis.MockTokenizer) TokenFilterFactory(org.opensearch.index.analysis.TokenFilterFactory) CharFilter(org.apache.lucene.analysis.CharFilter) LegacyESVersion(org.opensearch.LegacyESVersion) Analysis(org.opensearch.index.analysis.Analysis) VersionUtils(org.opensearch.test.VersionUtils) Streams(org.opensearch.common.io.Streams) CharFilterFactory(org.opensearch.index.analysis.CharFilterFactory) StandardTokenizerFactory(org.opensearch.index.analysis.StandardTokenizerFactory) AnalysisProvider(org.opensearch.indices.analysis.AnalysisModule.AnalysisProvider) Collections.singletonMap(java.util.Collections.singletonMap) CharTermAttribute(org.apache.lucene.analysis.tokenattributes.CharTermAttribute) Environment(org.opensearch.env.Environment) TokenStream(org.apache.lucene.analysis.TokenStream) Files(java.nio.file.Files) BufferedWriter(java.io.BufferedWriter) Analyzer(org.apache.lucene.analysis.Analyzer) IOException(java.io.IOException) PreConfiguredCharFilter(org.opensearch.index.analysis.PreConfiguredCharFilter) AnalysisPlugin(org.opensearch.plugins.AnalysisPlugin) StringReader(java.io.StringReader) NIOFSDirectory(org.apache.lucene.store.NIOFSDirectory) InputStream(java.io.InputStream)
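
For contrast with the test above, here is a hedged sketch of what the production side of this API can look like: a plugin class that registers one pre-configured char filter. PreConfiguredCharFilter and AnalysisPlugin are the same types the test exercises; the name my_html_strip and the HTMLStripCharFilter binding are illustrative choices, not part of AnalysisModuleTests.

import java.util.List;

import org.apache.lucene.analysis.charfilter.HTMLStripCharFilter;
import org.opensearch.index.analysis.PreConfiguredCharFilter;
import org.opensearch.plugins.AnalysisPlugin;
import org.opensearch.plugins.Plugin;

public class MyAnalysisPlugin extends Plugin implements AnalysisPlugin {

    @Override
    public List<PreConfiguredCharFilter> getPreConfiguredCharFilters() {
        // singleton(...) registers one shared, version-independent instance; the
        // boolean flag controls whether the filter also applies to multi-term
        // (wildcard, prefix, etc.) queries, mirroring the flags in the test above.
        return List.of(PreConfiguredCharFilter.singleton("my_html_strip", false, HTMLStripCharFilter::new));
    }
}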

Example 3 with TokenizerFactory

Use of org.opensearch.index.analysis.TokenizerFactory in project OpenSearch by opensearch-project.

From the class SynonymsAnalysisTests, method testTokenFiltersBypassSynonymAnalysis:

public void testTokenFiltersBypassSynonymAnalysis() throws IOException {
    Settings settings = Settings.builder()
        .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
        .put("path.home", createTempDir().toString())
        .putList("word_list", "a")
        .put("hyphenation_patterns_path", "foo")
        .build();
    IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings);
    String[] bypassingFactories = new String[] { "dictionary_decompounder" };
    CommonAnalysisPlugin plugin = new CommonAnalysisPlugin();
    for (String factory : bypassingFactories) {
        TokenFilterFactory tff = plugin.getTokenFilters().get(factory).get(idxSettings, null, factory, settings);
        TokenizerFactory tok = new KeywordTokenizerFactory(idxSettings, null, "keyword", settings);
        SynonymTokenFilterFactory stff = new SynonymTokenFilterFactory(idxSettings, null, "synonym", settings);
        Analyzer analyzer = stff.buildSynonymAnalyzer(tok, Collections.emptyList(), Collections.singletonList(tff), null);
        try (TokenStream ts = analyzer.tokenStream("field", "text")) {
            assertThat(ts, instanceOf(KeywordTokenizer.class));
        }
    }
}
Also used : TokenStream(org.apache.lucene.analysis.TokenStream) TokenizerFactory(org.opensearch.index.analysis.TokenizerFactory) IndexSettings(org.opensearch.index.IndexSettings) Analyzer(org.apache.lucene.analysis.Analyzer) KeywordTokenizer(org.apache.lucene.analysis.core.KeywordTokenizer) Settings(org.opensearch.common.settings.Settings) TokenFilterFactory(org.opensearch.index.analysis.TokenFilterFactory)
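
Some background helps here: before a synonym filter can run, the synonym rules themselves are tokenized by an analyzer assembled in buildSynonymAnalyzer(), which is why filters that would rewrite the rule text are bypassed (this test) or rejected outright (the next one). The following is a minimal plain-Lucene sketch of the downstream machinery those rules feed, with an illustrative single-word rule; it is background for the test, not code from it.

import java.io.StringReader;

import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.synonym.SynonymGraphFilter;
import org.apache.lucene.analysis.synonym.SynonymMap;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.CharsRef;

public class SynonymSketch {
    public static void main(String[] args) throws Exception {
        // Build the map that a parsed rule like "text, document" would yield.
        SynonymMap.Builder builder = new SynonymMap.Builder(true); // true = dedup entries
        builder.add(new CharsRef("text"), new CharsRef("document"), true); // keep the original token
        SynonymMap map = builder.build();

        WhitespaceTokenizer tok = new WhitespaceTokenizer();
        tok.setReader(new StringReader("some text"));
        TokenStream ts = new SynonymGraphFilter(tok, map, true); // true = ignore case

        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset();
        while (ts.incrementToken()) {
            // Prints "some", then "text" and "document" stacked at one position
            // (the emission order of stacked tokens is an implementation detail).
            System.out.println(term);
        }
        ts.end();
        ts.close();
    }
}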

Example 4 with TokenizerFactory

Use of org.opensearch.index.analysis.TokenizerFactory in project OpenSearch by opensearch-project.

From the class SynonymsAnalysisTests, method testDisallowedTokenFilters:

public void testDisallowedTokenFilters() throws IOException {
    Settings settings = Settings.builder()
        .put(IndexMetadata.SETTING_VERSION_CREATED, VersionUtils.randomVersionBetween(random(), LegacyESVersion.V_7_0_0, Version.CURRENT))
        .put("path.home", createTempDir().toString())
        .putList("common_words", "a", "b")
        .put("output_unigrams", "true")
        .build();
    IndexSettings idxSettings = IndexSettingsModule.newIndexSettings("index", settings);
    CommonAnalysisPlugin plugin = new CommonAnalysisPlugin();
    String[] disallowedFactories = new String[] { "multiplexer", "cjk_bigram", "common_grams", "ngram", "edge_ngram", "word_delimiter", "word_delimiter_graph", "fingerprint" };
    for (String factory : disallowedFactories) {
        TokenFilterFactory tff = plugin.getTokenFilters().get(factory).get(idxSettings, null, factory, settings);
        TokenizerFactory tok = new KeywordTokenizerFactory(idxSettings, null, "keyword", settings);
        SynonymTokenFilterFactory stff = new SynonymTokenFilterFactory(idxSettings, null, "synonym", settings);
        IllegalArgumentException e = expectThrows(
            IllegalArgumentException.class,
            "Expected IllegalArgumentException for factory " + factory,
            () -> stff.buildSynonymAnalyzer(tok, Collections.emptyList(), Collections.singletonList(tff), null)
        );
        assertEquals(factory, "Token filter [" + factory + "] cannot be used to parse synonyms", e.getMessage());
    }
}
Also used : TokenizerFactory(org.opensearch.index.analysis.TokenizerFactory) IndexSettings(org.opensearch.index.IndexSettings) Settings(org.opensearch.common.settings.Settings) TokenFilterFactory(org.opensearch.index.analysis.TokenFilterFactory)

Example 5 with TokenizerFactory

Use of org.opensearch.index.analysis.TokenizerFactory in project OpenSearch by opensearch-project.

From the class TransportAnalyzeAction, method detailAnalyze:

private static AnalyzeAction.DetailAnalyzeResponse detailAnalyze(AnalyzeAction.Request request, Analyzer analyzer, int maxTokenCount) {
    AnalyzeAction.DetailAnalyzeResponse detailResponse;
    final Set<String> includeAttributes = new HashSet<>();
    if (request.attributes() != null) {
        for (String attribute : request.attributes()) {
            includeAttributes.add(attribute.toLowerCase(Locale.ROOT));
        }
    }
    // maybe unwrap analyzer from NamedAnalyzer
    Analyzer potentialCustomAnalyzer = analyzer;
    if (analyzer instanceof NamedAnalyzer) {
        potentialCustomAnalyzer = ((NamedAnalyzer) analyzer).analyzer();
    }
    if (potentialCustomAnalyzer instanceof AnalyzerComponentsProvider) {
        AnalyzerComponentsProvider customAnalyzer = (AnalyzerComponentsProvider) potentialCustomAnalyzer;
        // note: this is not field-name dependent in our cases so we can leave out the argument
        int positionIncrementGap = potentialCustomAnalyzer.getPositionIncrementGap("");
        int offsetGap = potentialCustomAnalyzer.getOffsetGap("");
        AnalyzerComponents components = customAnalyzer.getComponents();
        // divide charfilter, tokenizer tokenfilters
        CharFilterFactory[] charFilterFactories = components.getCharFilters();
        TokenizerFactory tokenizerFactory = components.getTokenizerFactory();
        TokenFilterFactory[] tokenFilterFactories = components.getTokenFilters();
        String[][] charFiltersTexts = new String[charFilterFactories != null ? charFilterFactories.length : 0][request.text().length];
        TokenListCreator[] tokenFiltersTokenListCreator = new TokenListCreator[tokenFilterFactories != null ? tokenFilterFactories.length : 0];
        TokenListCreator tokenizerTokenListCreator = new TokenListCreator(maxTokenCount);
        for (int textIndex = 0; textIndex < request.text().length; textIndex++) {
            String charFilteredSource = request.text()[textIndex];
            Reader reader = new StringReader(charFilteredSource);
            if (charFilterFactories != null) {
                for (int charFilterIndex = 0; charFilterIndex < charFilterFactories.length; charFilterIndex++) {
                    reader = charFilterFactories[charFilterIndex].create(reader);
                    Reader readerForWriteOut = new StringReader(charFilteredSource);
                    readerForWriteOut = charFilterFactories[charFilterIndex].create(readerForWriteOut);
                    charFilteredSource = writeCharStream(readerForWriteOut);
                    charFiltersTexts[charFilterIndex][textIndex] = charFilteredSource;
                }
            }
            // analyzing only tokenizer
            Tokenizer tokenizer = tokenizerFactory.create();
            tokenizer.setReader(reader);
            tokenizerTokenListCreator.analyze(tokenizer, includeAttributes, positionIncrementGap, offsetGap);
            // analyzing each tokenfilter
            if (tokenFilterFactories != null) {
                for (int tokenFilterIndex = 0; tokenFilterIndex < tokenFilterFactories.length; tokenFilterIndex++) {
                    if (tokenFiltersTokenListCreator[tokenFilterIndex] == null) {
                        tokenFiltersTokenListCreator[tokenFilterIndex] = new TokenListCreator(maxTokenCount);
                    }
                    TokenStream stream = createStackedTokenStream(request.text()[textIndex], charFilterFactories, tokenizerFactory, tokenFilterFactories, tokenFilterIndex + 1);
                    tokenFiltersTokenListCreator[tokenFilterIndex].analyze(stream, includeAttributes, positionIncrementGap, offsetGap);
                }
            }
        }
        AnalyzeAction.CharFilteredText[] charFilteredLists = new AnalyzeAction.CharFilteredText[charFiltersTexts.length];
        if (charFilterFactories != null) {
            for (int charFilterIndex = 0; charFilterIndex < charFiltersTexts.length; charFilterIndex++) {
                charFilteredLists[charFilterIndex] = new AnalyzeAction.CharFilteredText(charFilterFactories[charFilterIndex].name(), charFiltersTexts[charFilterIndex]);
            }
        }
        AnalyzeAction.AnalyzeTokenList[] tokenFilterLists = new AnalyzeAction.AnalyzeTokenList[tokenFiltersTokenListCreator.length];
        if (tokenFilterFactories != null) {
            for (int tokenFilterIndex = 0; tokenFilterIndex < tokenFiltersTokenListCreator.length; tokenFilterIndex++) {
                tokenFilterLists[tokenFilterIndex] = new AnalyzeAction.AnalyzeTokenList(tokenFilterFactories[tokenFilterIndex].name(), tokenFiltersTokenListCreator[tokenFilterIndex].getArrayTokens());
            }
        }
        detailResponse = new AnalyzeAction.DetailAnalyzeResponse(charFilteredLists, new AnalyzeAction.AnalyzeTokenList(tokenizerFactory.name(), tokenizerTokenListCreator.getArrayTokens()), tokenFilterLists);
    } else {
        String name;
        if (analyzer instanceof NamedAnalyzer) {
            name = ((NamedAnalyzer) analyzer).name();
        } else {
            name = analyzer.getClass().getName();
        }
        TokenListCreator tokenListCreator = new TokenListCreator(maxTokenCount);
        for (String text : request.text()) {
            tokenListCreator.analyze(analyzer.tokenStream("", text), includeAttributes, analyzer.getPositionIncrementGap(""), analyzer.getOffsetGap(""));
        }
        detailResponse = new AnalyzeAction.DetailAnalyzeResponse(new AnalyzeAction.AnalyzeTokenList(name, tokenListCreator.getArrayTokens()));
    }
    return detailResponse;
}
Also used : TokenStream(org.apache.lucene.analysis.TokenStream) NamedAnalyzer(org.opensearch.index.analysis.NamedAnalyzer) AnalyzerComponentsProvider(org.opensearch.index.analysis.AnalyzerComponentsProvider) Reader(java.io.Reader) StringReader(java.io.StringReader) AnalyzerComponents(org.opensearch.index.analysis.AnalyzerComponents) Analyzer(org.apache.lucene.analysis.Analyzer) Tokenizer(org.apache.lucene.analysis.Tokenizer) HashSet(java.util.HashSet) TokenizerFactory(org.opensearch.index.analysis.TokenizerFactory) CharFilterFactory(org.opensearch.index.analysis.CharFilterFactory) TokenFilterFactory(org.opensearch.index.analysis.TokenFilterFactory)
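
The loop above runs each char filter twice per input text: once on the reader that feeds the tokenizer, and once on a throwaway reader whose output is captured via writeCharStream() for the response. Here is a minimal plain-Lucene sketch of that two-pass pattern; HTMLStripCharFilter is an assumed stand-in for whatever char filter is configured, not what the action itself uses.

import java.io.Reader;
import java.io.StringReader;

import org.apache.lucene.analysis.charfilter.HTMLStripCharFilter;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class StagedAnalyzeSketch {
    public static void main(String[] args) throws Exception {
        String source = "<b>Hello</b> world";

        // Pass 1: capture what the char filter produced, as detailAnalyze() does
        // with readerForWriteOut before handing the real reader to the tokenizer.
        StringBuilder filtered = new StringBuilder();
        try (Reader r = new HTMLStripCharFilter(new StringReader(source))) {
            int c;
            while ((c = r.read()) != -1) filtered.append((char) c);
        }
        System.out.println("after char filter: " + filtered); // roughly "Hello world"

        // Pass 2: tokenize a fresh char-filtered reader for the token-level stages.
        WhitespaceTokenizer tokenizer = new WhitespaceTokenizer();
        tokenizer.setReader(new HTMLStripCharFilter(new StringReader(source)));
        CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
        tokenizer.reset();
        while (tokenizer.incrementToken()) {
            System.out.println(term); // Hello, world
        }
        tokenizer.end();
        tokenizer.close();
    }
}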

Aggregations

TokenFilterFactory (org.opensearch.index.analysis.TokenFilterFactory): 7
TokenizerFactory (org.opensearch.index.analysis.TokenizerFactory): 7
Settings (org.opensearch.common.settings.Settings): 6
IndexSettings (org.opensearch.index.IndexSettings): 6
TokenStream (org.apache.lucene.analysis.TokenStream): 5
CharFilterFactory (org.opensearch.index.analysis.CharFilterFactory): 5
IOException (java.io.IOException): 3
Reader (java.io.Reader): 3
List (java.util.List): 3
Analyzer (org.apache.lucene.analysis.Analyzer): 3
StringReader (java.io.StringReader): 2
ArrayList (java.util.ArrayList): 2
Collections.singletonList (java.util.Collections.singletonList): 2
Collections.singletonMap (java.util.Collections.singletonMap): 2
Map (java.util.Map): 2
MockTokenizer (org.apache.lucene.analysis.MockTokenizer): 2
Tokenizer (org.apache.lucene.analysis.Tokenizer): 2
Version (org.opensearch.Version): 2
NamedAnalyzer (org.opensearch.index.analysis.NamedAnalyzer): 2
BufferedWriter (java.io.BufferedWriter): 1