
Example 1 with Index

Use of org.opensearch.index.Index in the OpenSearch project (opensearch-project/OpenSearch).

From the class GetDataStreamResponseTests, the method randomInstance:

private static DataStreamInfo randomInstance() {
    List<Index> indices = randomIndexInstances();
    long generation = indices.size() + randomLongBetween(1, 128);
    String dataStreamName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
    indices.add(new Index(getDefaultBackingIndexName(dataStreamName, generation), UUIDs.randomBase64UUID(random())));
    DataStream dataStream = new DataStream(dataStreamName, createTimestampField("@timestamp"), indices, generation);
    return new DataStreamInfo(dataStream, ClusterHealthStatus.YELLOW, randomAlphaOfLengthBetween(2, 10));
}
Also used: DataStreamInfo (org.opensearch.action.admin.indices.datastream.GetDataStreamAction.Response.DataStreamInfo), DataStream (org.opensearch.cluster.metadata.DataStream), Index (org.opensearch.index.Index)
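
An org.opensearch.index.Index pairs an index name with a UUID, which is why the example generates a random UUID for the new backing index. A minimal sketch of that identity behaviour, using the same two-argument constructor (the names and UUIDs here are illustrative, not taken from the test):

import org.opensearch.index.Index;

public class IndexIdentitySketch {
    public static void main(String[] args) {
        // Identity is the (name, uuid) pair: same name with a different UUID is a different index.
        Index a = new Index("logs-000001", "aAbBcCdDeE");
        Index b = new Index("logs-000001", "zZyYxXwWvV");
        // logs-000001
        System.out.println(a.getName());
        // aAbBcCdDeE
        System.out.println(a.getUUID());
        // false
        System.out.println(a.equals(b));
    }
}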

Example 2 with Index

Use of org.opensearch.index.Index in the OpenSearch project (opensearch-project/OpenSearch).

From the class GetDataStreamResponseTests, the method assertInstances:

@Override
protected void assertInstances(GetDataStreamAction.Response serverTestInstance, GetDataStreamResponse clientInstance) {
    assertEquals(serverTestInstance.getDataStreams().size(), clientInstance.getDataStreams().size());
    Iterator<DataStreamInfo> serverIt = serverTestInstance.getDataStreams().iterator();
    Iterator<org.opensearch.client.indices.DataStream> clientIt = clientInstance.getDataStreams().iterator();
    while (serverIt.hasNext()) {
        org.opensearch.client.indices.DataStream client = clientIt.next();
        DataStream server = serverIt.next().getDataStream();
        assertEquals(server.getName(), client.getName());
        assertEquals(server.getIndices().stream().map(Index::getName).collect(Collectors.toList()), client.getIndices());
        assertEquals(server.getTimeStampField().getName(), client.getTimeStampField());
        assertEquals(server.getGeneration(), client.getGeneration());
    }
}
Also used: DataStreamInfo (org.opensearch.action.admin.indices.datastream.GetDataStreamAction.Response.DataStreamInfo), DataStream (org.opensearch.cluster.metadata.DataStream), Index (org.opensearch.index.Index)
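
The assertion compares backing-index names rather than Index objects, because the client-side DataStream carries only names. A minimal sketch of that mapping step, with hypothetical backing-index names:

import java.util.List;
import java.util.stream.Collectors;

import org.opensearch.index.Index;

public class BackingIndexNamesSketch {
    public static void main(String[] args) {
        // Hypothetical backing indices of a data stream.
        List<Index> backingIndices = List.of(
            new Index(".ds-logs-000001", "uuid-1"),
            new Index(".ds-logs-000002", "uuid-2"));
        // Same mapping as in the assertion above: Index -> name.
        List<String> names = backingIndices.stream()
            .map(Index::getName)
            .collect(Collectors.toList());
        // [.ds-logs-000001, .ds-logs-000002]
        System.out.println(names);
    }
}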

Example 3 with Index

Use of org.opensearch.index.Index in the OpenSearch project (opensearch-project/OpenSearch).

From the class WhitespaceTokenizerFactoryTests, the method testMaxTokenLength:

public void testMaxTokenLength() throws IOException {
    final Settings indexSettings = Settings.builder().put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT).build();
    IndexSettings indexProperties = IndexSettingsModule.newIndexSettings(new Index("test", "_na_"), indexSettings);
    final Settings settings = Settings.builder().put(WhitespaceTokenizerFactory.MAX_TOKEN_LENGTH, 2).build();
    WhitespaceTokenizer tokenizer = (WhitespaceTokenizer) new WhitespaceTokenizerFactory(indexProperties, null, "whitespace_maxlen", settings).create();
    try (Reader reader = new StringReader("one, two, three")) {
        tokenizer.setReader(reader);
        assertTokenStreamContents(tokenizer, new String[] { "on", "e,", "tw", "o,", "th", "re", "e" });
    }
    final Settings defaultSettings = Settings.EMPTY;
    tokenizer = (WhitespaceTokenizer) new WhitespaceTokenizerFactory(indexProperties, null, "whitespace_maxlen", defaultSettings).create();
    String veryLongToken = RandomStrings.randomAsciiAlphanumOfLength(random(), 256);
    try (Reader reader = new StringReader(veryLongToken)) {
        tokenizer.setReader(reader);
        assertTokenStreamContents(tokenizer, new String[] { veryLongToken.substring(0, 255), veryLongToken.substring(255) });
    }
    final Settings tooLongSettings = Settings.builder().put(WhitespaceTokenizerFactory.MAX_TOKEN_LENGTH, 1024 * 1024 + 1).build();
    IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new WhitespaceTokenizerFactory(indexProperties, null, "whitespace_maxlen", tooLongSettings).create());
    assertEquals("maxTokenLen must be greater than 0 and less than 1048576 passed: 1048577", e.getMessage());
    final Settings negativeSettings = Settings.builder().put(WhitespaceTokenizerFactory.MAX_TOKEN_LENGTH, -1).build();
    e = expectThrows(IllegalArgumentException.class, () -> new WhitespaceTokenizerFactory(indexProperties, null, "whitespace_maxlen", negativeSettings).create());
    assertEquals("maxTokenLen must be greater than 0 and less than 1048576 passed: -1", e.getMessage());
}
Also used: WhitespaceTokenizer (org.apache.lucene.analysis.core.WhitespaceTokenizer), IndexSettings (org.opensearch.index.IndexSettings), StringReader (java.io.StringReader), Reader (java.io.Reader), Index (org.opensearch.index.Index), Settings (org.opensearch.common.settings.Settings)
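
The factory configures Lucene's WhitespaceTokenizer with a maximum token length, which is the splitting behaviour the first assertion checks. A minimal sketch using the Lucene class directly, assuming its WhitespaceTokenizer(AttributeFactory, int maxTokenLen) constructor:

import java.io.StringReader;

import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.AttributeFactory;

public class MaxTokenLenSketch {
    public static void main(String[] args) throws Exception {
        // maxTokenLen = 2: any longer run of non-whitespace is flushed in 2-character chunks.
        WhitespaceTokenizer tokenizer =
            new WhitespaceTokenizer(AttributeFactory.DEFAULT_ATTRIBUTE_FACTORY, 2);
        tokenizer.setReader(new StringReader("one, two, three"));
        CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
        tokenizer.reset();
        while (tokenizer.incrementToken()) {
            // Prints [on] [e,] [tw] [o,] [th] [re] [e], one token per line.
            System.out.println("[" + term + "]");
        }
        tokenizer.end();
        tokenizer.close();
    }
}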

Example 4 with Index

Use of org.opensearch.index.Index in the OpenSearch project (opensearch-project/OpenSearch).

From the class NGramTokenizerFactoryTests, the method testMaxNGramDiffException:

/*
 * Test that an error is thrown when creating an NGramTokenizer whose difference between
 * max_gram and min_gram is greater than the allowed value of max_ngram_diff.
 */
public void testMaxNGramDiffException() throws Exception {
    final Index index = new Index("test", "_na_");
    final String name = "ngr";
    final Settings indexSettings = newAnalysisSettingsBuilder().build();
    IndexSettings indexProperties = IndexSettingsModule.newIndexSettings(index, indexSettings);
    int maxAllowedNgramDiff = indexProperties.getMaxNgramDiff();
    int ngramDiff = maxAllowedNgramDiff + 1;
    int min_gram = 2;
    int max_gram = min_gram + ngramDiff;
    final Settings settings = newAnalysisSettingsBuilder().put("min_gram", min_gram).put("max_gram", max_gram).build();
    IllegalArgumentException ex = expectThrows(IllegalArgumentException.class, () -> new NGramTokenizerFactory(indexProperties, null, name, settings).create());
    assertEquals("The difference between max_gram and min_gram in NGram Tokenizer must be less than or equal to: [" + maxAllowedNgramDiff + "] but was [" + ngramDiff + "]. This limit can be set by changing the [" + IndexSettings.MAX_NGRAM_DIFF_SETTING.getKey() + "] index level setting.", ex.getMessage());
}
Also used: IndexSettings (org.opensearch.index.IndexSettings), Index (org.opensearch.index.Index), Settings (org.opensearch.common.settings.Settings)
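
The limit being exercised is the index-level setting IndexSettings.MAX_NGRAM_DIFF_SETTING, which the error message points to. A minimal sketch of raising it so a wider min_gram/max_gram spread would be accepted; the value 3 is purely illustrative:

import org.opensearch.Version;
import org.opensearch.cluster.metadata.IndexMetadata;
import org.opensearch.common.settings.Settings;
import org.opensearch.index.IndexSettings;

public class NgramDiffSettingSketch {
    public static void main(String[] args) {
        Settings indexSettings = Settings.builder()
            .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
            // Allow min_gram = 2 with max_gram = 5 (a diff of 3) on this index.
            .put(IndexSettings.MAX_NGRAM_DIFF_SETTING.getKey(), 3)
            .build();
        // Prints the setting key and the configured value.
        System.out.println(IndexSettings.MAX_NGRAM_DIFF_SETTING.getKey());
        System.out.println(indexSettings.get(IndexSettings.MAX_NGRAM_DIFF_SETTING.getKey()));
    }
}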

Example 5 with Index

Use of org.opensearch.index.Index in the OpenSearch project (opensearch-project/OpenSearch).

From the class NGramTokenizerFactoryTests, the method testPreTokenizationEdge:

public void testPreTokenizationEdge() throws IOException {
    // Make sure that pretokenization works well and that it can be used even with token chars which are supplementary characters
    final Index index = new Index("test", "_na_");
    final String name = "ngr";
    final Settings indexSettings = newAnalysisSettingsBuilder().build();
    Settings settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit").build();
    Tokenizer tokenizer = new EdgeNGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create();
    tokenizer.setReader(new StringReader("Åbc déf g\uD801\uDC00f "));
    assertTokenStreamContents(tokenizer, new String[] { "Åb", "Åbc", "dé", "déf", "g\uD801\uDC00", "g\uD801\uDC00f" });
    settings = newAnalysisSettingsBuilder().put("min_gram", 2).put("max_gram", 3).put("token_chars", "letter,digit,punctuation,whitespace,symbol").build();
    tokenizer = new EdgeNGramTokenizerFactory(IndexSettingsModule.newIndexSettings(index, indexSettings), null, name, settings).create();
    tokenizer.setReader(new StringReader(" a!$ 9"));
    assertTokenStreamContents(tokenizer, new String[] { " a", " a!" });
}
Also used: StringReader (java.io.StringReader), Index (org.opensearch.index.Index), Tokenizer (org.apache.lucene.analysis.Tokenizer), MockTokenizer (org.apache.lucene.analysis.MockTokenizer), Settings (org.opensearch.common.settings.Settings), IndexSettings (org.opensearch.index.IndexSettings)
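
EdgeNGramTokenizerFactory wraps Lucene's EdgeNGramTokenizer, which emits n-grams anchored at the start of a token. A minimal sketch of that behaviour using the Lucene class directly (no token_chars configuration, so the whole input is treated as a single token):

import java.io.StringReader;

import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

public class EdgeNgramSketch {
    public static void main(String[] args) throws Exception {
        // min_gram = 2, max_gram = 3, matching the first settings block in the test.
        EdgeNGramTokenizer tokenizer = new EdgeNGramTokenizer(2, 3);
        tokenizer.setReader(new StringReader("Åbc"));
        CharTermAttribute term = tokenizer.addAttribute(CharTermAttribute.class);
        tokenizer.reset();
        while (tokenizer.incrementToken()) {
            // Prints Åb, then Åbc.
            System.out.println(term.toString());
        }
        tokenizer.end();
        tokenizer.close();
    }
}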

Aggregations

Index (org.opensearch.index.Index): 402
Settings (org.opensearch.common.settings.Settings): 160
ClusterState (org.opensearch.cluster.ClusterState): 142
IndexMetadata (org.opensearch.cluster.metadata.IndexMetadata): 142
ShardId (org.opensearch.index.shard.ShardId): 123
ArrayList (java.util.ArrayList): 94
IOException (java.io.IOException): 91
HashMap (java.util.HashMap): 90
Map (java.util.Map): 85
List (java.util.List): 84
HashSet (java.util.HashSet): 83
ShardRouting (org.opensearch.cluster.routing.ShardRouting): 79
ClusterService (org.opensearch.cluster.service.ClusterService): 75
Version (org.opensearch.Version): 69
IndicesService (org.opensearch.indices.IndicesService): 69
Collections (java.util.Collections): 68
Matchers.containsString (org.hamcrest.Matchers.containsString): 65
DiscoveryNode (org.opensearch.cluster.node.DiscoveryNode): 65
IndexSettings (org.opensearch.index.IndexSettings): 63
Set (java.util.Set): 62
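
ShardId (org.opensearch.index.shard.ShardId) appears 123 times alongside Index in the list above: a shard is identified by its index plus a shard number. A minimal sketch, assuming the ShardId(Index, int) constructor:

import org.opensearch.index.Index;
import org.opensearch.index.shard.ShardId;

public class ShardIdSketch {
    public static void main(String[] args) {
        Index index = new Index("test", "_na_");
        // Shard 0 of the index "test".
        ShardId shardId = new ShardId(index, 0);
        // test
        System.out.println(shardId.getIndexName());
        // 0
        System.out.println(shardId.getId());
        // [test][0]
        System.out.println(shardId);
    }
}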