Example 1 with Writer

Use of org.opensearch.gateway.PersistedClusterStateService.Writer in project OpenSearch by opensearch-project.

From class PersistedClusterStateServiceTests, method testFailsIfFreshestStateIsInStaleTerm:

public void testFailsIfFreshestStateIsInStaleTerm() throws IOException {
    final Path[] dataPaths1 = createDataPaths();
    final Path[] dataPaths2 = createDataPaths();
    final Path[] combinedPaths = Stream.concat(Arrays.stream(dataPaths1), Arrays.stream(dataPaths2)).toArray(Path[]::new);
    final long staleCurrentTerm = randomLongBetween(1L, Long.MAX_VALUE - 1);
    final long freshCurrentTerm = randomLongBetween(staleCurrentTerm + 1, Long.MAX_VALUE);
    final long freshTerm = randomLongBetween(1L, Long.MAX_VALUE);
    final long staleTerm = randomBoolean() ? freshTerm : randomLongBetween(1L, freshTerm);
    final long freshVersion = randomLongBetween(2L, Long.MAX_VALUE);
    final long staleVersion = staleTerm == freshTerm ? randomLongBetween(1L, freshVersion - 1) : randomLongBetween(1L, Long.MAX_VALUE);
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            assertFalse(clusterState.metadata().clusterUUIDCommitted());
            writeState(writer, staleCurrentTerm, ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).version(1).coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(staleTerm).build())).version(staleVersion).build(), clusterState);
        }
    }
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths1)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            writeState(writer, freshCurrentTerm, clusterState, clusterState);
        }
    }
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths2)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final PersistedClusterStateService.OnDiskState onDiskState = newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState();
            final ClusterState clusterState = clusterStateFromMetadata(onDiskState.lastAcceptedVersion, onDiskState.metadata);
            writeState(writer, onDiskState.currentTerm, ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).version(2).coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(freshTerm).build())).version(freshVersion).build(), clusterState);
        }
    }
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) {
        final String message = expectThrows(IllegalStateException.class, () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()).getMessage();
        assertThat(message, allOf(containsString("inconsistent terms found"), containsString(Long.toString(staleCurrentTerm)), containsString(Long.toString(freshCurrentTerm))));
        assertTrue("[" + message + "] should match " + Arrays.toString(dataPaths1), Arrays.stream(dataPaths1).anyMatch(p -> message.contains(p.toString())));
        assertTrue("[" + message + "] should match " + Arrays.toString(dataPaths2), Arrays.stream(dataPaths2).anyMatch(p -> message.contains(p.toString())));
    }
}
Also used : Path(java.nio.file.Path) Arrays(java.util.Arrays) Metadata(org.opensearch.cluster.metadata.Metadata) Term(org.apache.lucene.index.Term) NoneCircuitBreakerService(org.opensearch.indices.breaker.NoneCircuitBreakerService) Level(org.apache.logging.log4j.Level) Version(org.opensearch.Version) DiscoveryNode(org.opensearch.cluster.node.DiscoveryNode) NodeMetadata(org.opensearch.env.NodeMetadata) MockBigArrays(org.opensearch.common.util.MockBigArrays) Directory(org.apache.lucene.store.Directory) Matchers.nullValue(org.hamcrest.Matchers.nullValue) IOContext(org.apache.lucene.store.IOContext) MockPageCacheRecycler(org.opensearch.common.util.MockPageCacheRecycler) NodeEnvironment(org.opensearch.env.NodeEnvironment) Index(org.opensearch.index.Index) Matchers.allOf(org.hamcrest.Matchers.allOf) OpenSearchTestCase(org.opensearch.test.OpenSearchTestCase) Collection(java.util.Collection) Settings(org.opensearch.common.settings.Settings) Collectors(java.util.stream.Collectors) IndexWriter(org.apache.lucene.index.IndexWriter) List(java.util.List) Logger(org.apache.logging.log4j.Logger) Stream(java.util.stream.Stream) Matchers.equalTo(org.hamcrest.Matchers.equalTo) BigArrays(org.opensearch.common.util.BigArrays) IndexWriterConfig(org.apache.lucene.index.IndexWriterConfig) Matchers.containsString(org.hamcrest.Matchers.containsString) DiscoveryNodes(org.opensearch.cluster.node.DiscoveryNodes) MockLogAppender(org.opensearch.test.MockLogAppender) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) CoordinationMetadata(org.opensearch.cluster.coordination.CoordinationMetadata) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) ClusterState(org.opensearch.cluster.ClusterState) Matchers.lessThan(org.hamcrest.Matchers.lessThan) UUIDs(org.opensearch.common.UUIDs) ClusterSettings(org.opensearch.common.settings.ClusterSettings) IndexOutput(org.apache.lucene.store.IndexOutput) Environment(org.opensearch.env.Environment) IOException(java.io.IOException) IOError(java.io.IOError) IOUtils(org.opensearch.core.internal.io.IOUtils) TestLogging(org.opensearch.test.junit.annotations.TestLogging) AtomicLong(java.util.concurrent.atomic.AtomicLong) FilterDirectory(org.apache.lucene.store.FilterDirectory) NIOFSDirectory(org.apache.lucene.store.NIOFSDirectory) ClusterName(org.opensearch.cluster.ClusterName) Writer(org.opensearch.gateway.PersistedClusterStateService.Writer) LogManager(org.apache.logging.log4j.LogManager)
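For orientation, the minimal Writer lifecycle that these tests build on can be sketched as below. This is only a sketch under assumptions: writeState(...) in the example is a helper defined elsewhere in the test class, and here service, term and state are placeholders, not names from the test.

// Sketch only: service, term and state are placeholders for this illustration.
try (Writer writer = service.createWriter()) {
    // Persist the full cluster state and commit it.
    writer.writeFullStateAndCommit(term, state);
}
// Later, recover the freshest persisted state across the data paths.
PersistedClusterStateService.OnDiskState onDiskState = service.loadBestOnDiskState();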

Example 2 with Writer

Use of org.opensearch.gateway.PersistedClusterStateService.Writer in project OpenSearch by opensearch-project.

From class PersistedClusterStateServiceTests, method testFailsIfGlobalMetadataIsMissing:

public void testFailsIfGlobalMetadataIsMissing() throws IOException {
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            writeState(writer, 0L, ClusterState.builder(clusterState).version(randomLongBetween(1L, Long.MAX_VALUE)).build(), clusterState);
        }
        final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths());
        try (Directory directory = new NIOFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) {
            final IndexWriterConfig indexWriterConfig = new IndexWriterConfig();
            indexWriterConfig.setOpenMode(IndexWriterConfig.OpenMode.CREATE);
            try (IndexWriter indexWriter = new IndexWriter(directory, indexWriterConfig)) {
                indexWriter.commit();
            }
        }
        final String message = expectThrows(IllegalStateException.class, () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()).getMessage();
        assertThat(message, allOf(containsString("no global metadata found"), containsString(brokenPath.toString())));
    }
}
Also used : Path(java.nio.file.Path) ClusterState(org.opensearch.cluster.ClusterState) NIOFSDirectory(org.apache.lucene.store.NIOFSDirectory) NodeEnvironment(org.opensearch.env.NodeEnvironment) IndexWriter(org.apache.lucene.index.IndexWriter) Matchers.containsString(org.hamcrest.Matchers.containsString) Writer(org.opensearch.gateway.PersistedClusterStateService.Writer) Directory(org.apache.lucene.store.Directory) FilterDirectory(org.apache.lucene.store.FilterDirectory) IndexWriterConfig(org.apache.lucene.index.IndexWriterConfig)

Example 3 with Writer

Use of org.opensearch.gateway.PersistedClusterStateService.Writer in project OpenSearch by opensearch-project.

From class PersistedClusterStateServiceTests, method testFailsIfIndexMetadataIsDuplicated:

public void testFailsIfIndexMetadataIsDuplicated() throws IOException {
    // if someone attempted surgery on the metadata index by hand, e.g. deleting broken segments, then maybe some index metadata
    // is duplicated
    final Path[] dataPaths1 = createDataPaths();
    final Path[] dataPaths2 = createDataPaths();
    final Path[] combinedPaths = Stream.concat(Arrays.stream(dataPaths1), Arrays.stream(dataPaths2)).toArray(Path[]::new);
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) {
        final String indexUUID = UUIDs.randomBase64UUID(random());
        final String indexName = randomAlphaOfLength(10);
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            writeState(writer, 0L, ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).version(1L).coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(1L).build()).put(IndexMetadata.builder(indexName).version(1L).settings(Settings.builder().put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1).put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0).put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT).put(IndexMetadata.SETTING_INDEX_UUID, indexUUID)))).incrementVersion().build(), clusterState);
        }
        final Path brokenPath = randomFrom(nodeEnvironment.nodeDataPaths());
        final Path dupPath = randomValueOtherThan(brokenPath, () -> randomFrom(nodeEnvironment.nodeDataPaths()));
        try (Directory directory = new NIOFSDirectory(brokenPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME));
            Directory dupDirectory = new NIOFSDirectory(dupPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) {
            try (IndexWriter indexWriter = new IndexWriter(directory, new IndexWriterConfig())) {
                // do not duplicate global metadata
                indexWriter.deleteDocuments(new Term("type", "global"));
                indexWriter.addIndexes(dupDirectory);
                indexWriter.commit();
            }
        }
        final String message = expectThrows(IllegalStateException.class, () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()).getMessage();
        assertThat(message, allOf(containsString("duplicate metadata found"), containsString(brokenPath.toString()), containsString(indexName), containsString(indexUUID)));
    }
}
Also used : Path(java.nio.file.Path) ClusterState(org.opensearch.cluster.ClusterState) NIOFSDirectory(org.apache.lucene.store.NIOFSDirectory) NodeEnvironment(org.opensearch.env.NodeEnvironment) IndexWriter(org.apache.lucene.index.IndexWriter) Matchers.containsString(org.hamcrest.Matchers.containsString) Term(org.apache.lucene.index.Term) Writer(org.opensearch.gateway.PersistedClusterStateService.Writer) Directory(org.apache.lucene.store.Directory) FilterDirectory(org.apache.lucene.store.FilterDirectory) IndexWriterConfig(org.apache.lucene.index.IndexWriterConfig)

Example 4 with Writer

Use of org.opensearch.gateway.PersistedClusterStateService.Writer in project OpenSearch by opensearch-project.

From class PersistedClusterStateServiceTests, method testPersistsAndReloadsIndexMetadataIffVersionOrTermChanges:

public void testPersistsAndReloadsIndexMetadataIffVersionOrTermChanges() throws IOException {
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(createDataPaths())) {
        final PersistedClusterStateService persistedClusterStateService = newPersistedClusterStateService(nodeEnvironment);
        final long globalVersion = randomLongBetween(1L, Long.MAX_VALUE);
        final String indexUUID = UUIDs.randomBase64UUID(random());
        final long indexMetadataVersion = randomLongBetween(1L, Long.MAX_VALUE);
        final long oldTerm = randomLongBetween(1L, Long.MAX_VALUE - 1);
        final long newTerm = randomLongBetween(oldTerm + 1, Long.MAX_VALUE);
        try (Writer writer = persistedClusterStateService.createWriter()) {
            ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
            writeState(writer, 0L, ClusterState.builder(clusterState)
                .metadata(Metadata.builder(clusterState.metadata())
                    .version(globalVersion)
                    .coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(oldTerm).build())
                    .put(IndexMetadata.builder("test")
                        // -1 because it's incremented in .put()
                        .version(indexMetadataVersion - 1)
                        .settings(Settings.builder()
                            .put(IndexMetadata.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
                            .put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
                            .put(IndexMetadata.SETTING_INDEX_VERSION_CREATED.getKey(), Version.CURRENT)
                            .put(IndexMetadata.SETTING_INDEX_UUID, indexUUID))))
                .incrementVersion()
                .build(), clusterState);
            clusterState = loadPersistedClusterState(persistedClusterStateService);
            IndexMetadata indexMetadata = clusterState.metadata().index("test");
            assertThat(indexMetadata.getIndexUUID(), equalTo(indexUUID));
            assertThat(indexMetadata.getVersion(), equalTo(indexMetadataVersion));
            assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetadata.getSettings()), equalTo(0));
            // ensure we do not wastefully persist the same index metadata version by making a bad update with the same version
            writer.writeIncrementalStateAndCommit(0L, clusterState, ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).put(IndexMetadata.builder(indexMetadata).settings(Settings.builder().put(indexMetadata.getSettings()).put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)).build(), false)).incrementVersion().build());
            clusterState = loadPersistedClusterState(persistedClusterStateService);
            indexMetadata = clusterState.metadata().index("test");
            assertThat(indexMetadata.getIndexUUID(), equalTo(indexUUID));
            assertThat(indexMetadata.getVersion(), equalTo(indexMetadataVersion));
            assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetadata.getSettings()), equalTo(0));
            // ensure that we do persist the same index metadata version by making an update with a higher version
            writeState(writer, 0L, ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).put(IndexMetadata.builder(indexMetadata).settings(Settings.builder().put(indexMetadata.getSettings()).put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 2)).build(), true)).incrementVersion().build(), clusterState);
            clusterState = loadPersistedClusterState(persistedClusterStateService);
            indexMetadata = clusterState.metadata().index("test");
            assertThat(indexMetadata.getVersion(), equalTo(indexMetadataVersion + 1));
            assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetadata.getSettings()), equalTo(2));
            // ensure that we also persist the index metadata when the term changes
            writeState(writer, 0L, ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(newTerm).build()).put(IndexMetadata.builder(indexMetadata).settings(Settings.builder().put(indexMetadata.getSettings()).put(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 3)).build(), false)).incrementVersion().build(), clusterState);
        }
        final ClusterState clusterState = loadPersistedClusterState(persistedClusterStateService);
        final IndexMetadata indexMetadata = clusterState.metadata().index("test");
        assertThat(indexMetadata.getIndexUUID(), equalTo(indexUUID));
        assertThat(indexMetadata.getVersion(), equalTo(indexMetadataVersion + 1));
        assertThat(IndexMetadata.INDEX_NUMBER_OF_REPLICAS_SETTING.get(indexMetadata.getSettings()), equalTo(3));
    }
}
Also used : ClusterState(org.opensearch.cluster.ClusterState) NodeEnvironment(org.opensearch.env.NodeEnvironment) Matchers.containsString(org.hamcrest.Matchers.containsString) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) IndexWriter(org.apache.lucene.index.IndexWriter) Writer(org.opensearch.gateway.PersistedClusterStateService.Writer)
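For reference, the two commit paths this test alternates between are sketched below; term, previousState and updatedState are placeholders, and the behavior noted in the comments is what the assertions above verify.

// Sketch only: persistedClusterStateService and the state variables are placeholders.
try (Writer writer = persistedClusterStateService.createWriter()) {
    // Full write: persists global metadata and every index, then commits.
    writer.writeFullStateAndCommit(term, previousState);
    // Incremental write: re-persists index metadata only when its version or term
    // changed relative to previousState, which is the property this test checks.
    writer.writeIncrementalStateAndCommit(term, previousState, updatedState);
}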

Example 5 with Writer

Use of org.opensearch.gateway.PersistedClusterStateService.Writer in project OpenSearch by opensearch-project.

From class PersistedClusterStateServiceTests, method testFailsOnMismatchedNodeIds:

public void testFailsOnMismatchedNodeIds() throws IOException {
    final Path[] dataPaths1 = createDataPaths();
    final Path[] dataPaths2 = createDataPaths();
    final String[] nodeIds = new String[2];
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths1)) {
        nodeIds[0] = nodeEnvironment.nodeId();
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            writer.writeFullStateAndCommit(0L, ClusterState.builder(clusterState).version(randomLongBetween(1L, Long.MAX_VALUE)).build());
        }
    }
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths2)) {
        nodeIds[1] = nodeEnvironment.nodeId();
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            writer.writeFullStateAndCommit(0L, ClusterState.builder(clusterState).version(randomLongBetween(1L, Long.MAX_VALUE)).build());
        }
    }
    NodeMetadata.FORMAT.cleanupOldFiles(Long.MAX_VALUE, dataPaths2);
    final Path[] combinedPaths = Stream.concat(Arrays.stream(dataPaths1), Arrays.stream(dataPaths2)).toArray(Path[]::new);
    final String failure = expectThrows(IllegalStateException.class, () -> newNodeEnvironment(combinedPaths)).getMessage();
    assertThat(failure, allOf(containsString("unexpected node ID in metadata"), containsString(nodeIds[0]), containsString(nodeIds[1])));
    assertTrue("[" + failure + "] should match " + Arrays.toString(dataPaths2), Arrays.stream(dataPaths2).anyMatch(p -> failure.contains(p.toString())));
    // verify that loadBestOnDiskState has same check
    final String message = expectThrows(IllegalStateException.class, () -> new PersistedClusterStateService(Stream.of(combinedPaths).map(path -> NodeEnvironment.resolveNodePath(path, 0)).toArray(Path[]::new), nodeIds[0], xContentRegistry(), BigArrays.NON_RECYCLING_INSTANCE, new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), () -> 0L).loadBestOnDiskState()).getMessage();
    assertThat(message, allOf(containsString("unexpected node ID in metadata"), containsString(nodeIds[0]), containsString(nodeIds[1])));
    assertTrue("[" + message + "] should match " + Arrays.toString(dataPaths2), Arrays.stream(dataPaths2).anyMatch(p -> message.contains(p.toString())));
}
Also used : Path(java.nio.file.Path) Arrays(java.util.Arrays) Metadata(org.opensearch.cluster.metadata.Metadata) Term(org.apache.lucene.index.Term) NoneCircuitBreakerService(org.opensearch.indices.breaker.NoneCircuitBreakerService) Level(org.apache.logging.log4j.Level) Version(org.opensearch.Version) DiscoveryNode(org.opensearch.cluster.node.DiscoveryNode) NodeMetadata(org.opensearch.env.NodeMetadata) MockBigArrays(org.opensearch.common.util.MockBigArrays) Directory(org.apache.lucene.store.Directory) Matchers.nullValue(org.hamcrest.Matchers.nullValue) IOContext(org.apache.lucene.store.IOContext) MockPageCacheRecycler(org.opensearch.common.util.MockPageCacheRecycler) NodeEnvironment(org.opensearch.env.NodeEnvironment) Index(org.opensearch.index.Index) Matchers.allOf(org.hamcrest.Matchers.allOf) OpenSearchTestCase(org.opensearch.test.OpenSearchTestCase) Collection(java.util.Collection) Settings(org.opensearch.common.settings.Settings) Collectors(java.util.stream.Collectors) IndexWriter(org.apache.lucene.index.IndexWriter) List(java.util.List) Logger(org.apache.logging.log4j.Logger) Stream(java.util.stream.Stream) Matchers.equalTo(org.hamcrest.Matchers.equalTo) BigArrays(org.opensearch.common.util.BigArrays) IndexWriterConfig(org.apache.lucene.index.IndexWriterConfig) Matchers.containsString(org.hamcrest.Matchers.containsString) DiscoveryNodes(org.opensearch.cluster.node.DiscoveryNodes) MockLogAppender(org.opensearch.test.MockLogAppender) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) CoordinationMetadata(org.opensearch.cluster.coordination.CoordinationMetadata) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) ClusterState(org.opensearch.cluster.ClusterState) Matchers.lessThan(org.hamcrest.Matchers.lessThan) UUIDs(org.opensearch.common.UUIDs) ClusterSettings(org.opensearch.common.settings.ClusterSettings) IndexOutput(org.apache.lucene.store.IndexOutput) Environment(org.opensearch.env.Environment) IOException(java.io.IOException) IOError(java.io.IOError) IOUtils(org.opensearch.core.internal.io.IOUtils) TestLogging(org.opensearch.test.junit.annotations.TestLogging) AtomicLong(java.util.concurrent.atomic.AtomicLong) FilterDirectory(org.apache.lucene.store.FilterDirectory) NIOFSDirectory(org.apache.lucene.store.NIOFSDirectory) ClusterName(org.opensearch.cluster.ClusterName) Writer(org.opensearch.gateway.PersistedClusterStateService.Writer) LogManager(org.apache.logging.log4j.LogManager)
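The direct construction used in the final assertion of this example can be isolated as the sketch below; dataPaths and nodeId are placeholders, and xContentRegistry() is assumed to come from the test base class, exactly as in the example.

// Sketch only: dataPaths (already resolved node paths) and nodeId are placeholders.
PersistedClusterStateService service = new PersistedClusterStateService(
    dataPaths,
    nodeId,
    xContentRegistry(),
    BigArrays.NON_RECYCLING_INSTANCE,
    new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
    // LongSupplier argument; the test simply passes () -> 0L
    () -> 0L
);
// loadBestOnDiskState() repeats the node-ID consistency check the test asserts on.
PersistedClusterStateService.OnDiskState bestState = service.loadBestOnDiskState();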

Aggregations

IndexWriter (org.apache.lucene.index.IndexWriter): 16 usages
NodeEnvironment (org.opensearch.env.NodeEnvironment): 16 usages
Writer (org.opensearch.gateway.PersistedClusterStateService.Writer): 16 usages
ClusterState (org.opensearch.cluster.ClusterState): 15 usages
Matchers.containsString (org.hamcrest.Matchers.containsString): 11 usages
Path (java.nio.file.Path): 10 usages
FilterDirectory (org.apache.lucene.store.FilterDirectory): 9 usages
ClusterSettings (org.opensearch.common.settings.ClusterSettings): 7 usages
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 6 usages
IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig): 6 usages
Directory (org.apache.lucene.store.Directory): 6 usages
NIOFSDirectory (org.apache.lucene.store.NIOFSDirectory): 6 usages
IOException (java.io.IOException): 5 usages
IndexMetadata (org.opensearch.cluster.metadata.IndexMetadata): 5 usages
ArrayList (java.util.ArrayList): 4 usages
Collection (java.util.Collection): 4 usages
AtomicLong (java.util.concurrent.atomic.AtomicLong): 4 usages
Term (org.apache.lucene.index.Term): 4 usages
IOContext (org.apache.lucene.store.IOContext): 4 usages
DiscoveryNode (org.opensearch.cluster.node.DiscoveryNode): 4 usages