Search in sources :

Example 11 with NodeEnvironment

use of org.opensearch.env.NodeEnvironment in project OpenSearch by opensearch-project.

From the class IndicesServiceTests, the method testVerifyIfIndexContentDeleted:

/**
 * Verifies that on-disk index content can only be deleted once the index has been
 * removed from the cluster state, and that the deletion actually removes the index paths.
 *
 * @throws Exception if writing the index metadata or verifying the deletion fails
 */
public void testVerifyIfIndexContentDeleted() throws Exception {
    final Index index = new Index("test", UUIDs.randomBase64UUID());
    final IndicesService indicesService = getIndicesService();
    final NodeEnvironment nodeEnv = getNodeEnvironment();
    final MetaStateService metaStateService = getInstanceFromNode(MetaStateService.class);
    final ClusterService clusterService = getInstanceFromNode(ClusterService.class);
    final Settings idxSettings = Settings.builder()
        .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
        .put(IndexMetadata.SETTING_INDEX_UUID, index.getUUID())
        .build();
    final IndexMetadata indexMetadata = new IndexMetadata.Builder(index.getName())
        .settings(idxSettings)
        .numberOfShards(1)
        .numberOfReplicas(0)
        .build();
    // Persist the index metadata so there is actual on-disk content to delete later.
    metaStateService.writeIndex("test index being created", indexMetadata);
    final Metadata metadata = Metadata.builder(clusterService.state().metadata()).put(indexMetadata, true).build();
    final ClusterState csWithIndex = new ClusterState.Builder(clusterService.state()).metadata(metadata).build();
    // While the index is still part of the cluster state, deleting its content must fail.
    // expectThrows is used for consistency with the other tests in this suite,
    // replacing the older try/fail/catch pattern.
    final IllegalStateException e = expectThrows(
        IllegalStateException.class,
        () -> indicesService.verifyIndexIsDeleted(index, csWithIndex)
    );
    assertThat(e.getMessage(), containsString("Cannot delete index"));
    // Once the index is removed from the cluster state, deletion must succeed
    // and the index paths must be gone from disk.
    final ClusterState withoutIndex = new ClusterState.Builder(csWithIndex)
        .metadata(Metadata.builder(csWithIndex.metadata()).remove(index.getName()))
        .build();
    indicesService.verifyIndexIsDeleted(index, withoutIndex);
    assertFalse("index files should be deleted", FileSystemUtils.exists(nodeEnv.indexPaths(index)));
}
Also used : MetaStateService(org.opensearch.gateway.MetaStateService) ClusterState(org.opensearch.cluster.ClusterState) ClusterService(org.opensearch.cluster.service.ClusterService) NodeEnvironment(org.opensearch.env.NodeEnvironment) Metadata(org.opensearch.cluster.metadata.Metadata) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) Index(org.opensearch.index.Index) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) Settings(org.opensearch.common.settings.Settings) IndexSettings(org.opensearch.index.IndexSettings)

Example 12 with NodeEnvironment

use of org.opensearch.env.NodeEnvironment in project OpenSearch by opensearch-project.

From the class ShardPath, the method selectNewPathForShard:

/**
 * Selects the data and state paths for a newly allocated shard.
 *
 * <p>With a custom data path, the data lives at the resolved custom location and the
 * state at the first node path. Otherwise the shard is placed on the node path chosen by
 * a free-space heuristic: paths that cannot hold the estimated shard size are filtered
 * out, and the remainder are ranked by (1) fewest shards of this index, (2) fewest
 * shards overall, (3) most usable space. If no path survives the filter, the path with
 * the most free space is used.
 *
 * @param env                  the node environment providing the candidate node paths
 * @param shardId              the shard to place
 * @param indexSettings        settings of the owning index (custom data path, etc.)
 * @param avgShardSizeInBytes  average shard size across the cluster, used as a size estimate
 * @param dataPathToShardCount total shard count per data path, used as a tie-breaker
 * @return the selected {@code ShardPath} for the shard
 * @throws IOException if querying the file stores fails
 */
public static ShardPath selectNewPathForShard(NodeEnvironment env, ShardId shardId, IndexSettings indexSettings, long avgShardSizeInBytes, Map<Path, Integer> dataPathToShardCount) throws IOException {
    final Path dataPath;
    final Path statePath;
    if (indexSettings.hasCustomDataPath()) {
        // Custom data path: data goes to the custom location, state stays on the first node path.
        dataPath = env.resolveCustomLocation(indexSettings.customDataPath(), shardId);
        statePath = env.nodePaths()[0].resolve(shardId);
    } else {
        // Sum the usable space over all node paths; BigInteger avoids overflow on large disks.
        BigInteger totFreeSpace = BigInteger.ZERO;
        for (NodeEnvironment.NodePath nodePath : env.nodePaths()) {
            totFreeSpace = totFreeSpace.add(BigInteger.valueOf(nodePath.fileStore.getUsableSpace()));
        }
        // TODO: this is a hack!! We should instead keep track of incoming (relocated) shards since we know
        // how large they will be once they're done copying, instead of a silly guess for such cases:
        // Very rough heuristic of how much disk space we expect the shard will use over its lifetime, the max of current average
        // shard size across the cluster and 5% of the total available free space on this node:
        BigInteger estShardSizeInBytes = BigInteger.valueOf(avgShardSizeInBytes).max(totFreeSpace.divide(BigInteger.valueOf(20)));
        // TODO - do we need something more extensible? Yet, this does the job for now...
        final NodeEnvironment.NodePath[] paths = env.nodePaths();
        // If no better path is chosen, use the one with the most space by default
        NodeEnvironment.NodePath bestPath = getPathWithMostFreeSpace(env);
        if (paths.length != 1) {
            // Per-path count of shards belonging to this index, for the primary ranking criterion.
            Map<NodeEnvironment.NodePath, Long> pathToShardCount = env.shardCountPerPath(shardId.getIndex());
            // Compute how much space there is on each path
            final Map<NodeEnvironment.NodePath, BigInteger> pathsToSpace = new HashMap<>(paths.length);
            for (NodeEnvironment.NodePath nodePath : paths) {
                FileStore fileStore = nodePath.fileStore;
                BigInteger usableBytes = BigInteger.valueOf(fileStore.getUsableSpace());
                pathsToSpace.put(nodePath, usableBytes);
            }
            // Keep only paths with room for the estimated shard size, then rank:
            // fewer shards of this index first, then fewer total shards, then more free space.
            // Falls back to the most-free-space path when every candidate is filtered out.
            bestPath = Arrays.stream(paths).filter((path) -> pathsToSpace.get(path).subtract(estShardSizeInBytes).compareTo(BigInteger.ZERO) > 0).sorted((p1, p2) -> {
                int cmp = Long.compare(pathToShardCount.getOrDefault(p1, 0L), pathToShardCount.getOrDefault(p2, 0L));
                if (cmp == 0) {
                    // if the number of shards is equal, tie-break with the number of total shards
                    cmp = Integer.compare(dataPathToShardCount.getOrDefault(p1.path, 0), dataPathToShardCount.getOrDefault(p2.path, 0));
                    if (cmp == 0) {
                        // if the number of shards is equal, tie-break with the usable bytes
                        cmp = pathsToSpace.get(p2).compareTo(pathsToSpace.get(p1));
                    }
                }
                return cmp;
            }).findFirst().orElse(bestPath);
        }
        // Without a custom data path, data and state live on the same chosen node path.
        statePath = bestPath.resolve(shardId);
        dataPath = statePath;
    }
    return new ShardPath(indexSettings.hasCustomDataPath(), dataPath, statePath, shardId);
}
Also used : Path(java.nio.file.Path) NodeEnvironment(org.opensearch.env.NodeEnvironment) Arrays(java.util.Arrays) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) FileStore(java.nio.file.FileStore) Files(java.nio.file.Files) IOException(java.io.IOException) HashMap(java.util.HashMap) IOUtils(org.opensearch.core.internal.io.IOUtils) Objects(java.util.Objects) Logger(org.apache.logging.log4j.Logger) NamedXContentRegistry(org.opensearch.common.xcontent.NamedXContentRegistry) Map(java.util.Map) IndexSettings(org.opensearch.index.IndexSettings) Strings(org.apache.logging.log4j.util.Strings) BigInteger(java.math.BigInteger) ShardLock(org.opensearch.env.ShardLock) Path(java.nio.file.Path) NodeEnvironment(org.opensearch.env.NodeEnvironment) HashMap(java.util.HashMap) FileStore(java.nio.file.FileStore) BigInteger(java.math.BigInteger)

Example 13 with NodeEnvironment

use of org.opensearch.env.NodeEnvironment in project OpenSearch by opensearch-project.

From the class IncrementalClusterStateWriterTests, the method testAtomicityWithFailures:

/**
 * Checks that cluster-state writes are atomic under injected random I/O failures:
 * after any sequence of (possibly failed) write transactions, the state loaded from
 * disk must equal one of the metadata snapshots that could legitimately have been
 * persisted (the last committed one, or — after a dirty failure — the in-flight one).
 *
 * @throws IOException if the node environment or state files cannot be accessed
 */
public void testAtomicityWithFailures() throws IOException {
    try (NodeEnvironment env = newNodeEnvironment()) {
        // Wraps MetaStateService so that writes fail randomly once failRandomly() is enabled.
        MetaStateServiceWithFailures metaStateService = new MetaStateServiceWithFailures(randomIntBetween(100, 1000), env, xContentRegistry());
        // We only guarantee atomicity of writes, if there is initial Manifest file
        Manifest manifest = Manifest.empty();
        Metadata metadata = Metadata.EMPTY_METADATA;
        metaStateService.writeManifestAndCleanup("startup", Manifest.empty());
        long currentTerm = randomNonNegativeLong();
        long clusterStateVersion = randomNonNegativeLong();
        metaStateService.failRandomly();
        // All metadata snapshots that could validly be on disk at the end of the test.
        Set<Metadata> possibleMetadata = new HashSet<>();
        possibleMetadata.add(metadata);
        // NOTE(review): the loop bound randomIntBetween(1, 5) is re-evaluated on every
        // iteration, so the iteration count distribution is not uniform over 1..5 —
        // presumably acceptable for a randomized test, but confirm it is intentional.
        for (int i = 0; i < randomIntBetween(1, 5); i++) {
            IncrementalClusterStateWriter.AtomicClusterStateWriter writer = new IncrementalClusterStateWriter.AtomicClusterStateWriter(metaStateService, manifest);
            metadata = randomMetadataForTx();
            Map<Index, Long> indexGenerations = new HashMap<>();
            try {
                // One transaction: global state, then each index, then the manifest commit.
                long globalGeneration = writer.writeGlobalState("global", metadata);
                for (IndexMetadata indexMetadata : metadata) {
                    long generation = writer.writeIndex("index", indexMetadata);
                    indexGenerations.put(indexMetadata.getIndex(), generation);
                }
                Manifest newManifest = new Manifest(currentTerm, clusterStateVersion, globalGeneration, indexGenerations);
                writer.writeManifestAndCleanup("manifest", newManifest);
                // Commit succeeded: only the just-written metadata can be on disk now.
                possibleMetadata.clear();
                possibleMetadata.add(metadata);
                manifest = newManifest;
            } catch (WriteStateException e) {
                if (e.isDirty()) {
                    // Dirty failure: the write may or may not have reached disk, so the
                    // in-flight metadata becomes an additional valid outcome.
                    possibleMetadata.add(metadata);
                    /*
                         * If dirty WriteStateException occurred, it's only safe to proceed if there is subsequent
                         * successful write of metadata and Manifest. We prefer to break here, not to over complicate test logic.
                         * See also MetadataStateFormat#testFailRandomlyAndReadAnyState, that does not break.
                         */
                    break;
                }
            }
        }
        // Disable failure injection so the final load reflects what is actually on disk.
        metaStateService.noFailures();
        Tuple<Manifest, Metadata> manifestAndMetadata = metaStateService.loadFullState();
        Metadata loadedMetadata = manifestAndMetadata.v2();
        assertTrue(possibleMetadata.stream().anyMatch(md -> metadataEquals(md, loadedMetadata)));
    }
}
Also used : Metadata(org.opensearch.cluster.metadata.Metadata) AllocationService(org.opensearch.cluster.routing.allocation.AllocationService) Level(org.apache.logging.log4j.Level) ClusterRebalanceAllocationDecider(org.opensearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider) Version(org.opensearch.Version) XContentParser(org.opensearch.common.xcontent.XContentParser) DiscoveryNode(org.opensearch.cluster.node.DiscoveryNode) OpenSearchAllocationTestCase(org.opensearch.cluster.OpenSearchAllocationTestCase) Mockito.verifyNoMoreInteractions(org.mockito.Mockito.verifyNoMoreInteractions) Directory(org.apache.lucene.store.Directory) Map(java.util.Map) Path(java.nio.file.Path) Manifest(org.opensearch.cluster.metadata.Manifest) NodeEnvironment(org.opensearch.env.NodeEnvironment) Index(org.opensearch.index.Index) Set(java.util.Set) Settings(org.opensearch.common.settings.Settings) DiscoveryNodeRole(org.opensearch.cluster.node.DiscoveryNodeRole) Tuple(org.opensearch.common.collect.Tuple) List(java.util.List) Logger(org.apache.logging.log4j.Logger) Matchers.equalTo(org.hamcrest.Matchers.equalTo) MockDirectoryWrapper(org.apache.lucene.store.MockDirectoryWrapper) Matchers.containsString(org.hamcrest.Matchers.containsString) Mockito.eq(org.mockito.Mockito.eq) Mockito.mock(org.mockito.Mockito.mock) DiscoveryNodes(org.opensearch.cluster.node.DiscoveryNodes) MockLogAppender(org.opensearch.test.MockLogAppender) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) HashMap(java.util.HashMap) HashSet(java.util.HashSet) ClusterState(org.opensearch.cluster.ClusterState) ArgumentCaptor(org.mockito.ArgumentCaptor) Matchers.lessThan(org.hamcrest.Matchers.lessThan) Matchers.hasSize(org.hamcrest.Matchers.hasSize) ClusterSettings(org.opensearch.common.settings.ClusterSettings) Mockito.anyString(org.mockito.Mockito.anyString) MetadataIndexStateService(org.opensearch.cluster.metadata.MetadataIndexStateService) IOException(java.io.IOException) 
Mockito.times(org.mockito.Mockito.times) Mockito.when(org.mockito.Mockito.when) Mockito.verify(org.mockito.Mockito.verify) XContentBuilder(org.opensearch.common.xcontent.XContentBuilder) TestLogging(org.opensearch.test.junit.annotations.TestLogging) AtomicLong(java.util.concurrent.atomic.AtomicLong) NamedXContentRegistry(org.opensearch.common.xcontent.NamedXContentRegistry) ClusterName(org.opensearch.cluster.ClusterName) RoutingTable(org.opensearch.cluster.routing.RoutingTable) LogManager(org.apache.logging.log4j.LogManager) Collections(java.util.Collections) NodeEnvironment(org.opensearch.env.NodeEnvironment) HashMap(java.util.HashMap) Metadata(org.opensearch.cluster.metadata.Metadata) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) Index(org.opensearch.index.Index) Manifest(org.opensearch.cluster.metadata.Manifest) AtomicLong(java.util.concurrent.atomic.AtomicLong) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) HashSet(java.util.HashSet)

Example 14 with NodeEnvironment

use of org.opensearch.env.NodeEnvironment in project OpenSearch by opensearch-project.

From the class PersistedClusterStateServiceTests, the method testFailsIfFreshestStateIsInStaleTerm:

/**
 * Checks that loading the best on-disk state fails when the data paths disagree on the
 * current term: the paths holding the freshest accepted state carry a stale current
 * term, while other paths carry a fresher current term. The load must throw an
 * {@link IllegalStateException} naming both terms and both sets of data paths.
 *
 * @throws IOException if a node environment or its state files cannot be accessed
 */
public void testFailsIfFreshestStateIsInStaleTerm() throws IOException {
    final Path[] dataPaths1 = createDataPaths();
    final Path[] dataPaths2 = createDataPaths();
    final Path[] combinedPaths = Stream.concat(Arrays.stream(dataPaths1), Arrays.stream(dataPaths2)).toArray(Path[]::new);
    // Random terms/versions constrained so that (staleTerm, staleVersion) is strictly
    // older than (freshTerm, freshVersion), and staleCurrentTerm < freshCurrentTerm.
    final long staleCurrentTerm = randomLongBetween(1L, Long.MAX_VALUE - 1);
    final long freshCurrentTerm = randomLongBetween(staleCurrentTerm + 1, Long.MAX_VALUE);
    final long freshTerm = randomLongBetween(1L, Long.MAX_VALUE);
    final long staleTerm = randomBoolean() ? freshTerm : randomLongBetween(1L, freshTerm);
    final long freshVersion = randomLongBetween(2L, Long.MAX_VALUE);
    final long staleVersion = staleTerm == freshTerm ? randomLongBetween(1L, freshVersion - 1) : randomLongBetween(1L, Long.MAX_VALUE);
    // Phase 1: write the stale current term and stale accepted state to ALL paths.
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            assertFalse(clusterState.metadata().clusterUUIDCommitted());
            writeState(writer, staleCurrentTerm, ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).version(1).coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(staleTerm).build())).version(staleVersion).build(), clusterState);
        }
    }
    // Phase 2: bump only dataPaths1 to the fresher current term (state unchanged).
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths1)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final ClusterState clusterState = loadPersistedClusterState(newPersistedClusterStateService(nodeEnvironment));
            writeState(writer, freshCurrentTerm, clusterState, clusterState);
        }
    }
    // Phase 3: write the freshest accepted state to dataPaths2, but under the stale
    // current term still recorded there — making the freshest state live in a stale term.
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(dataPaths2)) {
        try (Writer writer = newPersistedClusterStateService(nodeEnvironment).createWriter()) {
            final PersistedClusterStateService.OnDiskState onDiskState = newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState();
            final ClusterState clusterState = clusterStateFromMetadata(onDiskState.lastAcceptedVersion, onDiskState.metadata);
            writeState(writer, onDiskState.currentTerm, ClusterState.builder(clusterState).metadata(Metadata.builder(clusterState.metadata()).version(2).coordinationMetadata(CoordinationMetadata.builder(clusterState.coordinationMetadata()).term(freshTerm).build())).version(freshVersion).build(), clusterState);
        }
    }
    // Phase 4: loading over the combined paths must fail and report both terms and paths.
    try (NodeEnvironment nodeEnvironment = newNodeEnvironment(combinedPaths)) {
        final String message = expectThrows(IllegalStateException.class, () -> newPersistedClusterStateService(nodeEnvironment).loadBestOnDiskState()).getMessage();
        assertThat(message, allOf(containsString("inconsistent terms found"), containsString(Long.toString(staleCurrentTerm)), containsString(Long.toString(freshCurrentTerm))));
        assertTrue("[" + message + "] should match " + Arrays.toString(dataPaths1), Arrays.stream(dataPaths1).anyMatch(p -> message.contains(p.toString())));
        assertTrue("[" + message + "] should match " + Arrays.toString(dataPaths2), Arrays.stream(dataPaths2).anyMatch(p -> message.contains(p.toString())));
    }
}
Also used : Path(java.nio.file.Path) Arrays(java.util.Arrays) Metadata(org.opensearch.cluster.metadata.Metadata) Term(org.apache.lucene.index.Term) NoneCircuitBreakerService(org.opensearch.indices.breaker.NoneCircuitBreakerService) Level(org.apache.logging.log4j.Level) Version(org.opensearch.Version) DiscoveryNode(org.opensearch.cluster.node.DiscoveryNode) NodeMetadata(org.opensearch.env.NodeMetadata) MockBigArrays(org.opensearch.common.util.MockBigArrays) Directory(org.apache.lucene.store.Directory) Matchers.nullValue(org.hamcrest.Matchers.nullValue) IOContext(org.apache.lucene.store.IOContext) MockPageCacheRecycler(org.opensearch.common.util.MockPageCacheRecycler) Path(java.nio.file.Path) NodeEnvironment(org.opensearch.env.NodeEnvironment) Index(org.opensearch.index.Index) Matchers.allOf(org.hamcrest.Matchers.allOf) OpenSearchTestCase(org.opensearch.test.OpenSearchTestCase) Collection(java.util.Collection) Settings(org.opensearch.common.settings.Settings) Collectors(java.util.stream.Collectors) IndexWriter(org.apache.lucene.index.IndexWriter) List(java.util.List) Logger(org.apache.logging.log4j.Logger) Stream(java.util.stream.Stream) Matchers.equalTo(org.hamcrest.Matchers.equalTo) BigArrays(org.opensearch.common.util.BigArrays) IndexWriterConfig(org.apache.lucene.index.IndexWriterConfig) Matchers.containsString(org.hamcrest.Matchers.containsString) DiscoveryNodes(org.opensearch.cluster.node.DiscoveryNodes) MockLogAppender(org.opensearch.test.MockLogAppender) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) CoordinationMetadata(org.opensearch.cluster.coordination.CoordinationMetadata) ArrayList(java.util.ArrayList) HashSet(java.util.HashSet) ClusterState(org.opensearch.cluster.ClusterState) Matchers.lessThan(org.hamcrest.Matchers.lessThan) UUIDs(org.opensearch.common.UUIDs) ClusterSettings(org.opensearch.common.settings.ClusterSettings) IndexOutput(org.apache.lucene.store.IndexOutput) 
Environment(org.opensearch.env.Environment) IOException(java.io.IOException) IOError(java.io.IOError) IOUtils(org.opensearch.core.internal.io.IOUtils) TestLogging(org.opensearch.test.junit.annotations.TestLogging) AtomicLong(java.util.concurrent.atomic.AtomicLong) FilterDirectory(org.apache.lucene.store.FilterDirectory) NIOFSDirectory(org.apache.lucene.store.NIOFSDirectory) ClusterName(org.opensearch.cluster.ClusterName) Writer(org.opensearch.gateway.PersistedClusterStateService.Writer) LogManager(org.apache.logging.log4j.LogManager) ClusterState(org.opensearch.cluster.ClusterState) NodeEnvironment(org.opensearch.env.NodeEnvironment) Matchers.containsString(org.hamcrest.Matchers.containsString) IndexWriter(org.apache.lucene.index.IndexWriter) Writer(org.opensearch.gateway.PersistedClusterStateService.Writer)

Example 15 with NodeEnvironment

use of org.opensearch.env.NodeEnvironment in project OpenSearch by opensearch-project.

From the class PersistedClusterStateServiceTests, the method testFailsIfGlobalMetadataIsMissing:

/**
 * Checks that loading the best on-disk state fails with a clear error when the global
 * metadata document is missing: after writing a valid state, one data path's metadata
 * Lucene index is recreated empty, and the subsequent load must throw an
 * {@link IllegalStateException} that names both the problem and the corrupted path.
 *
 * @throws IOException if the node environment or the metadata directory cannot be accessed
 */
public void testFailsIfGlobalMetadataIsMissing() throws IOException {
    try (NodeEnvironment env = newNodeEnvironment(createDataPaths())) {
        // Write a valid persisted state first so there is something on disk to corrupt.
        try (Writer stateWriter = newPersistedClusterStateService(env).createWriter()) {
            final ClusterState initialState = loadPersistedClusterState(newPersistedClusterStateService(env));
            writeState(stateWriter, 0L, ClusterState.builder(initialState).version(randomLongBetween(1L, Long.MAX_VALUE)).build(), initialState);
        }
        // Pick one data path and recreate its metadata index as empty, wiping the
        // global metadata document while leaving a structurally valid Lucene index.
        final Path corruptedPath = randomFrom(env.nodeDataPaths());
        try (Directory metadataDir = new NIOFSDirectory(corruptedPath.resolve(PersistedClusterStateService.METADATA_DIRECTORY_NAME))) {
            final IndexWriterConfig config = new IndexWriterConfig().setOpenMode(IndexWriterConfig.OpenMode.CREATE);
            try (IndexWriter luceneWriter = new IndexWriter(metadataDir, config)) {
                luceneWriter.commit();
            }
        }
        // Loading must now fail, reporting the missing metadata and the offending path.
        final IllegalStateException failure = expectThrows(
            IllegalStateException.class,
            () -> newPersistedClusterStateService(env).loadBestOnDiskState()
        );
        assertThat(failure.getMessage(), allOf(containsString("no global metadata found"), containsString(corruptedPath.toString())));
    }
}
Also used : Path(java.nio.file.Path) ClusterState(org.opensearch.cluster.ClusterState) NIOFSDirectory(org.apache.lucene.store.NIOFSDirectory) NodeEnvironment(org.opensearch.env.NodeEnvironment) IndexWriter(org.apache.lucene.index.IndexWriter) Matchers.containsString(org.hamcrest.Matchers.containsString) IndexWriter(org.apache.lucene.index.IndexWriter) Writer(org.opensearch.gateway.PersistedClusterStateService.Writer) Directory(org.apache.lucene.store.Directory) FilterDirectory(org.apache.lucene.store.FilterDirectory) NIOFSDirectory(org.apache.lucene.store.NIOFSDirectory) IndexWriterConfig(org.apache.lucene.index.IndexWriterConfig)

Aggregations

NodeEnvironment (org.opensearch.env.NodeEnvironment)62 Settings (org.opensearch.common.settings.Settings)36 Path (java.nio.file.Path)32 Matchers.containsString (org.hamcrest.Matchers.containsString)22 ClusterState (org.opensearch.cluster.ClusterState)21 IndexMetadata (org.opensearch.cluster.metadata.IndexMetadata)19 ClusterSettings (org.opensearch.common.settings.ClusterSettings)17 IndexWriter (org.apache.lucene.index.IndexWriter)16 Metadata (org.opensearch.cluster.metadata.Metadata)16 Writer (org.opensearch.gateway.PersistedClusterStateService.Writer)16 Index (org.opensearch.index.Index)13 IOException (java.io.IOException)12 FilterDirectory (org.apache.lucene.store.FilterDirectory)9 Environment (org.opensearch.env.Environment)9 ArrayList (java.util.ArrayList)7 Directory (org.apache.lucene.store.Directory)7 TestThreadPool (org.opensearch.threadpool.TestThreadPool)7 FileSystem (java.nio.file.FileSystem)6 AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)6 IndexWriterConfig (org.apache.lucene.index.IndexWriterConfig)6