Search in sources:

Example 11 with RepositoriesMetadata

use of org.opensearch.cluster.metadata.RepositoriesMetadata in project OpenSearch by opensearch-project.

The class RepositoriesMetadataSerializationTests, method doParseInstance.

/**
 * Parses a {@link RepositoriesMetadata} custom back out of x-content, returning it with its
 * repositories sorted by name so it compares equal to the (sorted) generated test instance.
 */
@Override
protected Custom doParseInstance(XContentParser parser) throws IOException {
    // fromXContent expects the parser to be positioned on the opening object token
    assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
    final RepositoriesMetadata parsed = RepositoriesMetadata.fromXContent(parser);
    // the whole object must have been consumed by the parse
    assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken());
    final List<RepositoryMetadata> sortedRepos = new ArrayList<>(parsed.repositories());
    sortedRepos.sort(Comparator.comparing(RepositoryMetadata::name));
    return new RepositoriesMetadata(sortedRepos);
}
Also used : RepositoriesMetadata(org.opensearch.cluster.metadata.RepositoriesMetadata) RepositoryMetadata(org.opensearch.cluster.metadata.RepositoryMetadata) ArrayList(java.util.ArrayList)

Example 12 with RepositoriesMetadata

use of org.opensearch.cluster.metadata.RepositoriesMetadata in project OpenSearch by opensearch-project.

The class RepositoriesMetadataSerializationTests, method createTestInstance.

/**
 * Builds a random {@link RepositoriesMetadata} instance containing up to ten repositories,
 * sorted by name so that serialization round-trips compare equal.
 */
@Override
protected Custom createTestInstance() {
    final List<RepositoryMetadata> repos = new ArrayList<>();
    final int count = randomInt(10);
    for (int n = 0; n < count; n++) {
        // divide by 2 to not overflow when adding to this number for the pending generation below
        final long safeGen = randomNonNegativeLong() / 2L;
        final long pendingGen = safeGen + randomLongBetween(0, safeGen);
        repos.add(new RepositoryMetadata(randomAlphaOfLength(10), randomAlphaOfLength(10), randomSettings(), safeGen, pendingGen));
    }
    repos.sort(Comparator.comparing(RepositoryMetadata::name));
    return new RepositoriesMetadata(repos);
}
Also used : RepositoriesMetadata(org.opensearch.cluster.metadata.RepositoriesMetadata) RepositoryMetadata(org.opensearch.cluster.metadata.RepositoryMetadata) ArrayList(java.util.ArrayList)

Example 13 with RepositoriesMetadata

use of org.opensearch.cluster.metadata.RepositoriesMetadata in project OpenSearch by opensearch-project.

The class BlobStoreRepository, method writeIndexGen.

/**
 * Writing a new index generation is a three step process.
 * First, the {@link RepositoryMetadata} entry for this repository is set into a pending state by incrementing its
 * pending generation {@code P} while its safe generation {@code N} remains unchanged.
 * Second, the updated {@link RepositoryData} is written to generation {@code P + 1}.
 * Lastly, the {@link RepositoryMetadata} entry for this repository is updated to the new generation {@code P + 1} and thus
 * pending and safe generation are set to the same value marking the end of the update of the repository data.
 *
 * @param repositoryData RepositoryData to write
 * @param expectedGen    expected repository generation at the start of the operation
 * @param version        version of the repository metadata to write
 * @param stateFilter    filter for the last cluster state update executed by this method
 * @param listener       completion listener
 */
protected void writeIndexGen(RepositoryData repositoryData, long expectedGen, Version version, Function<ClusterState, ClusterState> stateFilter, ActionListener<RepositoryData> listener) {
    // can not write to a read only repository
    assert isReadOnly() == false;
    final long currentGen = repositoryData.getGenId();
    if (currentGen != expectedGen) {
        // the index file was updated by a concurrent operation, so we were operating on stale
        // repository data
        listener.onFailure(new RepositoryException(metadata.name(), "concurrent modification of the index-N file, expected current generation [" + expectedGen + "], actual current generation [" + currentGen + "]"));
        return;
    }
    // Step 1: Set repository generation state to the next possible pending generation
    final StepListener<Long> setPendingStep = new StepListener<>();
    clusterService.submitStateUpdateTask("set pending repository generation [" + metadata.name() + "][" + expectedGen + "]", new ClusterStateUpdateTask() {

        // pending generation chosen in execute(); forwarded to setPendingStep once the state update is committed
        private long newGen;

        @Override
        public ClusterState execute(ClusterState currentState) {
            final RepositoryMetadata meta = getRepoMetadata(currentState);
            final String repoName = metadata.name();
            final long genInState = meta.generation();
            // UNKNOWN_REPO_GEN (or best-effort-consistency mode) means the generation tracked in the cluster
            // state cannot be trusted, so the caller-supplied expected generation is used instead below
            final boolean uninitializedMeta = meta.generation() == RepositoryData.UNKNOWN_REPO_GEN || bestEffortConsistency;
            if (uninitializedMeta == false && meta.pendingGeneration() != genInState) {
                // pending != safe means a previous write never finished step 3; log it and carry on
                logger.info("Trying to write new repository data over unfinished write, repo [{}] is at " + "safe generation [{}] and pending generation [{}]", meta.name(), genInState, meta.pendingGeneration());
            }
            assert expectedGen == RepositoryData.EMPTY_REPO_GEN || uninitializedMeta || expectedGen == meta.generation() : "Expected non-empty generation [" + expectedGen + "] does not match generation tracked in [" + meta + "]";
            // If we run into the empty repo generation for the expected gen, the repo is assumed to have been cleared of
            // all contents by an external process so we reset the safe generation to the empty generation.
            final long safeGeneration = expectedGen == RepositoryData.EMPTY_REPO_GEN ? RepositoryData.EMPTY_REPO_GEN : (uninitializedMeta ? expectedGen : genInState);
            // Regardless of whether or not the safe generation has been reset, the pending generation always increments so that
            // even if a repository has been manually cleared of all contents we will never reuse the same repository generation.
            // This is motivated by the consistency behavior the S3 based blob repository implementation has to support which does
            // not offer any consistency guarantees when it comes to overwriting the same blob name with different content.
            // NOTE(review): reads pendingGeneration() off the repository's `metadata` field rather than the
            // `meta` resolved from currentState above — presumably kept in sync via updateState, but confirm
            final long nextPendingGen = metadata.pendingGeneration() + 1;
            newGen = uninitializedMeta ? Math.max(expectedGen + 1, nextPendingGen) : nextPendingGen;
            assert newGen > latestKnownRepoGen.get() : "Attempted new generation [" + newGen + "] must be larger than latest known generation [" + latestKnownRepoGen.get() + "]";
            return ClusterState.builder(currentState).metadata(Metadata.builder(currentState.getMetadata()).putCustom(RepositoriesMetadata.TYPE, currentState.metadata().<RepositoriesMetadata>custom(RepositoriesMetadata.TYPE).withUpdatedGeneration(repoName, safeGeneration, newGen)).build()).build();
        }

        @Override
        public void onFailure(String source, Exception e) {
            listener.onFailure(new RepositoryException(metadata.name(), "Failed to execute cluster state update [" + source + "]", e));
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            setPendingStep.onResponse(newGen);
        }
    });
    final StepListener<RepositoryData> filterRepositoryDataStep = new StepListener<>();
    // Step 2: Write new index-N blob to repository and update index.latest
    setPendingStep.whenComplete(newGen -> threadPool().executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.wrap(listener, l -> {
        // BwC logic: Load snapshot version information if any snapshot is missing a version in RepositoryData so that the new
        // RepositoryData contains a version for every snapshot
        final List<SnapshotId> snapshotIdsWithoutVersion = repositoryData.getSnapshotIds().stream().filter(snapshotId -> repositoryData.getVersion(snapshotId) == null).collect(Collectors.toList());
        if (snapshotIdsWithoutVersion.isEmpty() == false) {
            final Map<SnapshotId, Version> updatedVersionMap = new ConcurrentHashMap<>();
            // fan out one task per version-less snapshot; runAfter fires the next step once all of them finished
            final GroupedActionListener<Void> loadAllVersionsListener = new GroupedActionListener<>(ActionListener.runAfter(new ActionListener<Collection<Void>>() {

                @Override
                public void onResponse(Collection<Void> voids) {
                    logger.info("Successfully loaded all snapshot's version information for {} from snapshot metadata", AllocationService.firstListElementsToCommaDelimitedString(snapshotIdsWithoutVersion, SnapshotId::toString, logger.isDebugEnabled()));
                }

                @Override
                public void onFailure(Exception e) {
                    // best effort: failing to backfill version info does not abort the write
                    logger.warn("Failure when trying to load missing version information from snapshot metadata", e);
                }
            }, () -> filterRepositoryDataStep.onResponse(repositoryData.withVersions(updatedVersionMap))), snapshotIdsWithoutVersion.size());
            for (SnapshotId snapshotId : snapshotIdsWithoutVersion) {
                threadPool().executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.run(loadAllVersionsListener, () -> updatedVersionMap.put(snapshotId, getSnapshotInfo(snapshotId).version())));
            }
        } else {
            filterRepositoryDataStep.onResponse(repositoryData);
        }
    })), listener::onFailure);
    filterRepositoryDataStep.whenComplete(filteredRepositoryData -> {
        final long newGen = setPendingStep.result();
        final RepositoryData newRepositoryData = filteredRepositoryData.withGenId(newGen);
        if (latestKnownRepoGen.get() >= newGen) {
            // a concurrent writer already reached (at least) this generation; abort before writing
            throw new IllegalArgumentException("Tried writing generation [" + newGen + "] but repository is at least at generation [" + latestKnownRepoGen.get() + "] already");
        }
        // write the index file
        if (ensureSafeGenerationExists(expectedGen, listener::onFailure) == false) {
            return;
        }
        final String indexBlob = INDEX_FILE_PREFIX + Long.toString(newGen);
        logger.debug("Repository [{}] writing new index generational blob [{}]", metadata.name(), indexBlob);
        final BytesReference serializedRepoData = BytesReference.bytes(newRepositoryData.snapshotsToXContent(XContentFactory.jsonBuilder(), version));
        writeAtomic(blobContainer(), indexBlob, serializedRepoData, true);
        maybeWriteIndexLatest(newGen);
        // Step 3: Update CS to reflect new repository generation.
        clusterService.submitStateUpdateTask("set safe repository generation [" + metadata.name() + "][" + newGen + "]", new ClusterStateUpdateTask() {

            @Override
            public ClusterState execute(ClusterState currentState) {
                final RepositoryMetadata meta = getRepoMetadata(currentState);
                if (meta.generation() != expectedGen) {
                    // safe generation moved underneath us between step 1 and step 3
                    throw new IllegalStateException("Tried to update repo generation to [" + newGen + "] but saw unexpected generation in state [" + meta + "]");
                }
                if (meta.pendingGeneration() != newGen) {
                    // another pending write bumped the pending generation after ours
                    throw new IllegalStateException("Tried to update from unexpected pending repo generation [" + meta.pendingGeneration() + "] after write to generation [" + newGen + "]");
                }
                return updateRepositoryGenerationsIfNecessary(stateFilter.apply(ClusterState.builder(currentState).metadata(Metadata.builder(currentState.getMetadata()).putCustom(RepositoriesMetadata.TYPE, currentState.metadata().<RepositoriesMetadata>custom(RepositoriesMetadata.TYPE).withUpdatedGeneration(metadata.name(), newGen, newGen))).build()), expectedGen, newGen);
            }

            @Override
            public void onFailure(String source, Exception e) {
                listener.onFailure(new RepositoryException(metadata.name(), "Failed to execute cluster state update [" + source + "]", e));
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                cacheRepositoryData(serializedRepoData, newGen);
                threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(ActionRunnable.supply(listener, () -> {
                    // Delete all now outdated index files up to 1000 blobs back from the new generation.
                    // If there are more than 1000 dangling index-N cleanup functionality on repo delete will take care of them.
                    // Deleting one older than the current expectedGen is done for BwC reasons as older versions used to keep
                    // two index-N blobs around.
                    final List<String> oldIndexN = LongStream.range(Math.max(Math.max(expectedGen - 1, 0), newGen - 1000), newGen).mapToObj(gen -> INDEX_FILE_PREFIX + gen).collect(Collectors.toList());
                    try {
                        deleteFromContainer(blobContainer(), oldIndexN);
                    } catch (IOException e) {
                        // failing to clean up stale index-N blobs is harmless; repository delete handles leftovers
                        logger.warn(() -> new ParameterizedMessage("Failed to clean up old index blobs {}", oldIndexN), e);
                    }
                    return newRepositoryData;
                }));
            }
        });
    }, listener::onFailure);
}
Also used : Metadata(org.opensearch.cluster.metadata.Metadata) IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException) AllocationService(org.opensearch.cluster.routing.allocation.AllocationService) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) Version(org.opensearch.Version) Strings(org.opensearch.common.Strings) AbortedSnapshotException(org.opensearch.snapshots.AbortedSnapshotException) GroupedActionListener(org.opensearch.action.support.GroupedActionListener) RecoveryState(org.opensearch.indices.recovery.RecoveryState) Map(java.util.Map) Lucene(org.opensearch.common.lucene.Lucene) ActionListener(org.opensearch.action.ActionListener) IOContext(org.apache.lucene.store.IOContext) Repository(org.opensearch.repositories.Repository) TimeValue(org.opensearch.common.unit.TimeValue) ExceptionsHelper(org.opensearch.ExceptionsHelper) Set(java.util.Set) Settings(org.opensearch.common.settings.Settings) BlobStoreIndexShardSnapshot(org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot) BlockingQueue(java.util.concurrent.BlockingQueue) AbstractLifecycleComponent(org.opensearch.common.component.AbstractLifecycleComponent) Logger(org.apache.logging.log4j.Logger) RepositoryOperation(org.opensearch.repositories.RepositoryOperation) Stream(java.util.stream.Stream) ClusterStateUpdateTask(org.opensearch.cluster.ClusterStateUpdateTask) BytesArray(org.opensearch.common.bytes.BytesArray) BlobStoreIndexShardSnapshots(org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots) FsBlobContainer(org.opensearch.common.blobstore.fs.FsBlobContainer) StepListener(org.opensearch.action.StepListener) XContentType(org.opensearch.common.xcontent.XContentType) IndexCommit(org.apache.lucene.index.IndexCommit) ThreadPool(org.opensearch.threadpool.ThreadPool) BlobContainer(org.opensearch.common.blobstore.BlobContainer) Releasable(org.opensearch.common.lease.Releasable) Supplier(java.util.function.Supplier) 
ArrayList(java.util.ArrayList) ClusterState(org.opensearch.cluster.ClusterState) SnapshotMissingException(org.opensearch.snapshots.SnapshotMissingException) Numbers(org.opensearch.common.Numbers) SlicedInputStream(org.opensearch.index.snapshots.blobstore.SlicedInputStream) SnapshotException(org.opensearch.snapshots.SnapshotException) Streams(org.opensearch.common.io.Streams) CompressorFactory(org.opensearch.common.compress.CompressorFactory) RepositoryVerificationException(org.opensearch.repositories.RepositoryVerificationException) RepositoryCleanupInProgress(org.opensearch.cluster.RepositoryCleanupInProgress) InputStreamIndexInput(org.opensearch.common.lucene.store.InputStreamIndexInput) LongStream(java.util.stream.LongStream) IndexInput(org.apache.lucene.store.IndexInput) SetOnce(org.apache.lucene.util.SetOnce) RepositoriesMetadata(org.opensearch.cluster.metadata.RepositoriesMetadata) Executor(java.util.concurrent.Executor) SnapshotInfo(org.opensearch.snapshots.SnapshotInfo) RepositoryMetadata(org.opensearch.cluster.metadata.RepositoryMetadata) IOException(java.io.IOException) IndexShardSnapshotFailedException(org.opensearch.index.snapshots.IndexShardSnapshotFailedException) NotXContentException(org.opensearch.common.compress.NotXContentException) AtomicLong(java.util.concurrent.atomic.AtomicLong) RepositoryCleanupResult(org.opensearch.repositories.RepositoryCleanupResult) BlobPath(org.opensearch.common.blobstore.BlobPath) NamedXContentRegistry(org.opensearch.common.xcontent.NamedXContentRegistry) ClusterService(org.opensearch.cluster.service.ClusterService) CounterMetric(org.opensearch.common.metrics.CounterMetric) ShardGenerations(org.opensearch.repositories.ShardGenerations) NoSuchFileException(java.nio.file.NoSuchFileException) AbstractRunnable(org.opensearch.common.util.concurrent.AbstractRunnable) SnapshotCreationException(org.opensearch.snapshots.SnapshotCreationException) ByteSizeUnit(org.opensearch.common.unit.ByteSizeUnit) 
SnapshotFiles(org.opensearch.index.snapshots.blobstore.SnapshotFiles) SnapshotsService(org.opensearch.snapshots.SnapshotsService) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) ConcurrentCollections(org.opensearch.common.util.concurrent.ConcurrentCollections) XContentParser(org.opensearch.common.xcontent.XContentParser) DiscoveryNode(org.opensearch.cluster.node.DiscoveryNode) MapperService(org.opensearch.index.mapper.MapperService) IndexId(org.opensearch.repositories.IndexId) XContentFactory(org.opensearch.common.xcontent.XContentFactory) RepositoryStats(org.opensearch.repositories.RepositoryStats) BlobMetadata(org.opensearch.common.blobstore.BlobMetadata) RecoverySettings(org.opensearch.indices.recovery.RecoverySettings) RepositoryException(org.opensearch.repositories.RepositoryException) FileInfo.canonicalName(org.opensearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName) BytesRef(org.apache.lucene.util.BytesRef) SnapshotId(org.opensearch.snapshots.SnapshotId) Collection(java.util.Collection) LoggingDeprecationHandler(org.opensearch.common.xcontent.LoggingDeprecationHandler) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Store(org.opensearch.index.store.Store) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Collectors(java.util.stream.Collectors) Nullable(org.opensearch.common.Nullable) Tuple(org.opensearch.common.collect.Tuple) BlobStore(org.opensearch.common.blobstore.BlobStore) List(java.util.List) Optional(java.util.Optional) BytesReference(org.opensearch.common.bytes.BytesReference) RateLimitingInputStream(org.opensearch.index.snapshots.blobstore.RateLimitingInputStream) IndexMetadata(org.opensearch.cluster.metadata.IndexMetadata) ActionRunnable(org.opensearch.action.ActionRunnable) SnapshotsInProgress(org.opensearch.cluster.SnapshotsInProgress) ByteSizeValue(org.opensearch.common.unit.ByteSizeValue) SnapshotDeletionsInProgress(org.opensearch.cluster.SnapshotDeletionsInProgress) 
ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) FilterInputStream(java.io.FilterInputStream) IndexShardSnapshotStatus(org.opensearch.index.snapshots.IndexShardSnapshotStatus) IndexMetaDataGenerations(org.opensearch.repositories.IndexMetaDataGenerations) UUIDs(org.opensearch.common.UUIDs) StoreFileMetadata(org.opensearch.index.store.StoreFileMetadata) IndexOutput(org.apache.lucene.store.IndexOutput) IndexShardRestoreFailedException(org.opensearch.index.snapshots.IndexShardRestoreFailedException) RepositoryData(org.opensearch.repositories.RepositoryData) Setting(org.opensearch.common.settings.Setting) RepositoryShardId(org.opensearch.repositories.RepositoryShardId) IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException) ShardId(org.opensearch.index.shard.ShardId) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) DeleteResult(org.opensearch.common.blobstore.DeleteResult) LogManager(org.apache.logging.log4j.LogManager) Collections(java.util.Collections) RateLimiter(org.apache.lucene.store.RateLimiter) InputStream(java.io.InputStream) RepositoriesMetadata(org.opensearch.cluster.metadata.RepositoriesMetadata) GroupedActionListener(org.opensearch.action.support.GroupedActionListener) Version(org.opensearch.Version) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) BytesReference(org.opensearch.common.bytes.BytesReference) ClusterState(org.opensearch.cluster.ClusterState) ClusterStateUpdateTask(org.opensearch.cluster.ClusterStateUpdateTask) RepositoryException(org.opensearch.repositories.RepositoryException) IOException(java.io.IOException) IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) AbortedSnapshotException(org.opensearch.snapshots.AbortedSnapshotException) 
SnapshotMissingException(org.opensearch.snapshots.SnapshotMissingException) SnapshotException(org.opensearch.snapshots.SnapshotException) RepositoryVerificationException(org.opensearch.repositories.RepositoryVerificationException) IOException(java.io.IOException) IndexShardSnapshotFailedException(org.opensearch.index.snapshots.IndexShardSnapshotFailedException) NotXContentException(org.opensearch.common.compress.NotXContentException) NoSuchFileException(java.nio.file.NoSuchFileException) SnapshotCreationException(org.opensearch.snapshots.SnapshotCreationException) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) RepositoryException(org.opensearch.repositories.RepositoryException) IndexShardRestoreFailedException(org.opensearch.index.snapshots.IndexShardRestoreFailedException) IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException) RepositoryData(org.opensearch.repositories.RepositoryData) SnapshotId(org.opensearch.snapshots.SnapshotId) GroupedActionListener(org.opensearch.action.support.GroupedActionListener) ActionListener(org.opensearch.action.ActionListener) RepositoryMetadata(org.opensearch.cluster.metadata.RepositoryMetadata) AtomicLong(java.util.concurrent.atomic.AtomicLong) Collection(java.util.Collection) StepListener(org.opensearch.action.StepListener) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage)

Example 14 with RepositoriesMetadata

use of org.opensearch.cluster.metadata.RepositoriesMetadata in project OpenSearch by opensearch-project.

The class RepositoriesService, method applyClusterState.

/**
 * Reconciles the locally instantiated {@link Repository} instances with the
 * {@link RepositoriesMetadata} found in the new cluster state: repositories that disappeared
 * are closed and unregistered, repositories whose type or settings changed are recreated, and
 * newly appearing repositories are instantiated.
 *
 * @param event cluster changed event
 */
@Override
public void applyClusterState(ClusterChangedEvent event) {
    try {
        final ClusterState state = event.state();
        final RepositoriesMetadata previousMeta = event.previousState().getMetadata().custom(RepositoriesMetadata.TYPE);
        final RepositoriesMetadata currentMeta = state.getMetadata().custom(RepositoriesMetadata.TYPE);
        // Check if repositories got changed
        final boolean bothAbsent = previousMeta == null && currentMeta == null;
        final boolean unchanged = previousMeta != null && previousMeta.equalsIgnoreGenerations(currentMeta);
        if (bothAbsent || unchanged) {
            // repository set did not change — just propagate the new cluster state to each instance
            for (Repository existing : repositories.values()) {
                existing.updateState(state);
            }
            return;
        }
        logger.trace("processing new index repositories for state version [{}]", event.state().version());
        final Map<String, Repository> survivors = new HashMap<>();
        // First, close and unregister repositories that are no longer present in the metadata
        for (Map.Entry<String, Repository> registered : repositories.entrySet()) {
            final String name = registered.getKey();
            if (currentMeta == null || currentMeta.repository(name) == null) {
                logger.debug("unregistering repository [{}]", name);
                final Repository removed = registered.getValue();
                closeRepository(removed);
                archiveRepositoryStats(removed, state.version());
            } else {
                survivors.put(name, registered.getValue());
            }
        }
        final Map<String, Repository> updated = new HashMap<>();
        if (currentMeta != null) {
            // Then update existing repositories whose configuration changed and create missing ones
            for (RepositoryMetadata repoMeta : currentMeta.repositories()) {
                Repository repository = survivors.get(repoMeta.name());
                if (repository == null) {
                    try {
                        repository = createRepository(repoMeta, typesRegistry);
                    } catch (RepositoryException ex) {
                        logger.warn(() -> new ParameterizedMessage("failed to create repository [{}]", repoMeta.name()), ex);
                    }
                } else {
                    final RepositoryMetadata existingMeta = repository.getMetadata();
                    final boolean sameType = existingMeta.type().equals(repoMeta.type());
                    final boolean sameSettings = existingMeta.settings().equals(repoMeta.settings());
                    if (sameType == false || sameSettings == false) {
                        // configuration changed: close the old instance and build a fresh one
                        logger.debug("updating repository [{}]", repoMeta.name());
                        closeRepository(repository);
                        archiveRepositoryStats(repository, state.version());
                        repository = null;
                        try {
                            repository = createRepository(repoMeta, typesRegistry);
                        } catch (RepositoryException ex) {
                            // TODO: this catch is bogus, it means the old repo is already closed,
                            // but we have nothing to replace it
                            logger.warn(() -> new ParameterizedMessage("failed to change repository [{}]", repoMeta.name()), ex);
                        }
                    }
                }
                if (repository != null) {
                    logger.debug("registering repository [{}]", repoMeta.name());
                    updated.put(repoMeta.name(), repository);
                }
            }
        }
        for (Repository registered : updated.values()) {
            registered.updateState(state);
        }
        repositories = Collections.unmodifiableMap(updated);
    } catch (Exception ex) {
        assert false : new AssertionError(ex);
        logger.warn("failure updating cluster state ", ex);
    }
}
Also used : ClusterState(org.opensearch.cluster.ClusterState) HashMap(java.util.HashMap) IOException(java.io.IOException) RepositoriesMetadata(org.opensearch.cluster.metadata.RepositoriesMetadata) MeteredBlobStoreRepository(org.opensearch.repositories.blobstore.MeteredBlobStoreRepository) RepositoryMetadata(org.opensearch.cluster.metadata.RepositoryMetadata) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) HashMap(java.util.HashMap) Map(java.util.Map)

Example 15 with RepositoriesMetadata

use of org.opensearch.cluster.metadata.RepositoriesMetadata in project OpenSearch by opensearch-project.

The class SnapshotDisruptionIT, method testDisruptionAfterFinalization.

/**
 * Verifies snapshot behavior when the cluster-manager node is isolated from the cluster right
 * after the snapshot has been finalized in the repository (pending generation advanced past the
 * snapshot's repositoryStateId) but before the final cluster state update completes: the
 * snapshot request sent to the isolated cluster-manager must fail, while the snapshot itself
 * either completed successfully or was cleaned up by the remaining cluster.
 */
public void testDisruptionAfterFinalization() throws Exception {
    final String idxName = "test";
    // three cluster-manager-eligible nodes so a new cluster-manager can be elected once one is isolated
    internalCluster().startMasterOnlyNodes(3);
    final String dataNode = internalCluster().startDataOnlyNode();
    ensureStableCluster(4);
    createRandomIndex(idxName);
    createRepository("test-repo", "fs");
    final String clusterManagerNode1 = internalCluster().getMasterName();
    NetworkDisruption networkDisruption = isolateClusterManagerDisruption(NetworkDisruption.UNRESPONSIVE);
    internalCluster().setDisruptionScheme(networkDisruption);
    ClusterService clusterService = internalCluster().clusterService(clusterManagerNode1);
    CountDownLatch disruptionStarted = new CountDownLatch(1);
    // Watch cluster state updates on the current cluster-manager and start the disruption as soon
    // as the repository's pending generation shows the snapshot was finalized, while the
    // in-progress entry is still SUCCESS (i.e. before the finishing state update lands).
    clusterService.addListener(new ClusterStateListener() {

        @Override
        public void clusterChanged(ClusterChangedEvent event) {
            SnapshotsInProgress snapshots = event.state().custom(SnapshotsInProgress.TYPE);
            if (snapshots != null && snapshots.entries().size() > 0) {
                final SnapshotsInProgress.Entry snapshotEntry = snapshots.entries().get(0);
                if (snapshotEntry.state() == SnapshotsInProgress.State.SUCCESS) {
                    final RepositoriesMetadata repoMeta = event.state().metadata().custom(RepositoriesMetadata.TYPE);
                    final RepositoryMetadata metadata = repoMeta.repository("test-repo");
                    if (metadata.pendingGeneration() > snapshotEntry.repositoryStateId()) {
                        logger.info("--> starting disruption");
                        networkDisruption.startDisrupting();
                        // only trigger the disruption once
                        clusterService.removeListener(this);
                        disruptionStarted.countDown();
                    }
                }
            }
        }
    });
    final String snapshot = "test-snap";
    logger.info("--> starting snapshot");
    ActionFuture<CreateSnapshotResponse> future = client(clusterManagerNode1).admin().cluster().prepareCreateSnapshot("test-repo", snapshot).setWaitForCompletion(true).setIndices(idxName).execute();
    logger.info("--> waiting for disruption to start");
    assertTrue(disruptionStarted.await(1, TimeUnit.MINUTES));
    awaitNoMoreRunningOperations(dataNode);
    logger.info("--> verify that snapshot was successful or no longer exist");
    // Depending on timing the snapshot either completed or was removed during failover cleanup;
    // both outcomes are acceptable here.
    assertBusy(() -> {
        try {
            assertSnapshotExists("test-repo", snapshot);
        } catch (SnapshotMissingException exception) {
            logger.info("--> done verifying, snapshot doesn't exist");
        }
    }, 1, TimeUnit.MINUTES);
    logger.info("--> stopping disrupting");
    networkDisruption.stopDisrupting();
    ensureStableCluster(4, clusterManagerNode1);
    logger.info("--> done");
    try {
        // The request was sent to the isolated cluster-manager, so it must fail even if the
        // snapshot itself ended up successful on the remaining cluster.
        future.get();
        fail("Should have failed because the node disconnected from the cluster during snapshot finalization");
    } catch (Exception ex) {
        final SnapshotException sne = (SnapshotException) ExceptionsHelper.unwrap(ex, SnapshotException.class);
        assertNotNull(sne);
        assertThat(sne.getMessage(), either(endsWith(" Failed to update cluster state during snapshot finalization")).or(endsWith(" no longer cluster-manager")));
        assertThat(sne.getSnapshotName(), is(snapshot));
    }
    awaitNoMoreRunningOperations(dataNode);
}
Also used : ClusterChangedEvent(org.opensearch.cluster.ClusterChangedEvent) CountDownLatch(java.util.concurrent.CountDownLatch) SnapshotMissingException(org.opensearch.snapshots.SnapshotMissingException) SnapshotException(org.opensearch.snapshots.SnapshotException) SnapshotException(org.opensearch.snapshots.SnapshotException) ClusterStateListener(org.opensearch.cluster.ClusterStateListener) RepositoriesMetadata(org.opensearch.cluster.metadata.RepositoriesMetadata) ClusterService(org.opensearch.cluster.service.ClusterService) CreateSnapshotResponse(org.opensearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse) SnapshotMissingException(org.opensearch.snapshots.SnapshotMissingException) RepositoryMetadata(org.opensearch.cluster.metadata.RepositoryMetadata) SnapshotsInProgress(org.opensearch.cluster.SnapshotsInProgress) NetworkDisruption(org.opensearch.test.disruption.NetworkDisruption)

Aggregations

RepositoriesMetadata (org.opensearch.cluster.metadata.RepositoriesMetadata)16 RepositoryMetadata (org.opensearch.cluster.metadata.RepositoryMetadata)12 ArrayList (java.util.ArrayList)8 ClusterState (org.opensearch.cluster.ClusterState)5 Metadata (org.opensearch.cluster.metadata.Metadata)5 IOException (java.io.IOException)4 Map (java.util.Map)3 ParameterizedMessage (org.apache.logging.log4j.message.ParameterizedMessage)3 SnapshotsInProgress (org.opensearch.cluster.SnapshotsInProgress)3 Path (java.nio.file.Path)2 Collection (java.util.Collection)2 Collections (java.util.Collections)2 HashMap (java.util.HashMap)2 List (java.util.List)2 Set (java.util.Set)2 Collectors (java.util.stream.Collectors)2 Stream (java.util.stream.Stream)2 LogManager (org.apache.logging.log4j.LogManager)2 Matchers.containsString (org.hamcrest.Matchers.containsString)2 ClusterService (org.opensearch.cluster.service.ClusterService)2