Example 41 with Tuple

Use of org.opensearch.common.collect.Tuple in project OpenSearch by opensearch-project.

From the class TranslogDeletionPolicyTests, method createReadersAndWriter.

private Tuple<List<TranslogReader>, TranslogWriter> createReadersAndWriter(final long now) throws IOException {
    final Path tempDir = createTempDir();
    Files.createFile(tempDir.resolve(Translog.CHECKPOINT_FILE_NAME));
    TranslogWriter writer = null;
    List<TranslogReader> readers = new ArrayList<>();
    final int numberOfReaders = randomIntBetween(0, 10);
    final String translogUUID = UUIDs.randomBase64UUID(random());
    for (long gen = 1; gen <= numberOfReaders + 1; gen++) {
        if (writer != null) {
            final TranslogReader reader = Mockito.spy(writer.closeIntoReader());
            Mockito.doReturn(writer.getLastModifiedTime()).when(reader).getLastModifiedTime();
            readers.add(reader);
        }
        writer = TranslogWriter.create(new ShardId("index", "uuid", 0), translogUUID, gen, tempDir.resolve(Translog.getFilename(gen)),
            FileChannel::open, TranslogConfig.DEFAULT_BUFFER_SIZE, 1L, 1L, () -> 1L, () -> 1L, randomNonNegativeLong(),
            new TragicExceptionHolder(), seqNo -> {}, BigArrays.NON_RECYCLING_INSTANCE);
        writer = Mockito.spy(writer);
        Mockito.doReturn(now - (numberOfReaders - gen + 1) * 1000).when(writer).getLastModifiedTime();
        BytesStreamOutput out = new BytesStreamOutput(4);
        final long startSeqNo = (gen - 1) * TOTAL_OPS_IN_GEN;
        final long endSeqNo = startSeqNo + TOTAL_OPS_IN_GEN - 1;
        for (long ops = endSeqNo; ops >= startSeqNo; ops--) {
            out.reset();
            out.writeInt((int) ops);
            writer.add(ReleasableBytesReference.wrap(out.bytes()), ops);
        }
    }
    return new Tuple<>(readers, writer);
}
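
For context, the two halves of the returned Tuple are read back with v1() and v2(). Below is a minimal, hypothetical sibling method in the same test class showing how a caller might unpack and clean up the pair; the method name and the cleanup step are illustrative, not part of the OpenSearch test:

private void useReadersAndWriter(final long now) throws IOException {
    final Tuple<List<TranslogReader>, TranslogWriter> readersAndWriter = createReadersAndWriter(now);
    // v1() holds the closed generations, v2() the still-open writer
    final List<TranslogReader> readers = readersAndWriter.v1();
    final TranslogWriter writer = readersAndWriter.v2();
    try {
        // ... exercise a TranslogDeletionPolicy against readers and writer here ...
    } finally {
        IOUtils.close(readers);
        IOUtils.close(writer);
    }
}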

Example 42 with Tuple

Use of org.opensearch.common.collect.Tuple in project OpenSearch by opensearch-project.

From the class SnapshotsService, method beginSnapshot.

/**
 * Starts the snapshot.
 * <p>
 * Creates the snapshot in the repository and updates the snapshot metadata record with the list of shards that need to be processed.
 * Note: This method is only used in clusters that contain a node older than {@link #NO_REPO_INITIALIZE_VERSION} to ensure a backwards
 * compatible path for initializing the snapshot in the repository is executed.
 *
 * @param clusterState               cluster state
 * @param snapshot                   snapshot metadata
 * @param partial                    allow partial snapshots
 * @param indices                    names of the indices included in the snapshot
 * @param repository                 repository to write the snapshot to
 * @param userCreateSnapshotListener listener notified once the snapshot has been accepted for processing
 */
private void beginSnapshot(final ClusterState clusterState, final SnapshotsInProgress.Entry snapshot, final boolean partial, final List<String> indices, final Repository repository, final ActionListener<Snapshot> userCreateSnapshotListener) {
    threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(new AbstractRunnable() {

        boolean hadAbortedInitializations;

        @Override
        protected void doRun() {
            assert initializingSnapshots.contains(snapshot.snapshot());
            if (repository.isReadOnly()) {
                throw new RepositoryException(repository.getMetadata().name(), "cannot create snapshot in a readonly repository");
            }
            final String snapshotName = snapshot.snapshot().getSnapshotId().getName();
            final StepListener<RepositoryData> repositoryDataListener = new StepListener<>();
            repository.getRepositoryData(repositoryDataListener);
            repositoryDataListener.whenComplete(repositoryData -> {
                // check if the snapshot name already exists in the repository
                if (repositoryData.getSnapshotIds().stream().anyMatch(s -> s.getName().equals(snapshotName))) {
                    throw new InvalidSnapshotNameException(repository.getMetadata().name(), snapshotName, "snapshot with the same name already exists");
                }
                if (clusterState.nodes().getMinNodeVersion().onOrAfter(NO_REPO_INITIALIZE_VERSION) == false) {
                    // In mixed version clusters we initialize the snapshot in the repository so that in case of a master failover to an
                    // older version master node snapshot finalization (that assumes initializeSnapshot was called) produces a valid
                    // snapshot.
                    repository.initializeSnapshot(snapshot.snapshot().getSnapshotId(), snapshot.indices(), metadataForSnapshot(snapshot, clusterState.metadata()));
                }
                logger.info("snapshot [{}] started", snapshot.snapshot());
                final Version version = minCompatibleVersion(clusterState.nodes().getMinNodeVersion(), repositoryData, null);
                if (indices.isEmpty()) {
                    // No indices in this snapshot - we are done
                    userCreateSnapshotListener.onResponse(snapshot.snapshot());
                    endSnapshot(SnapshotsInProgress.startedEntry(snapshot.snapshot(), snapshot.includeGlobalState(), snapshot.partial(), Collections.emptyList(), Collections.emptyList(), threadPool.absoluteTimeInMillis(), repositoryData.getGenId(), ImmutableOpenMap.of(), snapshot.userMetadata(), version), clusterState.metadata(), repositoryData);
                    return;
                }
                clusterService.submitStateUpdateTask("update_snapshot [" + snapshot.snapshot() + "]", new ClusterStateUpdateTask() {

                    @Override
                    public ClusterState execute(ClusterState currentState) {
                        SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE);
                        List<SnapshotsInProgress.Entry> entries = new ArrayList<>();
                        for (SnapshotsInProgress.Entry entry : snapshots.entries()) {
                            if (entry.snapshot().equals(snapshot.snapshot()) == false) {
                                entries.add(entry);
                                continue;
                            }
                            if (entry.state() == State.ABORTED) {
                                entries.add(entry);
                                assert entry.shards().isEmpty();
                                hadAbortedInitializations = true;
                            } else {
                                final List<IndexId> indexIds = repositoryData.resolveNewIndices(indices, Collections.emptyMap());
                                // Replace the snapshot that was just initialized
                                ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards = shards(snapshots, currentState.custom(SnapshotDeletionsInProgress.TYPE, SnapshotDeletionsInProgress.EMPTY), currentState.metadata(), currentState.routingTable(), indexIds, useShardGenerations(version), repositoryData, entry.repository());
                                if (!partial) {
                                    Tuple<Set<String>, Set<String>> indicesWithMissingShards = indicesWithMissingShards(shards, currentState.metadata());
                                    Set<String> missing = indicesWithMissingShards.v1();
                                    Set<String> closed = indicesWithMissingShards.v2();
                                    if (missing.isEmpty() == false || closed.isEmpty() == false) {
                                        final StringBuilder failureMessage = new StringBuilder();
                                        if (missing.isEmpty() == false) {
                                            failureMessage.append("Indices don't have primary shards ");
                                            failureMessage.append(missing);
                                        }
                                        if (closed.isEmpty() == false) {
                                            if (failureMessage.length() > 0) {
                                                failureMessage.append("; ");
                                            }
                                            failureMessage.append("Indices are closed ");
                                            failureMessage.append(closed);
                                        }
                                        entries.add(new SnapshotsInProgress.Entry(entry, State.FAILED, indexIds, repositoryData.getGenId(), shards, version, failureMessage.toString()));
                                        continue;
                                    }
                                }
                                entries.add(new SnapshotsInProgress.Entry(entry, State.STARTED, indexIds, repositoryData.getGenId(), shards, version, null));
                            }
                        }
                        return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, SnapshotsInProgress.of(unmodifiableList(entries))).build();
                    }

                    @Override
                    public void onFailure(String source, Exception e) {
                        logger.warn(() -> new ParameterizedMessage("[{}] failed to create snapshot", snapshot.snapshot().getSnapshotId()), e);
                        removeFailedSnapshotFromClusterState(snapshot.snapshot(), e, null, new CleanupAfterErrorListener(userCreateSnapshotListener, e));
                    }

                    @Override
                    public void onNoLongerMaster(String source) {
                        // We are no longer the master - we shouldn't try to do any cleanup
                        // The new master will take care of it
                        logger.warn("[{}] failed to create snapshot - no longer a master", snapshot.snapshot().getSnapshotId());
                        userCreateSnapshotListener.onFailure(new SnapshotException(snapshot.snapshot(), "master changed during snapshot initialization"));
                    }

                    @Override
                    public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                        // The userCreateSnapshotListener.onResponse() notifies caller that the snapshot was accepted
                        // for processing. If client wants to wait for the snapshot completion, it can register snapshot
                        // completion listener in this method. For the snapshot completion to work properly, the snapshot
                        // should still exist when listener is registered.
                        userCreateSnapshotListener.onResponse(snapshot.snapshot());
                        if (hadAbortedInitializations) {
                            final SnapshotsInProgress snapshotsInProgress = newState.custom(SnapshotsInProgress.TYPE);
                            assert snapshotsInProgress != null;
                            final SnapshotsInProgress.Entry entry = snapshotsInProgress.snapshot(snapshot.snapshot());
                            assert entry != null;
                            endSnapshot(entry, newState.metadata(), repositoryData);
                        } else {
                            endCompletedSnapshots(newState);
                        }
                    }
                });
            }, this::onFailure);
        }

        @Override
        public void onFailure(Exception e) {
            logger.warn(() -> new ParameterizedMessage("failed to create snapshot [{}]", snapshot.snapshot().getSnapshotId()), e);
            removeFailedSnapshotFromClusterState(snapshot.snapshot(), e, null, new CleanupAfterErrorListener(userCreateSnapshotListener, e));
        }
    });
}
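
The Tuple in this method is the pair returned by indicesWithMissingShards: v1() holds the indices without primary shards and v2() the closed indices. The sketch below is a hedged, simplified mirror of the message-building logic above, pulled out as a standalone helper; the name describeMissingShards is illustrative and not part of SnapshotsService:

private static String describeMissingShards(Tuple<Set<String>, Set<String>> missingAndClosed) {
    final StringBuilder failureMessage = new StringBuilder();
    if (missingAndClosed.v1().isEmpty() == false) {
        failureMessage.append("Indices don't have primary shards ").append(missingAndClosed.v1());
    }
    if (missingAndClosed.v2().isEmpty() == false) {
        if (failureMessage.length() > 0) {
            failureMessage.append("; ");
        }
        failureMessage.append("Indices are closed ").append(missingAndClosed.v2());
    }
    return failureMessage.toString();
}

For instance, passing Tuple.tuple(Collections.singleton("logs-1"), Collections.singleton("logs-2")) produces both clauses joined by "; ".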

Example 43 with Tuple

Use of org.opensearch.common.collect.Tuple in project OpenSearch by opensearch-project.

From the class SnapshotsService, method startCloning.

/**
 * Determine the number of shards in each index of a clone operation and update the cluster state accordingly.
 *
 * @param repository     repository to run the operation on
 * @param cloneEntry     clone operation in the cluster state
 */
private void startCloning(Repository repository, SnapshotsInProgress.Entry cloneEntry) {
    final List<IndexId> indices = cloneEntry.indices();
    final SnapshotId sourceSnapshot = cloneEntry.source();
    final Snapshot targetSnapshot = cloneEntry.snapshot();
    final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
    // Exception handler for IO exceptions when loading index and repository metadata
    final Consumer<Exception> onFailure = e -> {
        initializingClones.remove(targetSnapshot);
        logger.info(() -> new ParameterizedMessage("Failed to start snapshot clone [{}]", cloneEntry), e);
        removeFailedSnapshotFromClusterState(targetSnapshot, e, null, null);
    };
    // Step 1: load SnapshotInfo to make sure that the source snapshot was successful for the indices we want to clone
    // TODO: we could skip this step for snapshots with state SUCCESS
    final StepListener<SnapshotInfo> snapshotInfoListener = new StepListener<>();
    executor.execute(ActionRunnable.supply(snapshotInfoListener, () -> repository.getSnapshotInfo(sourceSnapshot)));
    final StepListener<Collection<Tuple<IndexId, Integer>>> allShardCountsListener = new StepListener<>();
    final GroupedActionListener<Tuple<IndexId, Integer>> shardCountListener = new GroupedActionListener<>(allShardCountsListener, indices.size());
    snapshotInfoListener.whenComplete(snapshotInfo -> {
        for (IndexId indexId : indices) {
            if (RestoreService.failed(snapshotInfo, indexId.getName())) {
                throw new SnapshotException(targetSnapshot, "Can't clone index [" + indexId + "] because its snapshot was not successful.");
            }
        }
        // Step 2: load the number of shards in each index to be cloned from the index metadata.
        repository.getRepositoryData(ActionListener.wrap(repositoryData -> {
            for (IndexId index : indices) {
                executor.execute(ActionRunnable.supply(shardCountListener, () -> {
                    final IndexMetadata metadata = repository.getSnapshotIndexMetaData(repositoryData, sourceSnapshot, index);
                    return Tuple.tuple(index, metadata.getNumberOfShards());
                }));
            }
        }, onFailure));
    }, onFailure);
    // Step 3: we have all the shard counts, now update the cluster state to add the clone jobs to the snapshot entry
    allShardCountsListener.whenComplete(counts -> repository.executeConsistentStateUpdate(repoData -> new ClusterStateUpdateTask() {

        private SnapshotsInProgress.Entry updatedEntry;

        @Override
        public ClusterState execute(ClusterState currentState) {
            final SnapshotsInProgress snapshotsInProgress = currentState.custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY);
            final List<SnapshotsInProgress.Entry> updatedEntries = new ArrayList<>(snapshotsInProgress.entries());
            boolean changed = false;
            final String localNodeId = currentState.nodes().getLocalNodeId();
            final String repoName = cloneEntry.repository();
            final ShardGenerations shardGenerations = repoData.shardGenerations();
            for (int i = 0; i < updatedEntries.size(); i++) {
                if (cloneEntry.snapshot().equals(updatedEntries.get(i).snapshot())) {
                    final ImmutableOpenMap.Builder<RepositoryShardId, ShardSnapshotStatus> clonesBuilder = ImmutableOpenMap.builder();
                    final InFlightShardSnapshotStates inFlightShardStates = InFlightShardSnapshotStates.forRepo(repoName, snapshotsInProgress.entries());
                    for (Tuple<IndexId, Integer> count : counts) {
                        for (int shardId = 0; shardId < count.v2(); shardId++) {
                            final RepositoryShardId repoShardId = new RepositoryShardId(count.v1(), shardId);
                            final String indexName = repoShardId.indexName();
                            if (inFlightShardStates.isActive(indexName, shardId)) {
                                clonesBuilder.put(repoShardId, ShardSnapshotStatus.UNASSIGNED_QUEUED);
                            } else {
                                clonesBuilder.put(repoShardId, new ShardSnapshotStatus(localNodeId, inFlightShardStates.generationForShard(repoShardId.index(), shardId, shardGenerations)));
                            }
                        }
                    }
                    updatedEntry = cloneEntry.withClones(clonesBuilder.build());
                    updatedEntries.set(i, updatedEntry);
                    changed = true;
                    break;
                }
            }
            return updateWithSnapshots(currentState, changed ? SnapshotsInProgress.of(updatedEntries) : null, null);
        }

        @Override
        public void onFailure(String source, Exception e) {
            initializingClones.remove(targetSnapshot);
            logger.info(() -> new ParameterizedMessage("Failed to start snapshot clone [{}]", cloneEntry), e);
            failAllListenersOnMasterFailOver(e);
        }

        @Override
        public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
            initializingClones.remove(targetSnapshot);
            if (updatedEntry != null) {
                final Snapshot target = updatedEntry.snapshot();
                final SnapshotId sourceSnapshot = updatedEntry.source();
                for (ObjectObjectCursor<RepositoryShardId, ShardSnapshotStatus> indexClone : updatedEntry.clones()) {
                    final ShardSnapshotStatus shardStatusBefore = indexClone.value;
                    if (shardStatusBefore.state() != ShardState.INIT) {
                        continue;
                    }
                    final RepositoryShardId repoShardId = indexClone.key;
                    runReadyClone(target, sourceSnapshot, shardStatusBefore, repoShardId, repository);
                }
            } else {
                // Extremely unlikely corner case of the master failing over between starting the clone and
                // starting the shard clones.
                logger.warn("Did not find expected entry [{}] in the cluster state", cloneEntry);
            }
        }
    }, "start snapshot clone", onFailure), onFailure);
}
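
Step 2 above fans out one SNAPSHOT-pool task per index and gathers the resulting (IndexId, shard count) pairs through a GroupedActionListener that fires once indices.size() results have arrived. The sketch below shows that collect-N-pairs pattern in isolation, reusing executor, indices and logger from the method above; resolveShardCount is a hypothetical stand-in for the repository metadata lookup:

final StepListener<Collection<Tuple<IndexId, Integer>>> allShardCounts = new StepListener<>();
final GroupedActionListener<Tuple<IndexId, Integer>> perIndexListener = new GroupedActionListener<>(allShardCounts, indices.size());
for (IndexId index : indices) {
    // each task supplies exactly one (index, shardCount) pair
    executor.execute(ActionRunnable.supply(perIndexListener, () -> Tuple.tuple(index, resolveShardCount(index))));
}
allShardCounts.whenComplete(
    counts -> counts.forEach(count -> logger.info("index [{}] has [{}] shards", count.v1(), count.v2())),
    e -> logger.warn("failed to resolve shard counts", e)
);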

Example 44 with Tuple

Use of org.opensearch.common.collect.Tuple in project OpenSearch by opensearch-project.

From the class GetDataStreamsRequestTests, method testGetDataStreamsWithWildcards.

public void testGetDataStreamsWithWildcards() {
    final String[] dataStreamNames = { "my-data-stream", "another-data-stream" };
    ClusterState cs = getClusterStateWithDataStreams(org.opensearch.common.collect.List.of(new Tuple<>(dataStreamNames[0], 1), new Tuple<>(dataStreamNames[1], 1)), org.opensearch.common.collect.List.of());
    GetDataStreamAction.Request req = new GetDataStreamAction.Request(new String[] { dataStreamNames[1].substring(0, 5) + "*" });
    List<DataStream> dataStreams = GetDataStreamAction.TransportAction.getDataStreams(cs, new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)), req);
    assertThat(dataStreams.size(), equalTo(1));
    assertThat(dataStreams.get(0).getName(), equalTo(dataStreamNames[1]));
    req = new GetDataStreamAction.Request(new String[] { "*" });
    dataStreams = GetDataStreamAction.TransportAction.getDataStreams(cs, new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)), req);
    assertThat(dataStreams.size(), equalTo(2));
    assertThat(dataStreams.get(0).getName(), equalTo(dataStreamNames[1]));
    assertThat(dataStreams.get(1).getName(), equalTo(dataStreamNames[0]));
    req = new GetDataStreamAction.Request((String[]) null);
    dataStreams = GetDataStreamAction.TransportAction.getDataStreams(cs, new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)), req);
    assertThat(dataStreams.size(), equalTo(2));
    assertThat(dataStreams.get(0).getName(), equalTo(dataStreamNames[1]));
    assertThat(dataStreams.get(1).getName(), equalTo(dataStreamNames[0]));
    req = new GetDataStreamAction.Request(new String[] { "matches-none*" });
    dataStreams = GetDataStreamAction.TransportAction.getDataStreams(cs, new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)), req);
    assertThat(dataStreams.size(), equalTo(0));
}
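
Each Tuple in this test pairs a data stream name with the number of its backing indices, and getClusterStateWithDataStreams turns that list into a cluster state fixture. Below is a minimal hedged sketch of declaring such a fixture with the same helper; the data stream and index names are illustrative:

List<Tuple<String, Integer>> dataStreamSpecs = org.opensearch.common.collect.List.of(
    // (data stream name, number of backing indices)
    new Tuple<>("logs-app", 1),
    new Tuple<>("metrics-app", 3)
);
ClusterState fixture = getClusterStateWithDataStreams(dataStreamSpecs, org.opensearch.common.collect.List.of("plain-index"));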

Example 45 with Tuple

Use of org.opensearch.common.collect.Tuple in project OpenSearch by opensearch-project.

From the class PutMappingRequestTests, method testResolveIndicesWithWriteIndexOnlyAndDataStreamAndIndex.

public void testResolveIndicesWithWriteIndexOnlyAndDataStreamAndIndex() {
    String[] dataStreamNames = { "foo", "bar", "baz" };
    List<Tuple<String, Integer>> dsMetadata = org.opensearch.common.collect.List.of(tuple(dataStreamNames[0], randomIntBetween(1, 3)), tuple(dataStreamNames[1], randomIntBetween(1, 3)), tuple(dataStreamNames[2], randomIntBetween(1, 3)));
    ClusterState cs = DeleteDataStreamRequestTests.getClusterStateWithDataStreams(dsMetadata, org.opensearch.common.collect.List.of("index1", "index2", "index3"));
    cs = addAliases(cs, org.opensearch.common.collect.List.of(tuple("alias1", org.opensearch.common.collect.List.of(tuple("index1", false), tuple("index2", true))), tuple("alias2", org.opensearch.common.collect.List.of(tuple("index2", false), tuple("index3", true)))));
    PutMappingRequest request = new PutMappingRequest().indices("foo", "index3").writeIndexOnly(true);
    Index[] indices = TransportPutMappingAction.resolveIndices(cs, request, new IndexNameExpressionResolver(new ThreadContext(Settings.EMPTY)));
    List<String> indexNames = Arrays.stream(indices).map(Index::getName).collect(Collectors.toList());
    IndexAbstraction expectedDs = cs.metadata().getIndicesLookup().get("foo");
    List<String> expectedIndices = expectedDs.getIndices().stream().map(im -> im.getIndex().getName()).collect(Collectors.toList());
    expectedIndices.addAll(org.opensearch.common.collect.List.of("index1", "index2", "index3"));
    // with writeIndexOnly set, the data stream "foo" should resolve to its write index only, while the concrete index "index3" is kept as-is
    assertThat(indexNames, containsInAnyOrder(expectedDs.getWriteIndex().getIndex().getName(), "index3"));
}
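
Note the nested tuples passed to addAliases above: each outer Tuple pairs an alias name with a list of (index name, is-write-index) pairs. A hedged sketch of walking such a structure follows; the method name printAliases is illustrative:

private static void printAliases(List<Tuple<String, List<Tuple<String, Boolean>>>> aliases) {
    for (Tuple<String, List<Tuple<String, Boolean>>> alias : aliases) {
        for (Tuple<String, Boolean> indexAndWriteFlag : alias.v2()) {
            // v1() is the index name, v2() is true for the alias's write index
            System.out.println(alias.v1() + " -> " + indexAndWriteFlag.v1() + (indexAndWriteFlag.v2() ? " (write index)" : ""));
        }
    }
}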

Aggregations

Tuple (org.opensearch.common.collect.Tuple) 151
ArrayList (java.util.ArrayList) 65
List (java.util.List) 49
IOException (java.io.IOException) 45
Collections (java.util.Collections) 44
HashMap (java.util.HashMap) 40
Map (java.util.Map) 40
Settings (org.opensearch.common.settings.Settings) 38
ClusterState (org.opensearch.cluster.ClusterState) 34
HashSet (java.util.HashSet) 28
ShardId (org.opensearch.index.shard.ShardId) 28
Arrays (java.util.Arrays) 27
Collectors (java.util.stream.Collectors) 26
Set (java.util.Set) 25
Index (org.opensearch.index.Index) 25
BytesReference (org.opensearch.common.bytes.BytesReference) 24
OpenSearchTestCase (org.opensearch.test.OpenSearchTestCase) 24
CountDownLatch (java.util.concurrent.CountDownLatch) 22
Version (org.opensearch.Version) 21
Strings (org.opensearch.common.Strings) 21