Example 1 with GroupedActionListener

use of org.elasticsearch.action.support.GroupedActionListener in project crate by crate.

In the class SysSnapshots, the method currentSnapshots:

public CompletableFuture<Iterable<SysSnapshot>> currentSnapshots() {
    ArrayList<CompletableFuture<Collection<SysSnapshot>>> futureActionListeners = new ArrayList<>();
    for (Repository repository : getRepositories.get()) {
        var future = new CompletableFuture<Collection<SysSnapshot>>();
        ActionListener<RepositoryData> listener = ActionListener.wrap(repositoryData -> {
            Collection<SnapshotId> snapshotIds = repositoryData.getSnapshotIds();
            if (snapshotIds.isEmpty()) {
                future.complete(List.of());
                return;
            }
            GroupedActionListener<SysSnapshot> snapshotListener = new GroupedActionListener<>(ActionListener.wrap(future::complete, future::completeExceptionally), snapshotIds.size());
            for (SnapshotId snapshotId : snapshotIds) {
                createSysSnapshot(repository, snapshotId, snapshotListener);
            }
        }, future::completeExceptionally);
        try {
            repository.getRepositoryData(listener);
        } catch (Exception ex) {
            if (LOGGER.isDebugEnabled()) {
                LOGGER.debug("Couldn't load repository data. repository={} error={}", repository, ex);
            }
            // ignore - `sys.snapshots` shouldn't fail because of an illegal repository definition
            continue;
        }
        futureActionListeners.add(future);
    }
    return CompletableFutures.allAsList(futureActionListeners).thenApply(data -> {
        ArrayList<SysSnapshot> result = new ArrayList<>();
        for (Collection<SysSnapshot> datum : data) {
            result.addAll(datum);
        }
        return result;
    });
}
Also used : ArrayList(java.util.ArrayList) SnapshotException(org.elasticsearch.snapshots.SnapshotException) RepositoryData(org.elasticsearch.repositories.RepositoryData) SnapshotId(org.elasticsearch.snapshots.SnapshotId) CompletableFuture(java.util.concurrent.CompletableFuture) Repository(org.elasticsearch.repositories.Repository) GroupedActionListener(org.elasticsearch.action.support.GroupedActionListener)
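
The method above fans out one asynchronous RepositoryData lookup per repository and bridges each GroupedActionListener into a CompletableFuture before flattening all results. Below is a minimal standalone sketch of that bridge, not taken from the crate sources: fetchAsync is a hypothetical stand-in for the per-item asynchronous call, while ActionListener and GroupedActionListener are the real Elasticsearch classes used in the example.

import java.util.Collection;
import java.util.List;
import java.util.concurrent.CompletableFuture;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.GroupedActionListener;

public class GroupedListenerSketch {

    // Hypothetical async lookup that reports its result through an ActionListener.
    static void fetchAsync(int id, ActionListener<String> listener) {
        CompletableFuture.runAsync(() -> listener.onResponse("item-" + id));
    }

    // Fan out one fetch per id and complete a single future once all of them report.
    public static CompletableFuture<Collection<String>> fetchAll(List<Integer> ids) {
        CompletableFuture<Collection<String>> future = new CompletableFuture<>();
        if (ids.isEmpty()) {
            future.complete(List.of());
            return future;
        }
        // The delegate fires exactly once, after ids.size() callbacks have arrived;
        // if any of them failed, the future is completed exceptionally instead.
        GroupedActionListener<String> grouped = new GroupedActionListener<>(
            ActionListener.wrap(future::complete, future::completeExceptionally),
            ids.size());
        for (int id : ids) {
            fetchAsync(id, grouped);
        }
        return future;
    }
}

This mirrors how currentSnapshots() collects one Collection<SysSnapshot> per repository and then concatenates the per-repository collections.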

Example 2 with GroupedActionListener

use of org.elasticsearch.action.support.GroupedActionListener in project crate by crate.

In the class BlobStoreRepository, the method doDeleteShardSnapshots:

/**
 * After updating the {@link RepositoryData}, each shard directory is individually first moved to the next shard generation
 * and then has all of its now unreferenced blobs deleted.
 *
 * @param snapshotId        SnapshotId to delete
 * @param repositoryStateId Expected repository state id
 * @param foundIndices      All indices folders found in the repository before executing any writes to the repository during this
 *                          delete operation
 * @param rootBlobs         All blobs found at the root of the repository before executing any writes to the repository during this
 *                          delete operation
 * @param repositoryData    RepositoryData found in the repository before executing this delete
 * @param listener          Listener to invoke once finished
 */
private void doDeleteShardSnapshots(SnapshotId snapshotId, long repositoryStateId, Map<String, BlobContainer> foundIndices, Map<String, BlobMetadata> rootBlobs, RepositoryData repositoryData, boolean writeShardGens, ActionListener<Void> listener) {
    if (writeShardGens) {
        // First write the new shard state metadata (with the removed snapshot) and compute deletion targets
        final StepListener<Collection<ShardSnapshotMetaDeleteResult>> writeShardMetadataAndComputeDeletesStep = new StepListener<>();
        writeUpdatedShardMetadataAndComputeDeletes(snapshotId, repositoryData, true, writeShardMetadataAndComputeDeletesStep);
        // Once we have put the new shard-level metadata into place, we can update the repository metadata as follows:
        // 1. Remove the snapshot from the list of existing snapshots
        // 2. Update the index shard generations of all updated shard folders
        // 
        // Note: If we fail updating any of the individual shard paths, none of them are changed since the newly created
        // index-${gen_uuid} will not be referenced by the existing RepositoryData and new RepositoryData is only
        // written if all shard paths have been successfully updated.
        final StepListener<RepositoryData> writeUpdatedRepoDataStep = new StepListener<>();
        writeShardMetadataAndComputeDeletesStep.whenComplete(deleteResults -> {
            final ShardGenerations.Builder builder = ShardGenerations.builder();
            for (ShardSnapshotMetaDeleteResult newGen : deleteResults) {
                builder.put(newGen.indexId, newGen.shardId, newGen.newGeneration);
            }
            final RepositoryData updatedRepoData = repositoryData.removeSnapshot(snapshotId, builder.build());
            writeIndexGen(updatedRepoData, repositoryStateId, true, ActionListener.wrap(v -> writeUpdatedRepoDataStep.onResponse(updatedRepoData), listener::onFailure));
        }, listener::onFailure);
        // Once we have updated the repository, run the clean-ups
        writeUpdatedRepoDataStep.whenComplete(updatedRepoData -> {
            // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion
            final ActionListener<Void> afterCleanupsListener = new GroupedActionListener<>(ActionListener.wrap(() -> listener.onResponse(null)), 2);
            asyncCleanupUnlinkedRootAndIndicesBlobs(foundIndices, rootBlobs, updatedRepoData, afterCleanupsListener);
            asyncCleanupUnlinkedShardLevelBlobs(snapshotId, writeShardMetadataAndComputeDeletesStep.result(), afterCleanupsListener);
        }, listener::onFailure);
    } else {
        // Write the new repository data first (with the removed snapshot), using no shard generations
        final RepositoryData updatedRepoData = repositoryData.removeSnapshot(snapshotId, ShardGenerations.EMPTY);
        writeIndexGen(updatedRepoData, repositoryStateId, false, ActionListener.wrap(v -> {
            // Run unreferenced blobs cleanup in parallel to shard-level snapshot deletion
            final ActionListener<Void> afterCleanupsListener = new GroupedActionListener<>(ActionListener.wrap(() -> listener.onResponse(null)), 2);
            asyncCleanupUnlinkedRootAndIndicesBlobs(foundIndices, rootBlobs, updatedRepoData, afterCleanupsListener);
            final StepListener<Collection<ShardSnapshotMetaDeleteResult>> writeMetaAndComputeDeletesStep = new StepListener<>();
            writeUpdatedShardMetadataAndComputeDeletes(snapshotId, repositoryData, false, writeMetaAndComputeDeletesStep);
            writeMetaAndComputeDeletesStep.whenComplete(deleteResults -> asyncCleanupUnlinkedShardLevelBlobs(snapshotId, deleteResults, afterCleanupsListener), afterCleanupsListener::onFailure);
        }, listener::onFailure));
    }
}
Also used : ShardId(org.elasticsearch.index.shard.ShardId) SnapshotFiles(org.elasticsearch.index.snapshots.blobstore.SnapshotFiles) IndexShardSnapshotFailedException(org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException) ByteSizeUnit(org.elasticsearch.common.unit.ByteSizeUnit) IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException) IndexMetadata(org.elasticsearch.cluster.metadata.IndexMetadata) AllocationService(org.elasticsearch.cluster.routing.allocation.AllocationService) ClusterState(org.elasticsearch.cluster.ClusterState) ClusterStateUpdateTask(org.elasticsearch.cluster.ClusterStateUpdateTask) Map(java.util.Map) BlobContainer(org.elasticsearch.common.blobstore.BlobContainer) RateLimitingInputStream(org.elasticsearch.index.snapshots.blobstore.RateLimitingInputStream) IOContext(org.apache.lucene.store.IOContext) InvalidArgumentException(io.crate.exceptions.InvalidArgumentException) SnapshotDeletionsInProgress(org.elasticsearch.cluster.SnapshotDeletionsInProgress) UUIDs(org.elasticsearch.common.UUIDs) Set(java.util.Set) BlockingQueue(java.util.concurrent.BlockingQueue) StandardCharsets(java.nio.charset.StandardCharsets) AbstractRunnable(org.elasticsearch.common.util.concurrent.AbstractRunnable) Stream(java.util.stream.Stream) Logger(org.apache.logging.log4j.Logger) InputStreamIndexInput(org.elasticsearch.common.lucene.store.InputStreamIndexInput) BlobStore(org.elasticsearch.common.blobstore.BlobStore) SnapshotException(org.elasticsearch.snapshots.SnapshotException) FileInfo.canonicalName(org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName) IndexCommit(org.apache.lucene.index.IndexCommit) XContentFactory(org.elasticsearch.common.xcontent.XContentFactory) SnapshotId(org.elasticsearch.snapshots.SnapshotId) Tuple(io.crate.common.collections.Tuple) ShardGenerations(org.elasticsearch.repositories.ShardGenerations) ClusterService(org.elasticsearch.cluster.service.ClusterService) SnapshotShardFailure(org.elasticsearch.snapshots.SnapshotShardFailure) BytesStreamOutput(org.elasticsearch.common.io.stream.BytesStreamOutput) LoggingDeprecationHandler(org.elasticsearch.common.xcontent.LoggingDeprecationHandler) ArrayList(java.util.ArrayList) BytesArray(org.elasticsearch.common.bytes.BytesArray) Metadata(org.elasticsearch.cluster.metadata.Metadata) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) Store(org.elasticsearch.index.store.Store) Nullable(javax.annotation.Nullable) LongStream(java.util.stream.LongStream) IndexInput(org.apache.lucene.store.IndexInput) SetOnce(org.apache.lucene.util.SetOnce) Executor(java.util.concurrent.Executor) IOException(java.io.IOException) XContentParser(org.elasticsearch.common.xcontent.XContentParser) AtomicLong(java.util.concurrent.atomic.AtomicLong) CounterMetric(org.elasticsearch.common.metrics.CounterMetric) ActionListener(org.elasticsearch.action.ActionListener) FsBlobContainer(org.elasticsearch.common.blobstore.fs.FsBlobContainer) SnapshotMissingException(org.elasticsearch.snapshots.SnapshotMissingException) NoSuchFileException(java.nio.file.NoSuchFileException) ConcurrentSnapshotExecutionException(org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException) SnapshotInfo(org.elasticsearch.snapshots.SnapshotInfo) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) StoreFileMetadata(org.elasticsearch.index.store.StoreFileMetadata) RepositoryMetadata(org.elasticsearch.cluster.metadata.RepositoryMetadata) Settings(org.elasticsearch.common.settings.Settings) 
Locale(java.util.Locale) Streams(org.elasticsearch.common.io.Streams) ThreadPool(org.elasticsearch.threadpool.ThreadPool) IndexShardRestoreFailedException(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException) ActionRunnable(org.elasticsearch.action.ActionRunnable) StepListener(org.elasticsearch.action.StepListener) NamedXContentRegistry(org.elasticsearch.common.xcontent.NamedXContentRegistry) RepositoryException(org.elasticsearch.repositories.RepositoryException) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) NotXContentException(org.elasticsearch.common.compress.NotXContentException) Setting(org.elasticsearch.common.settings.Setting) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) BlobMetadata(org.elasticsearch.common.blobstore.BlobMetadata) BytesReference(org.elasticsearch.common.bytes.BytesReference) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Collectors(java.util.stream.Collectors) IndexShardSnapshotException(org.elasticsearch.index.snapshots.IndexShardSnapshotException) MapperService(org.elasticsearch.index.mapper.MapperService) List(java.util.List) BlobStoreIndexShardSnapshot(org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot) Version(org.elasticsearch.Version) RecoveryState(org.elasticsearch.indices.recovery.RecoveryState) RepositoryData(org.elasticsearch.repositories.RepositoryData) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) XContentType(org.elasticsearch.common.xcontent.XContentType) IndexShardSnapshotStatus(org.elasticsearch.index.snapshots.IndexShardSnapshotStatus) Index(org.elasticsearch.index.Index) Lucene(org.elasticsearch.common.lucene.Lucene) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) IndexId(org.elasticsearch.repositories.IndexId) FilterInputStream(java.io.FilterInputStream) RepositoriesMetadata(org.elasticsearch.cluster.metadata.RepositoriesMetadata) RepositoryVerificationException(org.elasticsearch.repositories.RepositoryVerificationException) BlobPath(org.elasticsearch.common.blobstore.BlobPath) IndexOutput(org.apache.lucene.store.IndexOutput) Numbers(org.elasticsearch.common.Numbers) Repository(org.elasticsearch.repositories.Repository) SnapshotsService(org.elasticsearch.snapshots.SnapshotsService) GroupedActionListener(org.elasticsearch.action.support.GroupedActionListener) IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException) AbstractLifecycleComponent(org.elasticsearch.common.component.AbstractLifecycleComponent) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) ExceptionsHelper(org.elasticsearch.ExceptionsHelper) SlicedInputStream(org.elasticsearch.index.snapshots.blobstore.SlicedInputStream) SnapshotsInProgress(org.elasticsearch.cluster.SnapshotsInProgress) BlobStoreIndexShardSnapshots(org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots) Collections(java.util.Collections) LogManager(org.apache.logging.log4j.LogManager) RepositoryOperation(org.elasticsearch.repositories.RepositoryOperation) Snapshot(org.elasticsearch.snapshots.Snapshot) RateLimiter(org.apache.lucene.store.RateLimiter) InputStream(java.io.InputStream) ShardGenerations(org.elasticsearch.repositories.ShardGenerations) RepositoryData(org.elasticsearch.repositories.RepositoryData) GroupedActionListener(org.elasticsearch.action.support.GroupedActionListener) ActionListener(org.elasticsearch.action.ActionListener) 
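
The key step in this example is the final fan-in: once the repository metadata has been rewritten, two independent clean-ups run in parallel and the caller's listener must fire exactly once. The following is a minimal sketch of that fan-in, not the crate implementation; cleanupRootBlobs and cleanupShardBlobs are hypothetical stand-ins for the real asynchronous work.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.GroupedActionListener;

public class ParallelCleanupSketch {

    // Hypothetical async clean-ups; each reports completion through its listener.
    static void cleanupRootBlobs(ActionListener<Void> listener) {
        listener.onResponse(null);
    }

    static void cleanupShardBlobs(ActionListener<Void> listener) {
        listener.onResponse(null);
    }

    static void deleteAndCleanup(ActionListener<Void> done) {
        // Group size 2: `done` fires only after both clean-ups have called back,
        // regardless of which one finishes first.
        // ActionListener.wrap(Runnable) runs on both the response and the failure
        // path, so a failed clean-up still lets `done` complete, as in the snippet above.
        ActionListener<Void> afterCleanups =
            new GroupedActionListener<>(ActionListener.wrap(() -> done.onResponse(null)), 2);
        cleanupRootBlobs(afterCleanups);
        cleanupShardBlobs(afterCleanups);
    }
}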

Example 3 with GroupedActionListener

use of org.elasticsearch.action.support.GroupedActionListener in project crate by crate.

In the class BlobStoreRepository, the method finalizeSnapshot:

/**
 * {@inheritDoc}
 */
@Override
public void finalizeSnapshot(final SnapshotId snapshotId, final ShardGenerations shardGenerations, final long startTime, final String failure, final int totalShards, final List<SnapshotShardFailure> shardFailures, final long repositoryStateId, final boolean includeGlobalState, final Metadata clusterMetadata, boolean writeShardGens, final ActionListener<SnapshotInfo> listener) {
    final Collection<IndexId> indices = shardGenerations.indices();
    // Once we are done writing the updated index-N blob we remove the now unreferenced index-${uuid} blobs in each shard
    // directory if all nodes are at least at version SnapshotsService#SHARD_GEN_IN_REPO_DATA_VERSION
    // If there are older version nodes in the cluster, we don't need to run this cleanup as it will have already happened
    // when writing the index-${N} to each shard directory.
    final Consumer<Exception> onUpdateFailure = e -> listener.onFailure(new SnapshotException(metadata.name(), snapshotId, "failed to update snapshot in repository", e));
    final ActionListener<SnapshotInfo> allMetaListener = new GroupedActionListener<>(ActionListener.wrap(snapshotInfos -> {
        assert snapshotInfos.size() == 1 : "Should have only received a single SnapshotInfo but received " + snapshotInfos;
        final SnapshotInfo snapshotInfo = snapshotInfos.iterator().next();
        getRepositoryData(ActionListener.wrap(existingRepositoryData -> {
            final RepositoryData updatedRepositoryData = existingRepositoryData.addSnapshot(snapshotId, snapshotInfo.state(), Version.CURRENT, shardGenerations);
            writeIndexGen(updatedRepositoryData, repositoryStateId, writeShardGens, ActionListener.wrap(v -> {
                if (writeShardGens) {
                    cleanupOldShardGens(existingRepositoryData, updatedRepositoryData);
                }
                listener.onResponse(snapshotInfo);
            }, onUpdateFailure));
        }, onUpdateFailure));
    }, onUpdateFailure), 2 + indices.size());
    final Executor executor = threadPool.executor(ThreadPool.Names.SNAPSHOT);
    // We ignore all FileAlreadyExistsException when writing metadata since otherwise a master failover while in this method will
    // mean that no snap-${uuid}.dat blob is ever written for this snapshot. This is safe because any updated version of the
    // index or global metadata will be compatible with the segments written in this snapshot as well.
    // Failing on an already existing index-${repoGeneration} below ensures that the index.latest blob is not updated in a way
    // that decrements the generation it points at
    // Write Global Metadata
    executor.execute(ActionRunnable.run(allMetaListener, () -> globalMetadataFormat.write(clusterMetadata, blobContainer(), snapshotId.getUUID(), false)));
    // write the index metadata for each index in the snapshot
    for (IndexId index : indices) {
        executor.execute(ActionRunnable.run(allMetaListener, () -> indexMetadataFormat.write(clusterMetadata.index(index.getName()), indexContainer(index), snapshotId.getUUID(), false)));
    }
    executor.execute(ActionRunnable.supply(allMetaListener, () -> {
        final SnapshotInfo snapshotInfo = new SnapshotInfo(snapshotId, indices.stream().map(IndexId::getName).collect(Collectors.toList()), startTime, failure, threadPool.absoluteTimeInMillis(), totalShards, shardFailures, includeGlobalState);
        snapshotFormat.write(snapshotInfo, blobContainer(), snapshotId.getUUID(), false);
        return snapshotInfo;
    }));
}
Also used : ShardId(org.elasticsearch.index.shard.ShardId) SnapshotFiles(org.elasticsearch.index.snapshots.blobstore.SnapshotFiles) IndexShardSnapshotFailedException(org.elasticsearch.index.snapshots.IndexShardSnapshotFailedException) ByteSizeUnit(org.elasticsearch.common.unit.ByteSizeUnit) IndexFormatTooNewException(org.apache.lucene.index.IndexFormatTooNewException) IndexMetadata(org.elasticsearch.cluster.metadata.IndexMetadata) AllocationService(org.elasticsearch.cluster.routing.allocation.AllocationService) ClusterState(org.elasticsearch.cluster.ClusterState) ClusterStateUpdateTask(org.elasticsearch.cluster.ClusterStateUpdateTask) Map(java.util.Map) BlobContainer(org.elasticsearch.common.blobstore.BlobContainer) RateLimitingInputStream(org.elasticsearch.index.snapshots.blobstore.RateLimitingInputStream) IOContext(org.apache.lucene.store.IOContext) InvalidArgumentException(io.crate.exceptions.InvalidArgumentException) SnapshotDeletionsInProgress(org.elasticsearch.cluster.SnapshotDeletionsInProgress) UUIDs(org.elasticsearch.common.UUIDs) Set(java.util.Set) BlockingQueue(java.util.concurrent.BlockingQueue) StandardCharsets(java.nio.charset.StandardCharsets) AbstractRunnable(org.elasticsearch.common.util.concurrent.AbstractRunnable) Stream(java.util.stream.Stream) Logger(org.apache.logging.log4j.Logger) InputStreamIndexInput(org.elasticsearch.common.lucene.store.InputStreamIndexInput) BlobStore(org.elasticsearch.common.blobstore.BlobStore) SnapshotException(org.elasticsearch.snapshots.SnapshotException) FileInfo.canonicalName(org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName) IndexCommit(org.apache.lucene.index.IndexCommit) XContentFactory(org.elasticsearch.common.xcontent.XContentFactory) SnapshotId(org.elasticsearch.snapshots.SnapshotId) Tuple(io.crate.common.collections.Tuple) ShardGenerations(org.elasticsearch.repositories.ShardGenerations) ClusterService(org.elasticsearch.cluster.service.ClusterService) SnapshotShardFailure(org.elasticsearch.snapshots.SnapshotShardFailure) BytesStreamOutput(org.elasticsearch.common.io.stream.BytesStreamOutput) LoggingDeprecationHandler(org.elasticsearch.common.xcontent.LoggingDeprecationHandler) ArrayList(java.util.ArrayList) BytesArray(org.elasticsearch.common.bytes.BytesArray) Metadata(org.elasticsearch.cluster.metadata.Metadata) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) Store(org.elasticsearch.index.store.Store) Nullable(javax.annotation.Nullable) LongStream(java.util.stream.LongStream) IndexInput(org.apache.lucene.store.IndexInput) SetOnce(org.apache.lucene.util.SetOnce) Executor(java.util.concurrent.Executor) IOException(java.io.IOException) XContentParser(org.elasticsearch.common.xcontent.XContentParser) AtomicLong(java.util.concurrent.atomic.AtomicLong) CounterMetric(org.elasticsearch.common.metrics.CounterMetric) ActionListener(org.elasticsearch.action.ActionListener) FsBlobContainer(org.elasticsearch.common.blobstore.fs.FsBlobContainer) SnapshotMissingException(org.elasticsearch.snapshots.SnapshotMissingException) NoSuchFileException(java.nio.file.NoSuchFileException) ConcurrentSnapshotExecutionException(org.elasticsearch.snapshots.ConcurrentSnapshotExecutionException) SnapshotInfo(org.elasticsearch.snapshots.SnapshotInfo) CorruptIndexException(org.apache.lucene.index.CorruptIndexException) StoreFileMetadata(org.elasticsearch.index.store.StoreFileMetadata) RepositoryMetadata(org.elasticsearch.cluster.metadata.RepositoryMetadata) Settings(org.elasticsearch.common.settings.Settings) 
Locale(java.util.Locale) Streams(org.elasticsearch.common.io.Streams) ThreadPool(org.elasticsearch.threadpool.ThreadPool) IndexShardRestoreFailedException(org.elasticsearch.index.snapshots.IndexShardRestoreFailedException) ActionRunnable(org.elasticsearch.action.ActionRunnable) StepListener(org.elasticsearch.action.StepListener) NamedXContentRegistry(org.elasticsearch.common.xcontent.NamedXContentRegistry) RepositoryException(org.elasticsearch.repositories.RepositoryException) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) NotXContentException(org.elasticsearch.common.compress.NotXContentException) Setting(org.elasticsearch.common.settings.Setting) Collection(java.util.Collection) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) BlobMetadata(org.elasticsearch.common.blobstore.BlobMetadata) BytesReference(org.elasticsearch.common.bytes.BytesReference) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) Collectors(java.util.stream.Collectors) IndexShardSnapshotException(org.elasticsearch.index.snapshots.IndexShardSnapshotException) MapperService(org.elasticsearch.index.mapper.MapperService) List(java.util.List) BlobStoreIndexShardSnapshot(org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot) Version(org.elasticsearch.Version) RecoveryState(org.elasticsearch.indices.recovery.RecoveryState) RepositoryData(org.elasticsearch.repositories.RepositoryData) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) XContentType(org.elasticsearch.common.xcontent.XContentType) IndexShardSnapshotStatus(org.elasticsearch.index.snapshots.IndexShardSnapshotStatus) Index(org.elasticsearch.index.Index) Lucene(org.elasticsearch.common.lucene.Lucene) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) IndexId(org.elasticsearch.repositories.IndexId) FilterInputStream(java.io.FilterInputStream) RepositoriesMetadata(org.elasticsearch.cluster.metadata.RepositoriesMetadata) RepositoryVerificationException(org.elasticsearch.repositories.RepositoryVerificationException) BlobPath(org.elasticsearch.common.blobstore.BlobPath) IndexOutput(org.apache.lucene.store.IndexOutput) Numbers(org.elasticsearch.common.Numbers) Repository(org.elasticsearch.repositories.Repository) SnapshotsService(org.elasticsearch.snapshots.SnapshotsService) GroupedActionListener(org.elasticsearch.action.support.GroupedActionListener) IndexFormatTooOldException(org.apache.lucene.index.IndexFormatTooOldException) AbstractLifecycleComponent(org.elasticsearch.common.component.AbstractLifecycleComponent) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) ExceptionsHelper(org.elasticsearch.ExceptionsHelper) SlicedInputStream(org.elasticsearch.index.snapshots.blobstore.SlicedInputStream) SnapshotsInProgress(org.elasticsearch.cluster.SnapshotsInProgress) BlobStoreIndexShardSnapshots(org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshots) Collections(java.util.Collections) LogManager(org.apache.logging.log4j.LogManager) RepositoryOperation(org.elasticsearch.repositories.RepositoryOperation) Snapshot(org.elasticsearch.snapshots.Snapshot) RateLimiter(org.apache.lucene.store.RateLimiter) InputStream(java.io.InputStream) IndexId(org.elasticsearch.repositories.IndexId) SnapshotInfo(org.elasticsearch.snapshots.SnapshotInfo) Executor(java.util.concurrent.Executor) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) GroupedActionListener(org.elasticsearch.action.support.GroupedActionListener) 
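
finalizeSnapshot sizes the grouped listener at 2 + indices.size(), so the delegate only runs after the global metadata, every per-index metadata blob, and the snap-${uuid}.dat blob have all been written on the SNAPSHOT thread pool. Below is a cut-down sketch of that executor fan-out, under the assumption of a hypothetical writeBlob helper in place of the real blob formats; only ActionRunnable, ActionListener and GroupedActionListener are the real Elasticsearch classes used above.

import java.util.List;
import java.util.concurrent.Executor;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.action.support.GroupedActionListener;

public class ParallelWriteSketch {

    // Hypothetical blocking write; throws on failure.
    static void writeBlob(String name) {
        // ... write the blob ...
    }

    static void writeAll(Executor executor, List<String> blobNames, ActionListener<Void> done) {
        if (blobNames.isEmpty()) {
            done.onResponse(null);  // the grouped listener needs a group size of at least 1
            return;
        }
        // One slot per blob: `done` is notified once every task has reported back,
        // or with the collected failure if any write threw.
        GroupedActionListener<Void> allWritten = new GroupedActionListener<>(
            ActionListener.wrap(ignored -> done.onResponse(null), done::onFailure),
            blobNames.size());
        for (String name : blobNames) {
            // ActionRunnable.run responds null to the listener when the runnable
            // completes, or forwards the exception to onFailure.
            executor.execute(ActionRunnable.run(allWritten, () -> writeBlob(name)));
        }
    }
}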

Example 4 with GroupedActionListener

use of org.elasticsearch.action.support.GroupedActionListener in project crate by crate.

In the class NodeConnectionsService, the method connectToNodes:

/**
 * Connect to all the given nodes, but do not disconnect from any extra nodes. Calls the completion handler on completion of all
 * connection attempts to _new_ nodes, but not on attempts to re-establish connections to nodes that are already known.
 */
public void connectToNodes(DiscoveryNodes discoveryNodes, Runnable onCompletion) {
    if (discoveryNodes.getSize() == 0) {
        onCompletion.run();
        return;
    }
    final GroupedActionListener<Void> listener = new GroupedActionListener<>(ActionListener.wrap(onCompletion), discoveryNodes.getSize());
    final List<Runnable> runnables = new ArrayList<>(discoveryNodes.getSize());
    synchronized (mutex) {
        for (final DiscoveryNode discoveryNode : discoveryNodes) {
            ConnectionTarget connectionTarget = targetsByNode.get(discoveryNode);
            final boolean isNewNode;
            if (connectionTarget == null) {
                // new node, set up target and listener
                connectionTarget = new ConnectionTarget(discoveryNode);
                targetsByNode.put(discoveryNode, connectionTarget);
                isNewNode = true;
            } else {
                // existing node, but maybe we're disconnecting from it, in which case it was recently removed from the cluster
                // state and has now been re-added so we should wait for the re-connection
                isNewNode = connectionTarget.isPendingDisconnection();
            }
            if (isNewNode) {
                runnables.add(connectionTarget.connect(listener));
            } else {
                // known node, try and ensure it's connected but do not wait
                runnables.add(connectionTarget.connect(null));
                runnables.add(() -> listener.onResponse(null));
            }
        }
    }
    runnables.forEach(Runnable::run);
}
Also used : DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) GroupedActionListener(org.elasticsearch.action.support.GroupedActionListener) AbstractRunnable(org.elasticsearch.common.util.concurrent.AbstractRunnable) ArrayList(java.util.ArrayList)
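
Here the group is sized to the full node set, but slots for nodes that are already connected are resolved immediately so the completion handler never waits on them. A minimal sketch of that "complete the known slots up front" pattern follows; isKnown and connectAsync are hypothetical helpers standing in for ConnectionTarget.

import java.util.List;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.GroupedActionListener;

public class ConnectSketch {

    static boolean isKnown(String node) {                       // hypothetical
        return node.startsWith("known-");
    }

    static void connectAsync(String node, ActionListener<Void> listener) {  // hypothetical
        listener.onResponse(null);
    }

    static void connectAll(List<String> nodes, Runnable onCompletion) {
        if (nodes.isEmpty()) {
            onCompletion.run();
            return;
        }
        GroupedActionListener<Void> listener =
            new GroupedActionListener<>(ActionListener.wrap(onCompletion), nodes.size());
        for (String node : nodes) {
            if (isKnown(node)) {
                // Known member: count its slot as done right away instead of waiting.
                listener.onResponse(null);
            } else {
                connectAsync(node, listener);
            }
        }
    }
}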

Example 5 with GroupedActionListener

use of org.elasticsearch.action.support.GroupedActionListener in project crate by crate.

In the class DiskThresholdMonitor, the method onNewInfo:

public void onNewInfo(ClusterInfo info) {
    if (checkInProgress.compareAndSet(false, true) == false) {
        LOGGER.info("skipping monitor as a check is already in progress");
        return;
    }
    final ImmutableOpenMap<String, DiskUsage> usages = info.getNodeLeastAvailableDiskUsages();
    if (usages == null) {
        checkFinished();
        return;
    }
    boolean reroute = false;
    String explanation = "";
    final long currentTimeMillis = currentTimeMillisSupplier.getAsLong();
    // Garbage collect nodes that have been removed from the cluster
    // from the map that tracks watermark crossing
    final ObjectLookupContainer<String> nodes = usages.keys();
    for (String node : nodeHasPassedWatermark) {
        if (nodes.contains(node) == false) {
            nodeHasPassedWatermark.remove(node);
        }
    }
    final ClusterState state = clusterStateSupplier.get();
    final Set<String> indicesToMarkReadOnly = new HashSet<>();
    for (final ObjectObjectCursor<String, DiskUsage> entry : usages) {
        final String node = entry.key;
        final DiskUsage usage = entry.value;
        warnAboutDiskIfNeeded(usage);
        if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdFloodStage().getBytes() || usage.getFreeDiskAsPercentage() < diskThresholdSettings.getFreeDiskThresholdFloodStage()) {
            final RoutingNode routingNode = state.getRoutingNodes().node(node);
            if (routingNode != null) {
                // this might happen if we haven't got the full cluster-state yet?!
                for (ShardRouting routing : routingNode) {
                    indicesToMarkReadOnly.add(routing.index().getName());
                }
            }
        } else if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdHigh().getBytes() || usage.getFreeDiskAsPercentage() < diskThresholdSettings.getFreeDiskThresholdHigh()) {
            if (lastRunTimeMillis.get() < currentTimeMillis - diskThresholdSettings.getRerouteInterval().millis()) {
                reroute = true;
                explanation = "high disk watermark exceeded on one or more nodes";
            } else {
                LOGGER.debug("high disk watermark exceeded on {} but an automatic reroute has occurred " + "in the last [{}], skipping reroute", node, diskThresholdSettings.getRerouteInterval());
            }
            nodeHasPassedWatermark.add(node);
        } else if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdLow().getBytes() || usage.getFreeDiskAsPercentage() < diskThresholdSettings.getFreeDiskThresholdLow()) {
            nodeHasPassedWatermark.add(node);
        } else {
            if (nodeHasPassedWatermark.contains(node)) {
                // the node has dropped back below the watermarks, so reroute so that
                // any unassigned shards can be allocated if they are able to be
                if (lastRunTimeMillis.get() < currentTimeMillis - diskThresholdSettings.getRerouteInterval().millis()) {
                    reroute = true;
                    explanation = "one or more nodes has gone under the high or low watermark";
                    nodeHasPassedWatermark.remove(node);
                } else {
                    LOGGER.debug("{} has gone below a disk threshold, but an automatic reroute has occurred " + "in the last [{}], skipping reroute", node, diskThresholdSettings.getRerouteInterval());
                }
            }
        }
    }
    final ActionListener<Void> listener = new GroupedActionListener<>(ActionListener.wrap(this::checkFinished), 2);
    if (reroute) {
        LOGGER.info("rerouting shards: [{}]", explanation);
        rerouteService.reroute("disk threshold monitor", Priority.HIGH, ActionListener.wrap(r -> {
            setLastRunTimeMillis();
            listener.onResponse(r);
        }, e -> {
            LOGGER.debug("reroute failed", e);
            setLastRunTimeMillis();
            listener.onFailure(e);
        }));
    } else {
        listener.onResponse(null);
    }
    indicesToMarkReadOnly.removeIf(index -> state.getBlocks().indexBlocked(ClusterBlockLevel.WRITE, index));
    if (indicesToMarkReadOnly.isEmpty() == false) {
        markIndicesReadOnly(indicesToMarkReadOnly, ActionListener.wrap(r -> {
            setLastRunTimeMillis();
            listener.onResponse(r);
        }, e -> {
            LOGGER.debug("marking indices readonly failed", e);
            setLastRunTimeMillis();
            listener.onFailure(e);
        }));
    } else {
        listener.onResponse(null);
    }
}
Also used : ShardRouting(org.elasticsearch.cluster.routing.ShardRouting) ImmutableOpenMap(org.elasticsearch.common.collect.ImmutableOpenMap) LongSupplier(java.util.function.LongSupplier) IndexMetadata(org.elasticsearch.cluster.metadata.IndexMetadata) RerouteService(org.elasticsearch.cluster.routing.RerouteService) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Supplier(java.util.function.Supplier) Strings(org.elasticsearch.common.Strings) Sets(io.crate.common.collections.Sets) HashSet(java.util.HashSet) ObjectObjectCursor(com.carrotsearch.hppc.cursors.ObjectObjectCursor) DiskUsage(org.elasticsearch.cluster.DiskUsage) ClusterState(org.elasticsearch.cluster.ClusterState) Settings(org.elasticsearch.common.settings.Settings) ClusterBlockLevel(org.elasticsearch.cluster.block.ClusterBlockLevel) Priority(org.elasticsearch.common.Priority) Client(org.elasticsearch.client.Client) RoutingNode(org.elasticsearch.cluster.routing.RoutingNode) Set(java.util.Set) GroupedActionListener(org.elasticsearch.action.support.GroupedActionListener) AtomicLong(java.util.concurrent.atomic.AtomicLong) ClusterInfo(org.elasticsearch.cluster.ClusterInfo) ClusterSettings(org.elasticsearch.common.settings.ClusterSettings) Logger(org.apache.logging.log4j.Logger) LogManager(org.apache.logging.log4j.LogManager) ActionListener(org.elasticsearch.action.ActionListener) ObjectLookupContainer(com.carrotsearch.hppc.ObjectLookupContainer) ClusterState(org.elasticsearch.cluster.ClusterState) DiskUsage(org.elasticsearch.cluster.DiskUsage) RoutingNode(org.elasticsearch.cluster.routing.RoutingNode) GroupedActionListener(org.elasticsearch.action.support.GroupedActionListener) ShardRouting(org.elasticsearch.cluster.routing.ShardRouting) HashSet(java.util.HashSet)
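
The monitor depends on both branches (reroute and mark-read-only) resolving their slot of the size-2 grouped listener exactly once, calling onResponse(null) on the no-op path so that checkFinished() always runs. A stripped-down sketch of that invariant, with hypothetical doReroute and doMarkReadOnly helpers in place of the real cluster calls:

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.GroupedActionListener;

public class MonitorSketch {

    static void doReroute(ActionListener<Void> l) { l.onResponse(null); }        // hypothetical
    static void doMarkReadOnly(ActionListener<Void> l) { l.onResponse(null); }   // hypothetical

    static void onNewInfo(boolean needsReroute, boolean needsReadOnly, Runnable checkFinished) {
        // checkFinished runs once both slots below have been resolved.
        ActionListener<Void> listener =
            new GroupedActionListener<>(ActionListener.wrap(checkFinished), 2);
        if (needsReroute) {
            doReroute(listener);
        } else {
            listener.onResponse(null);   // still consume the slot on the no-op path
        }
        if (needsReadOnly) {
            doMarkReadOnly(listener);
        } else {
            listener.onResponse(null);
        }
    }
}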

Aggregations

GroupedActionListener (org.elasticsearch.action.support.GroupedActionListener): 11 usages
ArrayList (java.util.ArrayList): 9 usages
Set (java.util.Set): 9 usages
Tuple (io.crate.common.collections.Tuple): 8 usages
Collection (java.util.Collection): 8 usages
Collections (java.util.Collections): 8 usages
List (java.util.List): 8 usages
Map (java.util.Map): 8 usages
Collectors (java.util.stream.Collectors): 8 usages
LogManager (org.apache.logging.log4j.LogManager): 8 usages
Logger (org.apache.logging.log4j.Logger): 8 usages
Repository (org.elasticsearch.repositories.Repository): 8 usages
RepositoryData (org.elasticsearch.repositories.RepositoryData): 8 usages
Locale (java.util.Locale): 7 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 7 usages
Nullable (javax.annotation.Nullable): 7 usages
ParameterizedMessage (org.apache.logging.log4j.message.ParameterizedMessage): 7 usages
ActionListener (org.elasticsearch.action.ActionListener): 7 usages
IndexMetadata (org.elasticsearch.cluster.metadata.IndexMetadata): 7 usages
Version (org.elasticsearch.Version): 6 usages