
Example 16 with ActionListener

Use of org.elasticsearch.action.ActionListener in the elasticsearch project by elastic, from the class TransportClusterHealthAction, method executeHealth.

private void executeHealth(final ClusterHealthRequest request, final ActionListener<ClusterHealthResponse> listener) {
    int waitFor = 5;
    if (request.waitForStatus() == null) {
        waitFor--;
    }
    if (request.waitForNoRelocatingShards() == false) {
        waitFor--;
    }
    if (request.waitForActiveShards().equals(ActiveShardCount.NONE)) {
        waitFor--;
    }
    if (request.waitForNodes().isEmpty()) {
        waitFor--;
    }
    if (request.indices() == null || request.indices().length == 0) {
        // check that they actually exist in the metadata
        waitFor--;
    }
    assert waitFor >= 0;
    final ClusterState state = clusterService.state();
    final ClusterStateObserver observer = new ClusterStateObserver(state, clusterService, null, logger, threadPool.getThreadContext());
    if (request.timeout().millis() == 0) {
        listener.onResponse(getResponse(request, state, waitFor, request.timeout().millis() == 0));
        return;
    }
    final int concreteWaitFor = waitFor;
    final Predicate<ClusterState> validationPredicate = newState -> validateRequest(request, newState, concreteWaitFor);
    final ClusterStateObserver.Listener stateListener = new ClusterStateObserver.Listener() {

        @Override
        public void onNewClusterState(ClusterState clusterState) {
            listener.onResponse(getResponse(request, clusterState, concreteWaitFor, false));
        }

        @Override
        public void onClusterServiceClose() {
            listener.onFailure(new IllegalStateException("ClusterService was closed during health call"));
        }

        @Override
        public void onTimeout(TimeValue timeout) {
            final ClusterHealthResponse response = getResponse(request, observer.setAndGetObservedState(), concreteWaitFor, true);
            listener.onResponse(response);
        }
    };
    if (validationPredicate.test(state)) {
        stateListener.onNewClusterState(state);
    } else {
        observer.waitForNextChange(stateListener, validationPredicate, request.timeout());
    }
}
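
The method counts how many wait conditions the request actually supplies (waitFor), answers immediately when the timeout is zero, and otherwise registers a ClusterStateObserver listener that completes the ActionListener with a response when the predicate matches or when the wait times out, and fails it only if the cluster service shuts down. From the caller's point of view all of that sits behind a single ActionListener<ClusterHealthResponse>. Below is a minimal sketch of such a caller, assuming a connected Client instance is available; the class name, the printed fields, and the use of a CountDownLatch are illustrative and not part of the quoted code.

import java.util.concurrent.CountDownLatch;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;

public class ClusterHealthListenerSketch {

    // Hypothetical caller: fires an async health request and reacts to the outcome
    // only inside the listener callbacks, never blocking the transport thread.
    static void waitForGreen(Client client) throws InterruptedException {
        final CountDownLatch done = new CountDownLatch(1);
        ClusterHealthRequest request = new ClusterHealthRequest();
        request.waitForStatus(ClusterHealthStatus.GREEN);
        client.admin().cluster().health(request, new ActionListener<ClusterHealthResponse>() {
            @Override
            public void onResponse(ClusterHealthResponse response) {
                // isTimedOut() reports whether the wait conditions were met in time
                System.out.println("health=" + response.getStatus() + ", timedOut=" + response.isTimedOut());
                done.countDown();
            }

            @Override
            public void onFailure(Exception e) {
                e.printStackTrace();
                done.countDown();
            }
        });
        // Block the demo thread only; production code would keep going asynchronously.
        done.await();
    }
}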

Example 17 with ActionListener

Use of org.elasticsearch.action.ActionListener in the elasticsearch project by elastic, from the class RestoreService, method restoreSnapshot.

/**
     * Restores snapshot specified in the restore request.
     *
     * @param request  restore request
     * @param listener restore listener
     */
public void restoreSnapshot(final RestoreRequest request, final ActionListener<RestoreCompletionResponse> listener) {
    try {
        // Read snapshot info and metadata from the repository
        Repository repository = repositoriesService.repository(request.repositoryName);
        final RepositoryData repositoryData = repository.getRepositoryData();
        final Optional<SnapshotId> incompatibleSnapshotId = repositoryData.getIncompatibleSnapshotIds().stream().filter(s -> request.snapshotName.equals(s.getName())).findFirst();
        if (incompatibleSnapshotId.isPresent()) {
            throw new SnapshotRestoreException(request.repositoryName, request.snapshotName, "cannot restore incompatible snapshot");
        }
        final Optional<SnapshotId> matchingSnapshotId = repositoryData.getSnapshotIds().stream().filter(s -> request.snapshotName.equals(s.getName())).findFirst();
        if (matchingSnapshotId.isPresent() == false) {
            throw new SnapshotRestoreException(request.repositoryName, request.snapshotName, "snapshot does not exist");
        }
        final SnapshotId snapshotId = matchingSnapshotId.get();
        final SnapshotInfo snapshotInfo = repository.getSnapshotInfo(snapshotId);
        final Snapshot snapshot = new Snapshot(request.repositoryName, snapshotId);
        List<String> filteredIndices = SnapshotUtils.filterIndices(snapshotInfo.indices(), request.indices(), request.indicesOptions());
        MetaData metaData = repository.getSnapshotMetaData(snapshotInfo, repositoryData.resolveIndices(filteredIndices));
        // Make sure that we can restore from this snapshot
        validateSnapshotRestorable(request.repositoryName, snapshotInfo);
        // Find list of indices that we need to restore
        final Map<String, String> renamedIndices = renamedIndices(request, filteredIndices);
        // Now we can start the actual restore process by adding shards to be recovered in the cluster state
        // and updating cluster metadata (global and index) as needed
        clusterService.submitStateUpdateTask(request.cause(), new ClusterStateUpdateTask() {

            RestoreInfo restoreInfo = null;

            @Override
            public ClusterState execute(ClusterState currentState) {
                // Check if another restore process is already running - cannot run two restore processes at the
                // same time
                RestoreInProgress restoreInProgress = currentState.custom(RestoreInProgress.TYPE);
                if (restoreInProgress != null && !restoreInProgress.entries().isEmpty()) {
                    throw new ConcurrentSnapshotExecutionException(snapshot, "Restore process is already running in this cluster");
                }
                // Check if the snapshot to restore is currently being deleted
                SnapshotDeletionsInProgress deletionsInProgress = currentState.custom(SnapshotDeletionsInProgress.TYPE);
                if (deletionsInProgress != null && deletionsInProgress.hasDeletionsInProgress()) {
                    throw new ConcurrentSnapshotExecutionException(snapshot, "cannot restore a snapshot while a snapshot deletion is in-progress [" + deletionsInProgress.getEntries().get(0).getSnapshot() + "]");
                }
                // Updating cluster state
                ClusterState.Builder builder = ClusterState.builder(currentState);
                MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
                ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
                RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable());
                ImmutableOpenMap<ShardId, RestoreInProgress.ShardRestoreStatus> shards;
                Set<String> aliases = new HashSet<>();
                if (!renamedIndices.isEmpty()) {
                    // We have some indices to restore
                    ImmutableOpenMap.Builder<ShardId, RestoreInProgress.ShardRestoreStatus> shardsBuilder = ImmutableOpenMap.builder();
                    final Version minIndexCompatibilityVersion = currentState.getNodes().getMaxNodeVersion().minimumIndexCompatibilityVersion();
                    for (Map.Entry<String, String> indexEntry : renamedIndices.entrySet()) {
                        String index = indexEntry.getValue();
                        boolean partial = checkPartial(index);
                        SnapshotRecoverySource recoverySource = new SnapshotRecoverySource(snapshot, snapshotInfo.version(), index);
                        String renamedIndexName = indexEntry.getKey();
                        IndexMetaData snapshotIndexMetaData = metaData.index(index);
                        snapshotIndexMetaData = updateIndexSettings(snapshotIndexMetaData, request.indexSettings, request.ignoreIndexSettings);
                        try {
                            snapshotIndexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(snapshotIndexMetaData, minIndexCompatibilityVersion);
                        } catch (Exception ex) {
                            throw new SnapshotRestoreException(snapshot, "cannot restore index [" + index + "] because it cannot be upgraded", ex);
                        }
                        // Check that the index is closed or doesn't exist
                        IndexMetaData currentIndexMetaData = currentState.metaData().index(renamedIndexName);
                        IntSet ignoreShards = new IntHashSet();
                        final Index renamedIndex;
                        if (currentIndexMetaData == null) {
                            // Index doesn't exist - create it and start recovery
                            // Make sure that the index we are about to create has a valid name
                            MetaDataCreateIndexService.validateIndexName(renamedIndexName, currentState);
                            createIndexService.validateIndexSettings(renamedIndexName, snapshotIndexMetaData.getSettings());
                            IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN).index(renamedIndexName);
                            indexMdBuilder.settings(Settings.builder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, UUIDs.randomBase64UUID()));
                            if (!request.includeAliases() && !snapshotIndexMetaData.getAliases().isEmpty()) {
                                // Remove all aliases - they shouldn't be restored
                                indexMdBuilder.removeAllAliases();
                            } else {
                                for (ObjectCursor<String> alias : snapshotIndexMetaData.getAliases().keys()) {
                                    aliases.add(alias.value);
                                }
                            }
                            IndexMetaData updatedIndexMetaData = indexMdBuilder.build();
                            if (partial) {
                                populateIgnoredShards(index, ignoreShards);
                            }
                            rtBuilder.addAsNewRestore(updatedIndexMetaData, recoverySource, ignoreShards);
                            blocks.addBlocks(updatedIndexMetaData);
                            mdBuilder.put(updatedIndexMetaData, true);
                            renamedIndex = updatedIndexMetaData.getIndex();
                        } else {
                            validateExistingIndex(currentIndexMetaData, snapshotIndexMetaData, renamedIndexName, partial);
                            // Index exists and it's closed - open it in metadata and start recovery
                            IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN);
                            indexMdBuilder.version(Math.max(snapshotIndexMetaData.getVersion(), currentIndexMetaData.getVersion() + 1));
                            if (!request.includeAliases()) {
                                // Remove all snapshot aliases
                                if (!snapshotIndexMetaData.getAliases().isEmpty()) {
                                    indexMdBuilder.removeAllAliases();
                                }
                                // Add existing aliases
                                for (ObjectCursor<AliasMetaData> alias : currentIndexMetaData.getAliases().values()) {
                                    indexMdBuilder.putAlias(alias.value);
                                }
                            } else {
                                for (ObjectCursor<String> alias : snapshotIndexMetaData.getAliases().keys()) {
                                    aliases.add(alias.value);
                                }
                            }
                            indexMdBuilder.settings(Settings.builder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, currentIndexMetaData.getIndexUUID()));
                            IndexMetaData updatedIndexMetaData = indexMdBuilder.index(renamedIndexName).build();
                            rtBuilder.addAsRestore(updatedIndexMetaData, recoverySource);
                            blocks.updateBlocks(updatedIndexMetaData);
                            mdBuilder.put(updatedIndexMetaData, true);
                            renamedIndex = updatedIndexMetaData.getIndex();
                        }
                        for (int shard = 0; shard < snapshotIndexMetaData.getNumberOfShards(); shard++) {
                            if (!ignoreShards.contains(shard)) {
                                shardsBuilder.put(new ShardId(renamedIndex, shard), new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId()));
                            } else {
                                shardsBuilder.put(new ShardId(renamedIndex, shard), new RestoreInProgress.ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId(), RestoreInProgress.State.FAILURE));
                            }
                        }
                    }
                    shards = shardsBuilder.build();
                    RestoreInProgress.Entry restoreEntry = new RestoreInProgress.Entry(snapshot, overallState(RestoreInProgress.State.INIT, shards), Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())), shards);
                    builder.putCustom(RestoreInProgress.TYPE, new RestoreInProgress(restoreEntry));
                } else {
                    shards = ImmutableOpenMap.of();
                }
                checkAliasNameConflicts(renamedIndices, aliases);
                // Restore global state if needed
                restoreGlobalStateIfRequested(mdBuilder);
                if (completed(shards)) {
                    // We don't have any indices to restore - we are done
                    restoreInfo = new RestoreInfo(snapshotId.getName(), Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())), shards.size(), shards.size() - failedShards(shards));
                }
                RoutingTable rt = rtBuilder.build();
                ClusterState updatedState = builder.metaData(mdBuilder).blocks(blocks).routingTable(rt).build();
                return allocationService.reroute(updatedState, "restored snapshot [" + snapshot + "]");
            }

            private void checkAliasNameConflicts(Map<String, String> renamedIndices, Set<String> aliases) {
                for (Map.Entry<String, String> renamedIndex : renamedIndices.entrySet()) {
                    if (aliases.contains(renamedIndex.getKey())) {
                        throw new SnapshotRestoreException(snapshot, "cannot rename index [" + renamedIndex.getValue() + "] into [" + renamedIndex.getKey() + "] because of conflict with an alias with the same name");
                    }
                }
            }

            private void populateIgnoredShards(String index, IntSet ignoreShards) {
                for (SnapshotShardFailure failure : snapshotInfo.shardFailures()) {
                    if (index.equals(failure.index())) {
                        ignoreShards.add(failure.shardId());
                    }
                }
            }

            private boolean checkPartial(String index) {
                // Make sure that index was fully snapshotted
                if (failed(snapshotInfo, index)) {
                    if (request.partial()) {
                        return true;
                    } else {
                        throw new SnapshotRestoreException(snapshot, "index [" + index + "] wasn't fully snapshotted - cannot restore");
                    }
                } else {
                    return false;
                }
            }

            private void validateExistingIndex(IndexMetaData currentIndexMetaData, IndexMetaData snapshotIndexMetaData, String renamedIndex, boolean partial) {
                // Index exists - checking that it's closed
                if (currentIndexMetaData.getState() != IndexMetaData.State.CLOSE) {
                    // TODO: Enable restore for open indices
                    throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] because it's open");
                }
                // Index exists - checking if it's a partial restore
                if (partial) {
                    throw new SnapshotRestoreException(snapshot, "cannot restore partial index [" + renamedIndex + "] because such index already exists");
                }
                // Make sure that the number of shards is the same. That's the only thing that we cannot change
                if (currentIndexMetaData.getNumberOfShards() != snapshotIndexMetaData.getNumberOfShards()) {
                    throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] with [" + currentIndexMetaData.getNumberOfShards() + "] shard from snapshot with [" + snapshotIndexMetaData.getNumberOfShards() + "] shards");
                }
            }

            /**
                 * Optionally updates index settings in indexMetaData by removing settings listed in ignoreSettings and
                 * merging them with settings in changeSettings.
                 */
            private IndexMetaData updateIndexSettings(IndexMetaData indexMetaData, Settings changeSettings, String[] ignoreSettings) {
                if (changeSettings.names().isEmpty() && ignoreSettings.length == 0) {
                    return indexMetaData;
                }
                Settings normalizedChangeSettings = Settings.builder().put(changeSettings).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build();
                IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
                Map<String, String> settingsMap = new HashMap<>(indexMetaData.getSettings().getAsMap());
                List<String> simpleMatchPatterns = new ArrayList<>();
                for (String ignoredSetting : ignoreSettings) {
                    if (!Regex.isSimpleMatchPattern(ignoredSetting)) {
                        if (UNREMOVABLE_SETTINGS.contains(ignoredSetting)) {
                            throw new SnapshotRestoreException(snapshot, "cannot remove setting [" + ignoredSetting + "] on restore");
                        } else {
                            settingsMap.remove(ignoredSetting);
                        }
                    } else {
                        simpleMatchPatterns.add(ignoredSetting);
                    }
                }
                if (!simpleMatchPatterns.isEmpty()) {
                    String[] removePatterns = simpleMatchPatterns.toArray(new String[simpleMatchPatterns.size()]);
                    Iterator<Map.Entry<String, String>> iterator = settingsMap.entrySet().iterator();
                    while (iterator.hasNext()) {
                        Map.Entry<String, String> entry = iterator.next();
                        if (UNREMOVABLE_SETTINGS.contains(entry.getKey()) == false) {
                            if (Regex.simpleMatch(removePatterns, entry.getKey())) {
                                iterator.remove();
                            }
                        }
                    }
                }
                for (Map.Entry<String, String> entry : normalizedChangeSettings.getAsMap().entrySet()) {
                    if (UNMODIFIABLE_SETTINGS.contains(entry.getKey())) {
                        throw new SnapshotRestoreException(snapshot, "cannot modify setting [" + entry.getKey() + "] on restore");
                    } else {
                        settingsMap.put(entry.getKey(), entry.getValue());
                    }
                }
                return builder.settings(Settings.builder().put(settingsMap)).build();
            }

            private void restoreGlobalStateIfRequested(MetaData.Builder mdBuilder) {
                if (request.includeGlobalState()) {
                    if (metaData.persistentSettings() != null) {
                        Settings settings = metaData.persistentSettings();
                        clusterSettings.validateUpdate(settings);
                        mdBuilder.persistentSettings(settings);
                    }
                    if (metaData.templates() != null) {
                        // TODO: Should all existing templates be deleted first?
                        for (ObjectCursor<IndexTemplateMetaData> cursor : metaData.templates().values()) {
                            mdBuilder.put(cursor.value);
                        }
                    }
                    if (metaData.customs() != null) {
                        for (ObjectObjectCursor<String, MetaData.Custom> cursor : metaData.customs()) {
                            if (!RepositoriesMetaData.TYPE.equals(cursor.key)) {
                                // Don't restore repositories while we are working with them
                                // TODO: Should we restore them at the end?
                                mdBuilder.putCustom(cursor.key, cursor.value);
                            }
                        }
                    }
                }
            }

            @Override
            public void onFailure(String source, Exception e) {
                logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to restore snapshot", snapshotId), e);
                listener.onFailure(e);
            }

            @Override
            public TimeValue timeout() {
                return request.masterNodeTimeout();
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                listener.onResponse(new RestoreCompletionResponse(snapshot, restoreInfo));
            }
        });
    } catch (Exception e) {
        logger.warn((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to restore snapshot", request.repositoryName + ":" + request.snapshotName), e);
        listener.onFailure(e);
    }
}
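
The interesting part for ActionListener is how the listener gets completed: the snapshot is resolved and validated synchronously, but the real work is handed off to a ClusterStateUpdateTask, and the ActionListener<RestoreCompletionResponse> only fires from clusterStateProcessed on success or from onFailure if the task throws or cannot be applied. The sketch below isolates that bridge in a stripped-down form, assuming a ClusterService is at hand; the task body is a placeholder, not the real restore logic, and the class and method names are illustrative.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.service.ClusterService;

public class ListenerBridgeSketch {

    // Submits a cluster-state update and completes the listener exactly once:
    // from clusterStateProcessed on success, or from onFailure if the task fails.
    // This mirrors how restoreSnapshot reports back to its caller.
    static void updateAndNotify(ClusterService clusterService, ActionListener<ClusterState> listener) {
        clusterService.submitStateUpdateTask("example-update", new ClusterStateUpdateTask() {

            @Override
            public ClusterState execute(ClusterState currentState) {
                // Placeholder: a real task would build and return a modified state.
                return currentState;
            }

            @Override
            public void onFailure(String source, Exception e) {
                listener.onFailure(e);
            }

            @Override
            public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                listener.onResponse(newState);
            }
        });
    }
}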

Example 18 with ActionListener

Use of org.elasticsearch.action.ActionListener in the elasticsearch project by elastic, from the class RemoteClusterConnectionTests, method testCloseWhileConcurrentlyConnecting.

public void testCloseWhileConcurrentlyConnecting() throws IOException, InterruptedException, BrokenBarrierException {
    List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>();
    try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT);
        MockTransportService seedTransport1 = startTransport("seed_node_1", knownNodes, Version.CURRENT);
        MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) {
        DiscoveryNode seedNode = seedTransport.getLocalDiscoNode();
        DiscoveryNode seedNode1 = seedTransport1.getLocalDiscoNode();
        knownNodes.add(seedTransport.getLocalDiscoNode());
        knownNodes.add(discoverableTransport.getLocalDiscoNode());
        knownNodes.add(seedTransport1.getLocalDiscoNode());
        Collections.shuffle(knownNodes, random());
        List<DiscoveryNode> seedNodes = Arrays.asList(seedNode1, seedNode);
        Collections.shuffle(seedNodes, random());
        try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {
            service.start();
            service.acceptIncomingRequests();
            try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", seedNodes, service, Integer.MAX_VALUE, n -> true)) {
                int numThreads = randomIntBetween(4, 10);
                Thread[] threads = new Thread[numThreads];
                CyclicBarrier barrier = new CyclicBarrier(numThreads + 1);
                for (int i = 0; i < threads.length; i++) {
                    final int numConnectionAttempts = randomIntBetween(10, 100);
                    threads[i] = new Thread() {

                        @Override
                        public void run() {
                            try {
                                barrier.await();
                                CountDownLatch latch = new CountDownLatch(numConnectionAttempts);
                                for (int i = 0; i < numConnectionAttempts; i++) {
                                    AtomicReference<RuntimeException> executed = new AtomicReference<>();
                                    ActionListener<Void> listener = ActionListener.wrap(x -> {
                                        if (executed.compareAndSet(null, new RuntimeException())) {
                                            latch.countDown();
                                        } else {
                                            throw new AssertionError("shit's been called twice", executed.get());
                                        }
                                    }, x -> {
                                        if (executed.compareAndSet(null, new RuntimeException())) {
                                            latch.countDown();
                                        } else {
                                            throw new AssertionError("shit's been called twice", executed.get());
                                        }
                                        if (x instanceof RejectedExecutionException || x instanceof AlreadyClosedException || x instanceof CancellableThreads.ExecutionCancelledException) {
                                        } else {
                                            throw new AssertionError(x);
                                        }
                                    });
                                    connection.updateSeedNodes(seedNodes, listener);
                                }
                                latch.await();
                            } catch (Exception ex) {
                                throw new AssertionError(ex);
                            }
                        }
                    };
                    threads[i].start();
                }
                barrier.await();
                connection.close();
            }
        }
    }
}
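
Here ActionListener.wrap builds a listener from two lambdas, and the AtomicReference guard asserts that each listener is completed exactly once even while connection.close() races with updateSeedNodes; only rejection, already-closed, and cancellation errors are tolerated on the failure path. The same wrap factory is also handy outside tests for bridging callback-style code to a blocking caller. Below is a small sketch of that, relying only on the wrap signature shown above; the helper name and the Consumer-based parameter are illustrative choices.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Consumer;
import org.elasticsearch.action.ActionListener;

public class BlockingListenerSketch {

    // Builds a listener from two lambdas with ActionListener.wrap, exactly as the
    // test does, then blocks the calling thread until one of them has fired.
    static <T> T awaitResult(Consumer<ActionListener<T>> asyncCall) throws Exception {
        final CountDownLatch latch = new CountDownLatch(1);
        final AtomicReference<T> result = new AtomicReference<>();
        final AtomicReference<Exception> failure = new AtomicReference<>();
        ActionListener<T> listener = ActionListener.wrap(r -> {
            result.set(r);
            latch.countDown();
        }, e -> {
            failure.set(e);
            latch.countDown();
        });
        asyncCall.accept(listener);
        latch.await();
        if (failure.get() != null) {
            throw failure.get();
        }
        return result.get();
    }
}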

Example 19 with ActionListener

Use of org.elasticsearch.action.ActionListener in the elasticsearch project by elastic, from the class RemoteClusterConnectionTests, method testTriggerUpdatesConcurrently.

public void testTriggerUpdatesConcurrently() throws IOException, InterruptedException {
    List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>();
    try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT);
        MockTransportService seedTransport1 = startTransport("seed_node_1", knownNodes, Version.CURRENT);
        MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) {
        DiscoveryNode seedNode = seedTransport.getLocalDiscoNode();
        DiscoveryNode discoverableNode = discoverableTransport.getLocalDiscoNode();
        DiscoveryNode seedNode1 = seedTransport1.getLocalDiscoNode();
        knownNodes.add(seedTransport.getLocalDiscoNode());
        knownNodes.add(discoverableTransport.getLocalDiscoNode());
        knownNodes.add(seedTransport1.getLocalDiscoNode());
        Collections.shuffle(knownNodes, random());
        List<DiscoveryNode> seedNodes = Arrays.asList(seedNode1, seedNode);
        Collections.shuffle(seedNodes, random());
        try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {
            service.start();
            service.acceptIncomingRequests();
            try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", seedNodes, service, Integer.MAX_VALUE, n -> true)) {
                int numThreads = randomIntBetween(4, 10);
                Thread[] threads = new Thread[numThreads];
                CyclicBarrier barrier = new CyclicBarrier(numThreads);
                for (int i = 0; i < threads.length; i++) {
                    final int numConnectionAttempts = randomIntBetween(10, 200);
                    threads[i] = new Thread() {

                        @Override
                        public void run() {
                            try {
                                barrier.await();
                                CountDownLatch latch = new CountDownLatch(numConnectionAttempts);
                                for (int i = 0; i < numConnectionAttempts; i++) {
                                    AtomicBoolean executed = new AtomicBoolean(false);
                                    ActionListener<Void> listener = ActionListener.wrap(x -> {
                                        assertTrue(executed.compareAndSet(false, true));
                                        latch.countDown();
                                    }, x -> {
                                        assertTrue(executed.compareAndSet(false, true));
                                        latch.countDown();
                                        if (x instanceof RejectedExecutionException) {
                                        } else {
                                            throw new AssertionError(x);
                                        }
                                    });
                                    connection.updateSeedNodes(seedNodes, listener);
                                }
                                latch.await();
                            } catch (Exception ex) {
                                throw new AssertionError(ex);
                            }
                        }
                    };
                    threads[i].start();
                }
                for (int i = 0; i < threads.length; i++) {
                    threads[i].join();
                }
                assertTrue(service.nodeConnected(seedNode));
                assertTrue(service.nodeConnected(discoverableNode));
                assertTrue(service.nodeConnected(seedNode1));
                assertTrue(connection.assertNoRunningConnections());
            }
        }
    }
}
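
This variant uses an AtomicBoolean as the exactly-once guard on each wrapped listener, then joins all threads before asserting that every node ended up connected and no connection attempts are still running. The compareAndSet guard is a reusable idea: it can be lifted into a small decorator over any ActionListener. The sketch below is hypothetical (OnceListener is not an Elasticsearch class), built only on the onResponse/onFailure contract quoted above.

import java.util.concurrent.atomic.AtomicBoolean;
import org.elasticsearch.action.ActionListener;

public final class OnceListener<T> implements ActionListener<T> {

    // Decorates a delegate so that only the first completion is forwarded,
    // matching the compareAndSet(false, true) guard used in the test.
    private final ActionListener<T> delegate;
    private final AtomicBoolean notified = new AtomicBoolean(false);

    public OnceListener(ActionListener<T> delegate) {
        this.delegate = delegate;
    }

    @Override
    public void onResponse(T response) {
        if (notified.compareAndSet(false, true)) {
            delegate.onResponse(response);
        }
    }

    @Override
    public void onFailure(Exception e) {
        if (notified.compareAndSet(false, true)) {
            delegate.onFailure(e);
        }
    }
}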

Example 20 with ActionListener

Use of org.elasticsearch.action.ActionListener in the elasticsearch project by elastic, from the class DfsQueryPhaseTests, method testFailPhaseOnException.

public void testFailPhaseOnException() throws IOException {
    AtomicArray<DfsSearchResult> results = new AtomicArray<>(2);
    AtomicReference<AtomicArray<QuerySearchResultProvider>> responseRef = new AtomicReference<>();
    results.set(0, new DfsSearchResult(1, new SearchShardTarget("node1", new Index("test", "na"), 0)));
    results.set(1, new DfsSearchResult(2, new SearchShardTarget("node2", new Index("test", "na"), 0)));
    results.get(0).termsStatistics(new Term[0], new TermStatistics[0]);
    results.get(1).termsStatistics(new Term[0], new TermStatistics[0]);
    SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null);
    SearchTransportService searchTransportService = new SearchTransportService(Settings.builder().put("search.remote.connect", false).build(), null, null) {

        @Override
        public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task, ActionListener<QuerySearchResult> listener) {
            if (request.id() == 1) {
                QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0));
                queryResult.topDocs(new TopDocs(1, new ScoreDoc[] { new ScoreDoc(42, 1.0F) }, 2.0F), new DocValueFormat[0]);
                // the size of the result set
                queryResult.size(2);
                listener.onResponse(queryResult);
            } else if (request.id() == 2) {
                throw new UncheckedIOException(new MockDirectoryWrapper.FakeIOException());
            } else {
                fail("no such request ID: " + request.id());
            }
        }
    };
    MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2);
    mockSearchPhaseContext.searchTransport = searchTransportService;
    DfsQueryPhase phase = new DfsQueryPhase(results, controller, (response) -> new SearchPhase("test") {

        @Override
        public void run() throws IOException {
            responseRef.set(response.results);
        }
    }, mockSearchPhaseContext);
    assertEquals("dfs_query", phase.getName());
    expectThrows(UncheckedIOException.class, () -> phase.run());
    // phase execution will clean up the contexts
    assertTrue(mockSearchPhaseContext.releasedSearchContexts.isEmpty());
}
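
The test stubs SearchTransportService so that sendExecuteQuery never goes over the wire: request 1 completes the listener with a canned QuerySearchResult, request 2 throws, and the phase is expected to propagate the failure without leaking search contexts. The same listener-driven stubbing style works for any callback API. A stripped-down, self-contained sketch follows; FakeService and cannedService are illustrative names, not Elasticsearch types.

import org.elasticsearch.action.ActionListener;

public class ListenerStubSketch {

    // Minimal illustration of the stubbing style used above: instead of a real
    // transport call, the test double decides per request whether to complete the
    // listener with a canned response or to fail it.
    interface FakeService<Req, Resp> {
        void execute(Req request, ActionListener<Resp> listener);
    }

    static FakeService<Integer, String> cannedService() {
        return (id, listener) -> {
            if (id == 1) {
                listener.onResponse("ok");  // canned success
            } else {
                listener.onFailure(new IllegalStateException("no such request ID: " + id));
            }
        };
    }

    public static void main(String[] args) {
        cannedService().execute(1, ActionListener.wrap(
            r -> System.out.println("got " + r),
            e -> { throw new AssertionError(e); }));
    }
}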

Aggregations

ActionListener (org.elasticsearch.action.ActionListener): 57
IOException (java.io.IOException): 25
AtomicReference (java.util.concurrent.atomic.AtomicReference): 18
ThreadPool (org.elasticsearch.threadpool.ThreadPool): 18
ClusterState (org.elasticsearch.cluster.ClusterState): 17
Settings (org.elasticsearch.common.settings.Settings): 17
Index (org.elasticsearch.index.Index): 13
IndexNameExpressionResolver (org.elasticsearch.cluster.metadata.IndexNameExpressionResolver): 12
ShardId (org.elasticsearch.index.shard.ShardId): 12
TransportService (org.elasticsearch.transport.TransportService): 12
CountDownLatch (java.util.concurrent.CountDownLatch): 11
ArrayList (java.util.ArrayList): 10
TimeValue (org.elasticsearch.common.unit.TimeValue): 10
CrateUnitTest (io.crate.test.integration.CrateUnitTest): 9
List (java.util.List): 9
ActionFilters (org.elasticsearch.action.support.ActionFilters): 9
TransportException (org.elasticsearch.transport.TransportException): 9
DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode): 8
EsRejectedExecutionException (org.elasticsearch.common.util.concurrent.EsRejectedExecutionException): 8
ShardResponse (io.crate.executor.transport.ShardResponse): 7