
Example 66 with ActionListener

use of org.elasticsearch.action.ActionListener in project crate by crate.

In the class RemoteRecoveryTargetHandler, method writeFileChunk:

@Override
public void writeFileChunk(StoreFileMetadata fileMetadata, long position, BytesReference content, boolean lastChunk, int totalTranslogOps, ActionListener<Void> listener) {
    // Pause using the rate limiter, if desired, to throttle the recovery
    final long throttleTimeInNanos;
    // always fetch the ratelimiter - it might be updated in real-time on the recovery settings
    final RateLimiter rl = recoverySettings.rateLimiter();
    if (rl != null) {
        long bytes = bytesSinceLastPause.addAndGet(content.length());
        if (bytes > rl.getMinPauseCheckBytes()) {
            // Time to pause
            bytesSinceLastPause.addAndGet(-bytes);
            try {
                throttleTimeInNanos = rl.pause(bytes);
                onSourceThrottle.accept(throttleTimeInNanos);
            } catch (IOException e) {
                throw new ElasticsearchException("failed to pause recovery", e);
            }
        } else {
            throttleTimeInNanos = 0;
        }
    } else {
        throttleTimeInNanos = 0;
    }
    final String action = PeerRecoveryTargetService.Actions.FILE_CHUNK;
    /* We send estimateTotalOperations with every request since we collect stats on the target; that way we can
     * see how many translog ops accumulate while copying files across the network. A future optimization
     * would be to restart the file copy (with new deltas) if too many translog ops pile up.
     */
    final RecoveryFileChunkRequest request = new RecoveryFileChunkRequest(recoveryId, shardId, fileMetadata, position, content, lastChunk, totalTranslogOps, throttleTimeInNanos);
    final Writeable.Reader<TransportResponse.Empty> reader = in -> TransportResponse.Empty.INSTANCE;
    executeRetryableAction(action, request, fileChunkRequestOptions, ActionListener.map(listener, r -> null), reader);
}
Also used : ElasticsearchException(org.elasticsearch.ElasticsearchException) CancellableThreads(org.elasticsearch.common.util.CancellableThreads) ShardId(org.elasticsearch.index.shard.ShardId) TransportRequest(org.elasticsearch.transport.TransportRequest) ConcurrentCollections(org.elasticsearch.common.util.concurrent.ConcurrentCollections) StoreFileMetadata(org.elasticsearch.index.store.StoreFileMetadata) RetentionLeases(org.elasticsearch.index.seqno.RetentionLeases) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) ActionListenerResponseHandler(org.elasticsearch.action.ActionListenerResponseHandler) Store(org.elasticsearch.index.store.Store) Map(java.util.Map) ThreadPool(org.elasticsearch.threadpool.ThreadPool) TransportResponse(org.elasticsearch.transport.TransportResponse) TransportService(org.elasticsearch.transport.TransportService) IOException(java.io.IOException) BytesReference(org.elasticsearch.common.bytes.BytesReference) Consumer(java.util.function.Consumer) AtomicLong(java.util.concurrent.atomic.AtomicLong) RemoteTransportException(org.elasticsearch.transport.RemoteTransportException) List(java.util.List) Logger(org.apache.logging.log4j.Logger) CircuitBreakingException(org.elasticsearch.common.breaker.CircuitBreakingException) EsRejectedExecutionException(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException) TransportFuture(org.elasticsearch.transport.TransportFuture) TimeValue(io.crate.common.unit.TimeValue) Translog(org.elasticsearch.index.translog.Translog) EmptyTransportResponseHandler(org.elasticsearch.transport.EmptyTransportResponseHandler) RetryableAction(org.elasticsearch.action.support.RetryableAction) ReplicationTracker(org.elasticsearch.index.seqno.ReplicationTracker) TransportRequestOptions(org.elasticsearch.transport.TransportRequestOptions) SQLExceptions(io.crate.exceptions.SQLExceptions) Writeable(org.elasticsearch.common.io.stream.Writeable) LogManager(org.apache.logging.log4j.LogManager) RateLimiter(org.apache.lucene.store.RateLimiter) ActionListener(org.elasticsearch.action.ActionListener) IOException(java.io.IOException) ElasticsearchException(org.elasticsearch.ElasticsearchException) Writeable(org.elasticsearch.common.io.stream.Writeable) RateLimiter(org.apache.lucene.store.RateLimiter)
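
The ActionListener-specific detail here is the final line: the transport layer answers with TransportResponse.Empty, but callers of writeFileChunk only care about completion, so ActionListener.map adapts the response type down to Void. Below is a minimal, self-contained sketch of that adaptation, using the ActionListener.map helper from the snippet plus ActionListener.wrap (assumed available in this codebase); the EmptyAck type and the println handlers are illustrative, not part of the CrateDB source.

import org.elasticsearch.action.ActionListener;

public class ListenerMapSketch {

    // Stand-in for a transport-level acknowledgement such as TransportResponse.Empty.
    static final class EmptyAck {
        static final EmptyAck INSTANCE = new EmptyAck();
    }

    public static void main(String[] args) {
        // Caller-facing listener: only signals completion or failure, carries no payload.
        ActionListener<Void> callerListener = ActionListener.wrap(
                ignored -> System.out.println("file chunk acknowledged"),
                e -> System.err.println("file chunk failed: " + e));

        // Adapt it to the transport response type, discarding the payload
        // exactly as writeFileChunk does with ActionListener.map(listener, r -> null).
        ActionListener<EmptyAck> transportListener = ActionListener.map(callerListener, r -> null);

        // Simulate the transport layer completing the request.
        transportListener.onResponse(EmptyAck.INSTANCE);
    }
}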

Example 67 with ActionListener

use of org.elasticsearch.action.ActionListener in project elasticsearch by elastic.

In the class MetaDataCreateIndexService, method onlyCreateIndex:

private void onlyCreateIndex(final CreateIndexClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
    Settings.Builder updatedSettingsBuilder = Settings.builder();
    updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
    indexScopedSettings.validate(updatedSettingsBuilder);
    request.settings(updatedSettingsBuilder.build());
    clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]", new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, wrapPreservingContext(listener)) {

        @Override
        protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
            return new ClusterStateUpdateResponse(acknowledged);
        }

        @Override
        public ClusterState execute(ClusterState currentState) throws Exception {
            Index createdIndex = null;
            String removalExtraInfo = null;
            IndexRemovalReason removalReason = IndexRemovalReason.FAILURE;
            try {
                validate(request, currentState);
                for (Alias alias : request.aliases()) {
                    aliasValidator.validateAlias(alias, request.index(), currentState.metaData());
                }
                // we only find a template when it's an API call (a new index)
                // find templates, highest order are better matching
                List<IndexTemplateMetaData> templates = findTemplates(request, currentState);
                Map<String, Custom> customs = new HashMap<>();
                // add the request mapping
                Map<String, Map<String, Object>> mappings = new HashMap<>();
                Map<String, AliasMetaData> templatesAliases = new HashMap<>();
                List<String> templateNames = new ArrayList<>();
                for (Map.Entry<String, String> entry : request.mappings().entrySet()) {
                    mappings.put(entry.getKey(), MapperService.parseMapping(xContentRegistry, entry.getValue()));
                }
                for (Map.Entry<String, Custom> entry : request.customs().entrySet()) {
                    customs.put(entry.getKey(), entry.getValue());
                }
                // apply templates, merging the mappings into the request mapping if exists
                for (IndexTemplateMetaData template : templates) {
                    templateNames.add(template.getName());
                    for (ObjectObjectCursor<String, CompressedXContent> cursor : template.mappings()) {
                        String mappingString = cursor.value.string();
                        if (mappings.containsKey(cursor.key)) {
                            XContentHelper.mergeDefaults(mappings.get(cursor.key), MapperService.parseMapping(xContentRegistry, mappingString));
                        } else {
                            mappings.put(cursor.key, MapperService.parseMapping(xContentRegistry, mappingString));
                        }
                    }
                    // handle custom
                    for (ObjectObjectCursor<String, Custom> cursor : template.customs()) {
                        String type = cursor.key;
                        IndexMetaData.Custom custom = cursor.value;
                        IndexMetaData.Custom existing = customs.get(type);
                        if (existing == null) {
                            customs.put(type, custom);
                        } else {
                            IndexMetaData.Custom merged = existing.mergeWith(custom);
                            customs.put(type, merged);
                        }
                    }
                    //handle aliases
                    for (ObjectObjectCursor<String, AliasMetaData> cursor : template.aliases()) {
                        AliasMetaData aliasMetaData = cursor.value;
                        // if an alias with the same name came with the create index request, ignore this one taken from the index template
                        if (request.aliases().contains(new Alias(aliasMetaData.alias()))) {
                            continue;
                        }
                        //if an alias with same name was already processed, ignore this one
                        if (templatesAliases.containsKey(cursor.key)) {
                            continue;
                        }
                        //Allow templatesAliases to be templated by replacing a token with the name of the index that we are applying it to
                        if (aliasMetaData.alias().contains("{index}")) {
                            String templatedAlias = aliasMetaData.alias().replace("{index}", request.index());
                            aliasMetaData = AliasMetaData.newAliasMetaData(aliasMetaData, templatedAlias);
                        }
                        aliasValidator.validateAliasMetaData(aliasMetaData, request.index(), currentState.metaData());
                        templatesAliases.put(aliasMetaData.alias(), aliasMetaData);
                    }
                }
                Settings.Builder indexSettingsBuilder = Settings.builder();
                // apply templates, here, in reverse order, since first ones are better matching
                for (int i = templates.size() - 1; i >= 0; i--) {
                    indexSettingsBuilder.put(templates.get(i).settings());
                }
                // now, put the request settings, so they override templates
                indexSettingsBuilder.put(request.settings());
                if (indexSettingsBuilder.get(SETTING_NUMBER_OF_SHARDS) == null) {
                    indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 5));
                }
                if (indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) {
                    indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1));
                }
                if (settings.get(SETTING_AUTO_EXPAND_REPLICAS) != null && indexSettingsBuilder.get(SETTING_AUTO_EXPAND_REPLICAS) == null) {
                    indexSettingsBuilder.put(SETTING_AUTO_EXPAND_REPLICAS, settings.get(SETTING_AUTO_EXPAND_REPLICAS));
                }
                if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) {
                    DiscoveryNodes nodes = currentState.nodes();
                    final Version createdVersion = Version.min(Version.CURRENT, nodes.getSmallestNonClientNodeVersion());
                    indexSettingsBuilder.put(SETTING_VERSION_CREATED, createdVersion);
                }
                if (indexSettingsBuilder.get(SETTING_CREATION_DATE) == null) {
                    indexSettingsBuilder.put(SETTING_CREATION_DATE, new DateTime(DateTimeZone.UTC).getMillis());
                }
                indexSettingsBuilder.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, request.getProvidedName());
                indexSettingsBuilder.put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID());
                final Index shrinkFromIndex = request.shrinkFrom();
                int routingNumShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(indexSettingsBuilder.build());
                if (shrinkFromIndex != null) {
                    prepareShrinkIndexSettings(currentState, mappings.keySet(), indexSettingsBuilder, shrinkFromIndex, request.index());
                    IndexMetaData sourceMetaData = currentState.metaData().getIndexSafe(shrinkFromIndex);
                    routingNumShards = sourceMetaData.getRoutingNumShards();
                }
                Settings actualIndexSettings = indexSettingsBuilder.build();
                IndexMetaData.Builder tmpImdBuilder = IndexMetaData.builder(request.index()).setRoutingNumShards(routingNumShards);
                // Set up everything, now locally create the index to see that things are ok, and apply
                final IndexMetaData tmpImd = tmpImdBuilder.settings(actualIndexSettings).build();
                ActiveShardCount waitForActiveShards = request.waitForActiveShards();
                if (waitForActiveShards == ActiveShardCount.DEFAULT) {
                    waitForActiveShards = tmpImd.getWaitForActiveShards();
                }
                if (waitForActiveShards.validate(tmpImd.getNumberOfReplicas()) == false) {
                    throw new IllegalArgumentException("invalid wait_for_active_shards[" + request.waitForActiveShards() + "]: cannot be greater than number of shard copies [" + (tmpImd.getNumberOfReplicas() + 1) + "]");
                }
                // create the index here (on the master) to validate it can be created, as well as adding the mapping
                final IndexService indexService = indicesService.createIndex(tmpImd, Collections.emptyList(), shardId -> {
                });
                createdIndex = indexService.index();
                // now add the mappings
                MapperService mapperService = indexService.mapperService();
                try {
                    mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, request.updateAllTypes());
                } catch (Exception e) {
                    removalExtraInfo = "failed on parsing default mapping/mappings on index creation";
                    throw e;
                }
                // the context is only used for validation so it's fine to pass fake values for the shard id and the current
                // timestamp
                final QueryShardContext queryShardContext = indexService.newQueryShardContext(0, null, () -> 0L);
                for (Alias alias : request.aliases()) {
                    if (Strings.hasLength(alias.filter())) {
                        aliasValidator.validateAliasFilter(alias.name(), alias.filter(), queryShardContext, xContentRegistry);
                    }
                }
                for (AliasMetaData aliasMetaData : templatesAliases.values()) {
                    if (aliasMetaData.filter() != null) {
                        aliasValidator.validateAliasFilter(aliasMetaData.alias(), aliasMetaData.filter().uncompressed(), queryShardContext, xContentRegistry);
                    }
                }
                // now, update the mappings with the actual source
                Map<String, MappingMetaData> mappingsMetaData = new HashMap<>();
                for (DocumentMapper mapper : mapperService.docMappers(true)) {
                    MappingMetaData mappingMd = new MappingMetaData(mapper);
                    mappingsMetaData.put(mapper.type(), mappingMd);
                }
                final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(request.index()).settings(actualIndexSettings).setRoutingNumShards(routingNumShards);
                for (MappingMetaData mappingMd : mappingsMetaData.values()) {
                    indexMetaDataBuilder.putMapping(mappingMd);
                }
                for (AliasMetaData aliasMetaData : templatesAliases.values()) {
                    indexMetaDataBuilder.putAlias(aliasMetaData);
                }
                for (Alias alias : request.aliases()) {
                    AliasMetaData aliasMetaData = AliasMetaData.builder(alias.name()).filter(alias.filter()).indexRouting(alias.indexRouting()).searchRouting(alias.searchRouting()).build();
                    indexMetaDataBuilder.putAlias(aliasMetaData);
                }
                for (Map.Entry<String, Custom> customEntry : customs.entrySet()) {
                    indexMetaDataBuilder.putCustom(customEntry.getKey(), customEntry.getValue());
                }
                indexMetaDataBuilder.state(request.state());
                final IndexMetaData indexMetaData;
                try {
                    indexMetaData = indexMetaDataBuilder.build();
                } catch (Exception e) {
                    removalExtraInfo = "failed to build index metadata";
                    throw e;
                }
                indexService.getIndexEventListener().beforeIndexAddedToCluster(indexMetaData.getIndex(), indexMetaData.getSettings());
                MetaData newMetaData = MetaData.builder(currentState.metaData()).put(indexMetaData, false).build();
                String maybeShadowIndicator = indexMetaData.isIndexUsingShadowReplicas() ? "s" : "";
                logger.info("[{}] creating index, cause [{}], templates {}, shards [{}]/[{}{}], mappings {}", request.index(), request.cause(), templateNames, indexMetaData.getNumberOfShards(), indexMetaData.getNumberOfReplicas(), maybeShadowIndicator, mappings.keySet());
                ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
                if (!request.blocks().isEmpty()) {
                    for (ClusterBlock block : request.blocks()) {
                        blocks.addIndexBlock(request.index(), block);
                    }
                }
                blocks.updateBlocks(indexMetaData);
                ClusterState updatedState = ClusterState.builder(currentState).blocks(blocks).metaData(newMetaData).build();
                if (request.state() == State.OPEN) {
                    RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable()).addAsNew(updatedState.metaData().index(request.index()));
                    updatedState = allocationService.reroute(ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(), "index [" + request.index() + "] created");
                }
                removalExtraInfo = "cleaning up after validating index on master";
                removalReason = IndexRemovalReason.NO_LONGER_ASSIGNED;
                return updatedState;
            } finally {
                if (createdIndex != null) {
                    // Index was already partially created - need to clean up
                    indicesService.removeIndex(createdIndex, removalReason, removalExtraInfo);
                }
            }
        }

        @Override
        public void onFailure(String source, Exception e) {
            if (e instanceof ResourceAlreadyExistsException) {
                logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
            } else {
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
            }
            super.onFailure(source, e);
        }
    });
}
Also used : ElasticsearchException(org.elasticsearch.ElasticsearchException) SETTING_INDEX_UUID(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_INDEX_UUID) DateTimeZone(org.joda.time.DateTimeZone) QueryShardContext(org.elasticsearch.index.query.QueryShardContext) Alias(org.elasticsearch.action.admin.indices.alias.Alias) Environment(org.elasticsearch.env.Environment) BiFunction(java.util.function.BiFunction) AllocationService(org.elasticsearch.cluster.routing.allocation.AllocationService) ClusterBlocks(org.elasticsearch.cluster.block.ClusterBlocks) ObjectObjectCursor(com.carrotsearch.hppc.cursors.ObjectObjectCursor) ClusterState(org.elasticsearch.cluster.ClusterState) Settings(org.elasticsearch.common.settings.Settings) CreateIndexClusterStateUpdateResponse(org.elasticsearch.cluster.ack.CreateIndexClusterStateUpdateResponse) ClusterBlock(org.elasticsearch.cluster.block.ClusterBlock) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) CompressedXContent(org.elasticsearch.common.compress.CompressedXContent) IndexNotFoundException(org.elasticsearch.index.IndexNotFoundException) IndexCreationException(org.elasticsearch.indices.IndexCreationException) Locale(java.util.Locale) Map(java.util.Map) ValidationException(org.elasticsearch.common.ValidationException) ThreadPool(org.elasticsearch.threadpool.ThreadPool) State(org.elasticsearch.cluster.metadata.IndexMetaData.State) Path(java.nio.file.Path) NamedXContentRegistry(org.elasticsearch.common.xcontent.NamedXContentRegistry) CreateIndexClusterStateUpdateRequest(org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest) Priority(org.elasticsearch.common.Priority) Predicate(java.util.function.Predicate) UUIDs(org.elasticsearch.common.UUIDs) Set(java.util.Set) ObjectCursor(com.carrotsearch.hppc.cursors.ObjectCursor) ActiveShardCount(org.elasticsearch.action.support.ActiveShardCount) ContextPreservingActionListener(org.elasticsearch.action.support.ContextPreservingActionListener) MapperService(org.elasticsearch.index.mapper.MapperService) List(java.util.List) Version(org.elasticsearch.Version) IndexRoutingTable(org.elasticsearch.cluster.routing.IndexRoutingTable) Supplier(org.apache.logging.log4j.util.Supplier) ClusterStateUpdateResponse(org.elasticsearch.cluster.ack.ClusterStateUpdateResponse) InvalidIndexNameException(org.elasticsearch.indices.InvalidIndexNameException) UnsupportedEncodingException(java.io.UnsupportedEncodingException) ShardRouting(org.elasticsearch.cluster.routing.ShardRouting) AckedClusterStateUpdateTask(org.elasticsearch.cluster.AckedClusterStateUpdateTask) ClusterService(org.elasticsearch.cluster.service.ClusterService) HashMap(java.util.HashMap) Index(org.elasticsearch.index.Index) ShardRoutingState(org.elasticsearch.cluster.routing.ShardRoutingState) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) ResourceAlreadyExistsException(org.elasticsearch.ResourceAlreadyExistsException) ActiveShardsObserver(org.elasticsearch.action.support.ActiveShardsObserver) Strings(org.elasticsearch.common.Strings) Inject(org.elasticsearch.common.inject.Inject) ArrayList(java.util.ArrayList) SETTING_NUMBER_OF_REPLICAS(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS) XContentHelper(org.elasticsearch.common.xcontent.XContentHelper) IndexRemovalReason(org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason) Custom(org.elasticsearch.cluster.metadata.IndexMetaData.Custom) Regex(org.elasticsearch.common.regex.Regex) 
SETTING_VERSION_CREATED(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED) IndicesService(org.elasticsearch.indices.IndicesService) SETTING_AUTO_EXPAND_REPLICAS(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS) ClusterBlockLevel(org.elasticsearch.cluster.block.ClusterBlockLevel) SETTING_CREATION_DATE(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE) PathUtils(org.elasticsearch.common.io.PathUtils) DocumentMapper(org.elasticsearch.index.mapper.DocumentMapper) DiscoveryNodes(org.elasticsearch.cluster.node.DiscoveryNodes) AbstractComponent(org.elasticsearch.common.component.AbstractComponent) SETTING_NUMBER_OF_SHARDS(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS) IndexService(org.elasticsearch.index.IndexService) DateTime(org.joda.time.DateTime) IOException(java.io.IOException) IndexScopedSettings(org.elasticsearch.common.settings.IndexScopedSettings) CollectionUtil(org.apache.lucene.util.CollectionUtil) RoutingTable(org.elasticsearch.cluster.routing.RoutingTable) MergeReason(org.elasticsearch.index.mapper.MapperService.MergeReason) Comparator(java.util.Comparator) Collections(java.util.Collections) ActionListener(org.elasticsearch.action.ActionListener) IndexService(org.elasticsearch.index.IndexService) Index(org.elasticsearch.index.Index) DateTime(org.joda.time.DateTime) ClusterBlock(org.elasticsearch.cluster.block.ClusterBlock) Version(org.elasticsearch.Version) QueryShardContext(org.elasticsearch.index.query.QueryShardContext) List(java.util.List) ArrayList(java.util.ArrayList) Supplier(org.apache.logging.log4j.util.Supplier) CreateIndexClusterStateUpdateResponse(org.elasticsearch.cluster.ack.CreateIndexClusterStateUpdateResponse) ClusterStateUpdateResponse(org.elasticsearch.cluster.ack.ClusterStateUpdateResponse) Settings(org.elasticsearch.common.settings.Settings) IndexScopedSettings(org.elasticsearch.common.settings.IndexScopedSettings) DiscoveryNodes(org.elasticsearch.cluster.node.DiscoveryNodes) ClusterState(org.elasticsearch.cluster.ClusterState) DocumentMapper(org.elasticsearch.index.mapper.DocumentMapper) ResourceAlreadyExistsException(org.elasticsearch.ResourceAlreadyExistsException) Custom(org.elasticsearch.cluster.metadata.IndexMetaData.Custom) ActiveShardCount(org.elasticsearch.action.support.ActiveShardCount) ElasticsearchException(org.elasticsearch.ElasticsearchException) IndexNotFoundException(org.elasticsearch.index.IndexNotFoundException) IndexCreationException(org.elasticsearch.indices.IndexCreationException) ValidationException(org.elasticsearch.common.ValidationException) InvalidIndexNameException(org.elasticsearch.indices.InvalidIndexNameException) UnsupportedEncodingException(java.io.UnsupportedEncodingException) ResourceAlreadyExistsException(org.elasticsearch.ResourceAlreadyExistsException) IOException(java.io.IOException) IndexRemovalReason(org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason) Alias(org.elasticsearch.action.admin.indices.alias.Alias) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) ObjectObjectCursor(com.carrotsearch.hppc.cursors.ObjectObjectCursor) Map(java.util.Map) HashMap(java.util.HashMap) MapperService(org.elasticsearch.index.mapper.MapperService)
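
onlyCreateIndex never invokes the listener itself: it hands a context-preserving wrapper of it to the AckedClusterStateUpdateTask, which turns the acknowledgement flag into a ClusterStateUpdateResponse via newResponse and then completes the listener. A minimal sketch of what a caller-side listener for this method could look like (the class name and log messages are illustrative, not from the Elasticsearch source):

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;

final class CreateIndexListenerSketch {

    // The acked cluster-state update task above eventually invokes exactly one of these callbacks.
    static ActionListener<ClusterStateUpdateResponse> createIndexListener(String indexName) {
        return ActionListener.wrap(
                response -> {
                    if (response.isAcknowledged()) {
                        System.out.println("create-index [" + indexName + "] acknowledged by the cluster");
                    } else {
                        System.out.println("create-index [" + indexName + "] applied, but not acknowledged within the ack timeout");
                    }
                },
                e -> System.err.println("create-index [" + indexName + "] failed: " + e));
    }
}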

Example 68 with ActionListener

use of org.elasticsearch.action.ActionListener in project elasticsearch by elastic.

In the class RemoteClusterService, method updateRemoteClusters:

/**
     * This method updates the list of remote clusters. It's intended to be used as an update consumer on the settings infrastructure.
     * @param seeds a cluster alias to discovery node mapping representing the remote clusters' seed nodes
     * @param connectionListener a listener invoked once every configured cluster has been connected to
     */
private synchronized void updateRemoteClusters(Map<String, List<DiscoveryNode>> seeds, ActionListener<Void> connectionListener) {
    if (seeds.containsKey(LOCAL_CLUSTER_GROUP_KEY)) {
        throw new IllegalArgumentException("remote clusters must not have the empty string as its key");
    }
    Map<String, RemoteClusterConnection> remoteClusters = new HashMap<>();
    if (seeds.isEmpty()) {
        connectionListener.onResponse(null);
    } else {
        CountDown countDown = new CountDown(seeds.size());
        Predicate<DiscoveryNode> nodePredicate = (node) -> Version.CURRENT.isCompatible(node.getVersion());
        if (REMOTE_NODE_ATTRIBUTE.exists(settings)) {
            // nodes can be tagged with node.attr.remote_gateway: true to allow a node to be a gateway node for
            // cross cluster search
            String attribute = REMOTE_NODE_ATTRIBUTE.get(settings);
            // parse the attribute value itself; Boolean.getBoolean would consult a system property instead
            nodePredicate = nodePredicate.and((node) -> Boolean.parseBoolean(node.getAttributes().getOrDefault(attribute, "false")));
        }
        remoteClusters.putAll(this.remoteClusters);
        for (Map.Entry<String, List<DiscoveryNode>> entry : seeds.entrySet()) {
            RemoteClusterConnection remote = this.remoteClusters.get(entry.getKey());
            if (entry.getValue().isEmpty()) {
                // with no seed nodes we just remove the connection
                try {
                    IOUtils.close(remote);
                } catch (IOException e) {
                    logger.warn("failed to close remote cluster connections for cluster: " + entry.getKey(), e);
                }
                remoteClusters.remove(entry.getKey());
                continue;
            }
            if (remote == null) {
                // this is a new cluster we have to add a new representation
                remote = new RemoteClusterConnection(settings, entry.getKey(), entry.getValue(), transportService, numRemoteConnections, nodePredicate);
                remoteClusters.put(entry.getKey(), remote);
            }
            // now update the seed nodes, whether the connection is new or already existing
            RemoteClusterConnection finalRemote = remote;
            remote.updateSeedNodes(entry.getValue(), ActionListener.wrap(response -> {
                if (countDown.countDown()) {
                    connectionListener.onResponse(response);
                }
            }, exception -> {
                if (countDown.fastForward()) {
                    connectionListener.onFailure(exception);
                }
                if (finalRemote.isClosed() == false) {
                    logger.warn("failed to update seed list for cluster: " + entry.getKey(), exception);
                }
            }));
        }
    }
    this.remoteClusters = Collections.unmodifiableMap(remoteClusters);
}
Also used : ShardId(org.elasticsearch.index.shard.ShardId) Arrays(java.util.Arrays) ShardIterator(org.elasticsearch.cluster.routing.ShardIterator) TimeoutException(java.util.concurrent.TimeoutException) PlainShardIterator(org.elasticsearch.cluster.routing.PlainShardIterator) HashMap(java.util.HashMap) Index(org.elasticsearch.index.Index) AtomicReference(java.util.concurrent.atomic.AtomicReference) Function(java.util.function.Function) Strings(org.elasticsearch.common.Strings) ArrayList(java.util.ArrayList) InetAddress(java.net.InetAddress) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) Settings(org.elasticsearch.common.settings.Settings) TimeValue(org.elasticsearch.common.unit.TimeValue) Map(java.util.Map) CountDown(org.elasticsearch.common.util.concurrent.CountDown) TransportService(org.elasticsearch.transport.TransportService) ClusterSearchShardsGroup(org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup) Transport(org.elasticsearch.transport.Transport) AbstractComponent(org.elasticsearch.common.component.AbstractComponent) Setting(org.elasticsearch.common.settings.Setting) Predicate(java.util.function.Predicate) PlainActionFuture(org.elasticsearch.action.support.PlainActionFuture) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) IOUtils(org.apache.lucene.util.IOUtils) IOException(java.io.IOException) InetSocketAddress(java.net.InetSocketAddress) UnknownHostException(java.net.UnknownHostException) Collectors(java.util.stream.Collectors) ClusterSearchShardsResponse(org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse) TimeUnit(java.util.concurrent.TimeUnit) AliasFilter(org.elasticsearch.search.internal.AliasFilter) List(java.util.List) Version(org.elasticsearch.Version) Stream(java.util.stream.Stream) TransportAddress(org.elasticsearch.common.transport.TransportAddress) Supplier(org.apache.logging.log4j.util.Supplier) Closeable(java.io.Closeable) TransportException(org.elasticsearch.transport.TransportException) Collections(java.util.Collections) ActionListener(org.elasticsearch.action.ActionListener) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) HashMap(java.util.HashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) IOException(java.io.IOException) CountDown(org.elasticsearch.common.util.concurrent.CountDown) ArrayList(java.util.ArrayList) List(java.util.List) HashMap(java.util.HashMap) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap)
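
The listener handling worth noting is the fan-in at the end of the loop: a shared CountDown sized to the number of seed entries, combined with ActionListener.wrap, so the single connectionListener fires once after the last successful update, or immediately (via fastForward) on the first failure. Below is a stripped-down sketch of that pattern, with each Runnable standing in for the asynchronous updateSeedNodes call; the FanInSketch class and runAll method are illustrative names, not Elasticsearch APIs.

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.common.util.concurrent.CountDown;

import java.util.List;

final class FanInSketch {

    // Complete 'done' once every task has succeeded, or on the first failure.
    static void runAll(List<Runnable> tasks, ActionListener<Void> done) {
        if (tasks.isEmpty()) {
            done.onResponse(null);                  // nothing to wait for, mirroring the seeds.isEmpty() branch
            return;
        }
        final CountDown countDown = new CountDown(tasks.size());
        for (Runnable task : tasks) {
            ActionListener<Void> perTask = ActionListener.wrap(
                    r -> {
                        if (countDown.countDown()) {    // true only for the last outstanding task
                            done.onResponse(null);
                        }
                    },
                    e -> {
                        if (countDown.fastForward()) {  // true only for the first failure
                            done.onFailure(e);
                        }
                    });
            try {
                task.run();                         // stand-in for the asynchronous call
                perTask.onResponse(null);
            } catch (Exception e) {
                perTask.onFailure(e);
            }
        }
    }
}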

Example 69 with ActionListener

use of org.elasticsearch.action.ActionListener in project elasticsearch by elastic.

In the class ClientScrollableHitSource, method searchWithRetry:

/**
     * Run a search action and call onResponse when the response comes in, retrying if the action fails with an exception caused by
     * rejected execution.
     *
     * @param action consumes a listener and starts the action. The listener it consumes is rigged to retry on failure.
     * @param onResponse consumes the response from the action
     */
private void searchWithRetry(Consumer<ActionListener<SearchResponse>> action, Consumer<SearchResponse> onResponse) {
    /*
     * RetryHelper is both an AbstractRunnable and an ActionListener<SearchResponse>, meaning that it both starts the search and
     * reacts to the results. The complexity is all in onFailure, which either hands the failure to the "fail" listener or
     * retries the search. Since both AbstractRunnable and ActionListener define onFailure, it is called both when the action
     * fails to run (before starting or while running) and when the response from the action reports a failure.
     */
    class RetryHelper extends AbstractRunnable implements ActionListener<SearchResponse> {

        private final Iterator<TimeValue> retries = backoffPolicy.iterator();

        /**
             * The runnable to run that retries in the same context as the original call.
             */
        private Runnable retryWithContext;

        private volatile int retryCount = 0;

        @Override
        protected void doRun() throws Exception {
            action.accept(this);
        }

        @Override
        public void onResponse(SearchResponse response) {
            onResponse.accept(response);
        }

        @Override
        public void onFailure(Exception e) {
            if (ExceptionsHelper.unwrap(e, EsRejectedExecutionException.class) != null) {
                if (retries.hasNext()) {
                    retryCount += 1;
                    TimeValue delay = retries.next();
                    logger.trace((Supplier<?>) () -> new ParameterizedMessage("retrying rejected search after [{}]", delay), e);
                    countSearchRetry.run();
                    threadPool.schedule(delay, ThreadPool.Names.SAME, retryWithContext);
                } else {
                    logger.warn((Supplier<?>) () -> new ParameterizedMessage("giving up on search because we retried [{}] times without success", retryCount), e);
                    fail.accept(e);
                }
            } else {
                logger.warn("giving up on search because it failed with a non-retryable exception", e);
                fail.accept(e);
            }
        }
    }
    RetryHelper helper = new RetryHelper();
    // Wrap the helper in a runnable that preserves the current context so we keep it on retry.
    helper.retryWithContext = threadPool.getThreadContext().preserveContext(helper);
    helper.run();
}
Also used : AbstractRunnable(org.elasticsearch.common.util.concurrent.AbstractRunnable) ActionListener(org.elasticsearch.action.ActionListener) AbstractRunnable(org.elasticsearch.common.util.concurrent.AbstractRunnable) Iterator(java.util.Iterator) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) EsRejectedExecutionException(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException) TimeValue(org.elasticsearch.common.unit.TimeValue) SearchResponse(org.elasticsearch.action.search.SearchResponse) EsRejectedExecutionException(org.elasticsearch.common.util.concurrent.EsRejectedExecutionException)
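
For orientation, here is a hedged usage sketch of searchWithRetry as it might be called from elsewhere in the same class, assuming a client field of type Client and a prepared SearchRequest; the startSearch and onScrollResponse names are illustrative, not from the Elasticsearch source.

// Fragment intended to live in the same class as searchWithRetry.
private void startSearch(SearchRequest searchRequest) {
    // The listener handed to the consumer is the RetryHelper itself, so a rejected
    // execution on the search thread pool is retried according to backoffPolicy.
    searchWithRetry(listener -> client.search(searchRequest, listener),
                    response -> onScrollResponse(response));
}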

Example 70 with ActionListener

use of org.elasticsearch.action.ActionListener in project elasticsearch by elastic.

In the class TransportBroadcastReplicationAction, method doExecute:

@Override
protected void doExecute(Task task, Request request, ActionListener<Response> listener) {
    final ClusterState clusterState = clusterService.state();
    List<ShardId> shards = shards(request, clusterState);
    final CopyOnWriteArrayList<ShardResponse> shardsResponses = new CopyOnWriteArrayList<>();
    if (shards.size() == 0) {
        finishAndNotifyListener(listener, shardsResponses);
    }
    final CountDown responsesCountDown = new CountDown(shards.size());
    for (final ShardId shardId : shards) {
        ActionListener<ShardResponse> shardActionListener = new ActionListener<ShardResponse>() {

            @Override
            public void onResponse(ShardResponse shardResponse) {
                shardsResponses.add(shardResponse);
                logger.trace("{}: got response from {}", actionName, shardId);
                if (responsesCountDown.countDown()) {
                    finishAndNotifyListener(listener, shardsResponses);
                }
            }

            @Override
            public void onFailure(Exception e) {
                logger.trace("{}: got failure from {}", actionName, shardId);
                int totalNumCopies = clusterState.getMetaData().getIndexSafe(shardId.getIndex()).getNumberOfReplicas() + 1;
                ShardResponse shardResponse = newShardResponse();
                ReplicationResponse.ShardInfo.Failure[] failures;
                if (TransportActions.isShardNotAvailableException(e)) {
                    failures = new ReplicationResponse.ShardInfo.Failure[0];
                } else {
                    ReplicationResponse.ShardInfo.Failure failure = new ReplicationResponse.ShardInfo.Failure(shardId, null, e, ExceptionsHelper.status(e), true);
                    failures = new ReplicationResponse.ShardInfo.Failure[totalNumCopies];
                    Arrays.fill(failures, failure);
                }
                shardResponse.setShardInfo(new ReplicationResponse.ShardInfo(totalNumCopies, 0, failures));
                shardsResponses.add(shardResponse);
                if (responsesCountDown.countDown()) {
                    finishAndNotifyListener(listener, shardsResponses);
                }
            }
        };
        shardExecute(task, request, shardId, shardActionListener);
    }
}
Also used : ClusterState(org.elasticsearch.cluster.ClusterState) CountDown(org.elasticsearch.common.util.concurrent.CountDown) BroadcastShardOperationFailedException(org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException) ShardOperationFailedException(org.elasticsearch.action.ShardOperationFailedException) DefaultShardOperationFailedException(org.elasticsearch.action.support.DefaultShardOperationFailedException) ShardId(org.elasticsearch.index.shard.ShardId) ActionListener(org.elasticsearch.action.ActionListener) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList)
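
The onFailure branch converts a single shard-level exception into a ShardInfo whose failure entry is replicated across every copy of the shard, unless the exception merely means the shard was unavailable. The same aggregation step, extracted into a standalone sketch for clarity (the ShardFailureSketch class and method names are illustrative; the constructors used are the ones appearing in the snippet above):

import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.action.support.replication.ReplicationResponse;
import org.elasticsearch.index.shard.ShardId;

import java.util.Arrays;

final class ShardFailureSketch {

    // Rebuild the ShardInfo exactly as the onFailure callback above does: a shard-not-available
    // exception counts as zero failures, anything else is replicated across every shard copy.
    static ReplicationResponse.ShardInfo shardInfoFor(ShardId shardId, Exception e, int totalNumCopies) {
        final ReplicationResponse.ShardInfo.Failure[] failures;
        if (TransportActions.isShardNotAvailableException(e)) {
            failures = new ReplicationResponse.ShardInfo.Failure[0];
        } else {
            ReplicationResponse.ShardInfo.Failure failure =
                    new ReplicationResponse.ShardInfo.Failure(shardId, null, e, ExceptionsHelper.status(e), true);
            failures = new ReplicationResponse.ShardInfo.Failure[totalNumCopies];
            Arrays.fill(failures, failure);
        }
        return new ReplicationResponse.ShardInfo(totalNumCopies, 0, failures);
    }
}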

Aggregations

ActionListener (org.elasticsearch.action.ActionListener): 148
IOException (java.io.IOException): 75
List (java.util.List): 58
ThreadPool (org.elasticsearch.threadpool.ThreadPool): 53
ClusterState (org.elasticsearch.cluster.ClusterState): 50
ArrayList (java.util.ArrayList): 46
Settings (org.elasticsearch.common.settings.Settings): 45
Map (java.util.Map): 42
DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode): 41
ShardId (org.elasticsearch.index.shard.ShardId): 40
Collections (java.util.Collections): 39
Set (java.util.Set): 39
ClusterService (org.elasticsearch.cluster.service.ClusterService): 35
Logger (org.apache.logging.log4j.Logger): 34
HashMap (java.util.HashMap): 32
TransportService (org.elasticsearch.transport.TransportService): 32
ElasticsearchException (org.elasticsearch.ElasticsearchException): 31
Collectors (java.util.stream.Collectors): 30
Index (org.elasticsearch.index.Index): 30
Test (org.junit.Test): 30