Example 71 with DiscoveryNodes

use of org.elasticsearch.cluster.node.DiscoveryNodes in project crate by crate.

In the class AsyncShardFetchTests, the method testFullCircleSingleNodeFailure:

public void testFullCircleSingleNodeFailure() throws Exception {
    DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).build();
    // add a failed response for node1
    test.addSimulation(node1.getId(), failure1);
    // first fetch, no data, still ongoing
    AsyncShardFetch.FetchResult<Response> fetchData = test.fetchData(nodes, emptySet());
    assertThat(fetchData.hasData(), equalTo(false));
    assertThat(test.reroute.get(), equalTo(0));
    // fire a response, wait on reroute incrementing
    test.fireSimulationAndWait(node1.getId());
    // failure, fetched data exists, but has no data
    assertThat(test.reroute.get(), equalTo(1));
    fetchData = test.fetchData(nodes, emptySet());
    assertThat(fetchData.hasData(), equalTo(true));
    assertThat(fetchData.getData().size(), equalTo(0));
    // on failure, we reset the failure on a successive call to fetchData, and try again afterwards
    test.addSimulation(node1.getId(), response1);
    fetchData = test.fetchData(nodes, emptySet());
    assertThat(fetchData.hasData(), equalTo(false));
    test.fireSimulationAndWait(node1.getId());
    // 2 reroutes, cause we have a failure that we clear
    assertThat(test.reroute.get(), equalTo(3));
    fetchData = test.fetchData(nodes, emptySet());
    assertThat(fetchData.hasData(), equalTo(true));
    assertThat(fetchData.getData().size(), equalTo(1));
    assertThat(fetchData.getData().get(node1), sameInstance(response1));
}
Also used : BaseNodeResponse(org.elasticsearch.action.support.nodes.BaseNodeResponse) DiscoveryNodes(org.elasticsearch.cluster.node.DiscoveryNodes)
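
For quick reference, the builder call at the top of this test can be exercised on its own. The sketch below is hedged: it assumes Elasticsearch 5.x-era APIs, and the node id, loopback address, and port are placeholders invented for illustration rather than values from the test fixture.

import java.net.InetAddress;
import java.util.Collections;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.transport.TransportAddress;

public class SingleNodeDiscoveryNodesSketch {

    public static void main(String[] args) {
        // stand-in for the test's node1 fixture; attributes and roles are left empty
        DiscoveryNode node1 = new DiscoveryNode("node1",
            new TransportAddress(InetAddress.getLoopbackAddress(), 9300),
            Collections.emptyMap(), Collections.emptySet(), Version.CURRENT);
        // same shape as the test: a cluster view that contains only node1
        DiscoveryNodes nodes = DiscoveryNodes.builder().add(node1).build();
        System.out.println(nodes.getSize());           // 1
        System.out.println(nodes.get(node1.getId()));  // resolves the node by id
    }
}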

Example 72 with DiscoveryNodes

use of org.elasticsearch.cluster.node.DiscoveryNodes in project elasticsearch by elastic.

In the class MetaDataCreateIndexService, the method onlyCreateIndex:

private void onlyCreateIndex(final CreateIndexClusterStateUpdateRequest request, final ActionListener<ClusterStateUpdateResponse> listener) {
    Settings.Builder updatedSettingsBuilder = Settings.builder();
    updatedSettingsBuilder.put(request.settings()).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
    indexScopedSettings.validate(updatedSettingsBuilder);
    request.settings(updatedSettingsBuilder.build());
    clusterService.submitStateUpdateTask("create-index [" + request.index() + "], cause [" + request.cause() + "]", new AckedClusterStateUpdateTask<ClusterStateUpdateResponse>(Priority.URGENT, request, wrapPreservingContext(listener)) {

        @Override
        protected ClusterStateUpdateResponse newResponse(boolean acknowledged) {
            return new ClusterStateUpdateResponse(acknowledged);
        }

        @Override
        public ClusterState execute(ClusterState currentState) throws Exception {
            Index createdIndex = null;
            String removalExtraInfo = null;
            IndexRemovalReason removalReason = IndexRemovalReason.FAILURE;
            try {
                validate(request, currentState);
                for (Alias alias : request.aliases()) {
                    aliasValidator.validateAlias(alias, request.index(), currentState.metaData());
                }
                // we only find a template when it's an API call (a new index)
                // find templates, highest order are better matching
                List<IndexTemplateMetaData> templates = findTemplates(request, currentState);
                Map<String, Custom> customs = new HashMap<>();
                // add the request mapping
                Map<String, Map<String, Object>> mappings = new HashMap<>();
                Map<String, AliasMetaData> templatesAliases = new HashMap<>();
                List<String> templateNames = new ArrayList<>();
                for (Map.Entry<String, String> entry : request.mappings().entrySet()) {
                    mappings.put(entry.getKey(), MapperService.parseMapping(xContentRegistry, entry.getValue()));
                }
                for (Map.Entry<String, Custom> entry : request.customs().entrySet()) {
                    customs.put(entry.getKey(), entry.getValue());
                }
                // apply templates, merging the mappings into the request mapping if exists
                for (IndexTemplateMetaData template : templates) {
                    templateNames.add(template.getName());
                    for (ObjectObjectCursor<String, CompressedXContent> cursor : template.mappings()) {
                        String mappingString = cursor.value.string();
                        if (mappings.containsKey(cursor.key)) {
                            XContentHelper.mergeDefaults(mappings.get(cursor.key), MapperService.parseMapping(xContentRegistry, mappingString));
                        } else {
                            mappings.put(cursor.key, MapperService.parseMapping(xContentRegistry, mappingString));
                        }
                    }
                    // handle custom
                    for (ObjectObjectCursor<String, Custom> cursor : template.customs()) {
                        String type = cursor.key;
                        IndexMetaData.Custom custom = cursor.value;
                        IndexMetaData.Custom existing = customs.get(type);
                        if (existing == null) {
                            customs.put(type, custom);
                        } else {
                            IndexMetaData.Custom merged = existing.mergeWith(custom);
                            customs.put(type, merged);
                        }
                    }
                    // handle aliases
                    for (ObjectObjectCursor<String, AliasMetaData> cursor : template.aliases()) {
                        AliasMetaData aliasMetaData = cursor.value;
                        // if an alias with the same name came with the create index request, ignore this one taken from the index template
                        if (request.aliases().contains(new Alias(aliasMetaData.alias()))) {
                            continue;
                        }
                        // if an alias with the same name was already processed, ignore this one
                        if (templatesAliases.containsKey(cursor.key)) {
                            continue;
                        }
                        // allow templatesAliases entries to be templated by replacing the {index} token with the name of the index we are applying them to
                        if (aliasMetaData.alias().contains("{index}")) {
                            String templatedAlias = aliasMetaData.alias().replace("{index}", request.index());
                            aliasMetaData = AliasMetaData.newAliasMetaData(aliasMetaData, templatedAlias);
                        }
                        aliasValidator.validateAliasMetaData(aliasMetaData, request.index(), currentState.metaData());
                        templatesAliases.put(aliasMetaData.alias(), aliasMetaData);
                    }
                }
                Settings.Builder indexSettingsBuilder = Settings.builder();
                // apply templates, here, in reverse order, since first ones are better matching
                for (int i = templates.size() - 1; i >= 0; i--) {
                    indexSettingsBuilder.put(templates.get(i).settings());
                }
                // now, put the request settings, so they override templates
                indexSettingsBuilder.put(request.settings());
                if (indexSettingsBuilder.get(SETTING_NUMBER_OF_SHARDS) == null) {
                    indexSettingsBuilder.put(SETTING_NUMBER_OF_SHARDS, settings.getAsInt(SETTING_NUMBER_OF_SHARDS, 5));
                }
                if (indexSettingsBuilder.get(SETTING_NUMBER_OF_REPLICAS) == null) {
                    indexSettingsBuilder.put(SETTING_NUMBER_OF_REPLICAS, settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, 1));
                }
                if (settings.get(SETTING_AUTO_EXPAND_REPLICAS) != null && indexSettingsBuilder.get(SETTING_AUTO_EXPAND_REPLICAS) == null) {
                    indexSettingsBuilder.put(SETTING_AUTO_EXPAND_REPLICAS, settings.get(SETTING_AUTO_EXPAND_REPLICAS));
                }
                if (indexSettingsBuilder.get(SETTING_VERSION_CREATED) == null) {
                    DiscoveryNodes nodes = currentState.nodes();
                    final Version createdVersion = Version.min(Version.CURRENT, nodes.getSmallestNonClientNodeVersion());
                    indexSettingsBuilder.put(SETTING_VERSION_CREATED, createdVersion);
                }
                if (indexSettingsBuilder.get(SETTING_CREATION_DATE) == null) {
                    indexSettingsBuilder.put(SETTING_CREATION_DATE, new DateTime(DateTimeZone.UTC).getMillis());
                }
                indexSettingsBuilder.put(IndexMetaData.SETTING_INDEX_PROVIDED_NAME, request.getProvidedName());
                indexSettingsBuilder.put(SETTING_INDEX_UUID, UUIDs.randomBase64UUID());
                final Index shrinkFromIndex = request.shrinkFrom();
                int routingNumShards = IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.get(indexSettingsBuilder.build());
                if (shrinkFromIndex != null) {
                    prepareShrinkIndexSettings(currentState, mappings.keySet(), indexSettingsBuilder, shrinkFromIndex, request.index());
                    IndexMetaData sourceMetaData = currentState.metaData().getIndexSafe(shrinkFromIndex);
                    routingNumShards = sourceMetaData.getRoutingNumShards();
                }
                Settings actualIndexSettings = indexSettingsBuilder.build();
                IndexMetaData.Builder tmpImdBuilder = IndexMetaData.builder(request.index()).setRoutingNumShards(routingNumShards);
                // Set up everything, now locally create the index to see that things are ok, and apply
                final IndexMetaData tmpImd = tmpImdBuilder.settings(actualIndexSettings).build();
                ActiveShardCount waitForActiveShards = request.waitForActiveShards();
                if (waitForActiveShards == ActiveShardCount.DEFAULT) {
                    waitForActiveShards = tmpImd.getWaitForActiveShards();
                }
                if (waitForActiveShards.validate(tmpImd.getNumberOfReplicas()) == false) {
                    throw new IllegalArgumentException("invalid wait_for_active_shards[" + request.waitForActiveShards() + "]: cannot be greater than number of shard copies [" + (tmpImd.getNumberOfReplicas() + 1) + "]");
                }
                // create the index here (on the master) to validate it can be created, as well as adding the mapping
                final IndexService indexService = indicesService.createIndex(tmpImd, Collections.emptyList(), shardId -> {
                });
                createdIndex = indexService.index();
                // now add the mappings
                MapperService mapperService = indexService.mapperService();
                try {
                    mapperService.merge(mappings, MergeReason.MAPPING_UPDATE, request.updateAllTypes());
                } catch (Exception e) {
                    removalExtraInfo = "failed on parsing default mapping/mappings on index creation";
                    throw e;
                }
                // the context is only used for validation so it's fine to pass fake values for the shard id and the current
                // timestamp
                final QueryShardContext queryShardContext = indexService.newQueryShardContext(0, null, () -> 0L);
                for (Alias alias : request.aliases()) {
                    if (Strings.hasLength(alias.filter())) {
                        aliasValidator.validateAliasFilter(alias.name(), alias.filter(), queryShardContext, xContentRegistry);
                    }
                }
                for (AliasMetaData aliasMetaData : templatesAliases.values()) {
                    if (aliasMetaData.filter() != null) {
                        aliasValidator.validateAliasFilter(aliasMetaData.alias(), aliasMetaData.filter().uncompressed(), queryShardContext, xContentRegistry);
                    }
                }
                // now, update the mappings with the actual source
                Map<String, MappingMetaData> mappingsMetaData = new HashMap<>();
                for (DocumentMapper mapper : mapperService.docMappers(true)) {
                    MappingMetaData mappingMd = new MappingMetaData(mapper);
                    mappingsMetaData.put(mapper.type(), mappingMd);
                }
                final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(request.index()).settings(actualIndexSettings).setRoutingNumShards(routingNumShards);
                for (MappingMetaData mappingMd : mappingsMetaData.values()) {
                    indexMetaDataBuilder.putMapping(mappingMd);
                }
                for (AliasMetaData aliasMetaData : templatesAliases.values()) {
                    indexMetaDataBuilder.putAlias(aliasMetaData);
                }
                for (Alias alias : request.aliases()) {
                    AliasMetaData aliasMetaData = AliasMetaData.builder(alias.name()).filter(alias.filter()).indexRouting(alias.indexRouting()).searchRouting(alias.searchRouting()).build();
                    indexMetaDataBuilder.putAlias(aliasMetaData);
                }
                for (Map.Entry<String, Custom> customEntry : customs.entrySet()) {
                    indexMetaDataBuilder.putCustom(customEntry.getKey(), customEntry.getValue());
                }
                indexMetaDataBuilder.state(request.state());
                final IndexMetaData indexMetaData;
                try {
                    indexMetaData = indexMetaDataBuilder.build();
                } catch (Exception e) {
                    removalExtraInfo = "failed to build index metadata";
                    throw e;
                }
                indexService.getIndexEventListener().beforeIndexAddedToCluster(indexMetaData.getIndex(), indexMetaData.getSettings());
                MetaData newMetaData = MetaData.builder(currentState.metaData()).put(indexMetaData, false).build();
                String maybeShadowIndicator = indexMetaData.isIndexUsingShadowReplicas() ? "s" : "";
                logger.info("[{}] creating index, cause [{}], templates {}, shards [{}]/[{}{}], mappings {}", request.index(), request.cause(), templateNames, indexMetaData.getNumberOfShards(), indexMetaData.getNumberOfReplicas(), maybeShadowIndicator, mappings.keySet());
                ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
                if (!request.blocks().isEmpty()) {
                    for (ClusterBlock block : request.blocks()) {
                        blocks.addIndexBlock(request.index(), block);
                    }
                }
                blocks.updateBlocks(indexMetaData);
                ClusterState updatedState = ClusterState.builder(currentState).blocks(blocks).metaData(newMetaData).build();
                if (request.state() == State.OPEN) {
                    RoutingTable.Builder routingTableBuilder = RoutingTable.builder(updatedState.routingTable()).addAsNew(updatedState.metaData().index(request.index()));
                    updatedState = allocationService.reroute(ClusterState.builder(updatedState).routingTable(routingTableBuilder.build()).build(), "index [" + request.index() + "] created");
                }
                removalExtraInfo = "cleaning up after validating index on master";
                removalReason = IndexRemovalReason.NO_LONGER_ASSIGNED;
                return updatedState;
            } finally {
                if (createdIndex != null) {
                    // Index was already partially created - need to clean up
                    indicesService.removeIndex(createdIndex, removalReason, removalExtraInfo);
                }
            }
        }

        @Override
        public void onFailure(String source, Exception e) {
            if (e instanceof ResourceAlreadyExistsException) {
                logger.trace((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
            } else {
                logger.debug((Supplier<?>) () -> new ParameterizedMessage("[{}] failed to create", request.index()), e);
            }
            super.onFailure(source, e);
        }
    });
}
Also used : ElasticsearchException(org.elasticsearch.ElasticsearchException) SETTING_INDEX_UUID(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_INDEX_UUID) DateTimeZone(org.joda.time.DateTimeZone) QueryShardContext(org.elasticsearch.index.query.QueryShardContext) Alias(org.elasticsearch.action.admin.indices.alias.Alias) Environment(org.elasticsearch.env.Environment) BiFunction(java.util.function.BiFunction) AllocationService(org.elasticsearch.cluster.routing.allocation.AllocationService) ClusterBlocks(org.elasticsearch.cluster.block.ClusterBlocks) ObjectObjectCursor(com.carrotsearch.hppc.cursors.ObjectObjectCursor) ClusterState(org.elasticsearch.cluster.ClusterState) Settings(org.elasticsearch.common.settings.Settings) CreateIndexClusterStateUpdateResponse(org.elasticsearch.cluster.ack.CreateIndexClusterStateUpdateResponse) ClusterBlock(org.elasticsearch.cluster.block.ClusterBlock) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) CompressedXContent(org.elasticsearch.common.compress.CompressedXContent) IndexNotFoundException(org.elasticsearch.index.IndexNotFoundException) IndexCreationException(org.elasticsearch.indices.IndexCreationException) Locale(java.util.Locale) Map(java.util.Map) ValidationException(org.elasticsearch.common.ValidationException) ThreadPool(org.elasticsearch.threadpool.ThreadPool) State(org.elasticsearch.cluster.metadata.IndexMetaData.State) Path(java.nio.file.Path) NamedXContentRegistry(org.elasticsearch.common.xcontent.NamedXContentRegistry) CreateIndexClusterStateUpdateRequest(org.elasticsearch.action.admin.indices.create.CreateIndexClusterStateUpdateRequest) Priority(org.elasticsearch.common.Priority) Predicate(java.util.function.Predicate) UUIDs(org.elasticsearch.common.UUIDs) Set(java.util.Set) ObjectCursor(com.carrotsearch.hppc.cursors.ObjectCursor) ActiveShardCount(org.elasticsearch.action.support.ActiveShardCount) ContextPreservingActionListener(org.elasticsearch.action.support.ContextPreservingActionListener) MapperService(org.elasticsearch.index.mapper.MapperService) List(java.util.List) Version(org.elasticsearch.Version) IndexRoutingTable(org.elasticsearch.cluster.routing.IndexRoutingTable) Supplier(org.apache.logging.log4j.util.Supplier) ClusterStateUpdateResponse(org.elasticsearch.cluster.ack.ClusterStateUpdateResponse) InvalidIndexNameException(org.elasticsearch.indices.InvalidIndexNameException) UnsupportedEncodingException(java.io.UnsupportedEncodingException) ShardRouting(org.elasticsearch.cluster.routing.ShardRouting) AckedClusterStateUpdateTask(org.elasticsearch.cluster.AckedClusterStateUpdateTask) ClusterService(org.elasticsearch.cluster.service.ClusterService) HashMap(java.util.HashMap) Index(org.elasticsearch.index.Index) ShardRoutingState(org.elasticsearch.cluster.routing.ShardRoutingState) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) ResourceAlreadyExistsException(org.elasticsearch.ResourceAlreadyExistsException) ActiveShardsObserver(org.elasticsearch.action.support.ActiveShardsObserver) Strings(org.elasticsearch.common.Strings) Inject(org.elasticsearch.common.inject.Inject) ArrayList(java.util.ArrayList) SETTING_NUMBER_OF_REPLICAS(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS) XContentHelper(org.elasticsearch.common.xcontent.XContentHelper) IndexRemovalReason(org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason) Custom(org.elasticsearch.cluster.metadata.IndexMetaData.Custom) Regex(org.elasticsearch.common.regex.Regex) 
SETTING_VERSION_CREATED(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_VERSION_CREATED) IndicesService(org.elasticsearch.indices.IndicesService) SETTING_AUTO_EXPAND_REPLICAS(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS) ClusterBlockLevel(org.elasticsearch.cluster.block.ClusterBlockLevel) SETTING_CREATION_DATE(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_CREATION_DATE) PathUtils(org.elasticsearch.common.io.PathUtils) DocumentMapper(org.elasticsearch.index.mapper.DocumentMapper) DiscoveryNodes(org.elasticsearch.cluster.node.DiscoveryNodes) AbstractComponent(org.elasticsearch.common.component.AbstractComponent) SETTING_NUMBER_OF_SHARDS(org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS) IndexService(org.elasticsearch.index.IndexService) DateTime(org.joda.time.DateTime) IOException(java.io.IOException) IndexScopedSettings(org.elasticsearch.common.settings.IndexScopedSettings) CollectionUtil(org.apache.lucene.util.CollectionUtil) RoutingTable(org.elasticsearch.cluster.routing.RoutingTable) MergeReason(org.elasticsearch.index.mapper.MapperService.MergeReason) Comparator(java.util.Comparator) Collections(java.util.Collections) ActionListener(org.elasticsearch.action.ActionListener)
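
The only place DiscoveryNodes appears in this method is the SETTING_VERSION_CREATED default: the created version is capped at the smallest non-client node version, so the new index stays usable by the oldest data or master node in the cluster. A minimal sketch of just that rule follows; pickCreatedVersion is a made-up helper name, not part of MetaDataCreateIndexService.

import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNodes;

final class CreatedVersionSketch {

    private CreatedVersionSketch() {}

    // mirrors the branch above: never stamp a new index with a version newer than
    // the oldest non-client node in the cluster can understand
    static Version pickCreatedVersion(DiscoveryNodes nodes) {
        return Version.min(Version.CURRENT, nodes.getSmallestNonClientNodeVersion());
    }
}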

Example 73 with DiscoveryNodes

use of org.elasticsearch.cluster.node.DiscoveryNodes in project elasticsearch by elastic.

In the class ClusterService, the method doStart:

@Override
protected synchronized void doStart() {
    Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting");
    Objects.requireNonNull(nodeConnectionsService, "please set the node connection service before starting");
    Objects.requireNonNull(discoverySettings, "please set discovery settings before starting");
    addListener(localNodeMasterListeners);
    DiscoveryNode localNode = localNodeSupplier.get();
    assert localNode != null;
    updateState(state -> {
        assert state.nodes().getLocalNodeId() == null : "local node is already set";
        DiscoveryNodes nodes = DiscoveryNodes.builder(state.nodes()).add(localNode).localNodeId(localNode.getId()).build();
        return ClusterState.builder(state).nodes(nodes).blocks(initialBlocks).build();
    });
    this.threadPoolExecutor = EsExecutors.newSinglePrioritizing(UPDATE_THREAD_NAME, daemonThreadFactory(settings, UPDATE_THREAD_NAME), threadPool.getThreadContext());
}
Also used : DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) DiscoveryNodes(org.elasticsearch.cluster.node.DiscoveryNodes)
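
The notable detail here is that DiscoveryNodes.builder can be seeded from an existing DiscoveryNodes, so the local node can be appended and registered as the local node id without rebuilding the rest of the node set. A sketch of that step in isolation; addLocalNode is an invented helper name.

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;

final class LocalNodeRegistrationSketch {

    private LocalNodeRegistrationSketch() {}

    // same pattern as doStart(): copy the current node set, add this node, mark it as local
    static DiscoveryNodes addLocalNode(DiscoveryNodes current, DiscoveryNode localNode) {
        return DiscoveryNodes.builder(current)
            .add(localNode)
            .localNodeId(localNode.getId())
            .build();
    }
}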

Example 74 with DiscoveryNodes

use of org.elasticsearch.cluster.node.DiscoveryNodes in project elasticsearch by elastic.

In the class IndicesClusterStateService, the method updateShard:

private void updateShard(DiscoveryNodes nodes, ShardRouting shardRouting, Shard shard, RoutingTable routingTable, ClusterState clusterState) {
    final ShardRouting currentRoutingEntry = shard.routingEntry();
    assert currentRoutingEntry.isSameAllocation(shardRouting) : "local shard has a different allocation id but wasn't cleaned up by removeShards. " + "cluster state: " + shardRouting + " local: " + currentRoutingEntry;
    try {
        shard.updateRoutingEntry(shardRouting);
        if (shardRouting.primary()) {
            IndexShardRoutingTable indexShardRoutingTable = routingTable.shardRoutingTable(shardRouting.shardId());
            Set<String> activeIds = indexShardRoutingTable.activeShards().stream().filter(sr -> nodes.get(sr.currentNodeId()).getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)).map(r -> r.allocationId().getId()).collect(Collectors.toSet());
            Set<String> initializingIds = indexShardRoutingTable.getAllInitializingShards().stream().filter(sr -> nodes.get(sr.currentNodeId()).getVersion().onOrAfter(Version.V_6_0_0_alpha1_UNRELEASED)).map(r -> r.allocationId().getId()).collect(Collectors.toSet());
            shard.updateAllocationIdsFromMaster(activeIds, initializingIds);
        }
    } catch (Exception e) {
        failAndRemoveShard(shardRouting, true, "failed updating shard routing entry", e, clusterState);
        return;
    }
    final IndexShardState state = shard.state();
    if (shardRouting.initializing() && (state == IndexShardState.STARTED || state == IndexShardState.POST_RECOVERY)) {
        // the master still thinks this shard is initializing, but it is already started or in post-recovery
        // (for example after a master failover, or a cluster event fired before we managed to tell the master
        // we started), so mark the shard as started
        if (logger.isTraceEnabled()) {
            logger.trace("{} master marked shard as initializing, but shard has state [{}], resending shard started to {}", shardRouting.shardId(), state, nodes.getMasterNode());
        }
        if (nodes.getMasterNode() != null) {
            shardStateAction.shardStarted(shardRouting, "master " + nodes.getMasterNode() + " marked shard as initializing, but shard state is [" + state + "], mark shard as started", SHARD_STATE_ACTION_LISTENER, clusterState);
        }
    }
}
Also used : ShardId(org.elasticsearch.index.shard.ShardId) Arrays(java.util.Arrays) CLOSED(org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.CLOSED) Nullable(org.elasticsearch.common.Nullable) FAILURE(org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.FAILURE) ConcurrentCollections(org.elasticsearch.common.util.concurrent.ConcurrentCollections) SearchService(org.elasticsearch.search.SearchService) ShardNotFoundException(org.elasticsearch.index.shard.ShardNotFoundException) Type(org.elasticsearch.cluster.routing.RecoverySource.Type) ClusterState(org.elasticsearch.cluster.ClusterState) Settings(org.elasticsearch.common.settings.Settings) Map(java.util.Map) SyncedFlushService(org.elasticsearch.indices.flush.SyncedFlushService) ThreadPool(org.elasticsearch.threadpool.ThreadPool) PeerRecoveryTargetService(org.elasticsearch.indices.recovery.PeerRecoveryTargetService) Set(java.util.Set) IndexShardRoutingTable(org.elasticsearch.cluster.routing.IndexShardRoutingTable) ShardLockObtainFailedException(org.elasticsearch.env.ShardLockObtainFailedException) ClusterChangedEvent(org.elasticsearch.cluster.ClusterChangedEvent) Collectors(java.util.stream.Collectors) NodeMappingRefreshAction(org.elasticsearch.cluster.action.index.NodeMappingRefreshAction) AbstractRunnable(org.elasticsearch.common.util.concurrent.AbstractRunnable) SnapshotShardsService(org.elasticsearch.snapshots.SnapshotShardsService) List(java.util.List) Logger(org.apache.logging.log4j.Logger) Version(org.elasticsearch.Version) IndexComponent(org.elasticsearch.index.IndexComponent) Supplier(org.apache.logging.log4j.util.Supplier) IndexMetaData(org.elasticsearch.cluster.metadata.IndexMetaData) RecoveryState(org.elasticsearch.indices.recovery.RecoveryState) ShardStateAction(org.elasticsearch.cluster.action.shard.ShardStateAction) NO_LONGER_ASSIGNED(org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED) ShardRouting(org.elasticsearch.cluster.routing.ShardRouting) Callback(org.elasticsearch.common.util.Callback) LockObtainFailedException(org.apache.lucene.store.LockObtainFailedException) ClusterService(org.elasticsearch.cluster.service.ClusterService) IndexShardRelocatedException(org.elasticsearch.index.shard.IndexShardRelocatedException) PeerRecoverySourceService(org.elasticsearch.indices.recovery.PeerRecoverySourceService) RecoveryFailedException(org.elasticsearch.indices.recovery.RecoveryFailedException) HashMap(java.util.HashMap) Index(org.elasticsearch.index.Index) ParameterizedMessage(org.apache.logging.log4j.message.ParameterizedMessage) GlobalCheckpointSyncAction(org.elasticsearch.index.seqno.GlobalCheckpointSyncAction) ResourceAlreadyExistsException(org.elasticsearch.ResourceAlreadyExistsException) Inject(org.elasticsearch.common.inject.Inject) ArrayList(java.util.ArrayList) ConcurrentMap(java.util.concurrent.ConcurrentMap) HashSet(java.util.HashSet) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) TimeValue(org.elasticsearch.common.unit.TimeValue) IndexSettings(org.elasticsearch.index.IndexSettings) IndicesService(org.elasticsearch.indices.IndicesService) ClusterStateApplier(org.elasticsearch.cluster.ClusterStateApplier) DiscoveryNodes(org.elasticsearch.cluster.node.DiscoveryNodes) IndexShardState(org.elasticsearch.index.shard.IndexShardState) IndexEventListener(org.elasticsearch.index.shard.IndexEventListener) Iterator(java.util.Iterator) 
DELETED(org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED) IndexService(org.elasticsearch.index.IndexService) GlobalCheckpointTracker(org.elasticsearch.index.seqno.GlobalCheckpointTracker) IndexShard(org.elasticsearch.index.shard.IndexShard) RoutingNode(org.elasticsearch.cluster.routing.RoutingNode) IOException(java.io.IOException) RepositoriesService(org.elasticsearch.repositories.RepositoriesService) AbstractLifecycleComponent(org.elasticsearch.common.component.AbstractLifecycleComponent) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) RoutingTable(org.elasticsearch.cluster.routing.RoutingTable) GatewayService(org.elasticsearch.gateway.GatewayService)
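
The allocation-id collection above filters shards by the version of the node they are currently assigned to. Pulled out on its own, the predicate looks like the sketch below; nodeIsOnOrAfter is an invented name, and unlike the stream code above it treats an unresolvable node id as not matching rather than dereferencing a null node.

import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.ShardRouting;

final class NodeVersionFilterSketch {

    private NodeVersionFilterSketch() {}

    // true when the node currently holding the shard is on or after the given version
    static boolean nodeIsOnOrAfter(DiscoveryNodes nodes, ShardRouting shardRouting, Version minVersion) {
        DiscoveryNode node = nodes.get(shardRouting.currentNodeId());
        return node != null && node.getVersion().onOrAfter(minVersion);
    }
}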

Example 75 with DiscoveryNodes

use of org.elasticsearch.cluster.node.DiscoveryNodes in project elasticsearch by elastic.

In the class TransportCancelTasksAction, the method taskOperation:

@Override
protected synchronized void taskOperation(CancelTasksRequest request, CancellableTask cancellableTask, ActionListener<TaskInfo> listener) {
    String nodeId = clusterService.localNode().getId();
    final boolean canceled;
    if (cancellableTask.shouldCancelChildrenOnCancellation()) {
        DiscoveryNodes childNodes = clusterService.state().nodes();
        final BanLock banLock = new BanLock(childNodes.getSize(), () -> removeBanOnNodes(cancellableTask, childNodes));
        canceled = taskManager.cancel(cancellableTask, request.getReason(), banLock::onTaskFinished);
        if (canceled) {
            // in case the task has child tasks, we need to wait until the ban is set on all nodes
            logger.trace("cancelling task {} on child nodes", cancellableTask.getId());
            AtomicInteger responses = new AtomicInteger(childNodes.getSize());
            List<Exception> failures = new ArrayList<>();
            setBanOnNodes(request.getReason(), cancellableTask, childNodes, new ActionListener<Void>() {

                @Override
                public void onResponse(Void aVoid) {
                    processResponse();
                }

                @Override
                public void onFailure(Exception e) {
                    synchronized (failures) {
                        failures.add(e);
                    }
                    processResponse();
                }

                private void processResponse() {
                    banLock.onBanSet();
                    if (responses.decrementAndGet() == 0) {
                        if (failures.isEmpty() == false) {
                            IllegalStateException exception = new IllegalStateException("failed to cancel children of the task [" + cancellableTask.getId() + "]");
                            failures.forEach(exception::addSuppressed);
                            listener.onFailure(exception);
                        } else {
                            listener.onResponse(cancellableTask.taskInfo(nodeId, false));
                        }
                    }
                }
            });
        }
    } else {
        canceled = taskManager.cancel(cancellableTask, request.getReason(), () -> listener.onResponse(cancellableTask.taskInfo(nodeId, false)));
        if (canceled) {
            logger.trace("task {} doesn't have any children that should be cancelled", cancellableTask.getId());
        }
    }
    if (canceled == false) {
        logger.trace("task {} is already cancelled", cancellableTask.getId());
        throw new IllegalStateException("task with id " + cancellableTask.getId() + " is already cancelled");
    }
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ArrayList(java.util.ArrayList) DiscoveryNodes(org.elasticsearch.cluster.node.DiscoveryNodes) FailedNodeException(org.elasticsearch.action.FailedNodeException) ResourceNotFoundException(org.elasticsearch.ResourceNotFoundException) IOException(java.io.IOException) TransportException(org.elasticsearch.transport.TransportException)
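
Here DiscoveryNodes is used only as a node count: the action expects one ban acknowledgement per node in the cluster before it can report the cancellation as complete. The sketch below isolates that countdown shape with invented names (counterFor, onNodeResponse); it is not the BanLock implementation itself.

import java.util.concurrent.atomic.AtomicInteger;

import org.elasticsearch.cluster.node.DiscoveryNodes;

final class PerNodeResponseCounterSketch {

    private PerNodeResponseCounterSketch() {}

    // one pending slot per discovery node, as the action does with its AtomicInteger
    static AtomicInteger counterFor(DiscoveryNodes nodes) {
        return new AtomicInteger(nodes.getSize());
    }

    // decrement once per node response; run the completion callback when every node has answered
    static void onNodeResponse(AtomicInteger pending, Runnable whenAllNodesResponded) {
        if (pending.decrementAndGet() == 0) {
            whenAllNodesResponded.run();
        }
    }
}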

Aggregations

DiscoveryNodes (org.elasticsearch.cluster.node.DiscoveryNodes): 129 usages
DiscoveryNode (org.elasticsearch.cluster.node.DiscoveryNode): 74 usages
ClusterState (org.elasticsearch.cluster.ClusterState): 45 usages
Settings (org.elasticsearch.common.settings.Settings): 37 usages
ArrayList (java.util.ArrayList): 32 usages
IOException (java.io.IOException): 27 usages
HashSet (java.util.HashSet): 25 usages
List (java.util.List): 24 usages
Map (java.util.Map): 23 usages
TransportService (org.elasticsearch.transport.TransportService): 23 usages
Version (org.elasticsearch.Version): 22 usages
HashMap (java.util.HashMap): 20 usages
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 20 usages
ShardRouting (org.elasticsearch.cluster.routing.ShardRouting): 20 usages
Set (java.util.Set): 19 usages
TransportException (org.elasticsearch.transport.TransportException): 19 usages
Collections (java.util.Collections): 18 usages
ThreadPool (org.elasticsearch.threadpool.ThreadPool): 18 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 16 usages
Collectors (java.util.stream.Collectors): 16 usages