
Example 1 with ShardIterator

use of org.opensearch.cluster.routing.ShardIterator in project OpenSearch by opensearch-project.

From the class TransportSearchAction, the method executeSearch:

private void executeSearch(
    SearchTask task,
    SearchTimeProvider timeProvider,
    SearchRequest searchRequest,
    OriginalIndices localIndices,
    List<SearchShardIterator> remoteShardIterators,
    BiFunction<String, String, DiscoveryNode> remoteConnections,
    ClusterState clusterState,
    Map<String, AliasFilter> remoteAliasMap,
    ActionListener<SearchResponse> listener,
    SearchResponse.Clusters clusters,
    @Nullable SearchContextId searchContext,
    SearchAsyncActionProvider searchAsyncActionProvider
) {
    clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
    // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name
    // date math expressions and $now in scripts. This way all apis will deal with now in the same way instead
    // of just for the _search api
    final List<SearchShardIterator> localShardIterators;
    final Map<String, AliasFilter> aliasFilter;
    final Map<String, Set<String>> indexRoutings;
    final String[] concreteLocalIndices;
    if (searchContext != null) {
        assert searchRequest.pointInTimeBuilder() != null;
        aliasFilter = searchContext.aliasFilter();
        indexRoutings = Collections.emptyMap();
        concreteLocalIndices = localIndices == null ? new String[0] : localIndices.indices();
        localShardIterators = getLocalLocalShardsIteratorFromPointInTime(clusterState, localIndices, searchRequest.getLocalClusterAlias(), searchContext, searchRequest.pointInTimeBuilder().getKeepAlive());
    } else {
        final Index[] indices = resolveLocalIndices(localIndices, clusterState, timeProvider);
        Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), searchRequest.indices());
        routingMap = routingMap == null ? Collections.emptyMap() : Collections.unmodifiableMap(routingMap);
        concreteLocalIndices = new String[indices.length];
        for (int i = 0; i < indices.length; i++) {
            concreteLocalIndices[i] = indices[i].getName();
        }
        Map<String, Long> nodeSearchCounts = searchTransportService.getPendingSearchRequests();
        GroupShardsIterator<ShardIterator> localShardRoutings = clusterService.operationRouting()
            .searchShards(clusterState, concreteLocalIndices, routingMap, searchRequest.preference(),
                searchService.getResponseCollectorService(), nodeSearchCounts);
        localShardIterators = StreamSupport.stream(localShardRoutings.spliterator(), false)
            .map(it -> new SearchShardIterator(searchRequest.getLocalClusterAlias(), it.shardId(), it.getShardRoutings(), localIndices))
            .collect(Collectors.toList());
        aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, indices, remoteAliasMap);
        indexRoutings = routingMap;
    }
    final GroupShardsIterator<SearchShardIterator> shardIterators = mergeShardsIterators(localShardIterators, remoteShardIterators);
    failIfOverShardCountLimit(clusterService, shardIterators.size());
    Map<String, Float> concreteIndexBoosts = resolveIndexBoosts(searchRequest, clusterState);
    // optimize search type for cases where there is only one shard group to search on
    if (shardIterators.size() == 1) {
        // if we only have one group, then we always want Q_T_F, no need for DFS, and no need to do THEN since we hit one shard
        searchRequest.searchType(QUERY_THEN_FETCH);
    }
    if (searchRequest.allowPartialSearchResults() == null) {
        // No user preference defined in search request - apply cluster service default
        searchRequest.allowPartialSearchResults(searchService.defaultAllowPartialSearchResults());
    }
    if (searchRequest.isSuggestOnly()) {
        // disable request cache if we have only suggest
        searchRequest.requestCache(false);
        switch(searchRequest.searchType()) {
            case DFS_QUERY_THEN_FETCH:
                // convert to Q_T_F if we have only suggest
                searchRequest.searchType(QUERY_THEN_FETCH);
                break;
        }
    }
    final DiscoveryNodes nodes = clusterState.nodes();
    BiFunction<String, String, Transport.Connection> connectionLookup = buildConnectionLookup(searchRequest.getLocalClusterAlias(), nodes::get, remoteConnections, searchTransportService::getConnection);
    final Executor asyncSearchExecutor = asyncSearchExecutor(concreteLocalIndices, clusterState);
    final boolean preFilterSearchShards = shouldPreFilterSearchShards(clusterState, searchRequest, concreteLocalIndices, localShardIterators.size() + remoteShardIterators.size());
    searchAsyncActionProvider.asyncSearchAction(
        task, searchRequest, asyncSearchExecutor, shardIterators, timeProvider, connectionLookup, clusterState,
        Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, indexRoutings, listener, preFilterSearchShards,
        threadPool, clusters
    ).start();
}
Also used: AliasFilter (org.opensearch.search.internal.AliasFilter), Set (java.util.Set), HashSet (java.util.HashSet), Index (org.opensearch.index.Index), Executor (java.util.concurrent.Executor), ShardIterator (org.opensearch.cluster.routing.ShardIterator), DiscoveryNodes (org.opensearch.cluster.node.DiscoveryNodes)
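
The ShardIterator-specific part of executeSearch is the GroupShardsIterator<ShardIterator> returned by operationRouting().searchShards(...), whose per-shard groups are then wrapped into SearchShardIterator instances. A minimal sketch of how such a grouped iterator can be walked follows; the class and method names below are illustrative and not part of OpenSearch, only the iterator types come from the example above.

import org.opensearch.cluster.routing.GroupShardsIterator;
import org.opensearch.cluster.routing.ShardIterator;
import org.opensearch.cluster.routing.ShardRouting;

// Illustrative helper, not part of OpenSearch: walks a grouped shard iterator.
final class ShardGroupWalker {

    // Counts the shard copies, across all per-shard groups, that are currently assigned to a node.
    static int countAssignedCopies(GroupShardsIterator<ShardIterator> groups) {
        int assigned = 0;
        // GroupShardsIterator iterates over one ShardIterator per shard group ...
        for (ShardIterator shards : groups) {
            // ... and each ShardIterator iterates over the ShardRouting copies of that shard.
            for (ShardRouting routing : shards) {
                if (routing.assignedToNode()) {
                    assigned++;
                }
            }
        }
        return assigned;
    }
}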

Example 2 with ShardIterator

use of org.opensearch.cluster.routing.ShardIterator in project OpenSearch by opensearch-project.

From the class SearchStatsIT, the method nodeIdsWithIndex:

private Set<String> nodeIdsWithIndex(String... indices) {
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    GroupShardsIterator<ShardIterator> allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
    Set<String> nodes = new HashSet<>();
    for (ShardIterator shardIterator : allAssignedShardsGrouped) {
        for (ShardRouting routing : shardIterator) {
            if (routing.active()) {
                nodes.add(routing.currentNodeId());
            }
        }
    }
    return nodes;
}
Also used: ClusterState (org.opensearch.cluster.ClusterState), ShardIterator (org.opensearch.cluster.routing.ShardIterator), ShardRouting (org.opensearch.cluster.routing.ShardRouting), HashSet (java.util.HashSet)
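
nodeIdsWithIndex collects the nodes holding any assigned copy (primary or replica) of the given indices. A hypothetical variant restricted to active primaries could use activePrimaryShardsGrouped, which appears in Examples 4 and 5 below; the helper name nodeIdsWithPrimaries is an assumption, and the sketch assumes the same integration-test class and imports as the helper above.

private Set<String> nodeIdsWithPrimaries(String... indices) {
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    // Same activePrimaryShardsGrouped call shape as in Examples 4 and 5 below.
    GroupShardsIterator<ShardIterator> primaries = state.getRoutingTable().activePrimaryShardsGrouped(indices, false);
    Set<String> nodes = new HashSet<>();
    for (ShardIterator shardIterator : primaries) {
        for (ShardRouting routing : shardIterator) {
            if (routing.active() && routing.primary()) {
                nodes.add(routing.currentNodeId());
            }
        }
    }
    return nodes;
}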

Example 3 with ShardIterator

use of org.opensearch.cluster.routing.ShardIterator in project OpenSearch by opensearch-project.

From the class SuggestStatsIT, the method nodeIdsWithIndex:

private Set<String> nodeIdsWithIndex(String... indices) {
    ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
    GroupShardsIterator<ShardIterator> allAssignedShardsGrouped = state.routingTable().allAssignedShardsGrouped(indices, true);
    Set<String> nodes = new HashSet<>();
    for (ShardIterator shardIterator : allAssignedShardsGrouped) {
        for (ShardRouting routing : shardIterator) {
            if (routing.active()) {
                nodes.add(routing.currentNodeId());
            }
        }
    }
    return nodes;
}
Also used: ClusterState (org.opensearch.cluster.ClusterState), ShardIterator (org.opensearch.cluster.routing.ShardIterator), ShardRouting (org.opensearch.cluster.routing.ShardRouting), HashSet (java.util.HashSet)

Example 4 with ShardIterator

use of org.opensearch.cluster.routing.ShardIterator in project OpenSearch by opensearch-project.

From the class CorruptedFileIT, the method corruptRandomPrimaryFile:

private ShardRouting corruptRandomPrimaryFile(final boolean includePerCommitFiles) throws IOException {
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    Index test = state.metadata().index("test").getIndex();
    GroupShardsIterator shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[] { "test" }, false);
    List<ShardIterator> iterators = iterableAsArrayList(shardIterators);
    ShardIterator shardIterator = RandomPicks.randomFrom(random(), iterators);
    ShardRouting shardRouting = shardIterator.nextOrNull();
    assertNotNull(shardRouting);
    assertTrue(shardRouting.primary());
    assertTrue(shardRouting.assignedToNode());
    String nodeId = shardRouting.currentNodeId();
    NodesStatsResponse nodeStatses = client().admin().cluster().prepareNodesStats(nodeId).addMetric(FS.metricName()).get();
    // treeset makes sure iteration order is deterministic
    Set<Path> files = new TreeSet<>();
    for (FsInfo.Path info : nodeStatses.getNodes().get(0).getFs()) {
        String path = info.getPath();
        Path file = PathUtils.get(path).resolve("indices").resolve(test.getUUID()).resolve(Integer.toString(shardRouting.getId())).resolve("index");
        if (Files.exists(file)) {
            // multi data path might only have one path in use
            try (Directory dir = FSDirectory.open(file)) {
                SegmentInfos segmentCommitInfos = Lucene.readSegmentInfos(dir);
                if (includePerCommitFiles) {
                    files.add(file.resolve(segmentCommitInfos.getSegmentsFileName()));
                }
                for (SegmentCommitInfo commitInfo : segmentCommitInfos) {
                    if (commitInfo.getDelCount() + commitInfo.getSoftDelCount() == commitInfo.info.maxDoc()) {
                        // don't corrupt fully deleted segments - they might be removed on snapshot
                        continue;
                    }
                    for (String commitFile : commitInfo.files()) {
                        if (includePerCommitFiles || isPerSegmentFile(commitFile)) {
                            files.add(file.resolve(commitFile));
                        }
                    }
                }
            }
        }
    }
    CorruptionUtils.corruptFile(random(), files.toArray(new Path[0]));
    return shardRouting;
}
Also used: Path (java.nio.file.Path), ClusterState (org.opensearch.cluster.ClusterState), SegmentInfos (org.apache.lucene.index.SegmentInfos), SegmentCommitInfo (org.apache.lucene.index.SegmentCommitInfo), CheckIndex (org.apache.lucene.index.CheckIndex), Index (org.opensearch.index.Index), NodesStatsResponse (org.opensearch.action.admin.cluster.node.stats.NodesStatsResponse), FsInfo (org.opensearch.monitor.fs.FsInfo), GroupShardsIterator (org.opensearch.cluster.routing.GroupShardsIterator), TreeSet (java.util.TreeSet), ShardIterator (org.opensearch.cluster.routing.ShardIterator), ShardRouting (org.opensearch.cluster.routing.ShardRouting), Directory (org.apache.lucene.store.Directory), FSDirectory (org.apache.lucene.store.FSDirectory)
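
corruptRandomPrimaryFile consumes only the first routing of the randomly chosen ShardIterator via nextOrNull(). The same call can also drain an iterator completely, returning null once it is exhausted. A minimal illustrative sketch follows; the class and method names are not part of OpenSearch.

import org.opensearch.cluster.routing.ShardIterator;
import org.opensearch.cluster.routing.ShardRouting;

// Illustrative helper, not part of OpenSearch: drains a ShardIterator with nextOrNull().
final class PrimaryPicker {

    // Returns the first primary ShardRouting the iterator yields, or null if there is none.
    static ShardRouting firstPrimary(ShardIterator shardIterator) {
        ShardRouting routing;
        // nextOrNull() advances the iterator and returns null when no routings are left.
        while ((routing = shardIterator.nextOrNull()) != null) {
            if (routing.primary()) {
                return routing;
            }
        }
        return null;
    }
}

Example 5 below uses the same while ((routing = iterator.nextOrNull()) != null) pattern to inspect every copy of every shard group.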

Example 5 with ShardIterator

use of org.opensearch.cluster.routing.ShardIterator in project OpenSearch by opensearch-project.

From the class CorruptedFileIT, the method testCorruptPrimaryNoReplica:

/**
 * Tests corruption that happens on a single shard when no replicas are present. We make sure that the corrupted primary stays
 * unassigned while the copies of all other, healthy shards remain started or relocating.
 */
public void testCorruptPrimaryNoReplica() throws ExecutionException, InterruptedException, IOException {
    int numDocs = scaledRandomIntBetween(100, 1000);
    internalCluster().ensureAtLeastNumDataNodes(2);
    assertAcked(
        prepareCreate("test").setSettings(
            Settings.builder()
                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "0")
                .put(MergePolicyConfig.INDEX_MERGE_ENABLED, false)
                // no checkindex - we corrupt shards on purpose
                .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
                .put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(1, ByteSizeUnit.PB))
        )
    );
    ensureGreen();
    IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
    for (int i = 0; i < builders.length; i++) {
        builders[i] = client().prepareIndex("test").setSource("field", "value");
    }
    indexRandom(true, builders);
    ensureGreen();
    // double flush to create safe commit in case of async durability
    assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).get());
    assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).get());
    // we have to flush at least once here since we don't corrupt the translog
    SearchResponse countResponse = client().prepareSearch().setSize(0).get();
    assertHitCount(countResponse, numDocs);
    ShardRouting shardRouting = corruptRandomPrimaryFile();
    /*
     * we corrupted the primary shard - now let's make sure we never recover from it successfully
     */
    Settings build = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, "1").build();
    client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
    client().admin().cluster().prepareReroute().get();
    boolean didClusterTurnRed = waitUntil(() -> {
        ClusterHealthStatus test = client().admin().cluster().health(Requests.clusterHealthRequest("test")).actionGet().getStatus();
        return test == ClusterHealthStatus.RED;
    }, 5, TimeUnit.MINUTES); // sometimes on slow nodes the replication / recovery is just dead slow
    final ClusterHealthResponse response = client().admin().cluster().health(Requests.clusterHealthRequest("test")).get();
    if (response.getStatus() != ClusterHealthStatus.RED) {
        logger.info("Cluster turned red in busy loop: {}", didClusterTurnRed);
        logger.info("cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState(), client().admin().cluster().preparePendingClusterTasks().get());
    }
    assertThat(response.getStatus(), is(ClusterHealthStatus.RED));
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    GroupShardsIterator<ShardIterator> shardIterators = state.getRoutingTable().activePrimaryShardsGrouped(new String[] { "test" }, false);
    for (ShardIterator iterator : shardIterators) {
        ShardRouting routing;
        while ((routing = iterator.nextOrNull()) != null) {
            if (routing.getId() == shardRouting.getId()) {
                assertThat(routing.state(), equalTo(ShardRoutingState.UNASSIGNED));
            } else {
                assertThat(routing.state(), anyOf(equalTo(ShardRoutingState.RELOCATING), equalTo(ShardRoutingState.STARTED)));
            }
        }
    }
    final List<Path> files = listShardFiles(shardRouting);
    Path corruptedFile = null;
    for (Path file : files) {
        if (file.getFileName().toString().startsWith("corrupted_")) {
            corruptedFile = file;
            break;
        }
    }
    assertThat(corruptedFile, notNullValue());
}
Also used: Path (java.nio.file.Path), ClusterState (org.opensearch.cluster.ClusterState), ClusterHealthResponse (org.opensearch.action.admin.cluster.health.ClusterHealthResponse), ByteSizeValue (org.opensearch.common.unit.ByteSizeValue), SearchResponse (org.opensearch.action.search.SearchResponse), IndexRequestBuilder (org.opensearch.action.index.IndexRequestBuilder), ClusterHealthStatus (org.opensearch.cluster.health.ClusterHealthStatus), ShardIterator (org.opensearch.cluster.routing.ShardIterator), ShardRouting (org.opensearch.cluster.routing.ShardRouting), Settings (org.opensearch.common.settings.Settings), IndexSettings (org.opensearch.index.IndexSettings)

Aggregations

ShardIterator (org.opensearch.cluster.routing.ShardIterator): 20
ShardRouting (org.opensearch.cluster.routing.ShardRouting): 14
ClusterState (org.opensearch.cluster.ClusterState): 12
PlainShardIterator (org.opensearch.cluster.routing.PlainShardIterator): 8
IndexMetadata (org.opensearch.cluster.metadata.IndexMetadata): 7
Metadata (org.opensearch.cluster.metadata.Metadata): 7
RoutingTable (org.opensearch.cluster.routing.RoutingTable): 5
ShardId (org.opensearch.index.shard.ShardId): 5
DiscoveryNode (org.opensearch.cluster.node.DiscoveryNode): 4
GroupShardsIterator (org.opensearch.cluster.routing.GroupShardsIterator): 4
OperationRouting (org.opensearch.cluster.routing.OperationRouting): 4
HashMap (java.util.HashMap): 3
HashSet (java.util.HashSet): 3
Matchers.containsString (org.hamcrest.Matchers.containsString): 3
Version (org.opensearch.Version): 3
Settings (org.opensearch.common.settings.Settings): 3
Path (java.nio.file.Path): 2
Collections.emptyMap (java.util.Collections.emptyMap): 2
Collections.emptySet (java.util.Collections.emptySet): 2
Map (java.util.Map): 2