Example 1 with ShardId

use of org.opensearch.index.shard.ShardId in project OpenSearch by opensearch-project.

the class MeanReciprocalRankTests method createSearchHits.

/**
 * Create SearchHits for testing, starting from docId 'from' up to docId 'to'.
 * The index the search hits belong to must also be provided.
 */
private static SearchHit[] createSearchHits(int from, int to, String index) {
    SearchHit[] hits = new SearchHit[to + 1 - from];
    for (int i = from; i <= to; i++) {
        // offset by 'from' so the first hit lands at array index 0 (the original indexed
        // with 'i', which overruns the array whenever 'from' is greater than 0)
        hits[i - from] = new SearchHit(i, i + "", Collections.emptyMap(), Collections.emptyMap());
        hits[i - from].shard(new SearchShardTarget("testnode", new ShardId(index, "uuid", 0), null, OriginalIndices.NONE));
    }
    return hits;
}
Also used : ShardId(org.opensearch.index.shard.ShardId) SearchHit(org.opensearch.search.SearchHit) SearchShardTarget(org.opensearch.search.SearchShardTarget)
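A hypothetical test method (not part of the original class) showing how the helper might be exercised; getShard() and getShardId() are the standard SearchHit/SearchShardTarget accessors, and assertEquals comes from the usual test-case base class:

public void testCreateSearchHits() {
    // ten hits with docIds 0 through 9, all targeting shard 0 of index "test"
    SearchHit[] hits = createSearchHits(0, 9, "test");
    assertEquals(10, hits.length);
    assertEquals("test", hits[0].getShard().getShardId().getIndexName());
    assertEquals(0, hits[0].getShard().getShardId().id());
}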

Example 2 with ShardId

use of org.opensearch.index.shard.ShardId in project OpenSearch by opensearch-project.

the class AllocationIdIT method testFailedRecoveryOnAllocateStalePrimaryRequiresAnotherAllocateStalePrimary.

public void testFailedRecoveryOnAllocateStalePrimaryRequiresAnotherAllocateStalePrimary() throws Exception {
    /*
         * The allocation id is assigned when the shard starts, while the historyUUID is only adjusted after recovery completes.
         *
         * If, during execution of AllocateStalePrimary, a proper allocation id is stored in the allocation id set and recovery then fails,
         * the shard restart skips the stage where the historyUUID is changed.
         *
         * That leads to a situation where the allocated stale primary and its replica share the same historyUUID, so the
         * replica will receive operations after the local checkpoint even though the documents before that checkpoint could be significantly different.
         *
         * Therefore, on AllocateStalePrimary we put in a fake allocation id (no real one could be generated like that),
         * and any failure during recovery then requires an extra AllocateStalePrimary command to be executed.
         */
    // initial set up
    final String indexName = "index42";
    final String master = internalCluster().startMasterOnlyNode();
    String node1 = internalCluster().startNode();
    createIndex(indexName, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "checksum").build());
    final int numDocs = indexDocs(indexName, "foo", "bar");
    final IndexSettings indexSettings = getIndexSettings(indexName, node1);
    final Set<String> allocationIds = getAllocationIds(indexName);
    final ShardId shardId = new ShardId(resolveIndex(indexName), 0);
    final Path indexPath = getIndexPath(node1, shardId);
    assertThat(allocationIds, hasSize(1));
    final String historyUUID = historyUUID(node1, indexName);
    String node2 = internalCluster().startNode();
    ensureGreen(indexName);
    internalCluster().assertSameDocIdsOnShards();
    // initial set up is done
    Settings node1DataPathSettings = internalCluster().dataPathSettings(node1);
    Settings node2DataPathSettings = internalCluster().dataPathSettings(node2);
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1));
    // index more docs on node2; this marks node1's copy as stale
    int numExtraDocs = indexDocs(indexName, "foo", "bar2");
    assertHitCount(client(node2).prepareSearch(indexName).setQuery(matchAllQuery()).get(), numDocs + numExtraDocs);
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node2));
    // create fake corrupted marker on node1
    putFakeCorruptionMarker(indexSettings, shardId, indexPath);
    // thanks to the master, node1's copy is known to be out of sync
    node1 = internalCluster().startNode(node1DataPathSettings);
    // there is only a _stale_ primary copy
    checkNoValidShardCopy(indexName, shardId);
    // allocate stale primary
    client(node1).admin().cluster().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(indexName, 0, node1, true)).get();
    // allocation fails due to corruption marker
    assertBusy(() -> {
        final ClusterState state = client().admin().cluster().prepareState().get().getState();
        final ShardRouting shardRouting = state.routingTable().index(indexName).shard(shardId.id()).primaryShard();
        assertThat(shardRouting.state(), equalTo(ShardRoutingState.UNASSIGNED));
        assertThat(shardRouting.unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.ALLOCATION_FAILED));
    });
    internalCluster().stopRandomNode(InternalTestCluster.nameFilter(node1));
    try (Store store = new Store(shardId, indexSettings, new NIOFSDirectory(indexPath), new DummyShardLock(shardId))) {
        store.removeCorruptionMarker();
    }
    node1 = internalCluster().startNode(node1DataPathSettings);
    // index is red: no shard is allocated (the allocation id is a fake id that does not match anything)
    checkHealthStatus(indexName, ClusterHealthStatus.RED);
    checkNoValidShardCopy(indexName, shardId);
    // no valid shard copy is there; we have to invoke AllocateStalePrimary again
    client().admin().cluster().prepareReroute().add(new AllocateStalePrimaryAllocationCommand(indexName, 0, node1, true)).get();
    ensureYellow(indexName);
    // bring node2 back
    node2 = internalCluster().startNode(node2DataPathSettings);
    ensureGreen(indexName);
    assertThat(historyUUID(node1, indexName), not(equalTo(historyUUID)));
    assertThat(historyUUID(node1, indexName), equalTo(historyUUID(node2, indexName)));
    internalCluster().assertSameDocIdsOnShards();
}
Also used : ShardPath(org.opensearch.index.shard.ShardPath) Path(java.nio.file.Path) ClusterState(org.opensearch.cluster.ClusterState) NIOFSDirectory(org.apache.lucene.store.NIOFSDirectory) AllocateStalePrimaryAllocationCommand(org.opensearch.cluster.routing.allocation.command.AllocateStalePrimaryAllocationCommand) IndexSettings(org.opensearch.index.IndexSettings) Store(org.opensearch.index.store.Store) ShardId(org.opensearch.index.shard.ShardId) DummyShardLock(org.opensearch.test.DummyShardLock) Settings(org.opensearch.common.settings.Settings)
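The test references a putFakeCorruptionMarker helper that is not shown here. A plausible sketch, mirroring the Store/NIOFSDirectory/DummyShardLock pattern the test itself uses later to remove the marker; markStoreCorrupted is the Store counterpart of removeCorruptionMarker:

private void putFakeCorruptionMarker(IndexSettings indexSettings, ShardId shardId, Path indexPath) throws IOException {
    try (Store store = new Store(shardId, indexSettings, new NIOFSDirectory(indexPath), new DummyShardLock(shardId))) {
        // write a corruption marker into the shard's store directory so the next recovery fails
        store.markStoreCorrupted(new IOException("fake ioexception"));
    }
}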

Example 3 with ShardId

use of org.opensearch.index.shard.ShardId in project OpenSearch by opensearch-project.

the class ClusterRerouteIT method rerouteWithAllocateLocalGateway.

private void rerouteWithAllocateLocalGateway(Settings commonSettings) throws Exception {
    logger.info("--> starting 2 nodes");
    String node_1 = internalCluster().startNode(commonSettings);
    internalCluster().startNode(commonSettings);
    assertThat(cluster().size(), equalTo(2));
    ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet();
    assertThat(healthResponse.isTimedOut(), equalTo(false));
    logger.info("--> create an index with 1 shard, 1 replica, nothing should allocate");
    client().admin().indices().prepareCreate("test").setWaitForActiveShards(ActiveShardCount.NONE).setSettings(Settings.builder().put("index.number_of_shards", 1)).execute().actionGet();
    final boolean closed = randomBoolean();
    if (closed) {
        client().admin().indices().prepareClose("test").get();
    }
    ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
    assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2));
    logger.info("--> explicitly allocate shard 1, actually allocating, no dry run");
    state = client().admin().cluster().prepareReroute().setExplain(randomBoolean()).add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true)).execute().actionGet().getState();
    assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
    assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), equalTo(ShardRoutingState.INITIALIZING));
    healthResponse = client().admin().cluster().prepareHealth().setIndices("test").setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus().execute().actionGet();
    assertThat(healthResponse.isTimedOut(), equalTo(false));
    logger.info("--> get the state, verify shard 1 primary allocated");
    state = client().admin().cluster().prepareState().execute().actionGet().getState();
    assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
    assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), equalTo(ShardRoutingState.STARTED));
    if (closed == false) {
        client().prepareIndex("test").setId("1").setSource("field", "value").setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
    }
    final Index index = resolveIndex("test");
    logger.info("--> closing all nodes");
    Path[] shardLocation = internalCluster().getInstance(NodeEnvironment.class, node_1).availableShardPaths(new ShardId(index, 0));
    // make sure the data is there!
    assertThat(FileSystemUtils.exists(shardLocation), equalTo(true));
    // don't wipe the data directories; the index needs to be there!
    internalCluster().closeNonSharedNodes(false);
    logger.info("--> deleting the shard data [{}] ", Arrays.toString(shardLocation));
    // verify again after cluster was shut down
    assertThat(FileSystemUtils.exists(shardLocation), equalTo(true));
    IOUtils.rm(shardLocation);
    logger.info("--> starting nodes back, will not allocate the shard since it has no data, but the index will be there");
    node_1 = internalCluster().startNode(commonSettings);
    internalCluster().startNode(commonSettings);
    // wait a bit for the cluster to realize that the shard is not there...
    // TODO can we get around this? the cluster is RED, so what do we wait for?
    client().admin().cluster().prepareReroute().get();
    assertThat(client().admin().cluster().prepareHealth().setIndices("test").setWaitForNodes("2").execute().actionGet().getStatus(), equalTo(ClusterHealthStatus.RED));
    logger.info("--> explicitly allocate primary");
    state = client().admin().cluster().prepareReroute().setExplain(randomBoolean()).add(new AllocateEmptyPrimaryAllocationCommand("test", 0, node_1, true)).execute().actionGet().getState();
    assertThat(state.getRoutingNodes().unassigned().size(), equalTo(1));
    assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(), equalTo(ShardRoutingState.INITIALIZING));
    logger.info("--> get the state, verify shard 1 primary allocated");
    final String nodeToCheck = node_1;
    assertBusy(() -> {
        ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
        String nodeId = clusterState.nodes().resolveNode(nodeToCheck).getId();
        assertThat(clusterState.getRoutingNodes().node(nodeId).iterator().next().state(), equalTo(ShardRoutingState.STARTED));
    });
}
Also used : Path(java.nio.file.Path) ShardId(org.opensearch.index.shard.ShardId) ClusterState(org.opensearch.cluster.ClusterState) ClusterHealthResponse(org.opensearch.action.admin.cluster.health.ClusterHealthResponse) NodeEnvironment(org.opensearch.env.NodeEnvironment) AllocateEmptyPrimaryAllocationCommand(org.opensearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand) Index(org.opensearch.index.Index) Matchers.containsString(org.hamcrest.Matchers.containsString)
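The examples so far build ShardId two different ways; a minimal sketch contrasting the two constructors (the UUID values below are placeholders, just like the "uuid" string in Example 1):

static void shardIdConstructors() {
    // from a resolved Index, as in Examples 2 and 3 (e.g. obtained via resolveIndex(indexName)):
    Index index = new Index("test", "some-index-uuid");
    ShardId fromIndex = new ShardId(index, 0);
    // from a raw index name plus UUID string, as in Example 1; fine for tests that never check the UUID:
    ShardId fromName = new ShardId("test", "uuid", 0);
    assert fromIndex.id() == fromName.id();
}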

Example 4 with ShardId

use of org.opensearch.index.shard.ShardId in project OpenSearch by opensearch-project.

the class DocWriteResponse method parseInnerToXContent.

/**
 * Parse the output of the {@link #innerToXContent(XContentBuilder, Params)} method.
 *
 * This method is intended to be called by subclasses and must be called multiple times to parse all the information concerning
 * {@link DocWriteResponse} objects. It always parses the current token, updates the given parsing context accordingly
 * if needed and then immediately returns.
 */
protected static void parseInnerToXContent(XContentParser parser, Builder context) throws IOException {
    XContentParser.Token token = parser.currentToken();
    ensureExpectedToken(XContentParser.Token.FIELD_NAME, token, parser);
    String currentFieldName = parser.currentName();
    token = parser.nextToken();
    if (token.isValue()) {
        if (_INDEX.equals(currentFieldName)) {
            // index uuid and shard id are unknown and can't be parsed back for now.
            context.setShardId(new ShardId(new Index(parser.text(), IndexMetadata.INDEX_UUID_NA_VALUE), -1));
        } else if (_ID.equals(currentFieldName)) {
            context.setId(parser.text());
        } else if (_VERSION.equals(currentFieldName)) {
            context.setVersion(parser.longValue());
        } else if (RESULT.equals(currentFieldName)) {
            String result = parser.text();
            for (Result r : Result.values()) {
                if (r.getLowercase().equals(result)) {
                    context.setResult(r);
                    break;
                }
            }
        } else if (FORCED_REFRESH.equals(currentFieldName)) {
            context.setForcedRefresh(parser.booleanValue());
        } else if (_SEQ_NO.equals(currentFieldName)) {
            context.setSeqNo(parser.longValue());
        } else if (_PRIMARY_TERM.equals(currentFieldName)) {
            context.setPrimaryTerm(parser.longValue());
        }
    } else if (token == XContentParser.Token.START_OBJECT) {
        if (_SHARDS.equals(currentFieldName)) {
            context.setShardInfo(ShardInfo.fromXContent(parser));
        } else {
            // skip potential inner objects for forward compatibility
            parser.skipChildren();
        }
    } else if (token == XContentParser.Token.START_ARRAY) {
        // skip potential inner arrays for forward compatibility
        parser.skipChildren();
    }
}
Also used : ShardId(org.opensearch.index.shard.ShardId) Index(org.opensearch.index.Index) XContentParser(org.opensearch.common.xcontent.XContentParser)
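The javadoc's contract ("must be called multiple times") means a subclass drives this method from a token loop. A minimal sketch of that calling pattern, assuming the same static import of ensureExpectedToken used above, with DocWriteResponse.Builder standing in for a concrete subclass builder:

protected static void parseAllFields(XContentParser parser, DocWriteResponse.Builder context) throws IOException {
    ensureExpectedToken(XContentParser.Token.START_OBJECT, parser.nextToken(), parser);
    // each iteration positions the parser on a field name; each call consumes exactly one field
    while (parser.nextToken() != XContentParser.Token.END_OBJECT) {
        parseInnerToXContent(parser, context);
    }
}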

Example 5 with ShardId

use of org.opensearch.index.shard.ShardId in project OpenSearch by opensearch-project.

the class TransportSearchAction method getRemoteShardsIterator.

static List<SearchShardIterator> getRemoteShardsIterator(Map<String, ClusterSearchShardsResponse> searchShardsResponses, Map<String, OriginalIndices> remoteIndicesByCluster, Map<String, AliasFilter> aliasFilterMap) {
    final List<SearchShardIterator> remoteShardIterators = new ArrayList<>();
    for (Map.Entry<String, ClusterSearchShardsResponse> entry : searchShardsResponses.entrySet()) {
        for (ClusterSearchShardsGroup clusterSearchShardsGroup : entry.getValue().getGroups()) {
            // add the cluster name to the remote index names for indices disambiguation
            // this ends up in the hits returned with the search response
            ShardId shardId = clusterSearchShardsGroup.getShardId();
            AliasFilter aliasFilter = aliasFilterMap.get(shardId.getIndex().getUUID());
            String[] aliases = aliasFilter.getAliases();
            String clusterAlias = entry.getKey();
            String[] finalIndices = aliases.length == 0 ? new String[] { shardId.getIndexName() } : aliases;
            final OriginalIndices originalIndices = remoteIndicesByCluster.get(clusterAlias);
            assert originalIndices != null : "original indices are null for clusterAlias: " + clusterAlias;
            SearchShardIterator shardIterator = new SearchShardIterator(clusterAlias, shardId, Arrays.asList(clusterSearchShardsGroup.getShards()), new OriginalIndices(finalIndices, originalIndices.indicesOptions()));
            remoteShardIterators.add(shardIterator);
        }
    }
    return remoteShardIterators;
}
Also used : ClusterSearchShardsResponse(org.opensearch.action.admin.cluster.shards.ClusterSearchShardsResponse) ShardId(org.opensearch.index.shard.ShardId) AliasFilter(org.opensearch.search.internal.AliasFilter) ArrayList(java.util.ArrayList) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ClusterSearchShardsGroup(org.opensearch.action.admin.cluster.shards.ClusterSearchShardsGroup) OriginalIndices(org.opensearch.action.OriginalIndices)

Aggregations

ShardId (org.opensearch.index.shard.ShardId) 572
ClusterState (org.opensearch.cluster.ClusterState) 120
ShardRouting (org.opensearch.cluster.routing.ShardRouting) 115
Settings (org.opensearch.common.settings.Settings) 114
Index (org.opensearch.index.Index) 110
IndexMetadata (org.opensearch.cluster.metadata.IndexMetadata) 104
ArrayList (java.util.ArrayList) 103
DiscoveryNode (org.opensearch.cluster.node.DiscoveryNode) 91
IOException (java.io.IOException) 84
HashMap (java.util.HashMap) 78
Map (java.util.Map) 71
IndexShardRoutingTable (org.opensearch.cluster.routing.IndexShardRoutingTable) 70
SearchShardTarget (org.opensearch.search.SearchShardTarget) 70
List (java.util.List) 66
ActionListener (org.opensearch.action.ActionListener) 64
IndexShard (org.opensearch.index.shard.IndexShard) 64
Matchers.containsString (org.hamcrest.Matchers.containsString) 63
CountDownLatch (java.util.concurrent.CountDownLatch) 61
PlainActionFuture (org.opensearch.action.support.PlainActionFuture) 58
HashSet (java.util.HashSet) 56
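With 572 usages, ShardId is ubiquitous; it is essentially an Index plus a shard number, and it is Writeable. A brief sketch of the API surface the examples above lean on; the BytesStreamOutput (org.opensearch.common.io.stream) round-trip is the common OpenSearch serialization-test idiom, not code taken from this page:

static void shardIdRoundTrip() throws IOException {
    ShardId shardId = new ShardId(new Index("test", "_na_"), 0);
    assert shardId.id() == 0;
    assert "test".equals(shardId.getIndexName());
    assert "[test][0]".equals(shardId.toString());
    // serialize and deserialize through the stream abstraction used across OpenSearch
    BytesStreamOutput out = new BytesStreamOutput();
    shardId.writeTo(out);
    ShardId copy = new ShardId(out.bytes().streamInput());
    assert shardId.equals(copy);
}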