Use of org.opensearch.cluster.node.DiscoveryNodes in project OpenSearch by opensearch-project.
From the class ClusterApplierServiceTests, method testLocalNodeMasterListenerCallbacks:
public void testLocalNodeMasterListenerCallbacks() {
    TimedClusterApplierService timedClusterApplierService = createTimedClusterService(false);
    AtomicBoolean isMaster = new AtomicBoolean();
    timedClusterApplierService.addLocalNodeMasterListener(new LocalNodeMasterListener() {

        @Override
        public void onMaster() {
            isMaster.set(true);
        }

        @Override
        public void offMaster() {
            isMaster.set(false);
        }
    });

    ClusterState state = timedClusterApplierService.state();
    DiscoveryNodes nodes = state.nodes();
    DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId());
    state = ClusterState.builder(state).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).nodes(nodesBuilder).build();
    setState(timedClusterApplierService, state);
    assertThat(isMaster.get(), is(true));

    nodes = state.nodes();
    nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(null);
    state = ClusterState.builder(state)
        .blocks(ClusterBlocks.builder().addGlobalBlock(NoMasterBlockService.NO_MASTER_BLOCK_WRITES))
        .nodes(nodesBuilder)
        .build();
    setState(timedClusterApplierService, state);
    assertThat(isMaster.get(), is(false));

    nodesBuilder = DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId());
    state = ClusterState.builder(state).blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).nodes(nodesBuilder).build();
    setState(timedClusterApplierService, state);
    assertThat(isMaster.get(), is(true));

    timedClusterApplierService.close();
}
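The pattern the test exercises can be shown in isolation. Below is a minimal, hypothetical sketch (the class name LocalNodePromotionSketch is not from the project) that builds a single-node DiscoveryNodes and flips the cluster-manager id the same way the test's setState(...) calls do; the imports assume the package layout used by the snippets on this page.

import org.opensearch.Version;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.node.DiscoveryNodes;
import org.opensearch.common.transport.TransportAddress;

public class LocalNodePromotionSketch {
    public static void main(String[] args) {
        // A single local node; the (id, address, version) constructor fills in default attributes and roles.
        DiscoveryNode local = new DiscoveryNode("node_1", new TransportAddress(TransportAddress.META_ADDRESS, 9300), Version.CURRENT);
        DiscoveryNodes nodes = DiscoveryNodes.builder().add(local).localNodeId(local.getId()).build();

        // Promote the local node to cluster-manager, as the first setState(...) above does.
        DiscoveryNodes elected = DiscoveryNodes.builder(nodes).masterNodeId(nodes.getLocalNodeId()).build();
        System.out.println(elected.isLocalNodeElectedMaster()); // true -> onMaster() would fire

        // Clear the cluster-manager id again, as the second setState(...) does.
        DiscoveryNodes demoted = DiscoveryNodes.builder(elected).masterNodeId(null).build();
        System.out.println(demoted.isLocalNodeElectedMaster()); // false -> offMaster() would fire
    }
}

A DiscoveryNodes.Builder seeded from an existing DiscoveryNodes copies its nodes, so only the cluster-manager id changes between the states the listener observes.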
Use of org.opensearch.cluster.node.DiscoveryNodes in project OpenSearch by opensearch-project.
From the class GatewayAllocator, method ensureAsyncFetchStorePrimaryRecency:
/**
 * Clear the fetched data for the primary to ensure we do not cancel recoveries based on excessively stale data.
 */
private void ensureAsyncFetchStorePrimaryRecency(RoutingAllocation allocation) {
    DiscoveryNodes nodes = allocation.nodes();
    if (hasNewNodes(nodes)) {
        final Set<String> newEphemeralIds = StreamSupport.stream(nodes.getDataNodes().spliterator(), false)
            .map(node -> node.value.getEphemeralId())
            .collect(Collectors.toSet());
        // Invalidate the cache if a data node has been added to the cluster. This ensures that we do not cancel a recovery if a node
        // drops out, we fetch the shard data, then some indexing happens and then the node rejoins the cluster again. There are other
        // ways we could decide to cancel a recovery based on stale data (e.g. changing allocation filters or a primary failure) but
        // making the wrong decision here is not catastrophic so we only need to cover the common case.
        logger.trace(
            () -> new ParameterizedMessage(
                "new nodes {} found, clearing primary async-fetch-store cache",
                Sets.difference(newEphemeralIds, lastSeenEphemeralIds)
            )
        );
        asyncFetchStore.values().forEach(fetch -> clearCacheForPrimary(fetch, allocation));
        // recalc to also (lazily) clear out old nodes.
        this.lastSeenEphemeralIds = newEphemeralIds;
    }
}
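For reference, the stream over getDataNodes() can be reproduced outside the allocator. The sketch below (hypothetical class EphemeralIdSketch, not part of GatewayAllocator) builds a DiscoveryNodes containing one data node and collects the data-node ephemeral ids; it assumes the version used by these snippets, where getDataNodes() returns an ImmutableOpenMap that is iterated through cursors.

import java.util.Collections;
import java.util.Set;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import org.opensearch.Version;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.node.DiscoveryNodeRole;
import org.opensearch.cluster.node.DiscoveryNodes;
import org.opensearch.common.transport.TransportAddress;

public class EphemeralIdSketch {
    public static void main(String[] args) {
        // One data node; ephemeral ids are generated per node instance and change when the node restarts.
        DiscoveryNode dataNode = new DiscoveryNode(
            "data-0",
            "data-0-id",
            new TransportAddress(TransportAddress.META_ADDRESS, 9300),
            Collections.emptyMap(),
            Collections.singleton(DiscoveryNodeRole.DATA_ROLE),
            Version.CURRENT
        );
        DiscoveryNodes nodes = DiscoveryNodes.builder().add(dataNode).localNodeId(dataNode.getId()).build();

        // Same stream shape as ensureAsyncFetchStorePrimaryRecency: iterate the map's cursors and read .value.
        Set<String> ephemeralIds = StreamSupport.stream(nodes.getDataNodes().spliterator(), false)
            .map(cursor -> cursor.value.getEphemeralId())
            .collect(Collectors.toSet());
        System.out.println(ephemeralIds);
    }
}

Comparing such a set against the previously seen one (the Sets.difference call above) is how the allocator detects that a data node joined and decides to clear the primary async-fetch-store cache.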
Use of org.opensearch.cluster.node.DiscoveryNodes in project OpenSearch by opensearch-project.
From the class IndicesClusterStateService, method createOrUpdateShards:
private void createOrUpdateShards(final ClusterState state) {
    RoutingNode localRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId());
    if (localRoutingNode == null) {
        return;
    }

    DiscoveryNodes nodes = state.nodes();
    RoutingTable routingTable = state.routingTable();
    for (final ShardRouting shardRouting : localRoutingNode) {
        ShardId shardId = shardRouting.shardId();
        if (failedShardsCache.containsKey(shardId) == false) {
            AllocatedIndex<? extends Shard> indexService = indicesService.indexService(shardId.getIndex());
            assert indexService != null : "index " + shardId.getIndex() + " should have been created by createIndices";
            Shard shard = indexService.getShardOrNull(shardId.id());
            if (shard == null) {
                assert shardRouting.initializing() : shardRouting + " should have been removed by failMissingShards";
                createShard(nodes, routingTable, shardRouting, state);
            } else {
                updateShard(nodes, shardRouting, shard, routingTable, state);
            }
        }
    }
}
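The null check on the local RoutingNode is the key DiscoveryNodes interaction here: state.nodes().getLocalNodeId() keys into the routing nodes, and a missing entry means no shard copies are allocated to this node. A small, hypothetical helper (LocalShardsSketch is not part of IndicesClusterStateService) showing the same lookup in isolation:

import org.opensearch.cluster.ClusterState;
import org.opensearch.cluster.node.DiscoveryNodes;
import org.opensearch.cluster.routing.RoutingNode;
import org.opensearch.cluster.routing.ShardRouting;

final class LocalShardsSketch {

    private LocalShardsSketch() {}

    // Count the shard copies routed to the node that applied this cluster state.
    static int countLocalShards(ClusterState state) {
        DiscoveryNodes nodes = state.nodes();
        RoutingNode localRoutingNode = state.getRoutingNodes().node(nodes.getLocalNodeId());
        if (localRoutingNode == null) {
            // The local node holds no shard copies in this cluster state.
            return 0;
        }
        int count = 0;
        // RoutingNode is iterable over the ShardRoutings assigned to that node, as in the loop above.
        for (ShardRouting shardRouting : localRoutingNode) {
            count++;
        }
        return count;
    }
}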
Use of org.opensearch.cluster.node.DiscoveryNodes in project OpenSearch by opensearch-project.
From the class ClusterRerouteResponseTests, method testToXContent:
public void testToXContent() throws IOException {
    DiscoveryNode node0 = new DiscoveryNode("node0", new TransportAddress(TransportAddress.META_ADDRESS, 9000), Version.CURRENT);
    DiscoveryNodes nodes = new DiscoveryNodes.Builder().add(node0).masterNodeId(node0.getId()).build();
    IndexMetadata indexMetadata = IndexMetadata.builder("index")
        .settings(
            Settings.builder()
                .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), true)
                .put(IndexSettings.MAX_SCRIPT_FIELDS_SETTING.getKey(), 10)
                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
                .put(IndexMetadata.SETTING_VERSION_CREATED, Version.CURRENT)
                .build()
        )
        .build();
    ImmutableOpenMap.Builder<String, IndexMetadata> openMapBuilder = ImmutableOpenMap.builder();
    openMapBuilder.put("index", indexMetadata);
    Metadata metadata = Metadata.builder().indices(openMapBuilder.build()).build();
    ClusterState clusterState = ClusterState.builder(new ClusterName("test")).nodes(nodes).metadata(metadata).build();
    RoutingExplanations routingExplanations = new RoutingExplanations();
    routingExplanations.add(new RerouteExplanation(new AllocateReplicaAllocationCommand("index", 0, "node0"), Decision.YES));
    ClusterRerouteResponse clusterRerouteResponse = new ClusterRerouteResponse(true, clusterState, routingExplanations);
    {
        XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint();
        clusterRerouteResponse.toXContent(builder, ToXContent.EMPTY_PARAMS);
assertEquals("{\n" + " \"acknowledged\" : true,\n" + " \"state\" : {\n" + " \"cluster_uuid\" : \"_na_\",\n" + " \"version\" : 0,\n" + " \"state_uuid\" : \"" + clusterState.stateUUID() + "\",\n" + " \"master_node\" : \"node0\",\n" + " \"cluster_manager_node\" : \"node0\",\n" + " \"blocks\" : { },\n" + " \"nodes\" : {\n" + " \"node0\" : {\n" + " \"name\" : \"\",\n" + " \"ephemeral_id\" : \"" + node0.getEphemeralId() + "\",\n" + " \"transport_address\" : \"0.0.0.0:9000\",\n" + " \"attributes\" : { }\n" + " }\n" + " },\n" + " \"metadata\" : {\n" + " \"cluster_uuid\" : \"_na_\",\n" + " \"cluster_uuid_committed\" : false,\n" + " \"cluster_coordination\" : {\n" + " \"term\" : 0,\n" + " \"last_committed_config\" : [ ],\n" + " \"last_accepted_config\" : [ ],\n" + " \"voting_config_exclusions\" : [ ]\n" + " },\n" + " \"templates\" : { },\n" + " \"indices\" : {\n" + " \"index\" : {\n" + " \"version\" : 1,\n" + " \"mapping_version\" : 1,\n" + " \"settings_version\" : 1,\n" + " \"aliases_version\" : 1,\n" + " \"routing_num_shards\" : 1,\n" + " \"state\" : \"open\",\n" + " \"settings\" : {\n" + " \"index\" : {\n" + " \"shard\" : {\n" + " \"check_on_startup\" : \"true\"\n" + " },\n" + " \"number_of_shards\" : \"1\",\n" + " \"number_of_replicas\" : \"0\",\n" + " \"version\" : {\n" + " \"created\" : \"" + Version.CURRENT.id + "\"\n" + " },\n" + " \"max_script_fields\" : \"10\"\n" + " }\n" + " },\n" + " \"mappings\" : { },\n" + " \"aliases\" : [ ],\n" + " \"primary_terms\" : {\n" + " \"0\" : 0\n" + " },\n" + " \"in_sync_allocations\" : {\n" + " \"0\" : [ ]\n" + " },\n" + " \"rollover_info\" : { },\n" + " \"system\" : false\n" + " }\n" + " },\n" + " \"index-graveyard\" : {\n" + " \"tombstones\" : [ ]\n" + " }\n" + " },\n" + " \"routing_table\" : {\n" + " \"indices\" : { }\n" + " },\n" + " \"routing_nodes\" : {\n" + " \"unassigned\" : [ ],\n" + " \"nodes\" : {\n" + " \"node0\" : [ ]\n" + " }\n" + " }\n" + " }\n" + "}", Strings.toString(builder));
    }
    {
        XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint();
        Map<String, String> params = new HashMap<>();
        params.put("explain", "true");
        params.put("metric", "version,cluster_manager_node");
        clusterRerouteResponse.toXContent(builder, new ToXContent.MapParams(params));
assertEquals("{\n" + " \"acknowledged\" : true,\n" + " \"state\" : {\n" + " \"cluster_uuid\" : \"_na_\",\n" + " \"version\" : 0,\n" + " \"state_uuid\" : \"" + clusterState.stateUUID() + "\",\n" + " \"cluster_manager_node\" : \"node0\"\n" + " },\n" + " \"explanations\" : [\n" + " {\n" + " \"command\" : \"allocate_replica\",\n" + " \"parameters\" : {\n" + " \"index\" : \"index\",\n" + " \"shard\" : 0,\n" + " \"node\" : \"node0\"\n" + " },\n" + " \"decisions\" : [\n" + " {\n" + " \"decider\" : null,\n" + " \"decision\" : \"YES\",\n" + " \"explanation\" : \"none\"\n" + " }\n" + " ]\n" + " }\n" + " ]\n" + "}", Strings.toString(builder));
    }
    {
        XContentBuilder builder = JsonXContent.contentBuilder().prettyPrint();
        Map<String, String> params = new HashMap<>();
        params.put("metric", "metadata");
        params.put("settings_filter", "index.number*,index.version.created");
        clusterRerouteResponse.toXContent(builder, new ToXContent.MapParams(params));
assertEquals("{\n" + " \"acknowledged\" : true,\n" + " \"state\" : {\n" + " \"cluster_uuid\" : \"_na_\",\n" + " \"metadata\" : {\n" + " \"cluster_uuid\" : \"_na_\",\n" + " \"cluster_uuid_committed\" : false,\n" + " \"cluster_coordination\" : {\n" + " \"term\" : 0,\n" + " \"last_committed_config\" : [ ],\n" + " \"last_accepted_config\" : [ ],\n" + " \"voting_config_exclusions\" : [ ]\n" + " },\n" + " \"templates\" : { },\n" + " \"indices\" : {\n" + " \"index\" : {\n" + " \"version\" : 1,\n" + " \"mapping_version\" : 1,\n" + " \"settings_version\" : 1,\n" + " \"aliases_version\" : 1,\n" + " \"routing_num_shards\" : 1,\n" + " \"state\" : \"open\",\n" + " \"settings\" : {\n" + " \"index\" : {\n" + " \"max_script_fields\" : \"10\",\n" + " \"shard\" : {\n" + " \"check_on_startup\" : \"true\"\n" + " }\n" + " }\n" + " },\n" + " \"mappings\" : { },\n" + " \"aliases\" : [ ],\n" + " \"primary_terms\" : {\n" + " \"0\" : 0\n" + " },\n" + " \"in_sync_allocations\" : {\n" + " \"0\" : [ ]\n" + " },\n" + " \"rollover_info\" : { },\n" + " \"system\" : false\n" + " }\n" + " },\n" + " \"index-graveyard\" : {\n" + " \"tombstones\" : [ ]\n" + " }\n" + " }\n" + " }\n" + "}", Strings.toString(builder));
    }
}
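The expected JSON is driven by the single-node DiscoveryNodes built at the top of the test. A minimal sketch (hypothetical class RerouteNodesSketch, not from the project) of that setup, showing where the "master_node"/"cluster_manager_node" value and the per-node ephemeral id in the output come from:

import org.opensearch.Version;
import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.node.DiscoveryNodes;
import org.opensearch.common.transport.TransportAddress;

public class RerouteNodesSketch {
    public static void main(String[] args) {
        // Same construction as the test: one node, registered as the elected cluster-manager.
        DiscoveryNode node0 = new DiscoveryNode("node0", new TransportAddress(TransportAddress.META_ADDRESS, 9000), Version.CURRENT);
        DiscoveryNodes nodes = new DiscoveryNodes.Builder().add(node0).masterNodeId(node0.getId()).build();

        System.out.println(nodes.getMasterNodeId());                   // "node0", rendered as master_node / cluster_manager_node
        System.out.println(nodes.get(node0.getId()).getEphemeralId()); // generated per node, hence the interpolation in the expected JSON
    }
}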
Use of org.opensearch.cluster.node.DiscoveryNodes in project OpenSearch by opensearch-project.
From the class IndexRecoveryIT, method testUsesFileBasedRecoveryIfRetentionLeaseAheadOfGlobalCheckpoint:
public void testUsesFileBasedRecoveryIfRetentionLeaseAheadOfGlobalCheckpoint() throws Exception {
    internalCluster().ensureAtLeastNumDataNodes(2);
    String indexName = "test-index";
    createIndex(
        indexName,
        Settings.builder()
            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1)
            .put(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), true)
            .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "12h")
            .build()
    );
    indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, between(0, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList()));
    ensureGreen(indexName);

    final ShardId shardId = new ShardId(resolveIndex(indexName), 0);
    final DiscoveryNodes discoveryNodes = clusterService().state().nodes();
    final IndexShardRoutingTable indexShardRoutingTable = clusterService().state().routingTable().shardRoutingTable(shardId);
    final IndexShard primary = internalCluster().getInstance(IndicesService.class, discoveryNodes.get(indexShardRoutingTable.primaryShard().currentNodeId()).getName())
        .getShardOrNull(shardId);
    final ShardRouting replicaShardRouting = indexShardRoutingTable.replicaShards().get(0);

    internalCluster().restartNode(discoveryNodes.get(replicaShardRouting.currentNodeId()).getName(), new InternalTestCluster.RestartCallback() {
        @Override
        public Settings onNodeStopped(String nodeName) throws Exception {
            assertFalse(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(discoveryNodes.getSize() - 1)).setWaitForEvents(Priority.LANGUID).get().isTimedOut());
            indexRandom(randomBoolean(), randomBoolean(), randomBoolean(), IntStream.range(0, between(1, 100)).mapToObj(n -> client().prepareIndex(indexName).setSource("num", n)).collect(toList()));
            // We do not guarantee that the replica can recover locally all the way to its own global checkpoint before starting
            // to recover from the primary, so we must be careful not to perform an operations-based recovery if this would require
            // some operations that are not being retained. Emulate this by advancing the lease ahead of the replica's GCP:
            primary.renewRetentionLease(
                ReplicationTracker.getPeerRecoveryRetentionLeaseId(replicaShardRouting),
                primary.seqNoStats().getMaxSeqNo() + 1,
                ReplicationTracker.PEER_RECOVERY_RETENTION_LEASE_SOURCE
            );
            return super.onNodeStopped(nodeName);
        }
    });

    ensureGreen(indexName);
    // noinspection OptionalGetWithoutIsPresent because it fails the test if absent
    final RecoveryState recoveryState = client().admin().indices().prepareRecoveries(indexName).get().shardRecoveryStates().get(indexName)
        .stream().filter(rs -> rs.getPrimary() == false).findFirst().get();
    assertThat(recoveryState.getIndex().totalFileCount(), greaterThan(0));
}
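Throughout this test, DiscoveryNodes serves as a lookup table from routing-table node ids to node names, so internalCluster() operations can be targeted at the right node. A hypothetical helper (NodeNameSketch, not part of IndexRecoveryIT) capturing that lookup:

import org.opensearch.cluster.node.DiscoveryNode;
import org.opensearch.cluster.node.DiscoveryNodes;

final class NodeNameSketch {

    private NodeNameSketch() {}

    // Resolve a ShardRouting's currentNodeId() to the node name known to the test cluster.
    static String nodeNameFor(DiscoveryNodes discoveryNodes, String currentNodeId) {
        DiscoveryNode node = discoveryNodes.get(currentNodeId);
        if (node == null) {
            throw new IllegalArgumentException("unknown node id: " + currentNodeId);
        }
        return node.getName();
    }
}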