Use of org.opensearch.action.admin.cluster.state.ClusterStateResponse in project OpenSearch by opensearch-project.
In the class CloseIndexDisableCloseAllIT, the method assertIndexIsClosed:
private void assertIndexIsClosed(String... indices) {
    ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().execute().actionGet();
    for (String index : indices) {
        IndexMetadata indexMetadata = clusterStateResponse.getState().metadata().indices().get(index);
        assertNotNull(indexMetadata);
        assertEquals(IndexMetadata.State.CLOSE, indexMetadata.getState());
    }
}
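The same one-call pattern covers the opposite check. Below is a minimal sketch of a companion helper, assuming the same integration-test scope (client() and the JUnit assertions); assertIndexIsOpened is a hypothetical name, not part of the original class:

// Hypothetical companion helper: verifies that each index exists and is OPEN.
private void assertIndexIsOpened(String... indices) {
    ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().execute().actionGet();
    for (String index : indices) {
        IndexMetadata indexMetadata = clusterStateResponse.getState().metadata().indices().get(index);
        assertNotNull(indexMetadata);
        assertEquals(IndexMetadata.State.OPEN, indexMetadata.getState());
    }
}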
Use of org.opensearch.action.admin.cluster.state.ClusterStateResponse in project OpenSearch by opensearch-project.
In the class SharedClusterSnapshotRestoreIT, the method testDataFileFailureDuringRestore:
public void testDataFileFailureDuringRestore() throws Exception {
    disableRepoConsistencyCheck("This test intentionally leaves a broken repository");
    Path repositoryLocation = randomRepoPath();
    Client client = client();
    createRepository("test-repo", "fs", repositoryLocation);
    prepareCreate("test-idx").setSettings(Settings.builder().put("index.allocation.max_retries", Integer.MAX_VALUE)).get();
    ensureGreen();
    final NumShards numShards = getNumShards("test-idx");
    indexRandomDocs("test-idx", 100);
    createSnapshot("test-repo", "test-snap", Collections.singletonList("test-idx"));
    createRepository(
        "test-repo",
        "mock",
        Settings.builder()
            .put("location", repositoryLocation)
            .put("random", randomAlphaOfLength(10))
            .put("random_data_file_io_exception_rate", 0.3)
    );
    // Test restore after index deletion
    logger.info("--> delete index");
    cluster().wipeIndices("test-idx");
    logger.info("--> restore index after deletion");
    final RestoreSnapshotResponse restoreResponse = client.admin()
        .cluster()
        .prepareRestoreSnapshot("test-repo", "test-snap")
        .setWaitForCompletion(true)
        .get();
    logger.info("--> total number of simulated failures during restore: [{}]", getFailureCount("test-repo"));
    final RestoreInfo restoreInfo = restoreResponse.getRestoreInfo();
    assertThat(restoreInfo.totalShards(), equalTo(numShards.numPrimaries));
    if (restoreInfo.successfulShards() == restoreInfo.totalShards()) {
        // All shards were restored, so we must find the exact number of hits
        assertDocCount("test-idx", 100L);
    } else {
        // One or more shards failed to be restored. This can happen when there is
        // only 1 data node: a shard failed because of the random IO exceptions
        // during restore and then we don't allow the shard to be assigned on the
        // same node again during the same reroute operation. Then another reroute
        // operation is scheduled, but the RestoreInProgressAllocationDecider will
        // block the shard from being assigned again because it failed during restore.
        final ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().get();
        assertEquals(1, clusterStateResponse.getState().getNodes().getDataNodes().size());
        assertEquals(
            restoreInfo.failedShards(),
            clusterStateResponse.getState().getRoutingTable().shardsWithState(ShardRoutingState.UNASSIGNED).size()
        );
    }
}
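The else branch relies on two things a ClusterStateResponse can report: how many data nodes the cluster has and which shard copies remain unassigned. Below is a minimal sketch of just that inspection, assuming a Client named client and a logger in scope; the per-shard logging is illustrative, not part of the original test:

ClusterStateResponse stateResponse = client.admin().cluster().prepareState().get();
// Number of data nodes currently in the cluster.
int dataNodes = stateResponse.getState().getNodes().getDataNodes().size();
// Every shard copy that is currently unassigned, with the reason it was unassigned.
List<ShardRouting> unassigned = stateResponse.getState().getRoutingTable().shardsWithState(ShardRoutingState.UNASSIGNED);
for (ShardRouting shard : unassigned) {
    logger.info("--> [{}] data node(s); shard [{}] unassigned, reason [{}]", dataNodes, shard.shardId(), shard.unassignedInfo().getReason());
}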
Use of org.opensearch.action.admin.cluster.state.ClusterStateResponse in project OpenSearch by opensearch-project.
In the class RepositoriesIT, the method testRepositoryCreation:
public void testRepositoryCreation() throws Exception {
    Client client = client();
    Path location = randomRepoPath();
    createRepository("test-repo-1", "fs", location);
    logger.info("--> verify the repository");
    int numberOfFiles = FileSystemUtils.files(location).length;
    VerifyRepositoryResponse verifyRepositoryResponse = client.admin().cluster().prepareVerifyRepository("test-repo-1").get();
    assertThat(verifyRepositoryResponse.getNodes().size(), equalTo(cluster().numDataAndMasterNodes()));
    logger.info("--> verify that we didn't leave any files as a result of verification");
    assertThat(FileSystemUtils.files(location).length, equalTo(numberOfFiles));
    logger.info("--> check that repository is really there");
    ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().clear().setMetadata(true).get();
    Metadata metadata = clusterStateResponse.getState().getMetadata();
    RepositoriesMetadata repositoriesMetadata = metadata.custom(RepositoriesMetadata.TYPE);
    assertThat(repositoriesMetadata, notNullValue());
    assertThat(repositoriesMetadata.repository("test-repo-1"), notNullValue());
    assertThat(repositoriesMetadata.repository("test-repo-1").type(), equalTo("fs"));
    logger.info("--> creating another repository");
    createRepository("test-repo-2", "fs");
    logger.info("--> check that both repositories are in cluster state");
    clusterStateResponse = client.admin().cluster().prepareState().clear().setMetadata(true).get();
    metadata = clusterStateResponse.getState().getMetadata();
    repositoriesMetadata = metadata.custom(RepositoriesMetadata.TYPE);
    assertThat(repositoriesMetadata, notNullValue());
    assertThat(repositoriesMetadata.repositories().size(), equalTo(2));
    assertThat(repositoriesMetadata.repository("test-repo-1"), notNullValue());
    assertThat(repositoriesMetadata.repository("test-repo-1").type(), equalTo("fs"));
    assertThat(repositoriesMetadata.repository("test-repo-2"), notNullValue());
    assertThat(repositoriesMetadata.repository("test-repo-2").type(), equalTo("fs"));
    logger.info("--> check that both repositories can be retrieved by getRepositories query");
    GetRepositoriesResponse repositoriesResponse = client.admin()
        .cluster()
        .prepareGetRepositories(randomFrom("_all", "*", "test-repo-*"))
        .get();
    assertThat(repositoriesResponse.repositories().size(), equalTo(2));
    assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-1"), notNullValue());
    assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-2"), notNullValue());
    logger.info("--> check that trying to create a repository with the same settings repeatedly does not update cluster state");
    String beforeStateUuid = clusterStateResponse.getState().stateUUID();
    assertThat(
        client.admin()
            .cluster()
            .preparePutRepository("test-repo-1")
            .setType("fs")
            .setSettings(Settings.builder().put("location", location))
            .get()
            .isAcknowledged(),
        equalTo(true)
    );
    assertEquals(beforeStateUuid, client.admin().cluster().prepareState().clear().get().getState().stateUUID());
    logger.info("--> delete repository test-repo-1");
    client.admin().cluster().prepareDeleteRepository("test-repo-1").get();
    repositoriesResponse = client.admin().cluster().prepareGetRepositories().get();
    assertThat(repositoriesResponse.repositories().size(), equalTo(1));
    assertThat(findRepository(repositoriesResponse.repositories(), "test-repo-2"), notNullValue());
    logger.info("--> delete repository test-repo-2");
    client.admin().cluster().prepareDeleteRepository("test-repo-2").get();
    repositoriesResponse = client.admin().cluster().prepareGetRepositories().get();
    assertThat(repositoriesResponse.repositories().size(), equalTo(0));
}
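Every repository check above follows the same pattern: clear the cluster state request down to metadata only, then read the RepositoriesMetadata custom. Below is a condensed sketch of that pattern, assuming a Client named client and a logger in scope; "my-repo" is a placeholder name:

ClusterStateResponse response = client.admin().cluster()
    .prepareState()
    .clear()            // start from an empty state request ...
    .setMetadata(true)  // ... and fetch only the metadata section
    .get();
RepositoriesMetadata repositories = response.getState().getMetadata().custom(RepositoriesMetadata.TYPE);
// "my-repo" is a placeholder; repositories is null when no repository is registered at all.
if (repositories != null && repositories.repository("my-repo") != null) {
    logger.info("--> repository [my-repo] is registered with type [{}]", repositories.repository("my-repo").type());
}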
Use of org.opensearch.action.admin.cluster.state.ClusterStateResponse in project OpenSearch by opensearch-project.
In the class RestShardsActionTests, the method testBuildTable:
public void testBuildTable() {
    final int numShards = randomIntBetween(1, 5);
    DiscoveryNode localNode = new DiscoveryNode("local", buildNewFakeTransportAddress(), Version.CURRENT);
    List<ShardRouting> shardRoutings = new ArrayList<>(numShards);
    Map<ShardRouting, ShardStats> shardStatsMap = new HashMap<>();
    String index = "index";
    for (int i = 0; i < numShards; i++) {
        ShardRoutingState shardRoutingState = ShardRoutingState.fromValue((byte) randomIntBetween(2, 3));
        ShardRouting shardRouting = TestShardRouting.newShardRouting(index, i, localNode.getId(), randomBoolean(), shardRoutingState);
        Path path = createTempDir().resolve("indices")
            .resolve(shardRouting.shardId().getIndex().getUUID())
            .resolve(String.valueOf(shardRouting.shardId().id()));
        ShardStats shardStats = new ShardStats(shardRouting, new ShardPath(false, path, path, shardRouting.shardId()), null, null, null, null);
        shardStatsMap.put(shardRouting, shardStats);
        shardRoutings.add(shardRouting);
    }
    IndexStats indexStats = mock(IndexStats.class);
    when(indexStats.getPrimaries()).thenReturn(new CommonStats());
    when(indexStats.getTotal()).thenReturn(new CommonStats());
    IndicesStatsResponse stats = mock(IndicesStatsResponse.class);
    when(stats.asMap()).thenReturn(shardStatsMap);
    DiscoveryNodes discoveryNodes = mock(DiscoveryNodes.class);
    when(discoveryNodes.get(localNode.getId())).thenReturn(localNode);
    ClusterStateResponse state = mock(ClusterStateResponse.class);
    RoutingTable routingTable = mock(RoutingTable.class);
    when(routingTable.allShards()).thenReturn(shardRoutings);
    ClusterState clusterState = mock(ClusterState.class);
    when(clusterState.routingTable()).thenReturn(routingTable);
    when(clusterState.nodes()).thenReturn(discoveryNodes);
    when(state.getState()).thenReturn(clusterState);
    final RestShardsAction action = new RestShardsAction();
    final Table table = action.buildTable(new FakeRestRequest(), state, stats);
    // now, verify the table is correct
    List<Table.Cell> headers = table.getHeaders();
    assertThat(headers.get(0).value, equalTo("index"));
    assertThat(headers.get(1).value, equalTo("shard"));
    assertThat(headers.get(2).value, equalTo("prirep"));
    assertThat(headers.get(3).value, equalTo("state"));
    assertThat(headers.get(4).value, equalTo("docs"));
    assertThat(headers.get(5).value, equalTo("store"));
    assertThat(headers.get(6).value, equalTo("ip"));
    assertThat(headers.get(7).value, equalTo("id"));
    assertThat(headers.get(8).value, equalTo("node"));
    final List<List<Table.Cell>> rows = table.getRows();
    assertThat(rows.size(), equalTo(numShards));
    Iterator<ShardRouting> shardRoutingsIt = shardRoutings.iterator();
    for (final List<Table.Cell> row : rows) {
        ShardRouting shardRouting = shardRoutingsIt.next();
        ShardStats shardStats = shardStatsMap.get(shardRouting);
        assertThat(row.get(0).value, equalTo(shardRouting.getIndexName()));
        assertThat(row.get(1).value, equalTo(shardRouting.getId()));
        assertThat(row.get(2).value, equalTo(shardRouting.primary() ? "p" : "r"));
        assertThat(row.get(3).value, equalTo(shardRouting.state()));
        assertThat(row.get(6).value, equalTo(localNode.getHostAddress()));
        assertThat(row.get(7).value, equalTo(localNode.getId()));
        assertThat(row.get(69).value, equalTo(shardStats.getDataPath()));
        assertThat(row.get(70).value, equalTo(shardStats.getStatePath()));
    }
}
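buildTable only ever reads the routing table and the node map from the response, which is why plain Mockito stubs are enough and no real cluster is needed. Below is a sketch of the same wiring extracted into a reusable helper, assuming the test's Mockito static imports; mockClusterStateResponse is a hypothetical name, not part of the original test:

// Hypothetical helper: wraps a shard list and its node in a mocked ClusterStateResponse.
private static ClusterStateResponse mockClusterStateResponse(List<ShardRouting> shards, DiscoveryNode node) {
    RoutingTable routingTable = mock(RoutingTable.class);
    when(routingTable.allShards()).thenReturn(shards);
    DiscoveryNodes nodes = mock(DiscoveryNodes.class);
    when(nodes.get(node.getId())).thenReturn(node);
    ClusterState clusterState = mock(ClusterState.class);
    when(clusterState.routingTable()).thenReturn(routingTable);
    when(clusterState.nodes()).thenReturn(nodes);
    ClusterStateResponse response = mock(ClusterStateResponse.class);
    when(response.getState()).thenReturn(clusterState);
    return response;
}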
Use of org.opensearch.action.admin.cluster.state.ClusterStateResponse in project OpenSearch by opensearch-project.
In the class SnapshotResiliencyTests, the method testSnapshotPrimaryRelocations:
/**
* Simulates concurrent restarts of data and master nodes as well as relocating a primary shard, while starting and subsequently
* deleting a snapshot.
*/
public void testSnapshotPrimaryRelocations() {
    final int masterNodeCount = randomFrom(1, 3, 5);
    setupTestCluster(masterNodeCount, randomIntBetween(2, 5));
    String repoName = "repo";
    String snapshotName = "snapshot";
    final String index = "test";
    final int shards = randomIntBetween(1, 5);
    final TestClusterNodes.TestClusterNode masterNode = testClusterNodes.currentMaster(
        testClusterNodes.nodes.values().iterator().next().clusterService.state()
    );
    final AtomicBoolean createdSnapshot = new AtomicBoolean();
    final AdminClient masterAdminClient = masterNode.client.admin();
    final StepListener<ClusterStateResponse> clusterStateResponseStepListener = new StepListener<>();
    continueOrDie(
        createRepoAndIndex(repoName, index, shards),
        createIndexResponse -> client().admin().cluster().state(new ClusterStateRequest(), clusterStateResponseStepListener)
    );
    continueOrDie(clusterStateResponseStepListener, clusterStateResponse -> {
        final ShardRouting shardToRelocate = clusterStateResponse.getState().routingTable().allShards(index).get(0);
        final TestClusterNodes.TestClusterNode currentPrimaryNode = testClusterNodes.nodeById(shardToRelocate.currentNodeId());
        final TestClusterNodes.TestClusterNode otherNode = testClusterNodes.randomDataNodeSafe(currentPrimaryNode.node.getName());
        scheduleNow(() -> testClusterNodes.stopNode(currentPrimaryNode));
        scheduleNow(new Runnable() {
            @Override
            public void run() {
                final StepListener<ClusterStateResponse> updatedClusterStateResponseStepListener = new StepListener<>();
                masterAdminClient.cluster().state(new ClusterStateRequest(), updatedClusterStateResponseStepListener);
                continueOrDie(updatedClusterStateResponseStepListener, updatedClusterState -> {
                    final ShardRouting shardRouting = updatedClusterState.getState()
                        .routingTable()
                        .shardRoutingTable(shardToRelocate.shardId())
                        .primaryShard();
                    if (shardRouting.unassigned() && shardRouting.unassignedInfo().getReason() == UnassignedInfo.Reason.NODE_LEFT) {
                        if (masterNodeCount > 1) {
                            scheduleNow(() -> testClusterNodes.stopNode(masterNode));
                        }
                        testClusterNodes.randomDataNodeSafe().client.admin()
                            .cluster()
                            .prepareCreateSnapshot(repoName, snapshotName)
                            .execute(ActionListener.wrap(() -> {
                                createdSnapshot.set(true);
                                testClusterNodes.randomDataNodeSafe().client.admin()
                                    .cluster()
                                    .deleteSnapshot(new DeleteSnapshotRequest(repoName, snapshotName), noopListener());
                            }));
                        scheduleNow(
                            () -> testClusterNodes.randomMasterNodeSafe().client.admin()
                                .cluster()
                                .reroute(
                                    new ClusterRerouteRequest().add(
                                        new AllocateEmptyPrimaryAllocationCommand(index, shardRouting.shardId().id(), otherNode.node.getName(), true)
                                    ),
                                    noopListener()
                                )
                        );
                    } else {
                        scheduleSoon(this);
                    }
                });
            }
        });
    });
    runUntil(() -> testClusterNodes.randomMasterNode().map(master -> {
        if (createdSnapshot.get() == false) {
            return false;
        }
        return master.clusterService.state().custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).entries().isEmpty();
    }).orElse(false), TimeUnit.MINUTES.toMillis(1L));
    clearDisruptionsAndAwaitSync();
    assertTrue(createdSnapshot.get());
    assertThat(
        testClusterNodes.randomDataNodeSafe().clusterService.state().custom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY).entries(),
        empty()
    );
    final Repository repository = testClusterNodes.randomMasterNodeSafe().repositoriesService.repository(repoName);
    Collection<SnapshotId> snapshotIds = getRepositoryData(repository).getSnapshotIds();
    assertThat(snapshotIds, either(hasSize(1)).or(hasSize(0)));
}
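Because this simulation is deterministic and single-threaded, cluster state is never fetched with blocking get() calls; every read goes through a listener. Below is a minimal sketch of that asynchronous pattern in isolation, assuming an AdminClient named adminClient, a ShardId named shardId, and a logger in scope, plus the test's own StepListener/continueOrDie helpers:

final StepListener<ClusterStateResponse> stateListener = new StepListener<>();
adminClient.cluster().state(new ClusterStateRequest(), stateListener);
continueOrDie(stateListener, response -> {
    // Runs only once the (simulated) master has answered; a failure aborts the test.
    ShardRouting primary = response.getState().routingTable().shardRoutingTable(shardId).primaryShard();
    logger.info("--> primary for [{}] is on node [{}]", shardId, primary.currentNodeId());
});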