Use of org.opensearch.action.admin.cluster.state.ClusterStateResponse in project OpenSearch by opensearch-project.
Class RestShardsAction, method doCatRequest.
@Override
public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
    final String[] indices = Strings.splitStringByCommaToArray(request.param("index"));
    final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
    clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
    clusterStateRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", clusterStateRequest.masterNodeTimeout()));
    parseDeprecatedMasterTimeoutParameter(clusterStateRequest, request, deprecationLogger, getName());
    clusterStateRequest.clear().nodes(true).routingTable(true).indices(indices);
    return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {

        @Override
        public void processResponse(final ClusterStateResponse clusterStateResponse) {
            IndicesStatsRequest indicesStatsRequest = new IndicesStatsRequest();
            indicesStatsRequest.all();
            indicesStatsRequest.indices(indices);
            client.admin().indices().stats(indicesStatsRequest, new RestResponseListener<IndicesStatsResponse>(channel) {

                @Override
                public RestResponse buildResponse(IndicesStatsResponse indicesStatsResponse) throws Exception {
                    return RestTable.buildResponse(buildTable(request, clusterStateResponse, indicesStatsResponse), channel);
                }
            });
        }
    });
}
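
The handler above narrows the ClusterStateRequest with clear() and then opts back in to only the nodes and routing table it needs before chaining into the indices stats call. A minimal standalone sketch of that scoping step, assuming a blocking org.opensearch.client.Client is available; the class and method names (ShardRoutingFetcher, fetchStartedShards) are illustrative, not part of OpenSearch:

import org.opensearch.action.admin.cluster.state.ClusterStateRequest;
import org.opensearch.action.admin.cluster.state.ClusterStateResponse;
import org.opensearch.client.Client;
import org.opensearch.cluster.routing.ShardRouting;
import org.opensearch.cluster.routing.ShardRoutingState;

import java.util.List;

public final class ShardRoutingFetcher {

    // Illustrative helper: request only nodes + routing table and return the started shards of one index.
    static List<ShardRouting> fetchStartedShards(Client client, String index) {
        ClusterStateRequest request = new ClusterStateRequest();
        // clear() drops every part of the state, then we opt back in to the pieces we actually read
        request.clear().nodes(true).routingTable(true).indices(index);
        ClusterStateResponse response = client.admin().cluster().state(request).actionGet();
        return response.getState().routingTable().index(index).shardsWithState(ShardRoutingState.STARTED);
    }
}
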
Use of org.opensearch.action.admin.cluster.state.ClusterStateResponse in project OpenSearch by opensearch-project.
Class RestAllocationAction, method doCatRequest.
@Override
public RestChannelConsumer doCatRequest(final RestRequest request, final NodeClient client) {
    final String[] nodes = Strings.splitStringByCommaToArray(request.param("nodes", "data:true"));
    final ClusterStateRequest clusterStateRequest = new ClusterStateRequest();
    clusterStateRequest.clear().routingTable(true);
    clusterStateRequest.local(request.paramAsBoolean("local", clusterStateRequest.local()));
    clusterStateRequest.masterNodeTimeout(request.paramAsTime("cluster_manager_timeout", clusterStateRequest.masterNodeTimeout()));
    parseDeprecatedMasterTimeoutParameter(clusterStateRequest, request, deprecationLogger, getName());
    return channel -> client.admin().cluster().state(clusterStateRequest, new RestActionListener<ClusterStateResponse>(channel) {

        @Override
        public void processResponse(final ClusterStateResponse state) {
            NodesStatsRequest statsRequest = new NodesStatsRequest(nodes);
            statsRequest.timeout(request.param("timeout"));
            statsRequest.clear()
                .addMetric(NodesStatsRequest.Metric.FS.metricName())
                .indices(new CommonStatsFlags(CommonStatsFlags.Flag.Store));
            client.admin().cluster().nodesStats(statsRequest, new RestResponseListener<NodesStatsResponse>(channel) {

                @Override
                public RestResponse buildResponse(NodesStatsResponse stats) throws Exception {
                    Table tab = buildTable(request, state, stats);
                    return RestTable.buildResponse(tab, channel);
                }
            });
        }
    });
}
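
buildTable in this handler joins the routing table carried by the ClusterStateResponse (how many shards each node holds) with the filesystem stats from the NodesStatsResponse (how much disk each node uses). A minimal sketch of the first half, counting assigned shards per node id from the response; the class name ShardsPerNode is illustrative, not part of OpenSearch:

import org.opensearch.action.admin.cluster.state.ClusterStateResponse;
import org.opensearch.cluster.routing.ShardRouting;

import java.util.HashMap;
import java.util.Map;

public final class ShardsPerNode {

    // Illustrative helper: count the shards currently assigned to each node id.
    static Map<String, Integer> count(ClusterStateResponse state) {
        Map<String, Integer> shardsPerNode = new HashMap<>();
        for (ShardRouting shard : state.getState().routingTable().allShards()) {
            if (shard.assignedToNode()) {
                shardsPerNode.merge(shard.currentNodeId(), 1, Integer::sum);
            }
        }
        return shardsPerNode;
    }
}
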
Use of org.opensearch.action.admin.cluster.state.ClusterStateResponse in project OpenSearch by opensearch-project.
Class GatewayIndexStateIT, method testSimpleOpenClose.
public void testSimpleOpenClose() throws Exception {
    logger.info("--> starting 2 nodes");
    internalCluster().startNodes(2);
    logger.info("--> creating test index");
    createIndex("test");
    NumShards test = getNumShards("test");
    logger.info("--> waiting for green status");
    ensureGreen();
    ClusterStateResponse stateResponse = client().admin().cluster().prepareState().execute().actionGet();
    assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN));
    assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(test.numPrimaries));
    assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(test.totalNumShards));
    logger.info("--> indexing a simple document");
    client().prepareIndex("test").setId("1").setSource("field1", "value1").get();
    logger.info("--> closing test index...");
    assertAcked(client().admin().indices().prepareClose("test"));
    stateResponse = client().admin().cluster().prepareState().execute().actionGet();
    assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE));
    assertThat(stateResponse.getState().routingTable().index("test"), notNullValue());
    logger.info("--> verifying that the state is green");
    ensureGreen();
    logger.info("--> trying to index into a closed index ...");
    try {
        client().prepareIndex("test").setId("1").setSource("field1", "value1").execute().actionGet();
        fail();
    } catch (IndexClosedException e) {
        // all is well
    }
    logger.info("--> creating another index (test2) by indexing into it");
    client().prepareIndex("test2").setId("1").setSource("field1", "value1").execute().actionGet();
    logger.info("--> verifying that the state is green");
    ensureGreen();
    logger.info("--> opening the first index again...");
    assertAcked(client().admin().indices().prepareOpen("test"));
    logger.info("--> verifying that the state is green");
    ensureGreen();
    stateResponse = client().admin().cluster().prepareState().execute().actionGet();
    assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN));
    assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(test.numPrimaries));
    assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(test.totalNumShards));
    logger.info("--> trying to get the indexed document on the first index");
    GetResponse getResponse = client().prepareGet("test", "1").execute().actionGet();
    assertThat(getResponse.isExists(), equalTo(true));
    logger.info("--> closing test index...");
    assertAcked(client().admin().indices().prepareClose("test"));
    stateResponse = client().admin().cluster().prepareState().execute().actionGet();
    assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE));
    assertThat(stateResponse.getState().routingTable().index("test"), notNullValue());
    logger.info("--> restarting nodes...");
    internalCluster().fullRestart();
    logger.info("--> waiting for two nodes and green status");
    ensureGreen();
    stateResponse = client().admin().cluster().prepareState().execute().actionGet();
    assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.CLOSE));
    assertThat(stateResponse.getState().routingTable().index("test"), notNullValue());
    logger.info("--> trying to index into a closed index ...");
    try {
        client().prepareIndex("test").setId("1").setSource("field1", "value1").execute().actionGet();
        fail();
    } catch (IndexClosedException e) {
        // all is well
    }
    logger.info("--> opening index...");
    client().admin().indices().prepareOpen("test").execute().actionGet();
    logger.info("--> waiting for green status");
    ensureGreen();
    stateResponse = client().admin().cluster().prepareState().execute().actionGet();
    assertThat(stateResponse.getState().metadata().index("test").getState(), equalTo(IndexMetadata.State.OPEN));
    assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(test.numPrimaries));
    assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(test.totalNumShards));
    logger.info("--> trying to get the indexed document on the first round (before close and shutdown)");
    getResponse = client().prepareGet("test", "1").execute().actionGet();
    assertThat(getResponse.isExists(), equalTo(true));
    logger.info("--> indexing a simple document");
    client().prepareIndex("test").setId("2").setSource("field1", "value1").execute().actionGet();
}
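
The test re-fetches the cluster state after every open and close to assert the index's IndexMetadata.State and its routing table entry. That recurring check can be read as a small helper, sketched below assuming a blocking Client is available; the class and method names (IndexStateChecks, isClosed) are illustrative, not part of OpenSearch:

import org.opensearch.action.admin.cluster.state.ClusterStateResponse;
import org.opensearch.client.Client;
import org.opensearch.cluster.metadata.IndexMetadata;

final class IndexStateChecks {

    // Illustrative helper: true if the index is marked CLOSE in the current cluster metadata.
    static boolean isClosed(Client client, String index) {
        ClusterStateResponse response = client.admin().cluster().prepareState().get();
        return response.getState().metadata().index(index).getState() == IndexMetadata.State.CLOSE;
    }
}
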
Use of org.opensearch.action.admin.cluster.state.ClusterStateResponse in project OpenSearch by opensearch-project.
Class IndicesLifecycleListenerIT, method testBeforeIndexAddedToCluster.
public void testBeforeIndexAddedToCluster() throws Exception {
    String node1 = internalCluster().startNode();
    String node2 = internalCluster().startNode();
    String node3 = internalCluster().startNode();
    final AtomicInteger beforeAddedCount = new AtomicInteger(0);
    final AtomicInteger allCreatedCount = new AtomicInteger(0);
    IndexEventListener listener = new IndexEventListener() {

        @Override
        public void beforeIndexAddedToCluster(Index index, Settings indexSettings) {
            beforeAddedCount.incrementAndGet();
            if (MockIndexEventListener.TestPlugin.INDEX_FAIL.get(indexSettings)) {
                throw new OpenSearchException("failing on purpose");
            }
        }

        @Override
        public void beforeIndexCreated(Index index, Settings indexSettings) {
            allCreatedCount.incrementAndGet();
        }
    };
    internalCluster().getInstance(MockIndexEventListener.TestEventListener.class, node1).setNewDelegate(listener);
    internalCluster().getInstance(MockIndexEventListener.TestEventListener.class, node2).setNewDelegate(listener);
    internalCluster().getInstance(MockIndexEventListener.TestEventListener.class, node3).setNewDelegate(listener);
    client().admin().indices().prepareCreate("test")
        .setSettings(Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1))
        .get();
    ensureGreen("test");
    assertThat("beforeIndexAddedToCluster called only once", beforeAddedCount.get(), equalTo(1));
    assertThat("beforeIndexCreated called on each data node", allCreatedCount.get(), greaterThanOrEqualTo(3));
    try {
        client().admin().indices().prepareCreate("failed").setSettings(Settings.builder().put("index.fail", true)).get();
        fail("should have thrown an exception during creation");
    } catch (Exception e) {
        assertTrue(e.getMessage().contains("failing on purpose"));
        ClusterStateResponse resp = client().admin().cluster().prepareState().get();
        assertFalse(resp.getState().routingTable().indicesRouting().keys().contains("failed"));
    }
}
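
The catch block confirms that the failed creation left no entry in the routing table by scanning the indicesRouting() keys. RoutingTable also exposes hasIndex(String), so an equivalent check could look like the sketch below; the class and method names (RoutingTableChecks, isRouted) are illustrative, not part of OpenSearch:

import org.opensearch.action.admin.cluster.state.ClusterStateResponse;
import org.opensearch.client.Client;

final class RoutingTableChecks {

    // Illustrative helper: does the current routing table contain an entry for this index?
    static boolean isRouted(Client client, String index) {
        ClusterStateResponse resp = client.admin().cluster().prepareState().get();
        return resp.getState().routingTable().hasIndex(index);
    }
}
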
Use of org.opensearch.action.admin.cluster.state.ClusterStateResponse in project OpenSearch by opensearch-project.
Class ShrinkIndexIT, method testCreateShrinkIndexFails.
/**
 * Tests that we can manually recover from a failed allocation due to shards being moved away etc.
 */
public void testCreateShrinkIndexFails() throws Exception {
    internalCluster().ensureAtLeastNumDataNodes(2);
    prepareCreate("source").setSettings(
        Settings.builder().put(indexSettings()).put("number_of_shards", randomIntBetween(2, 7)).put("number_of_replicas", 0)
    ).get();
    for (int i = 0; i < 20; i++) {
        client().prepareIndex("source").setSource("{\"foo\" : \"bar\", \"i\" : " + i + "}", XContentType.JSON).get();
    }
    ImmutableOpenMap<String, DiscoveryNode> dataNodes = client().admin().cluster().prepareState().get().getState().nodes().getDataNodes();
    assertTrue("at least 2 nodes but was: " + dataNodes.size(), dataNodes.size() >= 2);
    DiscoveryNode[] discoveryNodes = dataNodes.values().toArray(DiscoveryNode.class);
    String spareNode = discoveryNodes[0].getName();
    String mergeNode = discoveryNodes[1].getName();
    // ensure all shards are allocated, otherwise the ensureGreen() below might not succeed since we require the merge node.
    // If we change the setting too quickly we end up with one replica unassigned which can't be assigned anymore due
    // to the require._name below.
    ensureGreen();
    // relocate all shards to one node so that we can merge it.
    client().admin().indices().prepareUpdateSettings("source").setSettings(
        Settings.builder().put("index.routing.allocation.require._name", mergeNode).put("index.blocks.write", true)
    ).get();
    ensureGreen();
    // now merge the source into a single-shard index, manually excluding the merge node to force the allocation to fail
    client().admin().indices().prepareResizeIndex("source", "target")
        .setWaitForActiveShards(ActiveShardCount.NONE)
        .setSettings(
            Settings.builder()
                .put("index.routing.allocation.exclude._name", mergeNode)
                .put("index.number_of_replicas", 0)
                .put("index.allocation.max_retries", 1)
                .build()
        ).get();
    client().admin().cluster().prepareHealth("target").setWaitForEvents(Priority.LANGUID).get();
    // now we move all shards away from the merge node
    client().admin().indices().prepareUpdateSettings("source").setSettings(
        Settings.builder().put("index.routing.allocation.require._name", spareNode).put("index.blocks.write", true)
    ).get();
    ensureGreen("source");
    // remove the exclusion that forced the failure
    client().admin().indices().prepareUpdateSettings("target").setSettings(
        Settings.builder().putNull("index.routing.allocation.exclude._name")
    ).get();
    // wait until it fails
    assertBusy(() -> {
        ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
        RoutingTable routingTables = clusterStateResponse.getState().routingTable();
        assertTrue(routingTables.index("target").shard(0).getShards().get(0).unassigned());
        assertEquals(UnassignedInfo.Reason.ALLOCATION_FAILED, routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getReason());
        assertEquals(1, routingTables.index("target").shard(0).getShards().get(0).unassignedInfo().getNumFailedAllocations());
    });
    // now relocate them all to the right node
    client().admin().indices().prepareUpdateSettings("source").setSettings(
        Settings.builder().put("index.routing.allocation.require._name", mergeNode)
    ).get();
    ensureGreen("source");
    final InternalClusterInfoService infoService = (InternalClusterInfoService) internalCluster().getInstance(ClusterInfoService.class, internalCluster().getMasterName());
    infoService.refresh();
    // kick off a retry and wait until it's done!
    ClusterRerouteResponse clusterRerouteResponse = client().admin().cluster().prepareReroute().setRetryFailed(true).get();
    long expectedShardSize = clusterRerouteResponse.getState().routingTable().index("target").shard(0).getShards().get(0).getExpectedShardSize();
    // we support the expected shard size in the allocator to sum up over the source index shards
    assertTrue("expected shard size must be set but wasn't: " + expectedShardSize, expectedShardSize > 0);
    ensureGreen();
    assertHitCount(client().prepareSearch("target").setSize(100).setQuery(new TermsQueryBuilder("foo", "bar")).get(), 20);
}
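
The assertBusy block drills from the ClusterStateResponse through the routing table down to the UnassignedInfo of the target index's shard 0. A minimal sketch of that navigation as a standalone helper, assuming the index is present in the routing table; the class and method names (UnassignedInfoChecks, unassignedReason) are illustrative, not part of OpenSearch:

import org.opensearch.action.admin.cluster.state.ClusterStateResponse;
import org.opensearch.client.Client;
import org.opensearch.cluster.routing.ShardRouting;
import org.opensearch.cluster.routing.UnassignedInfo;

final class UnassignedInfoChecks {

    // Illustrative helper: the unassigned reason of shard copy 0 of an index, or null if it is assigned.
    static UnassignedInfo.Reason unassignedReason(Client client, String index) {
        ClusterStateResponse response = client.admin().cluster().prepareState().get();
        ShardRouting shard = response.getState().routingTable().index(index).shard(0).getShards().get(0);
        return shard.unassigned() ? shard.unassignedInfo().getReason() : null;
    }
}
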