use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse in project elasticsearch by elastic.
From the class SimpleNodesInfoIT, the method testNodesInfosTotalIndexingBuffer:
public void testNodesInfosTotalIndexingBuffer() throws Exception {
List<String> nodesIds = internalCluster().startNodes(2);
final String node_1 = nodesIds.get(0);
final String node_2 = nodesIds.get(1);
ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForGreenStatus().setWaitForNodes("2").get();
logger.info("--> done cluster_health, status {}", clusterHealth.getStatus());
String server1NodeId = internalCluster().getInstance(ClusterService.class, node_1).state().nodes().getLocalNodeId();
String server2NodeId = internalCluster().getInstance(ClusterService.class, node_2).state().nodes().getLocalNodeId();
logger.info("--> started nodes: {} and {}", server1NodeId, server2NodeId);
NodesInfoResponse response = client().admin().cluster().prepareNodesInfo().execute().actionGet();
assertThat(response.getNodes().size(), is(2));
assertThat(response.getNodesMap().get(server1NodeId), notNullValue());
assertNotNull(response.getNodesMap().get(server1NodeId).getTotalIndexingBuffer());
assertThat(response.getNodesMap().get(server1NodeId).getTotalIndexingBuffer().getBytes(), greaterThan(0L));
assertThat(response.getNodesMap().get(server2NodeId), notNullValue());
assertNotNull(response.getNodesMap().get(server2NodeId).getTotalIndexingBuffer());
assertThat(response.getNodesMap().get(server2NodeId).getTotalIndexingBuffer().getBytes(), greaterThan(0L));
// again, using only the indices flag
response = client().admin().cluster().prepareNodesInfo().clear().setIndices(true).execute().actionGet();
assertThat(response.getNodes().size(), is(2));
assertThat(response.getNodesMap().get(server1NodeId), notNullValue());
assertNotNull(response.getNodesMap().get(server1NodeId).getTotalIndexingBuffer());
assertThat(response.getNodesMap().get(server1NodeId).getTotalIndexingBuffer().getBytes(), greaterThan(0L));
assertThat(response.getNodesMap().get(server2NodeId), notNullValue());
assertNotNull(response.getNodesMap().get(server2NodeId).getTotalIndexingBuffer());
assertThat(response.getNodesMap().get(server2NodeId).getTotalIndexingBuffer().getBytes(), greaterThan(0L));
}
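The test above leans on a single idiom: block on cluster health until the cluster is green and the expected number of nodes has joined, then assert on the response. A minimal sketch of that idiom in isolation, assuming the same ESIntegTestCase context as the test; the explicit timeout is an illustrative assumption, not taken from the source.

public void waitForGreenSketch() throws Exception {
    // Block until the cluster reports green and two nodes have joined.
    ClusterHealthResponse health = client().admin().cluster().prepareHealth()
            .setWaitForGreenStatus()
            .setWaitForNodes("2")
            .setTimeout(TimeValue.timeValueSeconds(30))   // assumed timeout, for illustration only
            .get();
    // Fail fast on a timeout so later assertions don't produce confusing secondary failures.
    assertThat("cluster did not reach green in time", health.isTimedOut(), equalTo(false));
    assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN));
}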
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse in project elasticsearch by elastic.
From the class Netty4TransportIT, the method testThatConnectionFailsAsIntended:
public void testThatConnectionFailsAsIntended() throws Exception {
Client transportClient = internalCluster().transportClient();
ClusterHealthResponse clusterIndexHealths = transportClient.admin().cluster().prepareHealth().get();
assertThat(clusterIndexHealths.getStatus(), is(ClusterHealthStatus.GREEN));
try {
transportClient.filterWithHeader(Collections.singletonMap("ERROR", "MY MESSAGE")).admin().cluster().prepareHealth().get();
fail("Expected exception, but didn't happen");
} catch (ElasticsearchException e) {
assertThat(e.getMessage(), containsString("MY MESSAGE"));
assertThat(channelProfileName, is(TransportSettings.DEFAULT_PROFILE));
}
}
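The key call here is Client.filterWithHeader, which wraps the client so the given headers ride along on every subsequent request; the test uses it to inject an "ERROR" header that its custom transport turns into an exception. A hedged sketch of the same wrapper used for an ordinary request, assuming the internal-cluster test context; the header name and value below are illustrative, not the special "ERROR" header this test relies on.

public void headerClientSketch() throws Exception {
    // Wrap the transport client so every request carries an extra header.
    Client headerClient = internalCluster().transportClient()
            .filterWithHeader(Collections.singletonMap("X-Example", "example-value"));
    // The wrapped client is used exactly like the original one.
    ClusterHealthResponse health = headerClient.admin().cluster().prepareHealth().get();
    assertThat(health.getStatus(), is(ClusterHealthStatus.GREEN));
}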
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse in project elasticsearch by elastic.
From the class CorruptedFileIT, the method testCorruptionOnNetworkLayer:
/**
 * Tests corruption on the network layer: the primary must not be affected by corruption
 * that happens on the way to the replica, and the file on disk stays uncorrupted.
 */
public void testCorruptionOnNetworkLayer() throws ExecutionException, InterruptedException {
int numDocs = scaledRandomIntBetween(100, 1000);
internalCluster().ensureAtLeastNumDataNodes(2);
if (cluster().numDataNodes() < 3) {
internalCluster().startNode(Settings.builder().put(Node.NODE_DATA_SETTING.getKey(), true).put(Node.NODE_MASTER_SETTING.getKey(), false));
}
NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
List<NodeStats> dataNodeStats = new ArrayList<>();
for (NodeStats stat : nodeStats.getNodes()) {
if (stat.getNode().isDataNode()) {
dataNodeStats.add(stat);
}
}
assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
Collections.shuffle(dataNodeStats, random());
NodeStats primariesNode = dataNodeStats.get(0);
NodeStats unluckyNode = dataNodeStats.get(1);
assertAcked(prepareCreate("test").setSettings(Settings.builder()
    .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "0")
    // don't go crazy here, it must recover fast
    .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, between(1, 4))
    .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
    .put("index.routing.allocation.include._name", primariesNode.getNode().getName())
    .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)));
ensureGreen();
IndexRequestBuilder[] builders = new IndexRequestBuilder[numDocs];
for (int i = 0; i < builders.length; i++) {
builders[i] = client().prepareIndex("test", "type").setSource("field", "value");
}
indexRandom(true, builders);
ensureGreen();
assertAllSuccessful(client().admin().indices().prepareFlush().setForce(true).execute().actionGet());
// we have to flush at least once here since we don't corrupt the translog
SearchResponse countResponse = client().prepareSearch().setSize(0).get();
assertHitCount(countResponse, numDocs);
final boolean truncate = randomBoolean();
for (NodeStats dataNode : dataNodeStats) {
    MockTransportService mockTransportService = ((MockTransportService) internalCluster()
            .getInstance(TransportService.class, dataNode.getNode().getName()));
    mockTransportService.addDelegate(
            internalCluster().getInstance(TransportService.class, unluckyNode.getNode().getName()),
            new MockTransportService.DelegateTransport(mockTransportService.original()) {

        @Override
        protected void sendRequest(Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException {
            if (action.equals(PeerRecoveryTargetService.Actions.FILE_CHUNK)) {
                RecoveryFileChunkRequest req = (RecoveryFileChunkRequest) request;
                if (truncate && req.length() > 1) {
                    BytesRef bytesRef = req.content().toBytesRef();
                    BytesArray array = new BytesArray(bytesRef.bytes, bytesRef.offset, (int) req.length() - 1);
                    request = new RecoveryFileChunkRequest(req.recoveryId(), req.shardId(), req.metadata(), req.position(), array, req.lastChunk(), req.totalTranslogOps(), req.sourceThrottleTimeInNanos());
                } else {
                    assert req.content().toBytesRef().bytes == req.content().toBytesRef().bytes : "no internal reference!!";
                    final byte[] array = req.content().toBytesRef().bytes;
                    int i = randomIntBetween(0, req.content().length() - 1);
                    // flip one byte in the content
                    array[i] = (byte) ~array[i];
                }
            }
            super.sendRequest(connection, requestId, action, request, options);
        }
    });
}
Settings build = Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, "1").put("index.routing.allocation.include._name", "*").build();
client().admin().indices().prepareUpdateSettings("test").setSettings(build).get();
client().admin().cluster().prepareReroute().get();
ClusterHealthResponse actionGet = client().admin().cluster().health(Requests.clusterHealthRequest("test").waitForGreenStatus()).actionGet();
if (actionGet.isTimedOut()) {
logger.info("ensureGreen timed out, cluster state:\n{}\n{}", client().admin().cluster().prepareState().get().getState(), client().admin().cluster().preparePendingClusterTasks().get());
assertThat("timed out waiting for green state", actionGet.isTimedOut(), equalTo(false));
}
// we are green, so the primaries did not get corrupted.
// ensure that no shard is actually allocated on the unlucky node
ClusterStateResponse clusterStateResponse = client().admin().cluster().prepareState().get();
for (IndexShardRoutingTable table : clusterStateResponse.getState().getRoutingTable().index("test")) {
for (ShardRouting routing : table) {
if (unluckyNode.getNode().getId().equals(routing.currentNodeId())) {
assertThat(routing.state(), not(equalTo(ShardRoutingState.STARTED)));
assertThat(routing.state(), not(equalTo(ShardRoutingState.RELOCATING)));
}
}
}
final int numIterations = scaledRandomIntBetween(5, 20);
for (int i = 0; i < numIterations; i++) {
SearchResponse response = client().prepareSearch().setSize(numDocs).get();
assertHitCount(response, numDocs);
}
}
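Rather than the prepareHealth builder, this test issues a ClusterHealthRequest directly and dumps the cluster state when the wait for green times out, which makes the eventual failure far easier to diagnose. A condensed sketch of that pattern, assuming the same test context; the index name "test" mirrors the index created above.

public void waitForGreenOrDumpStateSketch() {
    ClusterHealthResponse health = client().admin().cluster()
            .health(Requests.clusterHealthRequest("test").waitForGreenStatus())
            .actionGet();
    if (health.isTimedOut()) {
        // Surface the full cluster state and the pending task queue before failing,
        // exactly so the timeout is diagnosable from the test log.
        logger.info("wait for green timed out, cluster state:\n{}\n{}",
                client().admin().cluster().prepareState().get().getState(),
                client().admin().cluster().preparePendingClusterTasks().get());
        fail("timed out waiting for green state");
    }
}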
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse in project elasticsearch by elastic.
From the class IndexLifecycleActionIT, the method testIndexLifecycleActionsWith11Shards1Backup:
public void testIndexLifecycleActionsWith11Shards1Backup() throws Exception {
Settings settings = Settings.builder().put(indexSettings()).put(SETTING_NUMBER_OF_SHARDS, 11).put(SETTING_NUMBER_OF_REPLICAS, 1).build();
// start one server
logger.info("Starting sever1");
final String server_1 = internalCluster().startNode();
final String node1 = getLocalNodeId(server_1);
logger.info("Creating index [test]");
CreateIndexResponse createIndexResponse = client().admin().indices().create(createIndexRequest("test").settings(settings)).actionGet();
assertAcked(createIndexResponse);
ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
RoutingNode routingNodeEntry1 = clusterState.getRoutingNodes().node(node1);
assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(11));
logger.info("Starting server2");
// start another server
String server_2 = internalCluster().startNode();
// first wait for 2 nodes in the cluster
logger.info("Waiting for replicas to be assigned");
ClusterHealthResponse clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
final String node2 = getLocalNodeId(server_2);
// explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join)
client().admin().cluster().prepareReroute().execute().actionGet();
clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2").waitForNoRelocatingShards(true)).actionGet();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertThat(clusterHealth.getNumberOfDataNodes(), equalTo(2));
assertThat(clusterHealth.getInitializingShards(), equalTo(0));
assertThat(clusterHealth.getUnassignedShards(), equalTo(0));
assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
assertThat(clusterHealth.getActiveShards(), equalTo(22));
assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
clusterState = client().admin().cluster().prepareState().get().getState();
assertNodesPresent(clusterState.getRoutingNodes(), node1, node2);
routingNodeEntry1 = clusterState.getRoutingNodes().node(node1);
assertThat(routingNodeEntry1.numberOfShardsWithState(RELOCATING), equalTo(0));
assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), equalTo(11));
RoutingNode routingNodeEntry2 = clusterState.getRoutingNodes().node(node2);
assertThat(routingNodeEntry2.numberOfShardsWithState(INITIALIZING), equalTo(0));
assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), equalTo(11));
logger.info("Starting server3");
// start another server
String server_3 = internalCluster().startNode();
// first wait for 3 nodes in the cluster
logger.info("Waiting for replicas to be assigned");
clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("3")).actionGet();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
final String node3 = getLocalNodeId(server_3);
// explicitly call reroute, so shards will get relocated to the new node (we delay it in ES in case other nodes join)
client().admin().cluster().prepareReroute().execute().actionGet();
clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("3").waitForNoRelocatingShards(true)).actionGet();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertThat(clusterHealth.getNumberOfDataNodes(), equalTo(3));
assertThat(clusterHealth.getInitializingShards(), equalTo(0));
assertThat(clusterHealth.getUnassignedShards(), equalTo(0));
assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
assertThat(clusterHealth.getActiveShards(), equalTo(22));
assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
clusterState = client().admin().cluster().prepareState().get().getState();
assertNodesPresent(clusterState.getRoutingNodes(), node1, node2, node3);
routingNodeEntry1 = clusterState.getRoutingNodes().node(node1);
routingNodeEntry2 = clusterState.getRoutingNodes().node(node2);
RoutingNode routingNodeEntry3 = clusterState.getRoutingNodes().node(node3);
assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED) + routingNodeEntry2.numberOfShardsWithState(STARTED) + routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(22));
assertThat(routingNodeEntry1.numberOfShardsWithState(RELOCATING), equalTo(0));
assertThat(routingNodeEntry1.numberOfShardsWithState(STARTED), anyOf(equalTo(7), equalTo(8)));
assertThat(routingNodeEntry2.numberOfShardsWithState(RELOCATING), equalTo(0));
assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), anyOf(equalTo(7), equalTo(8)));
assertThat(routingNodeEntry3.numberOfShardsWithState(INITIALIZING), equalTo(0));
assertThat(routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(7));
logger.info("Closing server1");
// kill the first server
internalCluster().stopRandomNode(InternalTestCluster.nameFilter(server_1));
// verify health
logger.info("Running Cluster Health");
clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNodes("2")).actionGet();
logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
client().admin().cluster().prepareReroute().get();
clusterHealth = client().admin().cluster().health(clusterHealthRequest().waitForGreenStatus().waitForNoRelocatingShards(true).waitForNodes("2")).actionGet();
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertThat(clusterHealth.getRelocatingShards(), equalTo(0));
assertThat(clusterHealth.getActiveShards(), equalTo(22));
assertThat(clusterHealth.getActivePrimaryShards(), equalTo(11));
clusterState = client().admin().cluster().prepareState().get().getState();
assertNodesPresent(clusterState.getRoutingNodes(), node3, node2);
routingNodeEntry2 = clusterState.getRoutingNodes().node(node2);
routingNodeEntry3 = clusterState.getRoutingNodes().node(node3);
assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED) + routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(22));
assertThat(routingNodeEntry2.numberOfShardsWithState(RELOCATING), equalTo(0));
assertThat(routingNodeEntry2.numberOfShardsWithState(STARTED), equalTo(11));
assertThat(routingNodeEntry3.numberOfShardsWithState(RELOCATING), equalTo(0));
assertThat(routingNodeEntry3.numberOfShardsWithState(STARTED), equalTo(11));
logger.info("Deleting index [test]");
// last, let's delete the index
DeleteIndexResponse deleteIndexResponse = client().admin().indices().prepareDelete("test").execute().actionGet();
assertThat(deleteIndexResponse.isAcknowledged(), equalTo(true));
clusterState = client().admin().cluster().prepareState().get().getState();
assertNodesPresent(clusterState.getRoutingNodes(), node3, node2);
routingNodeEntry2 = clusterState.getRoutingNodes().node(node2);
assertThat(routingNodeEntry2.isEmpty(), equalTo(true));
routingNodeEntry3 = clusterState.getRoutingNodes().node(node3);
assertThat(routingNodeEntry3.isEmpty(), equalTo(true));
}
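Between them, the health checks above read most of the counters that ClusterHealthResponse exposes. A compact sketch of pulling them from a single response, assuming the same test context:

public void healthCountersSketch() {
    ClusterHealthResponse health = client().admin().cluster().prepareHealth().get();
    int dataNodes = health.getNumberOfDataNodes();           // nodes eligible to hold shards
    int activePrimaries = health.getActivePrimaryShards();   // one per shard of every open index
    int activeShards = health.getActiveShards();             // primaries plus started replicas
    int relocatingShards = health.getRelocatingShards();
    int initializingShards = health.getInitializingShards();
    int unassignedShards = health.getUnassignedShards();
    logger.info("data nodes {}, active {} ({} primaries), relocating {}, initializing {}, unassigned {}",
            dataNodes, activeShards, activePrimaries, relocatingShards, initializingShards, unassignedShards);
}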
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse in project elasticsearch by elastic.
From the class MinimumMasterNodesIT, the method testMultipleNodesShutdownNonMasterNodes:
public void testMultipleNodesShutdownNonMasterNodes() throws Exception {
Settings settings = Settings.builder().put("discovery.zen.minimum_master_nodes", 3).put(ZenDiscovery.PING_TIMEOUT_SETTING.getKey(), "1s").put("discovery.initial_state_timeout", "500ms").build();
logger.info("--> start first 2 nodes");
internalCluster().startNodes(2, settings);
ClusterState state;
assertBusy(() -> {
for (Client client : clients()) {
ClusterState state1 = client.admin().cluster().prepareState().setLocal(true).execute().actionGet().getState();
assertThat(state1.blocks().hasGlobalBlock(DiscoverySettings.NO_MASTER_BLOCK_ID), equalTo(true));
}
});
logger.info("--> start two more nodes");
internalCluster().startNodes(2, settings);
ensureGreen();
ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForNodes("4").execute().actionGet();
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
state = client().admin().cluster().prepareState().execute().actionGet().getState();
assertThat(state.nodes().getSize(), equalTo(4));
createIndex("test");
NumShards numShards = getNumShards("test");
logger.info("--> indexing some data");
for (int i = 0; i < 100; i++) {
client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", "value").execute().actionGet();
}
ensureGreen();
// make sure that all shards recovered before trying to flush
assertThat(client().admin().cluster().prepareHealth("test").setWaitForActiveShards(numShards.totalNumShards).execute().actionGet().isTimedOut(), equalTo(false));
// flush for simpler debugging
client().admin().indices().prepareFlush().execute().actionGet();
refresh();
logger.info("--> verify we the data back");
for (int i = 0; i < 10; i++) {
assertHitCount(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 100);
}
internalCluster().stopRandomNonMasterNode();
internalCluster().stopRandomNonMasterNode();
logger.info("--> verify that there is no master anymore on remaining nodes");
// spin here to wait till the state is set
assertNoMasterBlockOnAllNodes();
logger.info("--> start back the 2 nodes ");
String[] newNodes = internalCluster().startNodes(2, settings).stream().toArray(String[]::new);
ensureGreen();
clusterHealthResponse = client().admin().cluster().prepareHealth().setWaitForNodes("4").execute().actionGet();
assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
state = client().admin().cluster().prepareState().execute().actionGet().getState();
assertThat(state.nodes().getSize(), equalTo(4));
// we prefer to elect up and running nodes
assertThat(state.nodes().getMasterNodeId(), not(isOneOf(newNodes)));
logger.info("--> verify we the data back");
for (int i = 0; i < 10; i++) {
assertHitCount(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet(), 100);
}
}
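One detail worth calling out from this test is setWaitForEvents(Priority.LANGUID): because LANGUID is the lowest task priority, the health request only returns once every cluster-state update queued ahead of it has been processed, so the node-count check reflects a settled cluster. A minimal sketch of that combination, assuming the same test context; the node count "4" mirrors the test above.

public void waitForEventsSketch() {
    // Wait until all pending cluster-state tasks have run and four nodes have joined.
    ClusterHealthResponse health = client().admin().cluster().prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForNodes("4")
            .get();
    assertThat(health.isTimedOut(), equalTo(false));
}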