use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse in project crate by crate.
the class DecommissioningService method decommission.
private void decommission() {
    // fail on new requests so that clients don't use this node anymore
    sqlOperations.disable();

    /*
     * Setting this setting will cause the {@link DecommissionAllocationDecider} to prevent allocations onto this node.
     *
     * nodeIds are part of the key to prevent conflicts if other nodes are being decommissioned in parallel.
     */
    Settings settings = Settings.builder()
        .put(DECOMMISSION_PREFIX + clusterService.localNode().getId(), true)
        .build();
    updateSettingsAction.execute(new ClusterUpdateSettingsRequest().transientSettings(settings), new ActionListener<ClusterUpdateSettingsResponse>() {

        @Override
        public void onResponse(ClusterUpdateSettingsResponse clusterUpdateSettingsResponse) {
            // changing settings triggers AllocationService.reroute -> shards will be relocated
            // NOTE: it waits for ALL relocating shards, not just those that involve THIS node.
            ClusterHealthRequest request = new ClusterHealthRequest()
                .waitForRelocatingShards(0)
                .waitForEvents(Priority.LANGUID)
                .timeout(gracefulStopTimeout);
            if (dataAvailability == DataAvailability.FULL) {
                request = request.waitForGreenStatus();
            } else {
                request = request.waitForYellowStatus();
            }
            final long startTime = System.nanoTime();
            healthAction.execute(request, new ActionListener<ClusterHealthResponse>() {

                @Override
                public void onResponse(ClusterHealthResponse clusterHealthResponse) {
                    exitIfNoActiveRequests(startTime);
                }

                @Override
                public void onFailure(Throwable e) {
                    forceStopOrAbort(e);
                }
            });
        }

        @Override
        public void onFailure(Throwable e) {
            logger.error("Couldn't set settings. Graceful shutdown failed", e);
        }
    });
}
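The transient setting written above is only half of the mechanism: a DecommissionAllocationDecider on the allocation side has to read the flag back and veto allocations onto the marked node. Below is a minimal sketch of such a decider, assuming the 7.x-style AllocationDecider API; the DECOMMISSION_PREFIX value and the settings-refresh wiring are assumptions for illustration, not CrateDB's actual code.

import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.settings.Settings;

public class DecommissionAllocationDeciderSketch extends AllocationDecider {

    // assumed key prefix; must match the DECOMMISSION_PREFIX used in decommission()
    static final String DECOMMISSION_PREFIX = "cluster.decommission.";

    // refreshed from the cluster settings whenever they change (wiring omitted)
    private volatile Settings latestSettings = Settings.EMPTY;

    void onSettingsChanged(Settings settings) {
        latestSettings = settings;
    }

    @Override
    public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
        // veto any allocation onto a node whose id was flagged via the transient setting
        if (latestSettings.getAsBoolean(DECOMMISSION_PREFIX + node.nodeId(), false)) {
            return allocation.decision(Decision.NO, "decommission", "node [%s] is being decommissioned", node.nodeId());
        }
        return allocation.decision(Decision.YES, "decommission", "node [%s] is not being decommissioned", node.nodeId());
    }
}

Because the decider returns NO for the flagged node, the reroute triggered by the settings update drains its shards, which is exactly what the cluster-health wait in decommission() observes.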
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse in project elasticsearch by elastic.
the class WaitActiveShardCountIT method testReplicationWaitsForActiveShardCount.
public void testReplicationWaitsForActiveShardCount() throws Exception {
    CreateIndexResponse createIndexResponse = prepareCreate("test", 1,
        Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 2)).get();
    assertAcked(createIndexResponse);
    // indexing, by default, will work (waiting for one shard copy only)
    client().prepareIndex("test", "type1", "1").setSource(source("1", "test"), XContentType.JSON).execute().actionGet();
    try {
        client().prepareIndex("test", "type1", "1").setSource(source("1", "test"), XContentType.JSON)
            .setWaitForActiveShards(2) // wait for 2 active shard copies
            .setTimeout(timeValueMillis(100)).execute().actionGet();
        fail("can't index, not enough active shard copies");
    } catch (UnavailableShardsException e) {
        assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
        assertThat(e.getMessage(), startsWith("[test][0] Not enough active copies to meet shard count of [2] (have 1, needed 2). Timeout: [100ms], request:"));
        // but really, all is well
    }
    allowNodes("test", 2);
    ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth()
        .setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(2).setWaitForYellowStatus().execute().actionGet();
    logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
    // this should work, since we now have two shard copies
    client().prepareIndex("test", "type1", "1").setSource(source("1", "test"), XContentType.JSON)
        .setWaitForActiveShards(2).setTimeout(timeValueSeconds(1)).execute().actionGet();
    try {
        client().prepareIndex("test", "type1", "1").setSource(source("1", "test"), XContentType.JSON)
            .setWaitForActiveShards(ActiveShardCount.ALL).setTimeout(timeValueMillis(100)).execute().actionGet();
        fail("can't index, not enough active shard copies");
    } catch (UnavailableShardsException e) {
        assertThat(e.status(), equalTo(RestStatus.SERVICE_UNAVAILABLE));
        assertThat(e.getMessage(), startsWith("[test][0] Not enough active copies to meet shard count of [" + ActiveShardCount.ALL + "] (have 2, needed 3). Timeout: [100ms], request:"));
        // but really, all is well
    }
    allowNodes("test", 3);
    clusterHealth = client().admin().cluster().prepareHealth()
        .setWaitForEvents(Priority.LANGUID).setWaitForActiveShards(3).setWaitForGreenStatus().execute().actionGet();
    logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
    // this should work, since we now have all shards started
    client().prepareIndex("test", "type1", "1").setSource(source("1", "test"), XContentType.JSON)
        .setWaitForActiveShards(ActiveShardCount.ALL).setTimeout(timeValueSeconds(1)).execute().actionGet();
}
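As a side note, the per-request guard exercised above can also be configured as an index default so that every write waits for the same number of copies. A short sketch using the standard index.write.wait_for_active_shards setting (available since Elasticsearch 5.0):

// make every write to "test" wait for two active shard copies by default,
// equivalent to calling setWaitForActiveShards(2) on each request
client().admin().indices().prepareUpdateSettings("test")
    .setSettings(Settings.builder().put("index.write.wait_for_active_shards", 2))
    .get();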
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse in project elasticsearch by elastic.
the class OpenCloseIndexIT method testCloseOpenWildcard.
public void testCloseOpenWildcard() {
    Client client = client();
    createIndex("test1", "test2", "a");
    ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForGreenStatus().execute().actionGet();
    assertThat(healthResponse.isTimedOut(), equalTo(false));
    CloseIndexResponse closeIndexResponse = client.admin().indices().prepareClose("test*").execute().actionGet();
    assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
    assertIndexIsClosed("test1", "test2");
    assertIndexIsOpened("a");
    OpenIndexResponse openIndexResponse = client.admin().indices().prepareOpen("test*").execute().actionGet();
    assertThat(openIndexResponse.isAcknowledged(), equalTo(true));
    assertIndexIsOpened("test1", "test2", "a");
}
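assertIndexIsClosed and assertIndexIsOpened are helpers defined in the test class and not included in this listing. A hypothetical sketch of what the closed-state check could look like against the cluster state (the real helper may differ in detail):

private static void assertIndexIsClosed(String... indices) {
    ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
    for (String index : indices) {
        // a closed index remains in the cluster metadata, marked with State.CLOSE
        IndexMetaData indexMetaData = stateResponse.getState().metaData().index(index);
        assertThat(indexMetaData.getState(), equalTo(IndexMetaData.State.CLOSE));
    }
}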
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse in project elasticsearch by elastic.
the class IndexRecoveryIT method testDisconnectsWhileRecovering.
public void testDisconnectsWhileRecovering() throws Exception {
    final String indexName = "test";
    final Settings nodeSettings = Settings.builder()
        .put(RecoverySettings.INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.getKey(), "100ms")
        .put(RecoverySettings.INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.getKey(), "1s")
        // restarted recoveries will delete temp files and write them again
        .put(MockFSDirectoryService.RANDOM_PREVENT_DOUBLE_WRITE_SETTING.getKey(), false)
        .build();
    // start a master node
    internalCluster().startNode(nodeSettings);
    final String blueNodeName = internalCluster().startNode(Settings.builder().put("node.attr.color", "blue").put(nodeSettings).build());
    final String redNodeName = internalCluster().startNode(Settings.builder().put("node.attr.color", "red").put(nodeSettings).build());
    ClusterHealthResponse response = client().admin().cluster().prepareHealth().setWaitForNodes(">=3").get();
    assertThat(response.isTimedOut(), is(false));
    client().admin().indices().prepareCreate(indexName)
        .setSettings(Settings.builder()
            .put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "blue")
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0))
        .get();
    List<IndexRequestBuilder> requests = new ArrayList<>();
    int numDocs = scaledRandomIntBetween(25, 250);
    for (int i = 0; i < numDocs; i++) {
        requests.add(client().prepareIndex(indexName, "type").setSource("{}", XContentType.JSON));
    }
    indexRandom(true, requests);
    ensureSearchable(indexName);
    ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
    final String blueNodeId = internalCluster().getInstance(ClusterService.class, blueNodeName).localNode().getId();
    assertFalse(stateResponse.getState().getRoutingNodes().node(blueNodeId).isEmpty());
    SearchResponse searchResponse = client().prepareSearch(indexName).get();
    assertHitCount(searchResponse, numDocs);
    String[] recoveryActions = new String[] {
        PeerRecoverySourceService.Actions.START_RECOVERY,
        PeerRecoveryTargetService.Actions.FILES_INFO,
        PeerRecoveryTargetService.Actions.FILE_CHUNK,
        PeerRecoveryTargetService.Actions.CLEAN_FILES,
        // RecoveryTarget.Actions.TRANSLOG_OPS, <-- may not be sent if already flushed
        PeerRecoveryTargetService.Actions.PREPARE_TRANSLOG,
        PeerRecoveryTargetService.Actions.FINALIZE
    };
    final String recoveryActionToBlock = randomFrom(recoveryActions);
    final boolean dropRequests = randomBoolean();
    logger.info("--> will {} between blue & red on [{}]", dropRequests ? "drop requests" : "break connection", recoveryActionToBlock);
    MockTransportService blueMockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, blueNodeName);
    MockTransportService redMockTransportService = (MockTransportService) internalCluster().getInstance(TransportService.class, redNodeName);
    TransportService redTransportService = internalCluster().getInstance(TransportService.class, redNodeName);
    TransportService blueTransportService = internalCluster().getInstance(TransportService.class, blueNodeName);
    final CountDownLatch requestBlocked = new CountDownLatch(1);
    blueMockTransportService.addDelegate(redTransportService, new RecoveryActionBlocker(dropRequests, recoveryActionToBlock, blueMockTransportService.original(), requestBlocked));
    redMockTransportService.addDelegate(blueTransportService, new RecoveryActionBlocker(dropRequests, recoveryActionToBlock, redMockTransportService.original(), requestBlocked));
    logger.info("--> starting recovery from blue to red");
    client().admin().indices().prepareUpdateSettings(indexName)
        .setSettings(Settings.builder()
            .put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "color", "red,blue")
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1))
        .get();
    requestBlocked.await();
    logger.info("--> stopping to block recovery");
    blueMockTransportService.clearAllRules();
    redMockTransportService.clearAllRules();
    ensureGreen();
    searchResponse = client(redNodeName).prepareSearch(indexName).setPreference("_local").get();
    assertHitCount(searchResponse, numDocs);
}
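RecoveryActionBlocker is an inner class of the test that is not shown in this listing. Based on how it is constructed above (drop flag, action name, delegate transport, latch), its behavior is roughly the following sketch; the exact MockTransportService.DelegateTransport sendRequest signature varies across 5.x releases, so treat this as an approximation rather than the test's actual code:

private class RecoveryActionBlocker extends MockTransportService.DelegateTransport {

    private final boolean dropRequests;
    private final String recoveryActionToBlock;
    private final CountDownLatch requestBlocked;

    RecoveryActionBlocker(boolean dropRequests, String recoveryActionToBlock, Transport delegate, CountDownLatch requestBlocked) {
        super(delegate);
        this.dropRequests = dropRequests;
        this.recoveryActionToBlock = recoveryActionToBlock;
        this.requestBlocked = requestBlocked;
    }

    @Override
    protected void sendRequest(Transport.Connection connection, long requestId, String action, TransportRequest request, TransportRequestOptions options) throws IOException {
        if (recoveryActionToBlock.equals(action) || requestBlocked.getCount() == 0) {
            requestBlocked.countDown();
            if (dropRequests) {
                // silently drop the request; the recovery retries after the configured network delay
                return;
            }
            // or simulate a broken connection
            throw new ConnectTransportException(connection.getNode(), "DISCONNECT: prevented " + action + " request");
        }
        super.sendRequest(connection, requestId, action, request, options);
    }
}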
use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse in project elasticsearch by elastic.
the class UpdateNumberOfReplicasIT method testSimpleUpdateNumberOfReplicas.
public void testSimpleUpdateNumberOfReplicas() throws Exception {
    logger.info("Creating index test");
    assertAcked(prepareCreate("test", 2));
    logger.info("Running Cluster Health");
    ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth()
        .setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();
    logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
    NumShards numShards = getNumShards("test");
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
    assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
    assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(numShards.numReplicas));
    assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.totalNumShards));
    for (int i = 0; i < 10; i++) {
        client().prepareIndex("test", "type1", Integer.toString(i))
            .setSource(jsonBuilder().startObject().field("value", "test" + i).endObject()).get();
    }
    refresh();
    for (int i = 0; i < 10; i++) {
        SearchResponse countResponse = client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get();
        assertHitCount(countResponse, 10L);
    }
    logger.info("Increasing the number of replicas from 1 to 2");
    assertAcked(client().admin().indices().prepareUpdateSettings("test")
        .setSettings(Settings.builder().put("index.number_of_replicas", 2)).execute().actionGet());
    logger.info("Running Cluster Health");
    clusterHealth = client().admin().cluster().prepareHealth()
        .setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus()
        .setWaitForActiveShards(numShards.numPrimaries * 2).execute().actionGet();
    logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
    assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
    assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2));
    // only 2 copies allocated per shard (primary + 1 replica) across 2 nodes
    assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 2));
    logger.info("starting another node so new replicas will be allocated to it");
    allowNodes("test", 3);
    logger.info("Running Cluster Health");
    clusterHealth = client().admin().cluster().prepareHealth()
        .setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus()
        .setWaitForNoRelocatingShards(true).setWaitForNodes(">=3").execute().actionGet();
    logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
    assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
    assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2));
    // all 3 copies allocated across 3 nodes
    assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 3));
    for (int i = 0; i < 10; i++) {
        SearchResponse countResponse = client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get();
        assertHitCount(countResponse, 10L);
    }
    logger.info("Decreasing number of replicas from 2 to 0");
    assertAcked(client().admin().indices().prepareUpdateSettings("test")
        .setSettings(Settings.builder().put("index.number_of_replicas", 0)).get());
    logger.info("Running Cluster Health");
    clusterHealth = client().admin().cluster().prepareHealth()
        .setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus()
        .setWaitForNoRelocatingShards(true).setWaitForNodes(">=3").execute().actionGet();
    logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
    assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
    assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(0));
    // a single copy is allocated per shard (replicas set to 0)
    assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries));
    for (int i = 0; i < 10; i++) {
        assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 10);
    }
}
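Worth noting alongside this manual resizing pattern: when the desired invariant is simply "one copy on every data node", the index can track the node count itself via the standard index.auto_expand_replicas setting, for example:

// let Elasticsearch grow and shrink the replica count with the cluster,
// instead of updating index.number_of_replicas by hand as nodes join or leave
client().admin().indices().prepareUpdateSettings("test")
    .setSettings(Settings.builder().put("index.auto_expand_replicas", "0-all"))
    .get();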