Use of org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse in project storm by apache.
From the class AbstractEsBoltIntegrationTest, the method ensureEsGreen.
private static void ensureEsGreen(Node node) {
    ClusterHealthResponse chr = node.client().admin().cluster()
            .health(Requests.clusterHealthRequest()
                    .timeout(TimeValue.timeValueSeconds(30))
                    .waitForGreenStatus()
                    .waitForEvents(Priority.LANGUID)
                    .waitForRelocatingShards(0))
            .actionGet();
    assertThat("cluster status is green", chr.getStatus(), equalTo(ClusterHealthStatus.GREEN));
}
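For newer clients the same wait can be expressed with the builder-style health API. Below is a minimal sketch, assuming a 5.x-era transport Client (import paths moved between major versions); the helper name waitForGreen is hypothetical and not part of the test above.

import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.unit.TimeValue;

// Hypothetical helper: waits up to 30s for green and returns the status
// observed when the wait completed (or timed out).
static ClusterHealthStatus waitForGreen(Client client) {
    ClusterHealthResponse response = client.admin().cluster().prepareHealth()
            .setTimeout(TimeValue.timeValueSeconds(30))
            .setWaitForGreenStatus()
            .setWaitForEvents(Priority.LANGUID)
            .get();
    return response.getStatus();
}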
Use of org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse in project crate by crate.
From the class RecoveryTests, the method testPrimaryRelocationWhileIndexing.
@Test
public void testPrimaryRelocationWhileIndexing() throws Exception {
    final int numberOfRelocations = 1;
    final int numberOfWriters = 2;
    final String node1 = internalCluster().startNode();
    BlobAdminClient blobAdminClient = internalCluster().getInstance(BlobAdminClient.class, node1);
    logger.trace("--> creating test index ...");
    Settings indexSettings = Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .build();
    blobAdminClient.createBlobTable("test", indexSettings).get();
    logger.trace("--> starting [node2] ...");
    final String node2 = internalCluster().startNode();
    ensureGreen();
    final AtomicLong idGenerator = new AtomicLong();
    final AtomicLong indexCounter = new AtomicLong();
    final AtomicBoolean stop = new AtomicBoolean(false);
    Thread[] writers = new Thread[numberOfWriters];
    final CountDownLatch stopLatch = new CountDownLatch(writers.length);
    logger.trace("--> starting {} blob upload threads", writers.length);
    final List<String> uploadedDigests = Collections.synchronizedList(new ArrayList<String>(writers.length));
    for (int i = 0; i < writers.length; i++) {
        final int indexerId = i;
        writers[i] = new Thread() {
            @Override
            public void run() {
                try {
                    logger.trace("**** starting blob upload thread {}", indexerId);
                    while (!stop.get()) {
                        long id = idGenerator.incrementAndGet();
                        String digest = uploadFile(internalCluster().client(node1), genFile(id));
                        uploadedDigests.add(digest);
                        indexCounter.incrementAndGet();
                    }
                    logger.trace("**** done indexing thread {}", indexerId);
                } catch (Exception e) {
                    logger.warn("**** failed indexing thread {}", e, indexerId);
                } finally {
                    stopLatch.countDown();
                }
            }
        };
        writers[i].setName("blob-uploader-thread");
        // dispatch threads from parent, ignoring possible leaking threads
        writers[i].setDaemon(true);
        writers[i].start();
    }
    logger.trace("--> waiting for 2 blobs to be uploaded ...");
    while (uploadedDigests.size() < 2) {
        Thread.sleep(10);
    }
    logger.trace("--> 2 blobs uploaded");
    // increase time between chunks in order to make sure that the upload is taking place while relocating
    timeBetweenChunks.set(10);
    logger.trace("--> starting relocations...");
    for (int i = 0; i < numberOfRelocations; i++) {
        String fromNode = (i % 2 == 0) ? node1 : node2;
        String toNode = node1.equals(fromNode) ? node2 : node1;
        logger.trace("--> START relocate the shard from {} to {}", fromNode, toNode);
        internalCluster().client(node1).admin().cluster().prepareReroute()
                .add(new MoveAllocationCommand(new ShardId(BlobIndex.fullIndexName("test"), 0), fromNode, toNode))
                .execute().actionGet();
        ClusterHealthResponse clusterHealthResponse = internalCluster().client(node1).admin().cluster().prepareHealth()
                .setWaitForEvents(Priority.LANGUID)
                .setWaitForRelocatingShards(0)
                .setTimeout(ACCEPTABLE_RELOCATION_TIME)
                .execute().actionGet();
        assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
        clusterHealthResponse = internalCluster().client(node2).admin().cluster().prepareHealth()
                .setWaitForEvents(Priority.LANGUID)
                .setWaitForRelocatingShards(0)
                .setTimeout(ACCEPTABLE_RELOCATION_TIME)
                .execute().actionGet();
        assertThat(clusterHealthResponse.isTimedOut(), equalTo(false));
        logger.trace("--> DONE relocate the shard from {} to {}", fromNode, toNode);
    }
    logger.trace("--> done relocations");
    logger.trace("--> marking and waiting for upload threads to stop ...");
    timeBetweenChunks.set(0);
    stop.set(true);
    assertThat(stopLatch.await(60, TimeUnit.SECONDS), is(true));
    logger.trace("--> uploading threads stopped");
    logger.trace("--> expected {} got {}", indexCounter.get(), uploadedDigests.size());
    assertEquals(indexCounter.get(), uploadedDigests.size());
    BlobIndicesService blobIndicesService = internalCluster().getInstance(BlobIndicesService.class, node2);
    for (String digest : uploadedDigests) {
        BlobShard blobShard = blobIndicesService.localBlobShard(BlobIndex.fullIndexName("test"), digest);
        long length = blobShard.blobContainer().getFile(digest).length();
        assertThat(length, greaterThanOrEqualTo(1L));
    }
    for (Thread writer : writers) {
        writer.join(6000);
    }
}
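The relocation loop above waits on both nodes' clients before declaring a move finished. The same wait, pulled out as a helper, might look like the sketch below; it uses the 2.x-era setWaitForRelocatingShards(0) exactly as the test does (renamed to setWaitForNoRelocatingShards(true) in later versions), and the helper name is an assumption.

// Sketch of the relocation wait used in the loop above; helper name assumed.
static void waitUntilRelocationFinished(Client client, TimeValue timeout) {
    ClusterHealthResponse health = client.admin().cluster().prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForRelocatingShards(0)
            .setTimeout(timeout)
            .execute().actionGet();
    // A timed-out wait means shards were still relocating within the budget.
    assertThat(health.isTimedOut(), equalTo(false));
}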
Use of org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse in project crate by crate.
From the class DecommissioningService, the method decommission.
private void decommission() {
    // fail on new requests so that clients don't use this node anymore
    sqlOperations.disable();
    /*
     * setting this setting will cause the {@link DecommissionAllocationDecider} to prevent allocations onto this node
     *
     * nodeIds are part of the key to prevent conflicts if other nodes are being decommissioned in parallel
     */
    Settings settings = Settings.builder()
            .put(DECOMMISSION_PREFIX + clusterService.localNode().getId(), true)
            .build();
    updateSettingsAction.execute(new ClusterUpdateSettingsRequest().transientSettings(settings), new ActionListener<ClusterUpdateSettingsResponse>() {
        @Override
        public void onResponse(ClusterUpdateSettingsResponse clusterUpdateSettingsResponse) {
            // changing settings triggers AllocationService.reroute -> shards will be relocated
            // NOTE: it waits for ALL relocating shards, not just those that involve THIS node.
            ClusterHealthRequest request = new ClusterHealthRequest()
                    .waitForRelocatingShards(0)
                    .waitForEvents(Priority.LANGUID)
                    .timeout(gracefulStopTimeout);
            if (dataAvailability == DataAvailability.FULL) {
                request = request.waitForGreenStatus();
            } else {
                request = request.waitForYellowStatus();
            }
            final long startTime = System.nanoTime();
            healthAction.execute(request, new ActionListener<ClusterHealthResponse>() {
                @Override
                public void onResponse(ClusterHealthResponse clusterHealthResponse) {
                    exitIfNoActiveRequests(startTime);
                }

                @Override
                public void onFailure(Throwable e) {
                    forceStopOrAbort(e);
                }
            });
        }

        @Override
        public void onFailure(Throwable e) {
            logger.error("Couldn't set settings. Graceful shutdown failed", e);
        }
    });
}
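The startTime captured before the health call lets exitIfNoActiveRequests account for the time already spent waiting against the overall graceful-stop budget. A minimal sketch of that arithmetic, under the assumption that the remaining budget is simply the configured timeout minus the elapsed wait (the helper name is hypothetical; TimeValue is the same class imported earlier):

// Illustrative only: compute the budget left after the health wait,
// assuming a simple subtraction from gracefulStopTimeout.
static TimeValue remainingBudget(TimeValue gracefulStopTimeout, long startTimeNanos) {
    long elapsedNanos = System.nanoTime() - startTimeNanos;
    return TimeValue.timeValueNanos(Math.max(0L, gracefulStopTimeout.nanos() - elapsedNanos));
}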
Use of org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse in project elasticsearch by elastic.
From the class SearchWithRandomIOExceptionsIT, the method testRandomDirectoryIOExceptions.
public void testRandomDirectoryIOExceptions() throws IOException, InterruptedException, ExecutionException {
    String mapping = XContentFactory.jsonBuilder()
            .startObject()
                .startObject("type")
                    .startObject("properties")
                        .startObject("test")
                            .field("type", "keyword")
                        .endObject()
                    .endObject()
                .endObject()
            .endObject().string();
    final double exceptionRate;
    final double exceptionOnOpenRate;
    if (frequently()) {
        if (randomBoolean()) {
            if (randomBoolean()) {
                exceptionOnOpenRate = 1.0 / between(5, 100);
                exceptionRate = 0.0d;
            } else {
                exceptionRate = 1.0 / between(5, 100);
                exceptionOnOpenRate = 0.0d;
            }
        } else {
            exceptionOnOpenRate = 1.0 / between(5, 100);
            exceptionRate = 1.0 / between(5, 100);
        }
    } else {
        // rarely no exception
        exceptionRate = 0d;
        exceptionOnOpenRate = 0d;
    }
    final boolean createIndexWithoutErrors = randomBoolean();
    int numInitialDocs = 0;
    if (createIndexWithoutErrors) {
        Settings.Builder settings = Settings.builder().put("index.number_of_replicas", numberOfReplicas());
        logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
        client().admin().indices().prepareCreate("test")
                .setSettings(settings)
                .addMapping("type", mapping, XContentType.JSON)
                .execute().actionGet();
        numInitialDocs = between(10, 100);
        ensureGreen();
        for (int i = 0; i < numInitialDocs; i++) {
            client().prepareIndex("test", "type", "init" + i).setSource("test", "init").get();
        }
        client().admin().indices().prepareRefresh("test").execute().get();
        client().admin().indices().prepareFlush("test").execute().get();
        client().admin().indices().prepareClose("test").execute().get();
        client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder()
                .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), exceptionRate)
                .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), exceptionOnOpenRate));
        client().admin().indices().prepareOpen("test").execute().get();
    } else {
        Settings.Builder settings = Settings.builder()
                .put("index.number_of_replicas", randomIntBetween(0, 1))
                // we cannot expect that the index will be valid
                .put(MockFSIndexStore.INDEX_CHECK_INDEX_ON_CLOSE_SETTING.getKey(), false)
                .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), exceptionRate)
                .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), exceptionOnOpenRate);
        logger.info("creating index: [test] using settings: [{}]", settings.build().getAsMap());
        client().admin().indices().prepareCreate("test")
                .setSettings(settings)
                .addMapping("type", mapping, XContentType.JSON)
                .execute().actionGet();
    }
    ClusterHealthResponse clusterHealthResponse = client().admin().cluster()
            .health(Requests.clusterHealthRequest().waitForYellowStatus().timeout(TimeValue.timeValueSeconds(5))).get();
    final int numDocs;
    final boolean expectAllShardsFailed;
    if (clusterHealthResponse.isTimedOut()) {
        /* Some seeds just won't let you create the index at all and we enter a ping-pong mode,
         * trying one node after another etc. That is OK, but we need to make sure we don't wait
         * forever when indexing documents, so we set numDocs = 1 and expect all shards to fail
         * when we search below. */
        logger.info("ClusterHealth timed out - only index one doc and expect searches to fail");
        numDocs = 1;
        expectAllShardsFailed = true;
    } else {
        numDocs = between(10, 100);
        expectAllShardsFailed = false;
    }
    int numCreated = 0;
    boolean[] added = new boolean[numDocs];
    for (int i = 0; i < numDocs; i++) {
        added[i] = false;
        try {
            IndexResponse indexResponse = client().prepareIndex("test", "type", Integer.toString(i))
                    .setTimeout(TimeValue.timeValueSeconds(1))
                    .setSource("test", English.intToEnglish(i))
                    .get();
            if (indexResponse.getResult() == DocWriteResponse.Result.CREATED) {
                numCreated++;
                added[i] = true;
            }
        } catch (ElasticsearchException ex) {
        }
    }
    ESIntegTestCase.NumShards numShards = getNumShards("test");
    logger.info("Start Refresh");
    // don't assert on failures here
    final RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().get();
    final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0;
    logger.info("Refresh failed [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ",
            refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length,
            refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards());
    final int numSearches = scaledRandomIntBetween(10, 20);
    // we don't check anything here, really just making sure we don't leave any open files or a broken index behind.
    for (int i = 0; i < numSearches; i++) {
        try {
            int docToQuery = between(0, numDocs - 1);
            int expectedResults = added[docToQuery] ? 1 : 0;
            logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery));
            SearchResponse searchResponse = client().prepareSearch().setTypes("type")
                    .setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery)))
                    .setSize(expectedResults).get();
            logger.info("Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards.numPrimaries);
            if (searchResponse.getSuccessfulShards() == numShards.numPrimaries && !refreshFailed) {
                assertResultsAndLogOnFailure(expectedResults, searchResponse);
            }
            // check match all
            searchResponse = client().prepareSearch().setTypes("type")
                    .setQuery(QueryBuilders.matchAllQuery())
                    .setSize(numCreated + numInitialDocs)
                    .addSort("_uid", SortOrder.ASC).get();
            logger.info("Match all Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), numShards.numPrimaries);
            if (searchResponse.getSuccessfulShards() == numShards.numPrimaries && !refreshFailed) {
                assertResultsAndLogOnFailure(numCreated + numInitialDocs, searchResponse);
            }
        } catch (SearchPhaseExecutionException ex) {
            logger.info("SearchPhaseException: [{}]", ex.getMessage());
            // if a scheduled refresh or flush fails all shards we see all shards failed here
            if (!(expectAllShardsFailed || refreshResponse.getSuccessfulShards() == 0 || ex.getMessage().contains("all shards failed"))) {
                throw ex;
            }
        }
    }
    if (createIndexWithoutErrors) {
        // check the index still contains the records that we indexed without errors
        client().admin().indices().prepareClose("test").execute().get();
        client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder()
                .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_SETTING.getKey(), 0)
                .put(MockFSDirectoryService.RANDOM_IO_EXCEPTION_RATE_ON_OPEN_SETTING.getKey(), 0));
        client().admin().indices().prepareOpen("test").execute().get();
        ensureGreen();
        SearchResponse searchResponse = client().prepareSearch().setTypes("type")
                .setQuery(QueryBuilders.matchQuery("test", "init")).get();
        assertNoFailures(searchResponse);
        assertHitCount(searchResponse, numInitialDocs);
    }
}
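Note the close/update-settings/open cycle: the MockFS exception-rate settings are index-scoped and are flipped while the index is closed, after which ensureGreen() re-verifies the reopened index. A generic sketch of that cycle (the helper name is hypothetical; the client APIs are the same ones used in the test above):

// Hypothetical helper capturing the close -> update settings -> open cycle.
static void updateSettingsOnClosedIndex(Client client, String index, Settings.Builder settings) {
    client.admin().indices().prepareClose(index).get();
    client.admin().indices().prepareUpdateSettings(index).setSettings(settings).get();
    client.admin().indices().prepareOpen(index).get();
}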
Use of org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse in project elasticsearch by elastic.
From the class TribeIT, the method ensureGreen.
private void ensureGreen(Client client) throws Exception {
    assertBusy(() -> {
        ClusterHealthResponse clusterHealthResponse = client.admin().cluster().prepareHealth()
                .setWaitForActiveShards(0)
                .setWaitForEvents(Priority.LANGUID)
                .setWaitForNoRelocatingShards(true)
                .get();
        assertThat(clusterHealthResponse.getStatus(), equalTo(ClusterHealthStatus.GREEN));
        assertFalse(clusterHealthResponse.isTimedOut());
    });
}
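Here assertBusy retries the whole check until it passes, which suits a tribe node whose merged cluster state converges asynchronously; setWaitForActiveShards(0) keeps the request from blocking on shard counts the tribe client may not see yet. The same pattern can be given an explicit retry budget via the overload assertBusy(runnable, maxWaitTime, unit); the 30-second figure below is an assumption, not taken from the test:

// Same pattern with an explicit upper bound on the busy-wait.
assertBusy(() -> {
    ClusterHealthResponse health = client.admin().cluster().prepareHealth()
            .setWaitForActiveShards(0)
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForNoRelocatingShards(true)
            .get();
    assertThat(health.getStatus(), equalTo(ClusterHealthStatus.GREEN));
}, 30, TimeUnit.SECONDS);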