Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.search.SearchResponse in project elasticsearch by elastic.
In class UpdateMappingIntegrationIT, method testDynamicUpdates:
public void testDynamicUpdates() throws Exception {
    client().admin().indices().prepareCreate("test")
            .setSettings(Settings.builder()
                    .put("index.number_of_shards", 1)
                    .put("index.number_of_replicas", 0)
                    .put(MapperService.INDEX_MAPPING_TOTAL_FIELDS_LIMIT_SETTING.getKey(), Long.MAX_VALUE))
            .execute().actionGet();
    client().admin().cluster().prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForGreenStatus()
            .execute().actionGet();
    int recCount = randomIntBetween(200, 600);
    int numberOfTypes = randomIntBetween(1, 5);
    List<IndexRequestBuilder> indexRequests = new ArrayList<>();
    for (int rec = 0; rec < recCount; rec++) {
        String type = "type" + (rec % numberOfTypes);
        String fieldName = "field_" + type + "_" + rec;
        indexRequests.add(client().prepareIndex("test", type, Integer.toString(rec)).setSource(fieldName, "some_value"));
    }
    indexRandom(true, indexRequests);
    logger.info("checking all the documents are there");
    RefreshResponse refreshResponse = client().admin().indices().prepareRefresh().execute().actionGet();
    assertThat(refreshResponse.getFailedShards(), equalTo(0));
    SearchResponse response = client().prepareSearch("test").setSize(0).execute().actionGet();
    assertThat(response.getHits().getTotalHits(), equalTo((long) recCount));
    logger.info("checking all the fields are in the mappings");
    for (int rec = 0; rec < recCount; rec++) {
        String type = "type" + (rec % numberOfTypes);
        String fieldName = "field_" + type + "_" + rec;
        assertConcreteMappingsOnAll("test", type, fieldName);
    }
}
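Note that the snippet above reads the hit count with the pre-7.x API, where getTotalHits() returns a long. Against the shaded Elasticsearch 7 client named in the heading, the same check would look roughly like the hedged sketch below, which reuses recCount from the snippet; setTrackTotalHits(true) is an assumption, only relevant when exact totals beyond the 10,000 default are wanted.

    // Hedged sketch (not part of UpdateMappingIntegrationIT): counting hits with the 7.x API,
    // where getTotalHits() returns a Lucene TotalHits object instead of a long.
    SearchResponse response = client().prepareSearch("test")
            .setSize(0)
            .setTrackTotalHits(true) // assumption: exact totals wanted beyond the 10,000 default
            .get();
    long totalHits = response.getHits().getTotalHits().value;
    assertThat(totalHits, equalTo((long) recCount));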
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.search.SearchResponse in project elasticsearch by elastic.
In class CircuitBreakerServiceIT, method testParentChecking:
/**
 * Test that a breaker correctly redistributes to a different breaker; in this case,
 * the fielddata breaker borrows space from the request breaker.
 */
@TestLogging("_root:DEBUG,org.elasticsearch.action.search:TRACE")
public void testParentChecking() throws Exception {
    if (noopBreakerUsed()) {
        logger.info("--> noop breakers used, skipping test");
        return;
    }
    assertAcked(prepareCreate("cb-test", 1, Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, between(0, 1)))
            .addMapping("type", "test", "type=text,fielddata=true"));
    Client client = client();
    // index some different terms so we have some field data for loading
    int docCount = scaledRandomIntBetween(300, 1000);
    List<IndexRequestBuilder> reqs = new ArrayList<>();
    for (long id = 0; id < docCount; id++) {
        reqs.add(client.prepareIndex("cb-test", "type", Long.toString(id)).setSource("test", "value" + id));
    }
    indexRandom(true, reqs);
    Settings resetSettings = Settings.builder()
            .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b")
            .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.0)
            .build();
    assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings));
    // Perform a search to load field data for the "test" field
    try {
        client.prepareSearch("cb-test").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC).get();
        fail("should have thrown an exception");
    } catch (Exception e) {
        String errMsg = "CircuitBreakingException[[fielddata] Data too large, data for [test] would be";
        assertThat("Exception: [" + e.toString() + "] should contain a CircuitBreakingException",
                e.toString(), containsString(errMsg));
        errMsg = "which is larger than the limit of [10/10b]]";
        assertThat("Exception: [" + e.toString() + "] should contain a CircuitBreakingException",
                e.toString(), containsString(errMsg));
    }
    // execute a search that loads field data (sorting on the "test" field)
    // again, this time it should trip the breaker
    SearchRequestBuilder searchRequest = client.prepareSearch("cb-test").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC);
    String errMsg = "Data too large, data for [test] would be";
    assertFailures(searchRequest, RestStatus.INTERNAL_SERVER_ERROR, containsString(errMsg));
    errMsg = "which is larger than the limit of [10/10b]";
    assertFailures(searchRequest, RestStatus.INTERNAL_SERVER_ERROR, containsString(errMsg));
    reset();
    // Adjust settings so the parent breaker will fail, but neither the fielddata breaker
    // nor the node request breaker will fail
    resetSettings = Settings.builder()
            .put(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "500b")
            .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "90%")
            .put(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey(), 1.0)
            .build();
    client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings).execute().actionGet();
    // Perform a search to load field data for the "test" field
    try {
        SearchResponse searchResponse = client.prepareSearch("cb-test").setQuery(matchAllQuery()).addSort("test", SortOrder.DESC).get();
        if (searchResponse.getShardFailures().length > 0) {
            // each shard must have failed with CircuitBreakingException
            for (ShardSearchFailure shardSearchFailure : searchResponse.getShardFailures()) {
                Throwable cause = ExceptionsHelper.unwrap(shardSearchFailure.getCause(), CircuitBreakingException.class);
                assertThat(cause, instanceOf(CircuitBreakingException.class));
                assertEquals(((CircuitBreakingException) cause).getByteLimit(), 500L);
            }
        } else {
            fail("should have thrown a CircuitBreakingException");
        }
    } catch (Exception e) {
        Throwable cause = ExceptionsHelper.unwrap(e, CircuitBreakingException.class);
        assertThat(cause, instanceOf(CircuitBreakingException.class));
        assertEquals(((CircuitBreakingException) cause).getByteLimit(), 500L);
        assertThat("Exception: [" + cause.toString() + "] should be caused by the parent circuit breaker",
                cause.toString(), startsWith("CircuitBreakingException[[parent] Data too large"));
    }
    reset();
}
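The reset() helper is not shown on this page. Below is a minimal sketch of what such a helper might do, assuming it simply clears the transient breaker settings back to the node defaults; the actual helper in CircuitBreakerServiceIT may differ.

    // Hypothetical sketch of a reset helper: clear the transient breaker limits so the next
    // test starts from the node's default circuit breaker settings again.
    private void resetBreakerSettings(Client client) {
        Settings clearSettings = Settings.builder()
                .putNull(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.getKey())
                .putNull(HierarchyCircuitBreakerService.FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.getKey())
                .putNull(HierarchyCircuitBreakerService.TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.getKey())
                .build();
        assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(clearSettings));
    }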
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.search.SearchResponse in project elasticsearch by elastic.
In class UpdateNumberOfReplicasIT, method testSimpleUpdateNumberOfReplicas:
public void testSimpleUpdateNumberOfReplicas() throws Exception {
    logger.info("Creating index test");
    assertAcked(prepareCreate("test", 2));
    logger.info("Running Cluster Health");
    ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForGreenStatus()
            .execute().actionGet();
    logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
    NumShards numShards = getNumShards("test");
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
    assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
    assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(numShards.numReplicas));
    assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.totalNumShards));
    for (int i = 0; i < 10; i++) {
        client().prepareIndex("test", "type1", Integer.toString(i))
                .setSource(jsonBuilder().startObject().field("value", "test" + i).endObject())
                .get();
    }
    refresh();
    for (int i = 0; i < 10; i++) {
        SearchResponse countResponse = client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get();
        assertHitCount(countResponse, 10L);
    }
    logger.info("Increasing the number of replicas from 1 to 2");
    assertAcked(client().admin().indices().prepareUpdateSettings("test")
            .setSettings(Settings.builder().put("index.number_of_replicas", 2))
            .execute().actionGet());
    logger.info("Running Cluster Health");
    clusterHealth = client().admin().cluster().prepareHealth()
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForYellowStatus()
            .setWaitForActiveShards(numShards.numPrimaries * 2)
            .execute().actionGet();
    logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
    assertThat(clusterHealth.isTimedOut(), equalTo(false));
    assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
    assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
    assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2));
    // only 2 copies allocated (1 replica) across 2 nodes
    assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 2));
logger.info("starting another node to new replicas will be allocated to it");
allowNodes("test", 3);
logger.info("Running Cluster Health");
clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes(">=3").execute().actionGet();
logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(2));
//all 3 copies allocated across 3 nodes
assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries * 3));
for (int i = 0; i < 10; i++) {
SearchResponse countResponse = client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get();
assertHitCount(countResponse, 10L);
}
logger.info("Decreasing number of replicas from 2 to 0");
assertAcked(client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put("index.number_of_replicas", 0)).get());
logger.info("Running Cluster Health");
clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().setWaitForNoRelocatingShards(true).setWaitForNodes(">=3").execute().actionGet();
logger.info("Done Cluster Health, status {}", clusterHealth.getStatus());
assertThat(clusterHealth.isTimedOut(), equalTo(false));
assertThat(clusterHealth.getStatus(), equalTo(ClusterHealthStatus.GREEN));
assertThat(clusterHealth.getIndices().get("test").getActivePrimaryShards(), equalTo(numShards.numPrimaries));
assertThat(clusterHealth.getIndices().get("test").getNumberOfReplicas(), equalTo(0));
//a single copy is allocated (replica set to 0)
assertThat(clusterHealth.getIndices().get("test").getActiveShards(), equalTo(numShards.numPrimaries));
for (int i = 0; i < 10; i++) {
assertHitCount(client().prepareSearch().setQuery(matchAllQuery()).get(), 10);
}
}
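assertHitCount comes from ElasticsearchAssertions; conceptually it is just an assertion on the SearchResponse total, roughly equivalent to the hedged sketch below (the real helper produces a more detailed failure message).

    // Simplified sketch of what the assertHitCount(countResponse, 10L) calls above verify.
    SearchResponse countResponse = client().prepareSearch().setSize(0).setQuery(matchAllQuery()).get();
    assertThat(countResponse.getHits().getTotalHits(), equalTo(10L));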
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.search.SearchResponse in project elasticsearch by elastic.
In class OpenCloseIndexIT, method testOpenCloseWithDocs:
public void testOpenCloseWithDocs() throws IOException, ExecutionException, InterruptedException {
    String mapping = XContentFactory.jsonBuilder().startObject()
            .startObject("type")
                .startObject("properties")
                    .startObject("test")
                        .field("type", "keyword")
                    .endObject()
                .endObject()
            .endObject()
            .endObject().string();
    assertAcked(client().admin().indices().prepareCreate("test").addMapping("type", mapping, XContentType.JSON));
    ensureGreen();
    int docs = between(10, 100);
    IndexRequestBuilder[] builder = new IndexRequestBuilder[docs];
    for (int i = 0; i < docs; i++) {
        builder[i] = client().prepareIndex("test", "type", "" + i).setSource("test", "init");
    }
    indexRandom(true, builder);
    if (randomBoolean()) {
        client().admin().indices().prepareFlush("test").setForce(true).execute().get();
    }
    client().admin().indices().prepareClose("test").execute().get();
    // check the index still contains the records that we indexed
    client().admin().indices().prepareOpen("test").execute().get();
    ensureGreen();
    SearchResponse searchResponse = client().prepareSearch().setTypes("type")
            .setQuery(QueryBuilders.matchQuery("test", "init")).get();
    assertNoFailures(searchResponse);
    assertHitCount(searchResponse, docs);
}
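Before reopening, one could also assert that the close actually took effect by inspecting the cluster state. A hedged sketch under the same pre-7.x test API used above (not part of OpenCloseIndexIT as shown here):

    // Hedged sketch: verify the index metadata reports the CLOSE state after prepareClose("test").
    ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
    IndexMetaData indexMetaData = stateResponse.getState().metaData().index("test");
    assertThat(indexMetaData.getState(), equalTo(IndexMetaData.State.CLOSE));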
Use of org.graylog.shaded.elasticsearch7.org.elasticsearch.action.search.SearchResponse in project elasticsearch by elastic.
In class IndexLookupIT, method checkExceptions:
private void checkExceptions(Script script) {
    try {
        SearchResponse sr = client().prepareSearch("test")
                .setQuery(QueryBuilders.matchAllQuery())
                .addScriptField("tvtest", script)
                .execute().actionGet();
        assertThat(sr.getHits().getHits().length, equalTo(0));
        ShardSearchFailure[] shardFails = sr.getShardFailures();
        for (ShardSearchFailure fail : shardFails) {
            assertThat(fail.reason().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitly."),
                    Matchers.greaterThan(-1));
        }
    } catch (SearchPhaseExecutionException ex) {
        assertThat("got " + ex.toString(),
                ex.toString().indexOf("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitly."),
                Matchers.greaterThan(-1));
    }
}
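Since the helper accepts either per-shard failures or a thrown SearchPhaseExecutionException, the same expectation could often be phrased with the assertFailures helper used in CircuitBreakerServiceIT above. A hedged sketch, assuming the failure always surfaces through the search request rather than only as shard-level failures:

    // Hedged alternative sketch, not the helper actually used by IndexLookupIT.
    assertFailures(
            client().prepareSearch("test")
                    .setQuery(QueryBuilders.matchAllQuery())
                    .addScriptField("tvtest", script),
            RestStatus.INTERNAL_SERVER_ERROR,
            containsString("Cannot iterate twice! If you want to iterate more that once, add _CACHE explicitly."));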