Use of org.elasticsearch.common.breaker.CircuitBreakingException in project elasticsearch by elastic.
In the class HierarchyCircuitBreakerService, the method checkParentLimit:
/**
* Checks whether the parent breaker has been tripped
*/
public void checkParentLimit(String label) throws CircuitBreakingException {
    long totalUsed = 0;
    for (CircuitBreaker breaker : this.breakers.values()) {
        totalUsed += (breaker.getUsed() * breaker.getOverhead());
    }
    long parentLimit = this.parentSettings.getLimit();
    if (totalUsed > parentLimit) {
        this.parentTripCount.incrementAndGet();
        final String message = "[parent] Data too large, data for [" + label + "]" +
                " would be [" + totalUsed + "/" + new ByteSizeValue(totalUsed) + "]" +
                ", which is larger than the limit of [" + parentLimit + "/" + new ByteSizeValue(parentLimit) + "]";
        throw new CircuitBreakingException(message, totalUsed, parentLimit);
    }
}
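For context on how this check is reached: a child breaker typically reserves its bytes first and then asks the parent to validate the combined total, rolling the reservation back if the parent trips. A minimal sketch of that caller pattern follows; the field and method names here are illustrative, not the exact ChildMemoryCircuitBreaker source.

// Illustrative caller: reserve on the child breaker, then defer to checkParentLimit.
public double addEstimateBytesAndMaybeBreak(long bytes, String label) throws CircuitBreakingException {
    long newUsed = this.used.addAndGet(bytes);   // optimistic reservation on this child breaker
    try {
        parent.checkParentLimit(label);          // may throw CircuitBreakingException (see above)
    } catch (CircuitBreakingException e) {
        this.used.addAndGet(-bytes);             // roll back so a tripped parent leaves no residue
        throw e;
    }
    return newUsed;
}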
Use of org.elasticsearch.common.breaker.CircuitBreakingException in project elasticsearch by elastic.
In the class ElasticsearchExceptionTests, the method randomExceptions:
public static Tuple<Throwable, ElasticsearchException> randomExceptions() {
    Throwable actual;
    ElasticsearchException expected;

    int type = randomIntBetween(0, 5);
    switch (type) {
        case 0:
            actual = new ClusterBlockException(singleton(DiscoverySettings.NO_MASTER_BLOCK_WRITES));
            expected = new ElasticsearchException("Elasticsearch exception [type=cluster_block_exception, " +
                    "reason=blocked by: [SERVICE_UNAVAILABLE/2/no master];]");
            break;
        case 1:
            actual = new CircuitBreakingException("Data too large", 123, 456);
            expected = new ElasticsearchException("Elasticsearch exception [type=circuit_breaking_exception, reason=Data too large]");
            break;
        case 2:
            actual = new SearchParseException(new TestSearchContext(null), "Parse failure", new XContentLocation(12, 98));
            expected = new ElasticsearchException("Elasticsearch exception [type=search_parse_exception, reason=Parse failure]");
            break;
        case 3:
            actual = new IllegalArgumentException("Closed resource", new RuntimeException("Resource"));
            expected = new ElasticsearchException("Elasticsearch exception [type=illegal_argument_exception, reason=Closed resource]",
                    new ElasticsearchException("Elasticsearch exception [type=runtime_exception, reason=Resource]"));
            break;
        case 4:
            actual = new SearchPhaseExecutionException("search", "all shards failed",
                    new ShardSearchFailure[] { new ShardSearchFailure(new ParsingException(1, 2, "foobar", null),
                            new SearchShardTarget("node_1", new Index("foo", "_na_"), 1)) });
            expected = new ElasticsearchException("Elasticsearch exception [type=search_phase_execution_exception, " +
                    "reason=all shards failed]");
            expected.addMetadata("es.phase", "search");
            break;
        case 5:
            actual = new ElasticsearchException("Parsing failed",
                    new ParsingException(9, 42, "Wrong state", new NullPointerException("Unexpected null value")));
            ElasticsearchException expectedCause = new ElasticsearchException("Elasticsearch exception [type=parsing_exception, " +
                    "reason=Wrong state]", new ElasticsearchException("Elasticsearch exception [type=null_pointer_exception, " +
                    "reason=Unexpected null value]"));
            expected = new ElasticsearchException("Elasticsearch exception [type=exception, reason=Parsing failed]", expectedCause);
            break;
        default:
            throw new UnsupportedOperationException("No randomized exceptions generated for type [" + type + "]");
    }

    if (actual instanceof ElasticsearchException) {
        ElasticsearchException actualException = (ElasticsearchException) actual;
        if (randomBoolean()) {
            int nbHeaders = randomIntBetween(1, 5);
            Map<String, List<String>> randomHeaders = new HashMap<>(nbHeaders);
            for (int i = 0; i < nbHeaders; i++) {
                List<String> values = new ArrayList<>();
                int nbValues = randomIntBetween(1, 3);
                for (int j = 0; j < nbValues; j++) {
                    values.add(frequently() ? randomAsciiOfLength(5) : "");
                }
                randomHeaders.put("header_" + i, values);
            }
            for (Map.Entry<String, List<String>> entry : randomHeaders.entrySet()) {
                actualException.addHeader(entry.getKey(), entry.getValue());
                expected.addHeader(entry.getKey(), entry.getValue());
            }
            if (rarely()) {
                // Empty or null headers are not printed out by the toXContent method
                actualException.addHeader("ignored", randomBoolean() ? emptyList() : null);
            }
        }

        if (randomBoolean()) {
            int nbMetadata = randomIntBetween(1, 5);
            Map<String, List<String>> randomMetadata = new HashMap<>(nbMetadata);
            for (int i = 0; i < nbMetadata; i++) {
                List<String> values = new ArrayList<>();
                int nbValues = randomIntBetween(1, 3);
                for (int j = 0; j < nbValues; j++) {
                    values.add(frequently() ? randomAsciiOfLength(5) : "");
                }
                randomMetadata.put("es.metadata_" + i, values);
            }
            for (Map.Entry<String, List<String>> entry : randomMetadata.entrySet()) {
                actualException.addMetadata(entry.getKey(), entry.getValue());
                expected.addMetadata(entry.getKey(), entry.getValue());
            }
            if (rarely()) {
                // Empty or null metadata are not printed out by the toXContent method
                actualException.addMetadata("es.ignored", randomBoolean() ? emptyList() : null);
            }
        }

        if (randomBoolean()) {
            int nbResources = randomIntBetween(1, 5);
            for (int i = 0; i < nbResources; i++) {
                String resourceType = "type_" + i;
                String[] resourceIds = new String[randomIntBetween(1, 3)];
                for (int j = 0; j < resourceIds.length; j++) {
                    resourceIds[j] = frequently() ? randomAsciiOfLength(5) : "";
                }
                actualException.setResources(resourceType, resourceIds);
                expected.setResources(resourceType, resourceIds);
            }
        }
    }
    return new Tuple<>(actual, expected);
}
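A side note on case 1 above: the two numeric constructor arguments are the bytes the failed reservation asked for and the breaker's byte limit, and they are exposed through accessors. A small, hedged round-trip check, assuming the getter names used elsewhere in these tests:

CircuitBreakingException cbe = new CircuitBreakingException("Data too large", 123, 456);
assertEquals(123, cbe.getBytesWanted());   // size of the reservation that failed
assertEquals(456, cbe.getByteLimit());     // configured limit that was exceeded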
Use of org.elasticsearch.common.breaker.CircuitBreakingException in project elasticsearch by elastic.
In the class CircuitBreakerServiceIT, the method testBucketBreaker:
public void testBucketBreaker() throws Exception {
    if (noopBreakerUsed()) {
        logger.info("--> noop breakers used, skipping test");
        return;
    }
    assertAcked(prepareCreate("cb-test", 1, Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, between(0, 1))));
    Client client = client();

    // Make request breaker limited to a small amount
    Settings resetSettings = Settings.builder()
            .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "100b")
            .build();
    assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings));

    // index some different terms so we have some field data for loading
    int docCount = scaledRandomIntBetween(100, 1000);
    List<IndexRequestBuilder> reqs = new ArrayList<>();
    for (long id = 0; id < docCount; id++) {
        reqs.add(client.prepareIndex("cb-test", "type", Long.toString(id)).setSource("test", id));
    }
    indexRandom(true, reqs);

    // A terms aggregation on the "test" field should trip the bucket circuit breaker
    try {
        SearchResponse resp = client.prepareSearch("cb-test")
                .setQuery(matchAllQuery())
                .addAggregation(terms("my_terms").field("test"))
                .get();
        assertTrue("there should be shard failures", resp.getFailedShards() > 0);
        fail("aggregation should have tripped the breaker");
    } catch (Exception e) {
        String errMsg = "CircuitBreakingException[[request] Data too large, data for [<agg [my_terms]>] would be";
        assertThat("Exception: [" + e.toString() + "] should contain a CircuitBreakingException",
                e.toString(), containsString(errMsg));
        errMsg = "which is larger than the limit of [100/100b]]";
        assertThat("Exception: [" + e.toString() + "] should contain a CircuitBreakingException",
                e.toString(), containsString(errMsg));
    }
}
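A test that pins the request breaker to 100 bytes normally restores the default afterwards so the tiny transient limit cannot bleed into other tests. A minimal sketch of such a cleanup hook, assuming it lives in the same test class:

@After
public void resetBreakerLimit() {
    // Clearing the transient setting falls back to the default request breaker limit.
    Settings reset = Settings.builder()
            .putNull(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey())
            .build();
    assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(reset));
}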
Use of org.elasticsearch.common.breaker.CircuitBreakingException in project elasticsearch by elastic.
In the class CircuitBreakerServiceIT, the method testLimitsRequestSize:
public void testLimitsRequestSize() throws Exception {
    ByteSizeValue inFlightRequestsLimit = new ByteSizeValue(8, ByteSizeUnit.KB);
    if (noopBreakerUsed()) {
        logger.info("--> noop breakers used, skipping test");
        return;
    }

    internalCluster().ensureAtLeastNumDataNodes(2);

    NodesStatsResponse nodeStats = client().admin().cluster().prepareNodesStats().get();
    List<NodeStats> dataNodeStats = new ArrayList<>();
    for (NodeStats stat : nodeStats.getNodes()) {
        if (stat.getNode().isDataNode()) {
            dataNodeStats.add(stat);
        }
    }

    assertThat(dataNodeStats.size(), greaterThanOrEqualTo(2));
    Collections.shuffle(dataNodeStats, random());

    // send bulk request from source node to target node later. The sole shard is bound to the target node.
    NodeStats targetNode = dataNodeStats.get(0);
    NodeStats sourceNode = dataNodeStats.get(1);

    assertAcked(prepareCreate("index").setSettings(Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .put("index.routing.allocation.include._name", targetNode.getNode().getName())
            .put(EnableAllocationDecider.INDEX_ROUTING_REBALANCE_ENABLE_SETTING.getKey(), EnableAllocationDecider.Rebalance.NONE)));

    Client client = client(sourceNode.getNode().getName());

    // we use the limit size as a (very) rough indication of how many requests we should send to hit the limit
    int numRequests = inFlightRequestsLimit.bytesAsInt();
    BulkRequest bulkRequest = new BulkRequest();
    for (int i = 0; i < numRequests; i++) {
        IndexRequest indexRequest = new IndexRequest("index", "type", Integer.toString(i));
        indexRequest.source(Requests.INDEX_CONTENT_TYPE, "field", "value", "num", i);
        bulkRequest.add(indexRequest);
    }

    Settings limitSettings = Settings.builder()
            .put(IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), inFlightRequestsLimit)
            .build();
    assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(limitSettings));

    // can either fail directly with an exception or the response contains exceptions (depending on client)
    try {
        BulkResponse response = client.bulk(bulkRequest).actionGet();
        if (!response.hasFailures()) {
            fail("Should have thrown CircuitBreakingException");
        } else {
            // each item must have failed with CircuitBreakingException
            for (BulkItemResponse bulkItemResponse : response) {
                Throwable cause = ExceptionsHelper.unwrapCause(bulkItemResponse.getFailure().getCause());
                assertThat(cause, instanceOf(CircuitBreakingException.class));
                assertEquals(((CircuitBreakingException) cause).getByteLimit(), inFlightRequestsLimit.getBytes());
            }
        }
    } catch (CircuitBreakingException ex) {
        assertEquals(ex.getByteLimit(), inFlightRequestsLimit.getBytes());
    }
}
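When a run like this behaves unexpectedly, it can help to look at the in-flight-requests breaker directly through the node stats API. The sketch below assumes the 5.x breaker stats accessors (setBreaker, getBreaker, getStats) behave as named; treat it as illustrative rather than part of the test.

// Inspect the in_flight_requests breaker on every node after the bulk attempt.
NodesStatsResponse stats = client().admin().cluster().prepareNodesStats().setBreaker(true).get();
for (NodeStats node : stats.getNodes()) {
    CircuitBreakerStats inFlight = node.getBreaker().getStats(CircuitBreaker.IN_FLIGHT_REQUESTS);
    logger.info("--> node [{}]: limit [{}], estimated [{}], tripped [{}]",
            node.getNode().getName(), inFlight.getLimit(), inFlight.getEstimated(), inFlight.getTrippedCount());
}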
Use of org.elasticsearch.common.breaker.CircuitBreakingException in project elasticsearch by elastic.
In the class CircuitBreakerServiceIT, the method testRequestBreaker:
public void testRequestBreaker() throws Exception {
    if (noopBreakerUsed()) {
        logger.info("--> noop breakers used, skipping test");
        return;
    }
    assertAcked(prepareCreate("cb-test", 1, Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, between(0, 1))));
    Client client = client();

    // Make request breaker limited to a small amount
    Settings resetSettings = Settings.builder()
            .put(HierarchyCircuitBreakerService.REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.getKey(), "10b")
            .build();
    assertAcked(client.admin().cluster().prepareUpdateSettings().setTransientSettings(resetSettings));

    // index some different terms so we have some field data for loading
    int docCount = scaledRandomIntBetween(300, 1000);
    List<IndexRequestBuilder> reqs = new ArrayList<>();
    for (long id = 0; id < docCount; id++) {
        reqs.add(client.prepareIndex("cb-test", "type", Long.toString(id)).setSource("test", id));
    }
    indexRandom(true, reqs);

    // A cardinality aggregation uses BigArrays and thus the REQUEST breaker
    try {
        client.prepareSearch("cb-test")
                .setQuery(matchAllQuery())
                .addAggregation(cardinality("card").field("test"))
                .get();
        fail("aggregation should have tripped the breaker");
    } catch (Exception e) {
        String errMsg = "CircuitBreakingException[[request] Data too large";
        assertThat("Exception: [" + e.toString() + "] should contain a CircuitBreakingException",
                e.toString(), containsString(errMsg));
        errMsg = "which is larger than the limit of [10/10b]]";
        assertThat("Exception: [" + e.toString() + "] should contain a CircuitBreakingException",
                e.toString(), containsString(errMsg));
    }
}
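The string matching above can be complemented by checking that the request breaker actually recorded a trip. A hedged sketch, reusing the node stats approach from the previous example and assuming CircuitBreakerStats exposes the tripped count:

// At least one node's request breaker should have tripped after the failed aggregation.
NodesStatsResponse stats = client().admin().cluster().prepareNodesStats().setBreaker(true).get();
long tripped = 0;
for (NodeStats node : stats.getNodes()) {
    tripped += node.getBreaker().getStats(CircuitBreaker.REQUEST).getTrippedCount();
}
assertThat(tripped, greaterThanOrEqualTo(1L));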