Use of org.opensearch.action.search.SearchPhaseExecutionException in project OpenSearch by opensearch-project.
From the class PercentilesBucketIT, method testBadPercents_asSubAgg:
public void testBadPercents_asSubAgg() throws Exception {
    double[] badPercents = { -1.0, 110.0 };
    try {
        client().prepareSearch("idx")
            .addAggregation(
                terms("terms").field("tag")
                    .order(BucketOrder.key(true))
                    .subAggregation(
                        histogram("histo").field(SINGLE_VALUED_FIELD_NAME)
                            .interval(interval)
                            .extendedBounds(minRandomValue, maxRandomValue)
                    )
                    .subAggregation(percentilesBucket("percentiles_bucket", "histo>_count").setPercents(badPercents))
            )
            .get();
        fail("Illegal percent's were provided but no exception was thrown.");
    } catch (Exception e) {
        Throwable cause = ExceptionsHelper.unwrapCause(e);
        if (cause == null) {
            throw e;
        } else if (cause instanceof SearchPhaseExecutionException) {
            SearchPhaseExecutionException spee = (SearchPhaseExecutionException) e;
            Throwable rootCause = spee.getRootCause();
            if (!(rootCause instanceof IllegalArgumentException)) {
                throw e;
            }
        } else if (!(cause instanceof IllegalArgumentException)) {
            throw e;
        }
    }
}
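The try/catch above predates the expectThrows idiom used in the later examples. Below is a minimal sketch of the same root-cause assertion written with expectThrows and the standard Hamcrest instanceOf matcher; the aggregation structure and bad percent value are simplified for illustration, and it assumes the validation error surfaces as a SearchPhaseExecutionException (the original test also tolerates a bare IllegalArgumentException).

// Sketch (assumed names): assert that an illegal percent is rejected with an
// IllegalArgumentException as the root cause of the SearchPhaseExecutionException.
SearchPhaseExecutionException e = expectThrows(
    SearchPhaseExecutionException.class,
    () -> client().prepareSearch("idx")
        .addAggregation(histogram("histo").field(SINGLE_VALUED_FIELD_NAME).interval(interval))
        .addAggregation(percentilesBucket("percentiles_bucket", "histo>_count").setPercents(new double[] { -1.0 }))
        .get()
);
assertThat(e.getRootCause(), instanceOf(IllegalArgumentException.class));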
Use of org.opensearch.action.search.SearchPhaseExecutionException in project OpenSearch by opensearch-project.
From the class SearchRedStateIndexIT, method testDisallowPartialsWithRedState:
public void testDisallowPartialsWithRedState() throws Exception {
    buildRedIndex(cluster().numDataNodes() + 2);
    SearchPhaseExecutionException ex = expectThrows(
        SearchPhaseExecutionException.class,
        () -> client().prepareSearch().setSize(0).setAllowPartialSearchResults(false).get()
    );
    assertThat(ex.getDetailedMessage(), containsString("Search rejected due to missing shard"));
}
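buildRedIndex is a helper defined elsewhere in SearchRedStateIndexIT. A hedged sketch of what it presumably does follows: create an index with no replicas and more shards than the surviving nodes can host, then stop a data node and wait for red health. The exact implementation in the project may differ.

// Hypothetical sketch of a buildRedIndex(numShards) helper: index a few documents, then stop a
// data node so some primaries become unassigned and the index goes red.
private void buildRedIndex(int numShards) throws Exception {
    assertAcked(prepareCreate("test").setSettings(Settings.builder()
        .put("index.number_of_shards", numShards)
        .put("index.number_of_replicas", 0)));
    ensureGreen();
    for (int i = 0; i < 10; i++) {
        client().prepareIndex("test").setId(String.valueOf(i)).setSource("field1", "value1").get();
    }
    refresh();
    internalCluster().stopRandomDataNode();
    client().admin().cluster().prepareHealth().setWaitForStatus(ClusterHealthStatus.RED).get();
}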
Use of org.opensearch.action.search.SearchPhaseExecutionException in project OpenSearch by opensearch-project.
From the class SearchRedStateIndexIT, method testClusterDisallowPartialsWithRedState:
public void testClusterDisallowPartialsWithRedState() throws Exception {
    buildRedIndex(cluster().numDataNodes() + 2);
    setClusterDefaultAllowPartialResults(false);
    SearchPhaseExecutionException ex = expectThrows(
        SearchPhaseExecutionException.class,
        () -> client().prepareSearch().setSize(0).get()
    );
    assertThat(ex.getDetailedMessage(), containsString("Search rejected due to missing shard"));
}
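setClusterDefaultAllowPartialResults is another test-class helper. Presumably it flips the cluster-wide default through the search.default_allow_partial_results setting, roughly as sketched below; the use of transient settings here is an assumption.

// Hypothetical sketch: disallow partial results by default for every search request by
// updating the cluster setting "search.default_allow_partial_results".
private void setClusterDefaultAllowPartialResults(boolean allowPartialResults) {
    assertAcked(client().admin().cluster().prepareUpdateSettings()
        .setTransientSettings(Settings.builder()
            .put("search.default_allow_partial_results", allowPartialResults))
        .get());
}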
Use of org.opensearch.action.search.SearchPhaseExecutionException in project OpenSearch by opensearch-project.
From the class SearchWhileRelocatingIT, method testSearchAndRelocateConcurrently:
private void testSearchAndRelocateConcurrently(final int numberOfReplicas) throws Exception {
    final int numShards = between(1, 20);
    client().admin().indices().prepareCreate("test")
        .setSettings(Settings.builder()
            .put("index.number_of_shards", numShards)
            .put("index.number_of_replicas", numberOfReplicas))
        .addMapping("type", "loc", "type=geo_point", "test", "type=text")
        .get();
    ensureGreen();
    List<IndexRequestBuilder> indexBuilders = new ArrayList<>();
    final int numDocs = between(10, 20);
    for (int i = 0; i < numDocs; i++) {
        indexBuilders.add(
            client().prepareIndex("test")
                .setId(Integer.toString(i))
                .setSource(
                    jsonBuilder().startObject()
                        .field("test", "value")
                        .startObject("loc")
                        .field("lat", 11)
                        .field("lon", 21)
                        .endObject()
                        .endObject()
                )
        );
    }
    indexRandom(true, indexBuilders.toArray(new IndexRequestBuilder[indexBuilders.size()]));
    assertHitCount(client().prepareSearch().get(), numDocs);
    final int numIters = scaledRandomIntBetween(5, 20);
    for (int i = 0; i < numIters; i++) {
        final AtomicBoolean stop = new AtomicBoolean(false);
        final List<String> nonCriticalExceptions = new CopyOnWriteArrayList<>();
        Thread[] threads = new Thread[scaledRandomIntBetween(1, 3)];
        for (int j = 0; j < threads.length; j++) {
            threads[j] = new Thread() {
                @Override
                public void run() {
                    try {
                        while (!stop.get()) {
                            SearchResponse sr = client().prepareSearch().setSize(numDocs).get();
                            if (sr.getHits().getTotalHits().value != numDocs) {
                                // A short hit count with no shard failures can happen when the cluster
                                // state is stale at the moment the request comes in. It's a small window
                                // but a known limitation, so record it as non-critical.
                                if (sr.getTotalShards() != sr.getSuccessfulShards() && sr.getFailedShards() == 0) {
                                    nonCriticalExceptions.add(
                                        "Count is " + sr.getHits().getTotalHits().value + " but " + numDocs
                                            + " was expected. " + formatShardStatus(sr)
                                    );
                                } else {
                                    assertHitCount(sr, numDocs);
                                }
                            }
                            final SearchHits sh = sr.getHits();
                            assertThat(
                                "Expected hits to be the same size the actual hits array",
                                sh.getTotalHits().value,
                                equalTo((long) (sh.getHits().length))
                            );
                            // The more critical failure is when the actual hits array has a different
                            // size than the reported number of hits.
                        }
                    } catch (SearchPhaseExecutionException ex) {
                        // with replicas this should not happen
                        if (numberOfReplicas == 1 || !ex.getMessage().contains("all shards failed")) {
                            throw ex;
                        }
                    }
                }
            };
        }
        for (int j = 0; j < threads.length; j++) {
            threads[j].start();
        }
        allowNodes("test", between(1, 3));
        client().admin().cluster().prepareReroute().get();
        stop.set(true);
        for (int j = 0; j < threads.length; j++) {
            threads[j].join();
        }
        // this might time out on some machines if they are really busy and you hit lots of throttling
        ClusterHealthResponse resp = client().admin().cluster().prepareHealth()
            .setWaitForYellowStatus()
            .setWaitForNoRelocatingShards(true)
            .setWaitForEvents(Priority.LANGUID)
            .setTimeout("5m")
            .get();
        assertNoTimeout(resp);
        // if we hit only non-critical exceptions we make sure that the post search works
        if (!nonCriticalExceptions.isEmpty()) {
            logger.info("non-critical exceptions: {}", nonCriticalExceptions);
            for (int j = 0; j < 10; j++) {
                assertHitCount(client().prepareSearch().get(), numDocs);
            }
        }
    }
}
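allowNodes is a test-infrastructure helper that narrows the set of nodes allowed to hold the test index, which is what forces shards to relocate while the search threads run. It is roughly equivalent to a shard allocation filtering update like the one sketched below; the node names are assumed for illustration.

// Hypothetical sketch of what allowNodes("test", n) amounts to: restrict the "test" index to a
// subset of node names via allocation filtering, so its existing shards have to relocate.
client().admin().indices().prepareUpdateSettings("test")
    .setSettings(Settings.builder().put("index.routing.allocation.include._name", "node_t0,node_t1"))
    .get();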
Use of org.opensearch.action.search.SearchPhaseExecutionException in project OpenSearch by opensearch-project.
From the class SearchWithRandomExceptionsIT, method testRandomExceptions:
public void testRandomExceptions() throws IOException, InterruptedException, ExecutionException {
    String mapping = Strings.toString(
        XContentFactory.jsonBuilder()
            .startObject()
            .startObject("type")
            .startObject("properties")
            .startObject("test")
            .field("type", "keyword")
            .endObject()
            .endObject()
            .endObject()
            .endObject()
    );
    final double lowLevelRate;
    final double topLevelRate;
    if (frequently()) {
        if (randomBoolean()) {
            if (randomBoolean()) {
                lowLevelRate = 1.0 / between(2, 10);
                topLevelRate = 0.0d;
            } else {
                topLevelRate = 1.0 / between(2, 10);
                lowLevelRate = 0.0d;
            }
        } else {
            lowLevelRate = 1.0 / between(2, 10);
            topLevelRate = 1.0 / between(2, 10);
        }
    } else {
        // rarely no exception
        topLevelRate = 0d;
        lowLevelRate = 0d;
    }
    Builder settings = Settings.builder()
        .put(indexSettings())
        .put(EXCEPTION_TOP_LEVEL_RATIO_KEY, topLevelRate)
        .put(EXCEPTION_LOW_LEVEL_RATIO_KEY, lowLevelRate)
        .put(MockEngineSupport.WRAP_READER_RATIO.getKey(), 1.0d);
    logger.info("creating index: [test] using settings: [{}]", settings.build());
    assertAcked(prepareCreate("test").setSettings(settings).addMapping("type", mapping, XContentType.JSON));
    ensureSearchable();
    final int numDocs = between(10, 100);
    int numCreated = 0;
    boolean[] added = new boolean[numDocs];
    for (int i = 0; i < numDocs; i++) {
        try {
            IndexResponse indexResponse = client().prepareIndex("test")
                .setId("" + i)
                .setTimeout(TimeValue.timeValueSeconds(1))
                .setSource("test", English.intToEnglish(i))
                .get();
            if (indexResponse.getResult() == DocWriteResponse.Result.CREATED) {
                numCreated++;
                added[i] = true;
            }
        } catch (OpenSearchException ex) {
            // an injected exception may abort this index request; the doc simply is not marked as added
        }
    }
logger.info("Start Refresh");
// don't assert on failures here
RefreshResponse refreshResponse = client().admin().indices().prepareRefresh("test").execute().get();
final boolean refreshFailed = refreshResponse.getShardFailures().length != 0 || refreshResponse.getFailedShards() != 0;
logger.info("Refresh failed [{}] numShardsFailed: [{}], shardFailuresLength: [{}], successfulShards: [{}], totalShards: [{}] ", refreshFailed, refreshResponse.getFailedShards(), refreshResponse.getShardFailures().length, refreshResponse.getSuccessfulShards(), refreshResponse.getTotalShards());
NumShards test = getNumShards("test");
final int numSearches = scaledRandomIntBetween(100, 200);
// we don't check anything here really just making sure we don't leave any open files or a broken index behind.
for (int i = 0; i < numSearches; i++) {
try {
int docToQuery = between(0, numDocs - 1);
int expectedResults = added[docToQuery] ? 1 : 0;
logger.info("Searching for [test:{}]", English.intToEnglish(docToQuery));
SearchResponse searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchQuery("test", English.intToEnglish(docToQuery))).setSize(expectedResults).get();
logger.info("Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), test.numPrimaries);
if (searchResponse.getSuccessfulShards() == test.numPrimaries && !refreshFailed) {
assertResultsAndLogOnFailure(expectedResults, searchResponse);
}
// check match all
searchResponse = client().prepareSearch().setQuery(QueryBuilders.matchAllQuery()).setSize(numCreated).addSort("_id", SortOrder.ASC).get();
logger.info("Match all Successful shards: [{}] numShards: [{}]", searchResponse.getSuccessfulShards(), test.numPrimaries);
if (searchResponse.getSuccessfulShards() == test.numPrimaries && !refreshFailed) {
assertResultsAndLogOnFailure(numCreated, searchResponse);
}
} catch (SearchPhaseExecutionException ex) {
logger.info("expected SearchPhaseException: [{}]", ex.getMessage());
}
}
}
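assertResultsAndLogOnFailure is a helper from the surrounding test class. A hedged sketch of what such a helper typically does is shown below: log shard-level failures before asserting on the hit count so mismatches are easier to diagnose. The real implementation in the project may differ.

// Hypothetical sketch of an assertResultsAndLogOnFailure(expected, response) helper.
private void assertResultsAndLogOnFailure(long expectedResults, SearchResponse searchResponse) {
    if (searchResponse.getHits().getTotalHits().value != expectedResults) {
        // surface per-shard failures in the test log before failing the assertion
        for (ShardSearchFailure failure : searchResponse.getShardFailures()) {
            logger.info("shard failure: {}", failure);
        }
    }
    assertHitCount(searchResponse, expectedResults);
}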