Use of org.opensearch.common.util.concurrent.AtomicArray in project OpenSearch (opensearch-project).
Class TransportMultiGetAction, method doExecute:
@Override
protected void doExecute(Task task, final MultiGetRequest request, final ActionListener<MultiGetResponse> listener) {
    ClusterState clusterState = clusterService.state();
    clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
    // One slot per requested item, so responses keep the original request order.
    final AtomicArray<MultiGetItemResponse> responses = new AtomicArray<>(request.items.size());
    final Map<ShardId, MultiGetShardRequest> shardRequests = new HashMap<>();
    for (int i = 0; i < request.items.size(); i++) {
        MultiGetRequest.Item item = request.items.get(i);
        String concreteSingleIndex;
        try {
            concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, item).getName();
            item.routing(clusterState.metadata().resolveIndexRouting(item.routing(), item.index()));
            if ((item.routing() == null) && (clusterState.getMetadata().routingRequired(concreteSingleIndex))) {
                responses.set(
                    i,
                    newItemFailure(concreteSingleIndex, item.id(), new RoutingMissingException(concreteSingleIndex, item.id()))
                );
                continue;
            }
        } catch (Exception e) {
            responses.set(i, newItemFailure(item.index(), item.id(), e));
            continue;
        }
        // Group the remaining items by the shard that owns them.
        ShardId shardId = clusterService.operationRouting()
            .getShards(clusterState, concreteSingleIndex, item.id(), item.routing(), null)
            .shardId();
        MultiGetShardRequest shardRequest = shardRequests.get(shardId);
        if (shardRequest == null) {
            shardRequest = new MultiGetShardRequest(request, shardId.getIndexName(), shardId.getId());
            shardRequests.put(shardId, shardRequest);
        }
        shardRequest.add(i, item);
    }
    if (shardRequests.isEmpty()) {
        // only failures..
        listener.onResponse(new MultiGetResponse(responses.toArray(new MultiGetItemResponse[responses.length()])));
    }
    executeShardAction(listener, responses, shardRequests);
}
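The detail worth copying from this method is that the AtomicArray is sized to the number of requested items and every up-front failure is written at the item's original index, so the final MultiGetResponse comes back in request order even though some items never reach a shard. A minimal, self-contained sketch of that idea, assuming only the real AtomicArray class (FanOutSketch, validate, and the list of ids are illustrative, not OpenSearch APIs):

import org.opensearch.common.util.concurrent.AtomicArray;

import java.util.List;

final class FanOutSketch {
    // Record a failure for each invalid item at its original index; untouched slots stay
    // null, so later code can fill them with real responses and still emit results in
    // request order, exactly as doExecute does above.
    static AtomicArray<Exception> validate(List<String> itemIds) {
        AtomicArray<Exception> failures = new AtomicArray<>(itemIds.size());
        for (int i = 0; i < itemIds.size(); i++) {
            if (itemIds.get(i) == null) {
                failures.set(i, new IllegalArgumentException("missing id for item " + i));
            }
        }
        return failures;
    }
}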
Class TransportSearchIT, method testCircuitBreakerReduceFail:
public void testCircuitBreakerReduceFail() throws Exception {
    int numShards = randomIntBetween(1, 10);
    indexSomeDocs("test", numShards, numShards * 3);
    {
        final AtomicArray<Boolean> responses = new AtomicArray<>(10);
        final CountDownLatch latch = new CountDownLatch(10);
        for (int i = 0; i < 10; i++) {
            int batchReduceSize = randomIntBetween(2, Math.max(numShards + 1, 3));
            SearchRequest request = client().prepareSearch("test")
                .addAggregation(new TestAggregationBuilder("test"))
                .setBatchedReduceSize(batchReduceSize)
                .request();
            final int index = i;
            client().search(request, new ActionListener<SearchResponse>() {
                @Override
                public void onResponse(SearchResponse response) {
                    responses.set(index, true);
                    latch.countDown();
                }

                @Override
                public void onFailure(Exception e) {
                    responses.set(index, false);
                    latch.countDown();
                }
            });
        }
        latch.await();
        assertThat(responses.asList().size(), equalTo(10));
        for (boolean resp : responses.asList()) {
            assertTrue(resp);
        }
        assertBusy(() -> assertThat(requestBreakerUsed(), equalTo(0L)));
    }
    try {
        Settings settings = Settings.builder().put("indices.breaker.request.limit", "1b").build();
        assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
        final Client client = client();
        assertBusy(() -> {
            SearchPhaseExecutionException exc = expectThrows(
                SearchPhaseExecutionException.class,
                () -> client.prepareSearch("test").addAggregation(new TestAggregationBuilder("test")).get()
            );
            assertThat(ExceptionsHelper.unwrapCause(exc).getCause().getMessage(), containsString("<reduce_aggs>"));
        });
        final AtomicArray<Exception> exceptions = new AtomicArray<>(10);
        final CountDownLatch latch = new CountDownLatch(10);
        for (int i = 0; i < 10; i++) {
            int batchReduceSize = randomIntBetween(2, Math.max(numShards + 1, 3));
            SearchRequest request = client().prepareSearch("test")
                .addAggregation(new TestAggregationBuilder("test"))
                .setBatchedReduceSize(batchReduceSize)
                .request();
            final int index = i;
            client().search(request, new ActionListener<SearchResponse>() {
                @Override
                public void onResponse(SearchResponse response) {
                    latch.countDown();
                }

                @Override
                public void onFailure(Exception exc) {
                    exceptions.set(index, exc);
                    latch.countDown();
                }
            });
        }
        latch.await();
        assertThat(exceptions.asList().size(), equalTo(10));
        for (Exception exc : exceptions.asList()) {
            assertThat(ExceptionsHelper.unwrapCause(exc).getCause().getMessage(), containsString("<reduce_aggs>"));
        }
        assertBusy(() -> assertThat(requestBreakerUsed(), equalTo(0L)));
    } finally {
        Settings settings = Settings.builder().putNull("indices.breaker.request.limit").build();
        assertAcked(client().admin().cluster().prepareUpdateSettings().setTransientSettings(settings));
    }
}
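Both halves of this test use the same collection idiom: size an AtomicArray and a CountDownLatch to the number of in-flight searches, let each listener write its outcome into its own slot, and only read the array after the latch has released. Because asList() returns just the non-null slots, asserting that its size equals the request count checks that every slot was filled. A minimal sketch of the idiom with a plain thread pool standing in for the OpenSearch client (CollectSketch and runAll are illustrative names; only AtomicArray is the real class):

import org.opensearch.common.util.concurrent.AtomicArray;

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

final class CollectSketch {
    // Fan out n tasks, record each outcome at its own index, then block until all report.
    static AtomicArray<Boolean> runAll(int n) throws InterruptedException {
        AtomicArray<Boolean> outcomes = new AtomicArray<>(n);
        CountDownLatch latch = new CountDownLatch(n);
        ExecutorService pool = Executors.newFixedThreadPool(4);
        try {
            for (int i = 0; i < n; i++) {
                final int index = i;
                pool.execute(() -> {
                    try {
                        outcomes.set(index, true);   // stand-in for onResponse
                    } catch (Exception e) {
                        outcomes.set(index, false);  // stand-in for onFailure
                    } finally {
                        latch.countDown();
                    }
                });
            }
            latch.await();
            // asList() skips null slots, so size() == n means every task reported.
            assert outcomes.asList().size() == n;
            return outcomes;
        } finally {
            pool.shutdown();
        }
    }
}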
Class TransportSearchIT, method testCircuitBreakerFetchFail:
public void testCircuitBreakerFetchFail() throws Exception {
    int numShards = randomIntBetween(1, 10);
    int numDocs = numShards * 10;
    indexSomeDocs("boom", numShards, numDocs);
    final AtomicArray<Exception> exceptions = new AtomicArray<>(10);
    final CountDownLatch latch = new CountDownLatch(10);
    for (int i = 0; i < 10; i++) {
        int batchReduceSize = randomIntBetween(2, Math.max(numShards + 1, 3));
        SearchRequest request = client().prepareSearch("boom")
            .setBatchedReduceSize(batchReduceSize)
            .setAllowPartialSearchResults(false)
            .request();
        final int index = i;
        client().search(request, new ActionListener<SearchResponse>() {
            @Override
            public void onResponse(SearchResponse response) {
                latch.countDown();
            }

            @Override
            public void onFailure(Exception exc) {
                exceptions.set(index, exc);
                latch.countDown();
            }
        });
    }
    latch.await();
    assertThat(exceptions.asList().size(), equalTo(10));
    for (Exception exc : exceptions.asList()) {
        assertThat(ExceptionsHelper.unwrapCause(exc).getCause().getMessage(), containsString("boom"));
    }
    assertBusy(() -> assertThat(requestBreakerUsed(), equalTo(0L)));
}
Class TransportMultiSearchAction, method doExecute:
@Override
protected void doExecute(Task task, MultiSearchRequest request, ActionListener<MultiSearchResponse> listener) {
    final long relativeStartTime = relativeTimeProvider.getAsLong();
    ClusterState clusterState = clusterService.state();
    clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
    int maxConcurrentSearches = request.maxConcurrentSearchRequests();
    if (maxConcurrentSearches == MultiSearchRequest.MAX_CONCURRENT_SEARCH_REQUESTS_DEFAULT) {
        maxConcurrentSearches = defaultMaxConcurrentSearches(allocatedProcessors, clusterState);
    }
    Queue<SearchRequestSlot> searchRequestSlots = new ConcurrentLinkedQueue<>();
    for (int i = 0; i < request.requests().size(); i++) {
        SearchRequest searchRequest = request.requests().get(i);
        searchRequest.setParentTask(client.getLocalNodeId(), task.getId());
        searchRequestSlots.add(new SearchRequestSlot(searchRequest, i));
    }
    int numRequests = request.requests().size();
    final AtomicArray<MultiSearchResponse.Item> responses = new AtomicArray<>(numRequests);
    final AtomicInteger responseCounter = new AtomicInteger(numRequests);
    // Kick off at most maxConcurrentSearches searches; each completed search writes its result
    // into the AtomicArray at its slot's index and then pulls the next request from the queue
    // (see executeSearch).
    int numConcurrentSearches = Math.min(numRequests, maxConcurrentSearches);
    for (int i = 0; i < numConcurrentSearches; i++) {
        executeSearch(searchRequestSlots, responses, responseCounter, listener, relativeStartTime);
    }
}
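What bounds the fan-out here is the pairing of a concurrent queue of request slots with an AtomicArray sized to the total request count: only numConcurrentSearches searches are started, and (in executeSearch, not shown) each completed search writes into its slot and then drains the next request off the queue. A rough sketch of that queue-plus-slots shape (BoundedFanOutSketch and its names are hypothetical; the uppercase transform merely stands in for running a search):

import org.opensearch.common.util.concurrent.AtomicArray;

import java.util.List;
import java.util.Locale;
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

final class BoundedFanOutSketch {
    // At most maxConcurrent workers run at once; each keeps draining the shared queue and
    // writes every result into the slot matching the request's original position.
    static void run(List<String> payloads, int maxConcurrent, Consumer<AtomicArray<String>> onDone) {
        int n = payloads.size();
        AtomicArray<String> results = new AtomicArray<>(n);
        AtomicInteger remaining = new AtomicInteger(n);
        Queue<Integer> slots = new ConcurrentLinkedQueue<>();
        for (int i = 0; i < n; i++) {
            slots.add(i);
        }
        Runnable worker = () -> {
            Integer index;
            while ((index = slots.poll()) != null) {
                results.set(index, payloads.get(index).toUpperCase(Locale.ROOT)); // stand-in for a search
                if (remaining.decrementAndGet() == 0) {
                    onDone.accept(results); // whoever finishes the last slot reports back
                }
            }
        };
        for (int i = 0; i < Math.min(n, maxConcurrent); i++) {
            new Thread(worker).start();
        }
    }
}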
Class TransportTasksAction, method nodeOperation:
private void nodeOperation(NodeTaskRequest nodeTaskRequest, ActionListener<NodeTasksResponse> listener) {
    TasksRequest request = nodeTaskRequest.tasksRequest;
    List<OperationTask> tasks = new ArrayList<>();
    processTasks(request, tasks::add);
    if (tasks.isEmpty()) {
        listener.onResponse(new NodeTasksResponse(clusterService.localNode().getId(), emptyList(), emptyList()));
        return;
    }
    // One slot per matched task; each slot holds either a response (v1) or an exception (v2).
    AtomicArray<Tuple<TaskResponse, Exception>> responses = new AtomicArray<>(tasks.size());
    final AtomicInteger counter = new AtomicInteger(tasks.size());
    for (int i = 0; i < tasks.size(); i++) {
        final int taskIndex = i;
        ActionListener<TaskResponse> taskListener = new ActionListener<TaskResponse>() {
            @Override
            public void onResponse(TaskResponse response) {
                responses.setOnce(taskIndex, response == null ? null : new Tuple<>(response, null));
                respondIfFinished();
            }

            @Override
            public void onFailure(Exception e) {
                responses.setOnce(taskIndex, new Tuple<>(null, e));
                respondIfFinished();
            }

            private void respondIfFinished() {
                if (counter.decrementAndGet() != 0) {
                    return;
                }
                // The last listener to finish splits the collected tuples into results and failures.
                List<TaskResponse> results = new ArrayList<>();
                List<TaskOperationFailure> exceptions = new ArrayList<>();
                for (Tuple<TaskResponse, Exception> response : responses.asList()) {
                    if (response.v1() == null) {
                        assert response.v2() != null;
                        exceptions.add(
                            new TaskOperationFailure(clusterService.localNode().getId(), tasks.get(taskIndex).getId(), response.v2())
                        );
                    } else {
                        assert response.v2() == null;
                        results.add(response.v1());
                    }
                }
                listener.onResponse(new NodeTasksResponse(clusterService.localNode().getId(), results, exceptions));
            }
        };
        try {
            taskOperation(request, tasks.get(taskIndex), taskListener);
        } catch (Exception e) {
            taskListener.onFailure(e);
        }
    }
}
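The per-task listener above leans on two AtomicArray behaviors: setOnce refuses to overwrite a slot that has already been written, and asList() yields the non-null tuples for the final split into results and failures. A reduced sketch of that "respond when the counter reaches zero" shape (PerTaskCollectorSketch and listenerFor are hypothetical names; the real code uses ActionListener and Tuple rather than Consumer and plain strings):

import org.opensearch.common.util.concurrent.AtomicArray;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.Consumer;

final class PerTaskCollectorSketch {
    // One slot per sub-task: setOnce guards against a listener reporting twice for the same
    // slot, and the shared counter decides which callback finally assembles the response.
    static Consumer<String> listenerFor(
        int taskIndex,
        AtomicArray<String> responses,
        AtomicInteger counter,
        Consumer<List<String>> onAllDone
    ) {
        return result -> {
            responses.setOnce(taskIndex, result); // throws if this slot was already set
            if (counter.decrementAndGet() == 0) {
                onAllDone.accept(new ArrayList<>(responses.asList()));
            }
        };
    }
}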