Usage example of org.elasticsearch.common.util.concurrent.AtomicArray from the elastic/elasticsearch project.
From class TransportMultiGetAction, method doExecute:
/**
 * Entry point for a multi-get: resolves every item to a concrete index/shard,
 * groups the items per shard, and fans out one {@code MultiGetShardRequest} per
 * shard. Per-item resolution failures are recorded as item-level failures in
 * {@code responses} instead of failing the whole request. Results are stored by
 * original item position in an {@code AtomicArray}, so the final response order
 * matches the request order regardless of shard completion order.
 */
@Override
protected void doExecute(final MultiGetRequest request, final ActionListener<MultiGetResponse> listener) {
    ClusterState clusterState = clusterService.state();
    clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
    // one slot per request item; filled concurrently by the shard-level callbacks
    final AtomicArray<MultiGetItemResponse> responses = new AtomicArray<>(request.items.size());
    final Map<ShardId, MultiGetShardRequest> shardRequests = new HashMap<>();
    for (int i = 0; i < request.items.size(); i++) {
        MultiGetRequest.Item item = request.items.get(i);
        String concreteSingleIndex;
        try {
            concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, item).getName();
            item.routing(clusterState.metaData().resolveIndexRouting(item.parent(), item.routing(), concreteSingleIndex));
            if ((item.routing() == null) && (clusterState.getMetaData().routingRequired(concreteSingleIndex, item.type()))) {
                String message = "routing is required for [" + concreteSingleIndex + "]/[" + item.type() + "]/[" + item.id() + "]";
                responses.set(i, newItemFailure(concreteSingleIndex, item.type(), item.id(), new IllegalArgumentException(message)));
                continue;
            }
        } catch (Exception e) {
            // resolution failed for this item only; record the failure and keep processing the rest
            responses.set(i, newItemFailure(item.index(), item.type(), item.id(), e));
            continue;
        }
        ShardId shardId = clusterService.operationRouting().getShards(clusterState, concreteSingleIndex, item.id(), item.routing(), null).shardId();
        // get-or-create the per-shard batch and append this item with its original position
        MultiGetShardRequest shardRequest = shardRequests.computeIfAbsent(shardId,
            id -> new MultiGetShardRequest(request, id.getIndexName(), id.getId()));
        shardRequest.add(i, item);
    }
    if (shardRequests.isEmpty()) {
        // every item failed resolution: respond immediately.
        // FIX: return after firing the listener so execution cannot continue past
        // an already-completed listener (previously fell through to the loop below).
        listener.onResponse(new MultiGetResponse(responses.toArray(new MultiGetItemResponse[responses.length()])));
        return;
    }
    final AtomicInteger counter = new AtomicInteger(shardRequests.size());
    for (final MultiGetShardRequest shardRequest : shardRequests.values()) {
        shardAction.execute(shardRequest, new ActionListener<MultiGetShardResponse>() {
            @Override
            public void onResponse(MultiGetShardResponse response) {
                // scatter the shard-level results back into their original item slots
                for (int i = 0; i < response.locations.size(); i++) {
                    MultiGetItemResponse itemResponse = new MultiGetItemResponse(response.responses.get(i), response.failures.get(i));
                    responses.set(response.locations.get(i), itemResponse);
                }
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }

            @Override
            public void onFailure(Exception e) {
                // whole-shard failure: mark every item routed to this shard as failed
                for (int i = 0; i < shardRequest.locations.size(); i++) {
                    MultiGetRequest.Item item = shardRequest.items.get(i);
                    responses.set(shardRequest.locations.get(i), newItemFailure(shardRequest.index(), item.type(), item.id(), e));
                }
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }

            // assembles the final response once every shard request has completed
            private void finishHim() {
                listener.onResponse(new MultiGetResponse(responses.toArray(new MultiGetItemResponse[responses.length()])));
            }
        });
    }
}
Usage example of org.elasticsearch.common.util.concurrent.AtomicArray from the elastic/elasticsearch project.
From class TransportTasksAction, method nodeOperation:
/**
 * Executes the tasks action against every matching task on the local node and
 * replies once all per-task operations have completed. Each task's outcome is a
 * {@code Tuple<TaskResponse, Exception>} (exactly one side non-null) stored by
 * task index in an {@code AtomicArray}; the last callback to decrement the
 * counter assembles and sends the node-level response.
 */
private void nodeOperation(NodeTaskRequest nodeTaskRequest, ActionListener<NodeTasksResponse> listener) {
    TasksRequest request = nodeTaskRequest.tasksRequest;
    List<OperationTask> tasks = new ArrayList<>();
    processTasks(request, tasks::add);
    if (tasks.isEmpty()) {
        // nothing matched on this node: reply immediately with empty results
        listener.onResponse(new NodeTasksResponse(clusterService.localNode().getId(), emptyList(), emptyList()));
        return;
    }
    AtomicArray<Tuple<TaskResponse, Exception>> responses = new AtomicArray<>(tasks.size());
    final AtomicInteger counter = new AtomicInteger(tasks.size());
    for (int i = 0; i < tasks.size(); i++) {
        final int taskIndex = i;
        ActionListener<TaskResponse> taskListener = new ActionListener<TaskResponse>() {
            @Override
            public void onResponse(TaskResponse response) {
                responses.setOnce(taskIndex, response == null ? null : new Tuple<>(response, null));
                respondIfFinished();
            }

            @Override
            public void onFailure(Exception e) {
                responses.setOnce(taskIndex, new Tuple<>(null, e));
                respondIfFinished();
            }

            // runs on whichever listener completes last; gathers all outcomes
            private void respondIfFinished() {
                if (counter.decrementAndGet() != 0) {
                    return;
                }
                List<TaskResponse> results = new ArrayList<>();
                List<TaskOperationFailure> exceptions = new ArrayList<>();
                for (AtomicArray.Entry<Tuple<TaskResponse, Exception>> response : responses.asList()) {
                    if (response.value.v1() == null) {
                        assert response.value.v2() != null;
                        // FIX: attribute the failure to the task at this entry's own
                        // index (response.index), not the captured taskIndex of the
                        // listener that happened to finish last — the old code tagged
                        // every failure with a single, usually wrong, task id.
                        exceptions.add(new TaskOperationFailure(clusterService.localNode().getId(),
                            tasks.get(response.index).getId(), response.value.v2()));
                    } else {
                        assert response.value.v2() == null;
                        results.add(response.value.v1());
                    }
                }
                listener.onResponse(new NodeTasksResponse(clusterService.localNode().getId(), results, exceptions));
            }
        };
        try {
            taskOperation(request, tasks.get(taskIndex), taskListener);
        } catch (Exception e) {
            // synchronous failures are routed through the same listener path
            taskListener.onFailure(e);
        }
    }
}
Usage example of org.elasticsearch.common.util.concurrent.AtomicArray from the elastic/elasticsearch project.
From test class TransportBulkActionTookTests, method createAction:
/**
 * Builds a {@code TestTransportBulkAction} wired to a capturing transport.
 * When {@code controlled} is true the returned action stamps a fixed sentinel
 * (1_000_000) into {@code expected} before delegating; otherwise it spins for
 * at least one millisecond and records the真real elapsed time. The anonymous
 * subclasses differ only in how {@code expected} is populated in executeBulk.
 */
private TransportBulkAction createAction(boolean controlled, AtomicLong expected) {
    CapturingTransport transport = new CapturingTransport();
    TransportService service = new TransportService(clusterService.getSettings(), transport, threadPool,
        TransportService.NOOP_TRANSPORT_INTERCEPTOR, boundAddress -> clusterService.localNode(), null);
    service.start();
    service.acceptIncomingRequests();
    IndexNameExpressionResolver indexResolver = new Resolver(Settings.EMPTY);
    ActionFilters filters = new ActionFilters(new HashSet<>());
    TransportCreateIndexAction createIndex = new TransportCreateIndexAction(Settings.EMPTY, service,
        clusterService, threadPool, null, filters, indexResolver);
    if (!controlled) {
        // uncontrolled variant: measure a real (>= 1ms) elapsed duration
        return new TestTransportBulkAction(Settings.EMPTY, threadPool, service, clusterService, null,
            createIndex, filters, indexResolver, null, System::nanoTime) {
            @Override
            void executeBulk(Task task, BulkRequest bulkRequest, long startTimeNanos,
                             ActionListener<BulkResponse> listener, AtomicArray<BulkItemResponse> responses) {
                long elapsed = spinForAtLeastOneMillisecond();
                expected.set(elapsed);
                super.executeBulk(task, bulkRequest, startTimeNanos, listener, responses);
            }
        };
    }
    // controlled variant: the clock is expected::get, so plant a known sentinel
    return new TestTransportBulkAction(Settings.EMPTY, threadPool, service, clusterService, null,
        createIndex, filters, indexResolver, null, expected::get) {
        @Override
        void executeBulk(Task task, BulkRequest bulkRequest, long startTimeNanos,
                         ActionListener<BulkResponse> listener, AtomicArray<BulkItemResponse> responses) {
            expected.set(1000000);
            super.executeBulk(task, bulkRequest, startTimeNanos, listener, responses);
        }
    };
}
Usage example of org.elasticsearch.common.util.concurrent.AtomicArray from the elastic/elasticsearch project.
From test class CountedCollectorTests, method testCollect:
// Randomized test for CountedCollector: issues a random mix of countDown/onResult/onFailure
// calls (optionally from freshly spawned threads) and verifies the collector releases the
// latch exactly once all expected results are accounted for, and that each slot of the
// backing AtomicArray matches the operation that targeted it.
// NOTE: statement order is load-bearing — the sequence of random draws must stay stable
// for seed-reproducible test runs, so the code must not be restructured casually.
public void testCollect() throws InterruptedException {
AtomicArray<SearchPhaseResult> results = new AtomicArray<>(randomIntBetween(1, 100));
// records which of the three operations was issued for slot i, in order
List<Integer> state = new ArrayList<>();
int numResultsExpected = randomIntBetween(1, results.length());
MockSearchPhaseContext context = new MockSearchPhaseContext(results.length());
CountDownLatch latch = new CountDownLatch(1);
boolean maybeFork = randomBoolean();
// executor that randomly runs inline or on a new thread, to exercise concurrent delivery
Executor executor = (runnable) -> {
if (randomBoolean() && maybeFork) {
new Thread(runnable).start();
} else {
runnable.run();
}
};
CountedCollector<SearchPhaseResult> collector = new CountedCollector<>(results::set, numResultsExpected, latch::countDown, context);
for (int i = 0; i < numResultsExpected; i++) {
int shardID = i;
switch(randomIntBetween(0, 2)) {
case 0:
// plain count-down: no result stored for this slot
state.add(0);
executor.execute(() -> collector.countDown());
break;
case 1:
// successful result for this shard
state.add(1);
executor.execute(() -> collector.onResult(shardID, new DfsSearchResult(shardID, null), new SearchShardTarget("foo", new Index("bar", "baz"), shardID)));
break;
case 2:
// shard failure: recorded on the phase context, not in the results array
state.add(2);
executor.execute(() -> collector.onFailure(shardID, new SearchShardTarget("foo", new Index("bar", "baz"), shardID), new RuntimeException("boom")));
break;
default:
fail("unknown state");
}
}
// the collector must release the latch once all numResultsExpected operations landed
latch.await();
assertEquals(numResultsExpected, state.size());
// verify each slot against the operation that was issued for it
for (int i = 0; i < numResultsExpected; i++) {
switch(state.get(i)) {
case 0:
assertNull(results.get(i));
break;
case 1:
assertNotNull(results.get(i));
assertEquals(i, results.get(i).id());
break;
case 2:
final int shardId = i;
assertEquals(1, context.failures.stream().filter(f -> f.shardId() == shardId).count());
break;
default:
fail("unknown state");
}
}
// slots beyond the expected count were never touched
for (int i = numResultsExpected; i < results.length(); i++) {
assertNull("index: " + i, results.get(i));
}
}
Usage example of org.elasticsearch.common.util.concurrent.AtomicArray from the elastic/elasticsearch project.
From test class DfsQueryPhaseTests, method testDfsWith1ShardFailed:
/**
 * Runs the dfs_query phase against two stubbed shards where shard request id 1
 * answers successfully and id 2 fails with a fake I/O error. Verifies that the
 * phase still completes without a phase-level failure, the successful shard's
 * query result is propagated, the failed slot stays null, the failure is
 * recorded, and the failed shard's search context (id 2) is released.
 */
public void testDfsWith1ShardFailed() throws IOException {
    AtomicArray<DfsSearchResult> results = new AtomicArray<>(2);
    AtomicReference<AtomicArray<QuerySearchResultProvider>> responseRef = new AtomicReference<>();
    results.set(0, new DfsSearchResult(1, new SearchShardTarget("node1", new Index("test", "na"), 0)));
    results.set(1, new DfsSearchResult(2, new SearchShardTarget("node2", new Index("test", "na"), 0)));
    results.get(0).termsStatistics(new Term[0], new TermStatistics[0]);
    results.get(1).termsStatistics(new Term[0], new TermStatistics[0]);
    SearchPhaseController controller = new SearchPhaseController(Settings.EMPTY, BigArrays.NON_RECYCLING_INSTANCE, null);
    // stub transport: request 1 succeeds with one hit, request 2 fails
    SearchTransportService searchTransportService = new SearchTransportService(Settings.builder().put("search.remote.connect", false).build(), null, null) {
        @Override
        public void sendExecuteQuery(Transport.Connection connection, QuerySearchRequest request, SearchTask task, ActionListener<QuerySearchResult> listener) {
            if (request.id() == 1) {
                QuerySearchResult queryResult = new QuerySearchResult(123, new SearchShardTarget("node1", new Index("test", "na"), 0));
                queryResult.topDocs(new TopDocs(1, new ScoreDoc[] { new ScoreDoc(42, 1.0F) }, 2.0F), new DocValueFormat[0]);
                // the size of the result set
                queryResult.size(2);
                listener.onResponse(queryResult);
            } else if (request.id() == 2) {
                listener.onFailure(new MockDirectoryWrapper.FakeIOException());
            } else {
                fail("no such request ID: " + request.id());
            }
        }
    };
    MockSearchPhaseContext mockSearchPhaseContext = new MockSearchPhaseContext(2);
    mockSearchPhaseContext.searchTransport = searchTransportService;
    // the next-phase factory captures the query results for inspection below
    DfsQueryPhase phase = new DfsQueryPhase(results, controller, (response) -> new SearchPhase("test") {
        @Override
        public void run() throws IOException {
            responseRef.set(response.results);
        }
    }, mockSearchPhaseContext);
    assertEquals("dfs_query", phase.getName());
    phase.run();
    mockSearchPhaseContext.assertNoFailure();
    assertNotNull(responseRef.get());
    // shard 0 succeeded: query result present, no fetch result yet
    assertNotNull(responseRef.get().get(0));
    assertNull(responseRef.get().get(0).fetchResult());
    assertEquals(1, responseRef.get().get(0).queryResult().topDocs().totalHits);
    assertEquals(42, responseRef.get().get(0).queryResult().topDocs().scoreDocs[0].doc);
    // shard 1 failed: its slot must stay empty and the failure must be recorded
    assertNull(responseRef.get().get(1));
    assertEquals(1, mockSearchPhaseContext.numSuccess.get());
    assertEquals(1, mockSearchPhaseContext.failures.size());
    assertTrue(mockSearchPhaseContext.failures.get(0).getCause() instanceof MockDirectoryWrapper.FakeIOException);
    // the failed shard's search context (id 2) must have been released
    assertEquals(1, mockSearchPhaseContext.releasedSearchContexts.size());
    assertTrue(mockSearchPhaseContext.releasedSearchContexts.contains(2L));
    // (removed a duplicated assertNull(responseRef.get().get(1)) that repeated the check above)
}
Aggregations