Use of org.elasticsearch.action.ActionListener in project elasticsearch by elastic.
Class TransportRethrottleActionTests, method testRethrottleWithAllSucceeded.
public void testRethrottleWithAllSucceeded() {
    List<BulkByScrollTask.StatusOrException> sliceStatuses = new ArrayList<>(slices);
    for (int i = 0; i < slices; i++) {
        @SuppressWarnings("unchecked")
        ActionListener<BulkByScrollResponse> listener = i < slices - 1 ? neverCalled() : mock(ActionListener.class);
        BulkByScrollTask.Status status = believeableCompletedStatus(i);
        task.onSliceResponse(listener, i, new BulkByScrollResponse(timeValueMillis(10), status, emptyList(), emptyList(), false));
        if (i == slices - 1) {
            // The whole thing succeeded so we should have got the success
            captureResponse(BulkByScrollResponse.class, listener).getStatus();
        }
        sliceStatuses.add(new BulkByScrollTask.StatusOrException(status));
    }
    rethrottleTestCase(0,
            listener -> {}, // There are no async tasks to simulate because the listener is called for us.
            expectSuccessfulRethrottleWithStatuses(sliceStatuses));
}
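The test leans on two helpers, neverCalled() and captureResponse(...), which are defined elsewhere in the test class. A minimal sketch of one plausible shape for such helpers, assuming Mockito is used for the mocked listener (the project's actual implementations may differ):

// Assumes: import org.mockito.ArgumentCaptor; import static org.mockito.Mockito.verify;
static <T> ActionListener<T> neverCalled() {
    return new ActionListener<T>() {
        @Override
        public void onResponse(T response) {
            // Slices that must not complete yet are wired to this listener.
            throw new AssertionError("Listener should never have been called but got " + response);
        }

        @Override
        public void onFailure(Exception e) {
            throw new AssertionError("Listener should never have been called", e);
        }
    };
}

static <T> T captureResponse(Class<T> responseClass, ActionListener<T> listener) {
    // Only works for listeners created with Mockito's mock(ActionListener.class).
    ArgumentCaptor<T> captor = ArgumentCaptor.forClass(responseClass);
    verify(listener).onResponse(captor.capture());
    return captor.getValue();
}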
Use of org.elasticsearch.action.ActionListener in project elasticsearch by elastic.
Class IndexShardOperationsLockTests, method testThreadContextPreservedIfBlock.
/**
 * Tests that the ThreadContext is restored when an operation is executed after it has been delayed due to a block
 */
public void testThreadContextPreservedIfBlock() throws ExecutionException, InterruptedException, TimeoutException {
    final ThreadContext context = threadPool.getThreadContext();
    final Function<ActionListener<Releasable>, Boolean> contextChecker = (listener) -> {
        if ("bar".equals(context.getHeader("foo")) == false) {
            listener.onFailure(new IllegalStateException("context did not have value [bar] for header [foo]. Actual value ["
                    + context.getHeader("foo") + "]"));
        } else if ("baz".equals(context.getTransient("bar")) == false) {
            listener.onFailure(new IllegalStateException("context did not have value [baz] for transient [bar]. Actual value ["
                    + context.getTransient("bar") + "]"));
        } else {
            return true;
        }
        return false;
    };
    PlainActionFuture<Releasable> future = new PlainActionFuture<Releasable>() {
        @Override
        public void onResponse(Releasable releasable) {
            if (contextChecker.apply(this)) {
                super.onResponse(releasable);
            }
        }
    };
    PlainActionFuture<Releasable> future2 = new PlainActionFuture<Releasable>() {
        @Override
        public void onResponse(Releasable releasable) {
            if (contextChecker.apply(this)) {
                super.onResponse(releasable);
            }
        }
    };
    try (Releasable releasable = blockAndWait()) {
        // when the releasable is closed
        try (ThreadContext.StoredContext ignore = context.newStoredContext(false)) {
            context.putHeader("foo", "bar");
            context.putTransient("bar", "baz");
            // test both with and without an executor name
            block.acquire(future, ThreadPool.Names.GENERIC, true);
            block.acquire(future2, null, true);
        }
        assertFalse(future.isDone());
    }
    future.get(1, TimeUnit.HOURS).close();
    future2.get(1, TimeUnit.HOURS).close();
}
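Both anonymous PlainActionFuture subclasses above repeat the same context check. A hypothetical refactoring sketch that folds the check into a reusable delegating listener (withPrecondition is not part of the project; it only illustrates the common wrap-and-delegate pattern for ActionListener):

// Hypothetical helper: forwards onResponse only if the supplied check passes.
// The check is expected to report failures on the delegate itself, as contextChecker does above.
static <T> ActionListener<T> withPrecondition(Function<ActionListener<T>, Boolean> check, ActionListener<T> delegate) {
    return new ActionListener<T>() {
        @Override
        public void onResponse(T response) {
            if (check.apply(delegate)) {
                delegate.onResponse(response);
            }
        }

        @Override
        public void onFailure(Exception e) {
            delegate.onFailure(e);
        }
    };
}

With such a helper the test could pass withPrecondition(contextChecker, future) to block.acquire(...) and keep the plain PlainActionFuture instances for the final get() calls.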
Use of org.elasticsearch.action.ActionListener in project elasticsearch by elastic.
Class TransportSearchAction, method executeSearch.
private void executeSearch(SearchTask task, long startTimeInMillis, SearchRequest searchRequest, String[] localIndices,
                           List<ShardIterator> remoteShardIterators, Function<String, Transport.Connection> remoteConnections,
                           ClusterState clusterState, Map<String, AliasFilter> remoteAliasMap,
                           ActionListener<SearchResponse> listener) {
    clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
    // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name
    // date math expressions and $now in scripts. This way all apis will deal with now in the same way instead
    // of just for the _search api
    final Index[] indices;
    if (localIndices.length == 0 && remoteShardIterators.size() > 0) {
        // don't search on _all if only remote indices were specified
        indices = Index.EMPTY_ARRAY;
    } else {
        indices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest.indicesOptions(), startTimeInMillis, localIndices);
    }
    Map<String, AliasFilter> aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, indices, remoteAliasMap);
    Map<String, Set<String>> routingMap =
            indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), searchRequest.indices());
    String[] concreteIndices = new String[indices.length];
    for (int i = 0; i < indices.length; i++) {
        concreteIndices[i] = indices[i].getName();
    }
    GroupShardsIterator localShardsIterator =
            clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, searchRequest.preference());
    GroupShardsIterator shardIterators = mergeShardsIterators(localShardsIterator, remoteShardIterators);
    failIfOverShardCountLimit(clusterService, shardIterators.size());
    Map<String, Float> concreteIndexBoosts = resolveIndexBoosts(searchRequest, clusterState);
    // optimize search type for cases where there is only one shard group to search on
    if (shardIterators.size() == 1) {
        // if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard
        searchRequest.searchType(QUERY_THEN_FETCH);
    }
    if (searchRequest.isSuggestOnly()) {
        // disable request cache if we have only suggest
        searchRequest.requestCache(false);
        switch (searchRequest.searchType()) {
            case DFS_QUERY_THEN_FETCH:
                // convert to Q_T_F if we have only suggest
                searchRequest.searchType(QUERY_THEN_FETCH);
                break;
        }
    }
    final DiscoveryNodes nodes = clusterState.nodes();
    Function<String, Transport.Connection> connectionLookup = (nodeId) -> {
        final DiscoveryNode discoveryNode = nodes.get(nodeId);
        final Transport.Connection connection;
        if (discoveryNode != null) {
            connection = searchTransportService.getConnection(discoveryNode);
        } else {
            connection = remoteConnections.apply(nodeId);
        }
        if (connection == null) {
            throw new IllegalStateException("no node found for id: " + nodeId);
        }
        return connection;
    };
    searchAsyncAction(task, searchRequest, shardIterators, startTimeInMillis, connectionLookup, clusterState.version(),
            Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, listener).start();
}
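For context, a minimal sketch of the ActionListener<SearchResponse> a caller might hand into this code path; the logger and the handling shown are illustrative only, since in practice the listener usually originates from the REST layer or a transport channel:

ActionListener<SearchResponse> listener = new ActionListener<SearchResponse>() {
    @Override
    public void onResponse(SearchResponse searchResponse) {
        // consume the response, e.g. report timing and hit count
        logger.info("search took [{}] and matched [{}] hits",
                searchResponse.getTook(), searchResponse.getHits().getTotalHits());
    }

    @Override
    public void onFailure(Exception e) {
        logger.error("search failed", e);
    }
};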
Use of org.elasticsearch.action.ActionListener in project elasticsearch by elastic.
Class TransportSearchAction, method doExecute.
@Override
protected void doExecute(Task task, SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
    // pure paranoia if time goes backwards we are at least positive
    final long startTimeInMillis = Math.max(0, System.currentTimeMillis());
    final String[] localIndices;
    final Map<String, List<String>> remoteClusterIndices;
    final ClusterState clusterState = clusterService.state();
    if (remoteClusterService.isCrossClusterSearchEnabled()) {
        remoteClusterIndices = remoteClusterService.groupClusterIndices(searchRequest.indices(), // empty string is not allowed
                idx -> indexNameExpressionResolver.hasIndexOrAlias(idx, clusterState));
        List<String> remove = remoteClusterIndices.remove(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY);
        localIndices = remove == null ? Strings.EMPTY_ARRAY : remove.toArray(new String[remove.size()]);
    } else {
        remoteClusterIndices = Collections.emptyMap();
        localIndices = searchRequest.indices();
    }
    if (remoteClusterIndices.isEmpty()) {
        executeSearch((SearchTask) task, startTimeInMillis, searchRequest, localIndices, Collections.emptyList(),
                (nodeId) -> null, clusterState, Collections.emptyMap(), listener);
    } else {
        remoteClusterService.collectSearchShards(searchRequest, remoteClusterIndices, ActionListener.wrap((searchShardsResponses) -> {
            List<ShardIterator> remoteShardIterators = new ArrayList<>();
            Map<String, AliasFilter> remoteAliasFilters = new HashMap<>();
            Function<String, Transport.Connection> connectionFunction =
                    remoteClusterService.processRemoteShards(searchShardsResponses, remoteShardIterators, remoteAliasFilters);
            executeSearch((SearchTask) task, startTimeInMillis, searchRequest, localIndices, remoteShardIterators,
                    connectionFunction, clusterState, remoteAliasFilters, listener);
        }, listener::onFailure));
    }
}
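ActionListener.wrap(...) above builds a listener from a response lambda and a failure handler; any exception thrown while handling the response is routed to the failure handler as well. A simplified hand-written equivalent, as a sketch only (the real factory method additionally accepts a consumer that may throw a checked exception):

// Assumes: import java.util.function.Consumer;
static <T> ActionListener<T> wrapSketch(Consumer<T> onResponseHandler, Consumer<Exception> onFailureHandler) {
    return new ActionListener<T>() {
        @Override
        public void onResponse(T response) {
            try {
                onResponseHandler.accept(response);
            } catch (Exception e) {
                // exceptions thrown while handling the response go to the failure handler
                onFailureHandler.accept(e);
            }
        }

        @Override
        public void onFailure(Exception e) {
            onFailureHandler.accept(e);
        }
    };
}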
Use of org.elasticsearch.action.ActionListener in project elasticsearch by elastic.
Class ClusterRerouteTests, method testClusterStateUpdateTask.
public void testClusterStateUpdateTask() {
    AllocationService allocationService = new AllocationService(Settings.builder().build(),
            new AllocationDeciders(Settings.EMPTY, Collections.singleton(new MaxRetryAllocationDecider(Settings.EMPTY))),
            new TestGatewayAllocator(), new BalancedShardsAllocator(Settings.EMPTY), EmptyClusterInfoService.INSTANCE);
    ClusterState clusterState = createInitialClusterState(allocationService);
    ClusterRerouteRequest req = new ClusterRerouteRequest();
    req.dryRun(true);
    AtomicReference<ClusterRerouteResponse> responseRef = new AtomicReference<>();
    ActionListener<ClusterRerouteResponse> responseActionListener = new ActionListener<ClusterRerouteResponse>() {
        @Override
        public void onResponse(ClusterRerouteResponse clusterRerouteResponse) {
            responseRef.set(clusterRerouteResponse);
        }

        @Override
        public void onFailure(Exception e) {
        }
    };
    TransportClusterRerouteAction.ClusterRerouteResponseAckedClusterStateUpdateTask task =
            new TransportClusterRerouteAction.ClusterRerouteResponseAckedClusterStateUpdateTask(logger, allocationService, req,
                    responseActionListener);
    ClusterState execute = task.execute(clusterState);
    assertSame(execute, clusterState); // dry-run
    task.onAllNodesAcked(null);
    assertNotSame(responseRef.get().getState(), execute);

    // now we allocate
    req.dryRun(false);
    final int retries = MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY.get(Settings.EMPTY);
    // now fail it N-1 times
    for (int i = 0; i < retries; i++) {
        ClusterState newState = task.execute(clusterState);
        assertNotSame(newState, clusterState); // dry-run=false
        clusterState = newState;
        RoutingTable routingTable = clusterState.routingTable();
        assertEquals(routingTable.index("idx").shards().size(), 1);
        assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), INITIALIZING);
        assertEquals(routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), i);
        List<FailedShard> failedShards = Collections.singletonList(
                new FailedShard(routingTable.index("idx").shard(0).shards().get(0), "boom" + i, new UnsupportedOperationException()));
        newState = allocationService.applyFailedShards(clusterState, failedShards);
        assertThat(newState, not(equalTo(clusterState)));
        clusterState = newState;
        routingTable = clusterState.routingTable();
        assertEquals(routingTable.index("idx").shards().size(), 1);
        if (i == retries - 1) {
            assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), UNASSIGNED);
        } else {
            assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), INITIALIZING);
        }
        assertEquals(routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), i + 1);
    }

    // without retry_failed we won't allocate that shard
    ClusterState newState = task.execute(clusterState);
    assertNotSame(newState, clusterState); // dry-run=false
    task.onAllNodesAcked(null);
    assertSame(responseRef.get().getState(), newState);
    RoutingTable routingTable = clusterState.routingTable();
    assertEquals(routingTable.index("idx").shards().size(), 1);
    assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), UNASSIGNED);
    assertEquals(routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), retries);

    // now we manually retry and get the shard back into initializing
    req.setRetryFailed(true);
    newState = task.execute(clusterState);
    assertNotSame(newState, clusterState); // dry-run=false
    clusterState = newState;
    routingTable = clusterState.routingTable();
    assertEquals(routingTable.index("idx").shards().size(), 1);
    assertEquals(routingTable.index("idx").shard(0).shards().get(0).state(), INITIALIZING);
    assertEquals(routingTable.index("idx").shard(0).shards().get(0).unassignedInfo().getNumFailedAllocations(), retries);
}
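The listener above deliberately ignores onFailure because the test only inspects the captured response. A slightly more defensive variant for tests, recording the failure as well so an assertion can surface the real cause (illustrative; not part of the project):

AtomicReference<ClusterRerouteResponse> responseRef = new AtomicReference<>();
AtomicReference<Exception> failureRef = new AtomicReference<>();
ActionListener<ClusterRerouteResponse> responseActionListener = new ActionListener<ClusterRerouteResponse>() {
    @Override
    public void onResponse(ClusterRerouteResponse clusterRerouteResponse) {
        responseRef.set(clusterRerouteResponse);
    }

    @Override
    public void onFailure(Exception e) {
        // record instead of swallowing, so the test can fail with the underlying cause
        failureRef.set(e);
    }
};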