Use of org.elasticsearch.tasks.Task in project elasticsearch by elastic.
The class TransportSearchAction, method executeSearch.
private void executeSearch(SearchTask task, long startTimeInMillis, SearchRequest searchRequest, String[] localIndices,
                           List<ShardIterator> remoteShardIterators, Function<String, Transport.Connection> remoteConnections,
                           ClusterState clusterState, Map<String, AliasFilter> remoteAliasMap,
                           ActionListener<SearchResponse> listener) {
    clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
    // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name
    // date math expressions and $now in scripts. This way all apis will deal with now in the same way instead
    // of just for the _search api
    final Index[] indices;
    if (localIndices.length == 0 && remoteShardIterators.size() > 0) {
        // don't search on _all if only remote indices were specified
        indices = Index.EMPTY_ARRAY;
    } else {
        indices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest.indicesOptions(),
            startTimeInMillis, localIndices);
    }
    Map<String, AliasFilter> aliasFilter = buildPerIndexAliasFilter(searchRequest, clusterState, indices, remoteAliasMap);
    Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState,
        searchRequest.routing(), searchRequest.indices());
    String[] concreteIndices = new String[indices.length];
    for (int i = 0; i < indices.length; i++) {
        concreteIndices[i] = indices[i].getName();
    }
    GroupShardsIterator localShardsIterator = clusterService.operationRouting().searchShards(clusterState,
        concreteIndices, routingMap, searchRequest.preference());
    GroupShardsIterator shardIterators = mergeShardsIterators(localShardsIterator, remoteShardIterators);
    failIfOverShardCountLimit(clusterService, shardIterators.size());
    Map<String, Float> concreteIndexBoosts = resolveIndexBoosts(searchRequest, clusterState);
    // optimize search type for cases where there is only one shard group to search on
    if (shardIterators.size() == 1) {
        // if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard
        searchRequest.searchType(QUERY_THEN_FETCH);
    }
    if (searchRequest.isSuggestOnly()) {
        // disable request cache if we have only suggest
        searchRequest.requestCache(false);
        switch (searchRequest.searchType()) {
            case DFS_QUERY_THEN_FETCH:
                // convert to Q_T_F if we have only suggest
                searchRequest.searchType(QUERY_THEN_FETCH);
                break;
        }
    }
    final DiscoveryNodes nodes = clusterState.nodes();
    Function<String, Transport.Connection> connectionLookup = (nodeId) -> {
        final DiscoveryNode discoveryNode = nodes.get(nodeId);
        final Transport.Connection connection;
        if (discoveryNode != null) {
            connection = searchTransportService.getConnection(discoveryNode);
        } else {
            connection = remoteConnections.apply(nodeId);
        }
        if (connection == null) {
            throw new IllegalStateException("no node found for id: " + nodeId);
        }
        return connection;
    };
    searchAsyncAction(task, searchRequest, shardIterators, startTimeInMillis, connectionLookup, clusterState.version(),
        Collections.unmodifiableMap(aliasFilter), concreteIndexBoosts, listener).start();
}
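The connectionLookup lambda above resolves a node id against the local cluster state first and only then falls back to the remote-cluster resolver, failing if neither knows the node. A minimal, self-contained sketch of that fallback pattern follows; it stubs Transport.Connection as a plain String, and the class and method names are illustrative only, not part of Elasticsearch.

    import java.util.Map;
    import java.util.function.Function;

    // Sketch of the local-then-remote lookup used by executeSearch; "Connection" is stubbed as String.
    final class ConnectionLookupSketch {
        static Function<String, String> lookup(Map<String, String> localNodes,
                                               Function<String, String> remoteConnections) {
            return nodeId -> {
                String connection = localNodes.get(nodeId);        // try the local cluster state first
                if (connection == null) {
                    connection = remoteConnections.apply(nodeId);  // fall back to remote clusters
                }
                if (connection == null) {
                    throw new IllegalStateException("no node found for id: " + nodeId);
                }
                return connection;
            };
        }
    }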
Use of org.elasticsearch.tasks.Task in project elasticsearch by elastic.
The class TransportSearchAction, method doExecute.
@Override
protected void doExecute(Task task, SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
    // pure paranoia if time goes backwards we are at least positive
    final long startTimeInMillis = Math.max(0, System.currentTimeMillis());
    final String[] localIndices;
    final Map<String, List<String>> remoteClusterIndices;
    final ClusterState clusterState = clusterService.state();
    if (remoteClusterService.isCrossClusterSearchEnabled()) {
        remoteClusterIndices = remoteClusterService.groupClusterIndices(searchRequest.indices(), // empty string is not allowed
            idx -> indexNameExpressionResolver.hasIndexOrAlias(idx, clusterState));
        List<String> remove = remoteClusterIndices.remove(RemoteClusterService.LOCAL_CLUSTER_GROUP_KEY);
        localIndices = remove == null ? Strings.EMPTY_ARRAY : remove.toArray(new String[remove.size()]);
    } else {
        remoteClusterIndices = Collections.emptyMap();
        localIndices = searchRequest.indices();
    }
    if (remoteClusterIndices.isEmpty()) {
        executeSearch((SearchTask) task, startTimeInMillis, searchRequest, localIndices, Collections.emptyList(),
            (nodeId) -> null, clusterState, Collections.emptyMap(), listener);
    } else {
        remoteClusterService.collectSearchShards(searchRequest, remoteClusterIndices, ActionListener.wrap((searchShardsResponses) -> {
            List<ShardIterator> remoteShardIterators = new ArrayList<>();
            Map<String, AliasFilter> remoteAliasFilters = new HashMap<>();
            Function<String, Transport.Connection> connectionFunction =
                remoteClusterService.processRemoteShards(searchShardsResponses, remoteShardIterators, remoteAliasFilters);
            executeSearch((SearchTask) task, startTimeInMillis, searchRequest, localIndices, remoteShardIterators,
                connectionFunction, clusterState, remoteAliasFilters, listener);
        }, listener::onFailure));
    }
}
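doExecute splits the requested index expressions into a local group and per-remote-cluster groups before deciding whether a plain local search is enough. The sketch below is a hypothetical, simplified illustration of that grouping idea (splitting on a "cluster:index" separator and using "LOCAL" as a stand-in for LOCAL_CLUSTER_GROUP_KEY); it is not the actual RemoteClusterService implementation.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    // Illustrative only: bucket index expressions by cluster alias, unqualified ones go under "LOCAL".
    final class GroupIndicesSketch {
        static Map<String, List<String>> group(String... indexExpressions) {
            Map<String, List<String>> groups = new HashMap<>();
            for (String expression : indexExpressions) {
                int separator = expression.indexOf(':');
                String cluster = separator < 0 ? "LOCAL" : expression.substring(0, separator);
                String index = separator < 0 ? expression : expression.substring(separator + 1);
                groups.computeIfAbsent(cluster, k -> new ArrayList<>()).add(index);
            }
            return groups;
        }
    }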
Use of org.elasticsearch.tasks.Task in project elasticsearch by elastic.
The class CancellableTasksTests, method startCancellableTestNodesAction.
private Task startCancellableTestNodesAction(boolean waitForActionToStart, Collection<TestNode> blockOnNodes,
                                             CancellableNodesRequest request,
                                             ActionListener<NodesResponse> listener) throws InterruptedException {
    CountDownLatch actionLatch = waitForActionToStart ? new CountDownLatch(nodesCount) : null;
    CancellableTestNodesAction[] actions = new CancellableTestNodesAction[nodesCount];
    for (int i = 0; i < testNodes.length; i++) {
        boolean shouldBlock = blockOnNodes.contains(testNodes[i]);
        logger.info("The action in the node [{}] should block: [{}]", testNodes[i].getNodeId(), shouldBlock);
        actions[i] = new CancellableTestNodesAction(CLUSTER_SETTINGS, "testAction", threadPool,
            testNodes[i].clusterService, testNodes[i].transportService, shouldBlock, actionLatch);
    }
    Task task = actions[0].execute(request, listener);
    if (waitForActionToStart) {
        logger.info("Awaiting for all actions to start");
        actionLatch.await();
        logger.info("Done waiting for all actions to start");
    }
    return task;
}
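The helper above sizes a CountDownLatch to the node count so the caller can block until every cancellable action has actually started before it proceeds. A standalone sketch of that start-barrier pattern, with illustrative names and plain threads, assuming nothing beyond the JDK:

    import java.util.concurrent.CountDownLatch;

    // Sketch of the start barrier: each worker counts down once it begins, the caller awaits all of them.
    final class StartBarrierSketch {
        public static void main(String[] args) throws InterruptedException {
            int workers = 3;
            CountDownLatch started = new CountDownLatch(workers);
            for (int i = 0; i < workers; i++) {
                int id = i;
                new Thread(() -> {
                    started.countDown();            // signal that this worker has started
                    System.out.println("worker " + id + " running");
                }).start();
            }
            started.await();                        // returns only after all workers have signalled
            System.out.println("all workers started");
        }
    }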
Use of org.elasticsearch.tasks.Task in project elasticsearch by elastic.
The class CancellableTasksTests, method testTaskCancellationOnCoordinatingNodeLeavingTheCluster.
public void testTaskCancellationOnCoordinatingNodeLeavingTheCluster() throws Exception {
    setupTestNodes(Settings.EMPTY);
    connectNodes(testNodes);
    CountDownLatch responseLatch = new CountDownLatch(1);
    boolean simulateBanBeforeLeaving = randomBoolean();
    final AtomicReference<NodesResponse> responseReference = new AtomicReference<>();
    final AtomicReference<Throwable> throwableReference = new AtomicReference<>();
    int blockedNodesCount = randomIntBetween(0, nodesCount - 1);
    // We shouldn't block on the first node since it's leaving the cluster anyway so it doesn't matter
    List<TestNode> blockOnNodes = randomSubsetOf(blockedNodesCount, Arrays.copyOfRange(testNodes, 1, nodesCount));
    Task mainTask = startCancellableTestNodesAction(true, blockOnNodes, new CancellableNodesRequest("Test Request"),
        new ActionListener<NodesResponse>() {
            @Override
            public void onResponse(NodesResponse listTasksResponse) {
                responseReference.set(listTasksResponse);
                responseLatch.countDown();
            }

            @Override
            public void onFailure(Exception e) {
                throwableReference.set(e);
                responseLatch.countDown();
            }
        });
    String mainNode = testNodes[0].getNodeId();
    // Make sure that tasks are running
    ListTasksResponse listTasksResponse = testNodes[randomIntBetween(0, testNodes.length - 1)].transportListTasksAction
        .execute(new ListTasksRequest().setParentTaskId(new TaskId(mainNode, mainTask.getId()))).get();
    assertThat(listTasksResponse.getTasks().size(), greaterThanOrEqualTo(blockOnNodes.size()));
    // Simulate the coordinating node leaving the cluster
    DiscoveryNode[] discoveryNodes = new DiscoveryNode[testNodes.length - 1];
    for (int i = 1; i < testNodes.length; i++) {
        discoveryNodes[i - 1] = testNodes[i].discoveryNode();
    }
    DiscoveryNode master = discoveryNodes[0];
    for (int i = 1; i < testNodes.length; i++) {
        // Notify only nodes that should remain in the cluster
        setState(testNodes[i].clusterService, ClusterStateCreationUtils.state(testNodes[i].discoveryNode(), master, discoveryNodes));
    }
    if (simulateBanBeforeLeaving) {
        logger.info("--> Simulate issuing cancel request on the node that is about to leave the cluster");
        // Simulate issuing cancel request on the node that is about to leave the cluster
        CancelTasksRequest request = new CancelTasksRequest();
        request.setReason("Testing Cancellation");
        request.setTaskId(new TaskId(testNodes[0].getNodeId(), mainTask.getId()));
        // And send the cancellation request to a random node
        CancelTasksResponse response = testNodes[0].transportCancelTasksAction.execute(request).get();
        logger.info("--> Done simulating issuing cancel request on the node that is about to leave the cluster");
        // This node still thinks that's part of the cluster, so cancelling should look successful
        if (response.getTasks().size() == 0) {
            logger.error("!!!!");
        }
        assertThat(response.getTasks().size(), lessThanOrEqualTo(1));
        assertThat(response.getTaskFailures().size(), lessThanOrEqualTo(1));
        assertThat(response.getTaskFailures().size() + response.getTasks().size(), lessThanOrEqualTo(1));
    }
    for (int i = 1; i < testNodes.length; i++) {
        assertEquals("No bans on the node " + i, 0, testNodes[i].transportService.getTaskManager().getBanCount());
    }
    // Close the first node
    testNodes[0].close();
    assertBusy(() -> {
        // Make sure that tasks are no longer running
        try {
            ListTasksResponse listTasksResponse1 = testNodes[randomIntBetween(1, testNodes.length - 1)].transportListTasksAction
                .execute(new ListTasksRequest().setTaskId(new TaskId(mainNode, mainTask.getId()))).get();
            assertEquals(0, listTasksResponse1.getTasks().size());
        } catch (InterruptedException ex) {
            Thread.currentThread().interrupt();
        } catch (ExecutionException ex2) {
            fail("shouldn't be here");
        }
    });
    // Wait for clean up
    responseLatch.await();
}
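The final assertion relies on assertBusy, which keeps retrying the check until it passes or a timeout is reached. A rough, hypothetical sketch of that polling idea in plain Java follows; PollUntilSketch, its timeout, and its sleep interval are illustrative and not the ESTestCase implementation.

    import java.util.function.BooleanSupplier;

    // Illustrative retry loop: re-check a condition until it holds or the deadline passes.
    final class PollUntilSketch {
        static void pollUntil(BooleanSupplier condition, long timeoutMillis) throws InterruptedException {
            long deadline = System.currentTimeMillis() + timeoutMillis;
            while (!condition.getAsBoolean()) {
                if (System.currentTimeMillis() > deadline) {
                    throw new AssertionError("condition not met within " + timeoutMillis + " ms");
                }
                Thread.sleep(50);  // back off briefly before re-checking
            }
        }
    }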
Use of org.elasticsearch.tasks.Task in project elasticsearch by elastic.
The class TransportTasksActionTests, method testTaskNodeFiltering.
/**
 * This test starts nodes actions that block on all nodes. While node actions are blocked in the middle of execution
 * it executes a tasks action that targets these blocked node actions. The test verifies that task actions are only
 * getting executed on nodes that are not listed in the node filter.
 */
public void testTaskNodeFiltering() throws ExecutionException, InterruptedException, IOException {
    setupTestNodes(Settings.EMPTY);
    connectNodes(testNodes);
    CountDownLatch checkLatch = new CountDownLatch(1);
    // Start some test nodes action so we could have something to run tasks actions on
    ActionFuture<NodesResponse> future = startBlockingTestNodesAction(checkLatch);
    String[] allNodes = new String[testNodes.length];
    for (int i = 0; i < testNodes.length; i++) {
        allNodes[i] = testNodes[i].getNodeId();
    }
    int filterNodesSize = randomInt(allNodes.length);
    Set<String> filterNodes = new HashSet<>(randomSubsetOf(filterNodesSize, allNodes));
    logger.info("Filtering out nodes {} size: {}", filterNodes, filterNodesSize);
    TestTasksAction[] tasksActions = new TestTasksAction[nodesCount];
    for (int i = 0; i < testNodes.length; i++) {
        final int node = i;
        // Simulate a task action that works on all nodes except nodes listed in filterNodes.
        // We are testing that it works.
        tasksActions[i] = new TestTasksAction(CLUSTER_SETTINGS, "testTasksAction", threadPool,
            testNodes[i].clusterService, testNodes[i].transportService) {

            @Override
            protected String[] filterNodeIds(DiscoveryNodes nodes, String[] nodesIds) {
                String[] superNodes = super.filterNodeIds(nodes, nodesIds);
                List<String> filteredNodes = new ArrayList<>();
                for (String node : superNodes) {
                    if (filterNodes.contains(node) == false) {
                        filteredNodes.add(node);
                    }
                }
                return filteredNodes.toArray(new String[filteredNodes.size()]);
            }

            @Override
            protected void taskOperation(TestTasksRequest request, Task task, ActionListener<TestTaskResponse> listener) {
                if (randomBoolean()) {
                    listener.onResponse(new TestTaskResponse(testNodes[node].getNodeId()));
                } else {
                    threadPool.generic().execute(() -> listener.onResponse(new TestTaskResponse(testNodes[node].getNodeId())));
                }
            }
        };
    }
    // Run task action on node tasks that are currently running
    // should be successful on all nodes except nodes that we filtered out
    TestTasksRequest testTasksRequest = new TestTasksRequest();
    // pick all test actions
    testTasksRequest.setActions("testAction[n]");
    TestTasksResponse response = tasksActions[randomIntBetween(0, nodesCount - 1)].execute(testTasksRequest).get();
    // Get successful responses from all nodes except nodes that we filtered out
    assertEquals(testNodes.length - filterNodes.size(), response.tasks.size());
    // no task failed
    assertEquals(0, response.getTaskFailures().size());
    // no nodes failed
    assertEquals(0, response.getNodeFailures().size());
    // Make sure that filtered nodes didn't send any responses
    for (TestTaskResponse taskResponse : response.tasks) {
        String nodeId = taskResponse.getStatus();
        assertFalse("Found response from filtered node " + nodeId, filterNodes.contains(nodeId));
    }
    // Release all node tasks and wait for response
    checkLatch.countDown();
    NodesResponse responses = future.get();
    assertEquals(0, responses.failureCount());
}
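The filterNodeIds override above is the heart of the test: it drops every node listed in filterNodes from the set of target nodes before the tasks action fans out. A standalone sketch of that filtering step, with illustrative class and method names:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Set;

    // Sketch of the node-id filter: keep only ids that are not in the exclusion set.
    final class NodeFilterSketch {
        static String[] filterNodeIds(Set<String> filterNodes, String[] nodeIds) {
            List<String> kept = new ArrayList<>();
            for (String nodeId : nodeIds) {
                if (filterNodes.contains(nodeId) == false) {
                    kept.add(nodeId);
                }
            }
            return kept.toArray(new String[0]);
        }
    }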