Use of org.opensearch.search.SearchShardTarget in project OpenSearch by opensearch-project.
The class SearchScrollAsyncAction, method run:
private void run(BiFunction<String, String, DiscoveryNode> clusterNodeLookup, final SearchContextIdForNode[] context) {
    final CountDown counter = new CountDown(scrollId.getContext().length);
    for (int i = 0; i < context.length; i++) {
        SearchContextIdForNode target = context[i];
        final int shardIndex = i;
        final Transport.Connection connection;
        try {
            DiscoveryNode node = clusterNodeLookup.apply(target.getClusterAlias(), target.getNode());
            if (node == null) {
                throw new IllegalStateException("node [" + target.getNode() + "] is not available");
            }
            connection = getConnection(target.getClusterAlias(), node);
        } catch (Exception ex) {
            onShardFailure(
                "query",
                counter,
                target.getSearchContextId(),
                ex,
                null,
                () -> SearchScrollAsyncAction.this.moveToNextPhase(clusterNodeLookup)
            );
            continue;
        }
        final InternalScrollSearchRequest internalRequest = TransportSearchHelper.internalScrollSearchRequest(
            target.getSearchContextId(),
            request
        );
        // we can't create a SearchShardTarget here since we don't know the index and shard ID we are talking to;
        // we only know the node and the search context ID. Yet, the response will contain the SearchShardTarget
        // from the target node instead... that's why we pass null here
        SearchActionListener<T> searchActionListener = new SearchActionListener<T>(null, shardIndex) {

            @Override
            protected void setSearchShardTarget(T response) {
                // don't do this - it's part of the response...
                assert response.getSearchShardTarget() != null : "search shard target must not be null";
                if (target.getClusterAlias() != null) {
                    // re-create the search shard target and add the cluster alias, if there is any;
                    // we need this down the road for subsequent phases
                    SearchShardTarget searchShardTarget = response.getSearchShardTarget();
                    response.setSearchShardTarget(
                        new SearchShardTarget(searchShardTarget.getNodeId(), searchShardTarget.getShardId(), target.getClusterAlias(), null)
                    );
                }
            }

            @Override
            protected void innerOnResponse(T result) {
                assert shardIndex == result.getShardIndex() : "shard index mismatch: " + shardIndex + " but got: " + result.getShardIndex();
                onFirstPhaseResult(shardIndex, result);
                if (counter.countDown()) {
                    SearchPhase phase = moveToNextPhase(clusterNodeLookup);
                    try {
                        phase.run();
                    } catch (Exception e) {
                        // we need to fail the entire request here - the entire phase just blew up.
                        // don't call onShardFailure or onFailure here, since otherwise we'd count down
                        // the counter again, which would result in an exception
                        listener.onFailure(new SearchPhaseExecutionException(phase.getName(), "Phase failed", e, ShardSearchFailure.EMPTY_ARRAY));
                    }
                }
            }

            @Override
            public void onFailure(Exception t) {
                onShardFailure(
                    "query",
                    counter,
                    target.getSearchContextId(),
                    t,
                    null,
                    () -> SearchScrollAsyncAction.this.moveToNextPhase(clusterNodeLookup)
                );
            }
        };
        executeInitialPhase(connection, internalRequest, searchActionListener);
    }
}
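
The core pattern above — fire one async request per shard, account for every success or failure in a shared counter, and run the next phase exactly once when the counter reaches zero — can be shown in isolation. A minimal, self-contained sketch in plain Java (using JDK primitives rather than OpenSearch's CountDown and listener types; all names here are illustrative):

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.IntConsumer;

public class FanOutSketch {
    // Fires `shards` async tasks; runs `nextPhase` exactly once, after the last one reports back.
    static void runPhase(int shards, IntConsumer perShard, Runnable nextPhase) {
        AtomicInteger counter = new AtomicInteger(shards);
        for (int i = 0; i < shards; i++) {
            final int shardIndex = i;
            CompletableFuture.runAsync(() -> {
                try {
                    perShard.accept(shardIndex); // success or failure, the shard is accounted for
                } finally {
                    // the thread that makes the counter hit zero owns the phase transition,
                    // mirroring CountDown.countDown() returning true exactly once
                    if (counter.decrementAndGet() == 0) {
                        nextPhase.run();
                    }
                }
            });
        }
    }

    public static void main(String[] args) throws InterruptedException {
        runPhase(4, shard -> System.out.println("shard " + shard + " done"), () -> System.out.println("next phase"));
        Thread.sleep(200); // crude wait for the demo's async tasks
    }
}

This is why the snippet's catch block calls onShardFailure and then continue: an unreachable node must still count down, or the scroll request would hang forever waiting for the last shard.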
Use of org.opensearch.search.SearchShardTarget in project OpenSearch by opensearch-project.
The class QueryPhaseResultConsumer, method partialReduce:
private MergeResult partialReduce(
    QuerySearchResult[] toConsume,
    List<SearchShard> emptyResults,
    SearchPhaseController.TopDocsStats topDocsStats,
    MergeResult lastMerge,
    int numReducePhases
) {
    // ensure consistent ordering
    Arrays.sort(toConsume, Comparator.comparingInt(QuerySearchResult::getShardIndex));
    for (QuerySearchResult result : toConsume) {
        topDocsStats.add(result.topDocs(), result.searchTimedOut(), result.terminatedEarly());
    }
    final TopDocs newTopDocs;
    if (hasTopDocs) {
        List<TopDocs> topDocsList = new ArrayList<>();
        if (lastMerge != null) {
            topDocsList.add(lastMerge.reducedTopDocs);
        }
        for (QuerySearchResult result : toConsume) {
            TopDocsAndMaxScore topDocs = result.consumeTopDocs();
            SearchPhaseController.setShardIndex(topDocs.topDocs, result.getShardIndex());
            topDocsList.add(topDocs.topDocs);
        }
        // we have to merge here in the same way we collect on a shard
        newTopDocs = SearchPhaseController.mergeTopDocs(topDocsList, topNSize, 0);
    } else {
        newTopDocs = null;
    }
    final InternalAggregations newAggs;
    if (hasAggs) {
        List<InternalAggregations> aggsList = new ArrayList<>();
        if (lastMerge != null) {
            aggsList.add(lastMerge.reducedAggs);
        }
        for (QuerySearchResult result : toConsume) {
            aggsList.add(result.consumeAggs().expand());
        }
        newAggs = InternalAggregations.topLevelReduce(aggsList, aggReduceContextBuilder.forPartialReduction());
    } else {
        newAggs = null;
    }
    List<SearchShard> processedShards = new ArrayList<>(emptyResults);
    if (lastMerge != null) {
        processedShards.addAll(lastMerge.processedShards);
    }
    for (QuerySearchResult result : toConsume) {
        SearchShardTarget target = result.getSearchShardTarget();
        processedShards.add(new SearchShard(target.getClusterAlias(), target.getShardId()));
    }
    progressListener.notifyPartialReduce(processedShards, topDocsStats.getTotalHits(), newAggs, numReducePhases);
    // we leave the results un-serialized because serializing is slow but we compute the serialized
    // size as an estimate of the memory used by the newly reduced aggregations.
    long serializedSize = hasAggs ? newAggs.getSerializedSize() : 0;
    return new MergeResult(processedShards, newTopDocs, newAggs, hasAggs ? serializedSize : 0);
}
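
The shard-index stamping before the merge is what keeps tie-breaking deterministic: Lucene breaks score ties by shard index, then by doc ID, which is why the results are sorted by shard index first ("ensure consistent ordering"). A minimal sketch of the same idea using Lucene's TopDocs.merge directly (the per-shard hit data below is made up for illustration):

import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TotalHits;

public class TopDocsMergeSketch {
    public static void main(String[] args) {
        // two per-shard result sets with a score tie between doc 7 (shard 0) and doc 3 (shard 1)
        TopDocs shard0 = topDocs(0, new ScoreDoc(7, 2.0f), new ScoreDoc(1, 1.0f));
        TopDocs shard1 = topDocs(1, new ScoreDoc(3, 2.0f), new ScoreDoc(9, 0.5f));

        // merge keeps global score order; ties go to the lower shard index
        TopDocs merged = TopDocs.merge(3, new TopDocs[] { shard0, shard1 });
        for (ScoreDoc sd : merged.scoreDocs) {
            System.out.println("doc=" + sd.doc + " score=" + sd.score + " shard=" + sd.shardIndex);
        }
    }

    // stamp the shard index on every hit, analogous to SearchPhaseController.setShardIndex,
    // so the merge can tie-break deterministically
    private static TopDocs topDocs(int shardIndex, ScoreDoc... hits) {
        for (ScoreDoc hit : hits) {
            hit.shardIndex = shardIndex;
        }
        return new TopDocs(new TotalHits(hits.length, TotalHits.Relation.EQUAL_TO), hits);
    }
}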
Use of org.opensearch.search.SearchShardTarget in project OpenSearch by opensearch-project.
The class SearchContextId, method encode:
public static String encode(List<SearchPhaseResult> searchPhaseResults, Map<String, AliasFilter> aliasFilter, Version version) {
    final Map<ShardId, SearchContextIdForNode> shards = new HashMap<>();
    for (SearchPhaseResult searchPhaseResult : searchPhaseResults) {
        final SearchShardTarget target = searchPhaseResult.getSearchShardTarget();
        shards.put(
            target.getShardId(),
            new SearchContextIdForNode(target.getClusterAlias(), target.getNodeId(), searchPhaseResult.getContextId())
        );
    }
    try (BytesStreamOutput out = new BytesStreamOutput()) {
        out.setVersion(version);
        Version.writeVersion(version, out);
        out.writeMap(shards, (o, k) -> k.writeTo(o), (o, v) -> v.writeTo(o));
        out.writeMap(aliasFilter, StreamOutput::writeString, (o, v) -> v.writeTo(o));
        return Base64.getUrlEncoder().encodeToString(BytesReference.toBytes(out.bytes()));
    } catch (IOException e) {
        throw new IllegalArgumentException(e);
    }
}
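
The encoding is a version-prefixed binary dump of the shard-to-context map, wrapped in URL-safe Base64 so it can travel as an opaque token in a request body. A self-contained sketch of the same round-trip pattern using only JDK streams (the field layout here is illustrative, not OpenSearch's actual wire format):

import java.io.*;
import java.util.Base64;
import java.util.LinkedHashMap;
import java.util.Map;

public class ContextIdCodecSketch {
    // encode a version plus a shard -> node map into a URL-safe Base64 token
    static String encode(int version, Map<String, String> shardToNode) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        try (DataOutputStream out = new DataOutputStream(bytes)) {
            out.writeInt(version); // version first, so decoders can pick the right format
            out.writeInt(shardToNode.size());
            for (Map.Entry<String, String> e : shardToNode.entrySet()) {
                out.writeUTF(e.getKey());
                out.writeUTF(e.getValue());
            }
        }
        return Base64.getUrlEncoder().encodeToString(bytes.toByteArray());
    }

    static Map<String, String> decode(String token) throws IOException {
        byte[] raw = Base64.getUrlDecoder().decode(token);
        try (DataInputStream in = new DataInputStream(new ByteArrayInputStream(raw))) {
            int version = in.readInt(); // a real decoder would branch on this
            int size = in.readInt();
            Map<String, String> map = new LinkedHashMap<>();
            for (int i = 0; i < size; i++) {
                map.put(in.readUTF(), in.readUTF());
            }
            return map;
        }
    }

    public static void main(String[] args) throws IOException {
        String token = encode(1, Map.of("idx[0]", "node_1"));
        System.out.println(decode(token)); // {idx[0]=node_1}
    }
}

Writing the version before everything else mirrors what encode does with Version.writeVersion: a node that receives an old token can fail fast or parse the older layout.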
Use of org.opensearch.search.SearchShardTarget in project OpenSearch by opensearch-project.
The class OpenSearchExceptionTests, method testFailureToAndFromXContentWithDetails:
public void testFailureToAndFromXContentWithDetails() throws IOException {
    final XContent xContent = randomFrom(XContentType.values()).xContent();
    Exception failure;
    Throwable failureCause;
    OpenSearchException expected;
    OpenSearchException expectedCause;
    OpenSearchException suppressed;
    switch (randomIntBetween(0, 6)) {
        case 0: // Simple opensearch exception without cause
            failure = new NoNodeAvailableException("A");
            expected = new OpenSearchException("OpenSearch exception [type=no_node_available_exception, reason=A]");
            expected.addSuppressed(new OpenSearchException("OpenSearch exception [type=no_node_available_exception, reason=A]"));
            break;
        case 1: // Simple opensearch exception with headers (other metadata of type number are not parsed)
            failure = new ParsingException(3, 2, "B", null);
            ((OpenSearchException) failure).addHeader("header_name", "0", "1");
            expected = new OpenSearchException("OpenSearch exception [type=parsing_exception, reason=B]");
            expected.addHeader("header_name", "0", "1");
            suppressed = new OpenSearchException("OpenSearch exception [type=parsing_exception, reason=B]");
            suppressed.addHeader("header_name", "0", "1");
            expected.addSuppressed(suppressed);
            break;
        case 2: // OpenSearch exception with a cause, headers and parsable metadata
            failureCause = new NullPointerException("var is null");
            failure = new ScriptException("C", failureCause, singletonList("stack"), "test", "painless");
            ((OpenSearchException) failure).addHeader("script_name", "my_script");
            expectedCause = new OpenSearchException("OpenSearch exception [type=null_pointer_exception, reason=var is null]");
            expected = new OpenSearchException("OpenSearch exception [type=script_exception, reason=C]", expectedCause);
            expected.addHeader("script_name", "my_script");
            expected.addMetadata("opensearch.lang", "painless");
            expected.addMetadata("opensearch.script", "test");
            expected.addMetadata("opensearch.script_stack", "stack");
            suppressed = new OpenSearchException("OpenSearch exception [type=script_exception, reason=C]");
            suppressed.addHeader("script_name", "my_script");
            suppressed.addMetadata("opensearch.lang", "painless");
            suppressed.addMetadata("opensearch.script", "test");
            suppressed.addMetadata("opensearch.script_stack", "stack");
            expected.addSuppressed(suppressed);
            break;
        case 3: // JDK exception without cause
            failure = new IllegalStateException("D");
            expected = new OpenSearchException("OpenSearch exception [type=illegal_state_exception, reason=D]");
            suppressed = new OpenSearchException("OpenSearch exception [type=illegal_state_exception, reason=D]");
            expected.addSuppressed(suppressed);
            break;
        case 4: // JDK exception with cause
            failureCause = new RoutingMissingException("idx", "id");
            failure = new RuntimeException("E", failureCause);
            expectedCause = new OpenSearchException("OpenSearch exception [type=routing_missing_exception, reason=routing is required for [idx]/[id]]");
            expectedCause.addMetadata("opensearch.index", "idx");
            expectedCause.addMetadata("opensearch.index_uuid", "_na_");
            expected = new OpenSearchException("OpenSearch exception [type=runtime_exception, reason=E]", expectedCause);
            suppressed = new OpenSearchException("OpenSearch exception [type=runtime_exception, reason=E]");
            expected.addSuppressed(suppressed);
            break;
        case 5: // Wrapped exception with cause
            failureCause = new FileAlreadyExistsException("File exists");
            failure = new BroadcastShardOperationFailedException(new ShardId("_index", "_uuid", 5), "F", failureCause);
            expected = new OpenSearchException("OpenSearch exception [type=file_already_exists_exception, reason=File exists]");
            suppressed = new OpenSearchException("OpenSearch exception [type=file_already_exists_exception, reason=File exists]");
            expected.addSuppressed(suppressed);
            break;
        case 6: // SearchPhaseExecutionException with cause and multiple failures
            DiscoveryNode node = new DiscoveryNode("node_g", buildNewFakeTransportAddress(), Version.CURRENT);
            failureCause = new NodeClosedException(node);
            failureCause = new NoShardAvailableActionException(new ShardId("_index_g", "_uuid_g", 6), "node_g", failureCause);
            ShardSearchFailure[] shardFailures = new ShardSearchFailure[] {
                new ShardSearchFailure(
                    new ParsingException(0, 0, "Parsing g", null),
                    new SearchShardTarget("node_g", new ShardId(new Index("_index_g", "_uuid_g"), 61), null, OriginalIndices.NONE)
                ),
                new ShardSearchFailure(
                    new RepositoryException("repository_g", "Repo"),
                    new SearchShardTarget("node_g", new ShardId(new Index("_index_g", "_uuid_g"), 62), null, OriginalIndices.NONE)
                ),
                new ShardSearchFailure(new SearchContextMissingException(new ShardSearchContextId(UUIDs.randomBase64UUID(), 0L)), null) };
            failure = new SearchPhaseExecutionException("phase_g", "G", failureCause, shardFailures);
            expectedCause = new OpenSearchException("OpenSearch exception [type=node_closed_exception, reason=node closed " + node + "]");
            expectedCause = new OpenSearchException("OpenSearch exception [type=no_shard_available_action_exception, reason=node_g]", expectedCause);
            expectedCause.addMetadata("opensearch.index", "_index_g");
            expectedCause.addMetadata("opensearch.index_uuid", "_uuid_g");
            expectedCause.addMetadata("opensearch.shard", "6");
            expected = new OpenSearchException("OpenSearch exception [type=search_phase_execution_exception, reason=G]", expectedCause);
            expected.addMetadata("opensearch.phase", "phase_g");
            expected.addSuppressed(new OpenSearchException("OpenSearch exception [type=parsing_exception, reason=Parsing g]"));
            expected.addSuppressed(new OpenSearchException("OpenSearch exception [type=repository_exception, reason=[repository_g] Repo]"));
            expected.addSuppressed(new OpenSearchException("OpenSearch exception [type=search_context_missing_exception, reason=No search context found for id [0]]"));
            break;
        default:
            throw new UnsupportedOperationException("Failed to generate randomized failure");
    }
    Exception finalFailure = failure;
    BytesReference failureBytes = toShuffledXContent((builder, params) -> {
        OpenSearchException.generateFailureXContent(builder, params, finalFailure, true);
        return builder;
    }, xContent.type(), ToXContent.EMPTY_PARAMS, randomBoolean());
    try (XContentParser parser = createParser(xContent, failureBytes)) {
        failureBytes = BytesReference.bytes(shuffleXContent(parser, randomBoolean()));
    }
    OpenSearchException parsedFailure;
    try (XContentParser parser = createParser(xContent, failureBytes)) {
        assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
        assertEquals(XContentParser.Token.FIELD_NAME, parser.nextToken());
        parsedFailure = OpenSearchException.failureFromXContent(parser);
        assertEquals(XContentParser.Token.END_OBJECT, parser.nextToken());
        assertNull(parser.nextToken());
    }
    assertDeepEquals(expected, parsedFailure);
}
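
The final assertion compares the whole exception tree, not just the top-level message. A minimal sketch of that kind of deep comparison in plain Java (assertDeepEquals itself is a helper in the OpenSearch test base classes; this standalone version only checks messages, cause chains, and suppressed arrays, not headers or metadata):

import java.util.Objects;

public class DeepExceptionEquals {
    // recursively compare message, suppressed exceptions, and the cause chain
    static boolean deepEquals(Throwable a, Throwable b) {
        if (a == null || b == null) {
            return a == b;
        }
        if (!Objects.equals(a.getMessage(), b.getMessage())) {
            return false;
        }
        Throwable[] sa = a.getSuppressed();
        Throwable[] sb = b.getSuppressed();
        if (sa.length != sb.length) {
            return false;
        }
        for (int i = 0; i < sa.length; i++) {
            if (!deepEquals(sa[i], sb[i])) {
                return false;
            }
        }
        return deepEquals(a.getCause(), b.getCause());
    }

    public static void main(String[] args) {
        Exception x = new RuntimeException("E", new IllegalStateException("D"));
        x.addSuppressed(new RuntimeException("E"));
        Exception y = new RuntimeException("E", new IllegalStateException("D"));
        y.addSuppressed(new RuntimeException("E"));
        System.out.println(deepEquals(x, y)); // true
    }
}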
Use of org.opensearch.search.SearchShardTarget in project OpenSearch by opensearch-project.
The class CanMatchPreFilterSearchPhaseTests, method testLotsOfShards:
/*
 * In cases where a query coordinating node held all the shards for a query, the can-match phase would recurse and end in a stack
 * overflow when subjected to max concurrent search requests. This test covers that situation.
 */
public void testLotsOfShards() throws InterruptedException {
    final TransportSearchAction.SearchTimeProvider timeProvider = new TransportSearchAction.SearchTimeProvider(0, System.nanoTime(), System::nanoTime);
    final Map<String, Transport.Connection> lookup = new ConcurrentHashMap<>();
    final DiscoveryNode primaryNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT);
    final DiscoveryNode replicaNode = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT);
    lookup.put("node1", new SearchAsyncActionTests.MockConnection(primaryNode));
    lookup.put("node2", new SearchAsyncActionTests.MockConnection(replicaNode));
    final SearchTransportService searchTransportService = new SearchTransportService(null, null) {
        @Override
        public void sendCanMatch(Transport.Connection connection, ShardSearchRequest request, SearchTask task, ActionListener<SearchService.CanMatchResponse> listener) {
            listener.onResponse(new SearchService.CanMatchResponse(randomBoolean(), null));
        }
    };
    final CountDownLatch latch = new CountDownLatch(1);
    final OriginalIndices originalIndices = new OriginalIndices(new String[] { "idx" }, SearchRequest.DEFAULT_INDICES_OPTIONS);
    final GroupShardsIterator<SearchShardIterator> shardsIter = SearchAsyncActionTests.getShardsIter("idx", originalIndices, 4096, randomBoolean(), primaryNode, replicaNode);
    final ExecutorService executor = Executors.newFixedThreadPool(randomIntBetween(1, Runtime.getRuntime().availableProcessors()));
    final SearchRequest searchRequest = new SearchRequest();
    searchRequest.allowPartialSearchResults(true);
    SearchTransportService transportService = new SearchTransportService(null, null);
    ActionListener<SearchResponse> responseListener = ActionListener.wrap(response -> {}, (e) -> { throw new AssertionError("unexpected", e); });
    Map<String, AliasFilter> aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY));
    final CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase(
        logger,
        searchTransportService,
        (clusterAlias, node) -> lookup.get(node),
        Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)),
        Collections.emptyMap(),
        Collections.emptyMap(),
        OpenSearchExecutors.newDirectExecutorService(),
        searchRequest,
        null,
        shardsIter,
        timeProvider,
        ClusterState.EMPTY_STATE,
        null,
        (iter) -> new AbstractSearchAsyncAction<SearchPhaseResult>(
            "test",
            logger,
            transportService,
            (cluster, node) -> {
                assert cluster == null : "cluster was not null: " + cluster;
                return lookup.get(node);
            },
            aliasFilters,
            Collections.emptyMap(),
            Collections.emptyMap(),
            executor,
            searchRequest,
            responseListener,
            iter,
            new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0),
            ClusterState.EMPTY_STATE,
            null,
            new ArraySearchPhaseResults<>(iter.size()),
            randomIntBetween(1, 32),
            SearchResponse.Clusters.EMPTY
        ) {
            @Override
            protected SearchPhase getNextPhase(SearchPhaseResults<SearchPhaseResult> results, SearchPhaseContext context) {
                return new SearchPhase("test") {
                    @Override
                    public void run() {
                        latch.countDown();
                    }
                };
            }

            @Override
            protected void executePhaseOnShard(final SearchShardIterator shardIt, final SearchShardTarget shard, final SearchActionListener<SearchPhaseResult> listener) {
                if (randomBoolean()) {
                    listener.onResponse(new SearchPhaseResult() {
                    });
                } else {
                    listener.onFailure(new Exception("failure"));
                }
            }
        },
        SearchResponse.Clusters.EMPTY
    );
    canMatchPhase.start();
    latch.await();
    executor.shutdown();
}
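
The failure mode this test guards against is worth seeing in miniature: when a listener completes synchronously (as the stubbed sendCanMatch above always does) and the completion handler immediately launches the next shard, each shard adds a stack frame, and thousands of shards served by one node overflow the stack. A self-contained sketch of the standard fix, sequencing the work with a loop instead of re-entrant calls (names are illustrative, not OpenSearch's):

import java.util.function.IntConsumer;

public class StackSafeFanOut {
    static final int SHARDS = 100_000;

    // BAD: each synchronous completion re-enters nextShardRecursive, adding a stack frame per shard
    static void nextShardRecursive(int shard, IntConsumer canMatch) {
        if (shard >= SHARDS) return;
        canMatch.accept(shard);                  // completes synchronously
        nextShardRecursive(shard + 1, canMatch); // re-entry: stack depth grows with shard count
    }

    // GOOD: a plain loop gives the same sequencing with constant stack depth
    static void nextShardIterative(IntConsumer canMatch) {
        for (int shard = 0; shard < SHARDS; shard++) {
            canMatch.accept(shard);
        }
    }

    public static void main(String[] args) {
        nextShardIterative(shard -> { /* synchronous can-match response */ });
        System.out.println("iterative version finished");
        // nextShardRecursive(0, shard -> {}); // would throw StackOverflowError at this shard count
    }
}

The test exercises exactly this: 4096 shards spread over two nodes, all responding synchronously, with the latch releasing only if the phase machinery reaches the end without blowing the stack.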