Use of org.opensearch.search.profile.SearchProfileShardResults in project OpenSearch by opensearch-project.
From the class SearchResponseMerger, method getMergedResponse.
/**
 * Returns the merged response. To be called once all responses have been added through {@link #add(SearchResponse)}
 * so that all responses are merged into a single one.
 */
SearchResponse getMergedResponse(SearchResponse.Clusters clusters) {
    // if we end up calling merge without anything to merge, we just return an empty search response
    if (searchResponses.size() == 0) {
        return SearchResponse.empty(searchTimeProvider::buildTookInMillis, clusters);
    }
    int totalShards = 0;
    int skippedShards = 0;
    int successfulShards = 0;
    // the current reduce phase counts as one
    int numReducePhases = 1;
    List<ShardSearchFailure> failures = new ArrayList<>();
    Map<String, ProfileShardResult> profileResults = new HashMap<>();
    List<InternalAggregations> aggs = new ArrayList<>();
    Map<ShardIdAndClusterAlias, Integer> shards = new TreeMap<>();
    List<TopDocs> topDocsList = new ArrayList<>(searchResponses.size());
    Map<String, List<Suggest.Suggestion>> groupedSuggestions = new HashMap<>();
    Boolean trackTotalHits = null;
    SearchPhaseController.TopDocsStats topDocsStats = new SearchPhaseController.TopDocsStats(trackTotalHitsUpTo);
    for (SearchResponse searchResponse : searchResponses) {
        totalShards += searchResponse.getTotalShards();
        skippedShards += searchResponse.getSkippedShards();
        successfulShards += searchResponse.getSuccessfulShards();
        numReducePhases += searchResponse.getNumReducePhases();
        Collections.addAll(failures, searchResponse.getShardFailures());
        profileResults.putAll(searchResponse.getProfileResults());
        if (searchResponse.getAggregations() != null) {
            InternalAggregations internalAggs = (InternalAggregations) searchResponse.getAggregations();
            aggs.add(internalAggs);
        }
        Suggest suggest = searchResponse.getSuggest();
        if (suggest != null) {
            for (Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>> entries : suggest) {
                List<Suggest.Suggestion> suggestionList = groupedSuggestions.computeIfAbsent(entries.getName(), s -> new ArrayList<>());
                suggestionList.add(entries);
            }
            List<CompletionSuggestion> completionSuggestions = suggest.filter(CompletionSuggestion.class);
            for (CompletionSuggestion completionSuggestion : completionSuggestions) {
                for (CompletionSuggestion.Entry options : completionSuggestion) {
                    for (CompletionSuggestion.Entry.Option option : options) {
                        SearchShardTarget shard = option.getHit().getShard();
                        ShardIdAndClusterAlias shardId = new ShardIdAndClusterAlias(shard.getShardId(), shard.getClusterAlias());
                        shards.putIfAbsent(shardId, null);
                    }
                }
            }
        }
        SearchHits searchHits = searchResponse.getHits();
        final TotalHits totalHits;
        if (searchHits.getTotalHits() == null) {
            // in case we didn't track total hits, we get null from each cluster, but we need to set a total of 0
            // with an EQUAL_TO relation on the TopDocs
            totalHits = new TotalHits(0, TotalHits.Relation.EQUAL_TO);
            assert trackTotalHits == null || trackTotalHits == false;
            trackTotalHits = false;
        } else {
            totalHits = searchHits.getTotalHits();
            assert trackTotalHits == null || trackTotalHits;
            trackTotalHits = true;
        }
        TopDocs topDocs = searchHitsToTopDocs(searchHits, totalHits, shards);
        topDocsStats.add(new TopDocsAndMaxScore(topDocs, searchHits.getMaxScore()),
                searchResponse.isTimedOut(), searchResponse.isTerminatedEarly());
        if (searchHits.getHits().length > 0) {
            // there is no point in adding empty search hits and merging them with the others. Also, empty search hits always come
            // without sort fields and collapse info, even when sorting by field and/or field collapsing was requested, which causes
            // issues reconstructing the proper TopDocs instance and breaks mergeTopDocs, which expects the same type for each result.
            topDocsList.add(topDocs);
        }
    }
    // after going through all the hits and collecting all their distinct shards, we assign a shardIndex and set it on the ScoreDocs
    setTopDocsShardIndex(shards, topDocsList);
    TopDocs topDocs = SearchPhaseController.mergeTopDocs(topDocsList, size, from);
    SearchHits mergedSearchHits = topDocsToSearchHits(topDocs, topDocsStats);
    setSuggestShardIndex(shards, groupedSuggestions);
    Suggest suggest = groupedSuggestions.isEmpty() ? null : new Suggest(Suggest.reduce(groupedSuggestions));
    InternalAggregations reducedAggs = InternalAggregations.topLevelReduce(aggs, aggReduceContextBuilder.forFinalReduction());
    ShardSearchFailure[] shardFailures = failures.toArray(ShardSearchFailure.EMPTY_ARRAY);
    SearchProfileShardResults profileShardResults = profileResults.isEmpty() ? null : new SearchProfileShardResults(profileResults);
    // make failure ordering consistent between ordinary search and CCS by sorting on the shard each failure comes from
    Arrays.sort(shardFailures, FAILURES_COMPARATOR);
    InternalSearchResponse response = new InternalSearchResponse(mergedSearchHits, reducedAggs, suggest, profileShardResults,
            topDocsStats.timedOut, topDocsStats.terminatedEarly, numReducePhases);
    long tookInMillis = searchTimeProvider.buildTookInMillis();
    return new SearchResponse(response, null, totalShards, successfulShards, skippedShards, tookInMillis, shardFailures, clusters, null);
}
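The call pattern around this method, as a minimal sketch: one SearchResponse per cluster is added, then a single merge produces the final response. The constructor signature and the resp1/resp2 responses below are assumptions for illustration (the class is package-private in org.opensearch.action.search):

// Hypothetical driver from the same package; resp1/resp2 stand in for
// per-cluster SearchResponse instances already collected elsewhere.
SearchResponseMerger merger = new SearchResponseMerger(
        0,                                       // from
        10,                                      // size
        SearchContext.TRACK_TOTAL_HITS_ACCURATE, // trackTotalHitsUpTo
        timeProvider,                            // SearchTimeProvider for took-time accounting
        aggReduceContextBuilder);                // controls the final aggregation reduction
merger.add(resp1);
merger.add(resp2);
// 2 clusters total, 2 successful, 0 skipped
SearchResponse merged = merger.getMergedResponse(new SearchResponse.Clusters(2, 2, 0));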
Use of org.opensearch.search.profile.SearchProfileShardResults in project OpenSearch by opensearch-project.
From the class SearchResponseTests, method createTestItem.
/**
 * If minimal is set, don't include search hits, aggregations, suggest etc. to keep the test simpler.
 */
private SearchResponse createTestItem(boolean minimal, ShardSearchFailure... shardSearchFailures) {
    boolean timedOut = randomBoolean();
    Boolean terminatedEarly = randomBoolean() ? null : randomBoolean();
    int numReducePhases = randomIntBetween(1, 10);
    long tookInMillis = randomNonNegativeLong();
    int totalShards = randomIntBetween(1, Integer.MAX_VALUE);
    int successfulShards = randomIntBetween(0, totalShards);
    int skippedShards = randomIntBetween(0, totalShards);
    InternalSearchResponse internalSearchResponse;
    if (minimal == false) {
        SearchHits hits = SearchHitsTests.createTestItem(true, true);
        InternalAggregations aggregations = aggregationsTests.createTestInstance();
        Suggest suggest = SuggestTests.createTestItem();
        SearchProfileShardResults profileShardResults = SearchProfileShardResultsTests.createTestItem();
        internalSearchResponse = new InternalSearchResponse(hits, aggregations, suggest, profileShardResults,
                timedOut, terminatedEarly, numReducePhases);
    } else {
        internalSearchResponse = InternalSearchResponse.empty();
    }
    return new SearchResponse(internalSearchResponse, null, totalShards, successfulShards, skippedShards, tookInMillis,
            shardSearchFailures, randomBoolean() ? randomClusters() : SearchResponse.Clusters.EMPTY);
}
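A test item like this is typically round-tripped through XContent to verify parsing. A sketch of that pattern, assuming the usual OpenSearchTestCase helpers (toShuffledXContent, createParser) are available on the test class:

// Serialize a randomized response, shuffle field order, parse it back, and
// compare a couple of scalar fields that must survive the round trip.
SearchResponse response = createTestItem(false);
XContentType xContentType = randomFrom(XContentType.values());
BytesReference originalBytes = toShuffledXContent(response, xContentType, ToXContent.EMPTY_PARAMS, randomBoolean());
try (XContentParser parser = createParser(xContentType.xContent(), originalBytes)) {
    SearchResponse parsed = SearchResponse.fromXContent(parser);
    assertEquals(response.getTotalShards(), parsed.getTotalShards());
    assertEquals(response.getNumReducePhases(), parsed.getNumReducePhases());
}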
Use of org.opensearch.search.profile.SearchProfileShardResults in project OpenSearch by opensearch-project.
From the class TransportSearchAction, method ccsRemoteReduce.
static void ccsRemoteReduce(SearchRequest searchRequest, OriginalIndices localIndices, Map<String, OriginalIndices> remoteIndices,
        SearchTimeProvider timeProvider, InternalAggregation.ReduceContextBuilder aggReduceContextBuilder,
        RemoteClusterService remoteClusterService, ThreadPool threadPool, ActionListener<SearchResponse> listener,
        BiConsumer<SearchRequest, ActionListener<SearchResponse>> localSearchConsumer) {
    if (localIndices == null && remoteIndices.size() == 1) {
        // if we are searching against a single remote cluster, we simply forward the original search request to that cluster
        // and perform the final reduction directly on the remote cluster
        Map.Entry<String, OriginalIndices> entry = remoteIndices.entrySet().iterator().next();
        String clusterAlias = entry.getKey();
        boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias);
        OriginalIndices indices = entry.getValue();
        SearchRequest ccsSearchRequest = SearchRequest.subSearchRequest(searchRequest, indices.indices(), clusterAlias,
                timeProvider.getAbsoluteStartMillis(), true);
        Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias);
        remoteClusterClient.search(ccsSearchRequest, new ActionListener<SearchResponse>() {
            @Override
            public void onResponse(SearchResponse searchResponse) {
                Map<String, ProfileShardResult> profileResults = searchResponse.getProfileResults();
                SearchProfileShardResults profile = profileResults == null || profileResults.isEmpty() ? null
                        : new SearchProfileShardResults(profileResults);
                InternalSearchResponse internalSearchResponse = new InternalSearchResponse(searchResponse.getHits(),
                        (InternalAggregations) searchResponse.getAggregations(), searchResponse.getSuggest(), profile,
                        searchResponse.isTimedOut(), searchResponse.isTerminatedEarly(), searchResponse.getNumReducePhases());
                listener.onResponse(new SearchResponse(internalSearchResponse, searchResponse.getScrollId(),
                        searchResponse.getTotalShards(), searchResponse.getSuccessfulShards(), searchResponse.getSkippedShards(),
                        timeProvider.buildTookInMillis(), searchResponse.getShardFailures(), new SearchResponse.Clusters(1, 1, 0),
                        searchResponse.pointInTimeId()));
            }

            @Override
            public void onFailure(Exception e) {
                if (skipUnavailable) {
                    listener.onResponse(SearchResponse.empty(timeProvider::buildTookInMillis, new SearchResponse.Clusters(1, 0, 1)));
                } else {
                    listener.onFailure(wrapRemoteClusterFailure(clusterAlias, e));
                }
            }
        });
    } else {
        SearchResponseMerger searchResponseMerger = createSearchResponseMerger(searchRequest.source(), timeProvider, aggReduceContextBuilder);
        AtomicInteger skippedClusters = new AtomicInteger(0);
        final AtomicReference<Exception> exceptions = new AtomicReference<>();
        int totalClusters = remoteIndices.size() + (localIndices == null ? 0 : 1);
        final CountDown countDown = new CountDown(totalClusters);
        for (Map.Entry<String, OriginalIndices> entry : remoteIndices.entrySet()) {
            String clusterAlias = entry.getKey();
            boolean skipUnavailable = remoteClusterService.isSkipUnavailable(clusterAlias);
            OriginalIndices indices = entry.getValue();
            SearchRequest ccsSearchRequest = SearchRequest.subSearchRequest(searchRequest, indices.indices(), clusterAlias,
                    timeProvider.getAbsoluteStartMillis(), false);
            ActionListener<SearchResponse> ccsListener = createCCSListener(clusterAlias, skipUnavailable, countDown,
                    skippedClusters, exceptions, searchResponseMerger, totalClusters, listener);
            Client remoteClusterClient = remoteClusterService.getRemoteClusterClient(threadPool, clusterAlias);
            remoteClusterClient.search(ccsSearchRequest, ccsListener);
        }
        if (localIndices != null) {
            ActionListener<SearchResponse> ccsListener = createCCSListener(RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, false,
                    countDown, skippedClusters, exceptions, searchResponseMerger, totalClusters, listener);
            SearchRequest ccsLocalSearchRequest = SearchRequest.subSearchRequest(searchRequest, localIndices.indices(),
                    RemoteClusterAware.LOCAL_CLUSTER_GROUP_KEY, timeProvider.getAbsoluteStartMillis(), false);
            localSearchConsumer.accept(ccsLocalSearchRequest, ccsListener);
        }
    }
}
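createCCSListener itself is not shown here. As a rough, hypothetical sketch (not the actual implementation; names and error handling simplified), a per-cluster listener in the multi-cluster branch has three jobs: accumulate successful responses into the merger, count skipped clusters or record a fatal failure, and fire the final merge once the CountDown completes:

// Hypothetical sketch of the responsibilities of a per-cluster CCS listener.
ActionListener<SearchResponse> ccsListener = new ActionListener<SearchResponse>() {
    @Override
    public void onResponse(SearchResponse response) {
        searchResponseMerger.add(response);      // accumulate this cluster's response
        maybeFinish();
    }

    @Override
    public void onFailure(Exception e) {
        if (skipUnavailable) {
            skippedClusters.incrementAndGet();   // cluster is skipped, not fatal
        } else {
            exceptions.compareAndSet(null, e);   // remember the first fatal failure (simplified)
        }
        maybeFinish();
    }

    private void maybeFinish() {
        if (countDown.countDown()) {             // true once every cluster has answered
            if (exceptions.get() != null) {
                listener.onFailure(exceptions.get());
            } else {
                SearchResponse.Clusters clusters = new SearchResponse.Clusters(
                        totalClusters, totalClusters - skippedClusters.get(), skippedClusters.get());
                listener.onResponse(searchResponseMerger.getMergedResponse(clusters));
            }
        }
    }
};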
Use of org.opensearch.search.profile.SearchProfileShardResults in project OpenSearch by opensearch-project.
From the class SearchPhaseController, method reducedQueryPhase.
/**
 * Reduces the given query results and consumes all aggregations and profile results.
 * @param queryResults a list of non-null query shard results
 * @param bufferedAggs a list of pre-collected aggregations.
 * @param bufferedTopDocs a list of pre-collected top docs.
 * @param numReducePhases the number of non-final reduce phases applied to the query results.
 * @see QuerySearchResult#consumeAggs()
 * @see QuerySearchResult#consumeProfileResult()
 */
ReducedQueryPhase reducedQueryPhase(Collection<? extends SearchPhaseResult> queryResults, List<InternalAggregations> bufferedAggs,
        List<TopDocs> bufferedTopDocs, TopDocsStats topDocsStats, int numReducePhases, boolean isScrollRequest,
        InternalAggregation.ReduceContextBuilder aggReduceContextBuilder, boolean performFinalReduce) {
    assert numReducePhases >= 0 : "num reduce phases must be >= 0 but was: " + numReducePhases;
    // increment for this phase
    numReducePhases++;
    if (queryResults.isEmpty()) {
        // early terminate, we have nothing to reduce
        final TotalHits totalHits = topDocsStats.getTotalHits();
        return new ReducedQueryPhase(totalHits, topDocsStats.fetchHits, topDocsStats.getMaxScore(), false, null, null, null, null,
                SortedTopDocs.EMPTY, null, numReducePhases, 0, 0, true);
    }
    int total = queryResults.size();
    queryResults = queryResults.stream().filter(res -> res.queryResult().isNull() == false).collect(Collectors.toList());
    String errorMsg = "must have at least one non-empty search result, got 0 out of " + total;
    assert queryResults.isEmpty() == false : errorMsg;
    if (queryResults.isEmpty()) {
        throw new IllegalStateException(errorMsg);
    }
    validateMergeSortValueFormats(queryResults);
    final QuerySearchResult firstResult = queryResults.stream().findFirst().get().queryResult();
    final boolean hasSuggest = firstResult.suggest() != null;
    final boolean hasProfileResults = firstResult.hasProfileResults();
    // count the total (we use the query result provider here, since we might not get any hits (we scrolled past them))
    final Map<String, List<Suggestion>> groupedSuggestions = hasSuggest ? new HashMap<>() : Collections.emptyMap();
    final Map<String, ProfileShardResult> profileResults = hasProfileResults ? new HashMap<>(queryResults.size()) : Collections.emptyMap();
    int from = 0;
    int size = 0;
    for (SearchPhaseResult entry : queryResults) {
        QuerySearchResult result = entry.queryResult();
        from = result.from();
        // sorted queries can set the size to 0 if they have enough competitive hits.
        size = Math.max(result.size(), size);
        if (hasSuggest) {
            assert result.suggest() != null;
            for (Suggestion<? extends Suggestion.Entry<? extends Suggestion.Entry.Option>> suggestion : result.suggest()) {
                List<Suggestion> suggestionList = groupedSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>());
                suggestionList.add(suggestion);
                if (suggestion instanceof CompletionSuggestion) {
                    CompletionSuggestion completionSuggestion = (CompletionSuggestion) suggestion;
                    completionSuggestion.setShardIndex(result.getShardIndex());
                }
            }
        }
        if (bufferedTopDocs.isEmpty() == false) {
            assert result.hasConsumedTopDocs() : "top docs not consumed, but we got non-empty buffered top docs?";
        }
        if (hasProfileResults) {
            String key = result.getSearchShardTarget().toString();
            profileResults.put(key, result.consumeProfileResult());
        }
    }
    final Suggest reducedSuggest;
    final List<CompletionSuggestion> reducedCompletionSuggestions;
    if (groupedSuggestions.isEmpty()) {
        reducedSuggest = null;
        reducedCompletionSuggestions = Collections.emptyList();
    } else {
        reducedSuggest = new Suggest(Suggest.reduce(groupedSuggestions));
        reducedCompletionSuggestions = reducedSuggest.filter(CompletionSuggestion.class);
    }
    final InternalAggregations aggregations = reduceAggs(aggReduceContextBuilder, performFinalReduce, bufferedAggs);
    final SearchProfileShardResults shardResults = profileResults.isEmpty() ? null : new SearchProfileShardResults(profileResults);
    final SortedTopDocs sortedTopDocs = sortDocs(isScrollRequest, bufferedTopDocs, from, size, reducedCompletionSuggestions);
    final TotalHits totalHits = topDocsStats.getTotalHits();
    return new ReducedQueryPhase(totalHits, topDocsStats.fetchHits, topDocsStats.getMaxScore(), topDocsStats.timedOut,
            topDocsStats.terminatedEarly, reducedSuggest, aggregations, shardResults, sortedTopDocs, firstResult.sortValueFormats(),
            numReducePhases, size, from, false);
}
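Note that profile results are keyed by result.getSearchShardTarget().toString(), so each shard's profile survives the reduce under a distinct key. A hedged sketch of reading those results back from a final SearchResponse (the key format in the comment and the getter names reflect my reading of the profile API; treat them as assumptions):

// Hypothetical inspection of merged profile results; keys come from
// SearchShardTarget#toString(), e.g. something like "[nodeId][my-index][0]".
Map<String, ProfileShardResult> profiles = searchResponse.getProfileResults();
for (Map.Entry<String, ProfileShardResult> entry : profiles.entrySet()) {
    String shardKey = entry.getKey();
    ProfileShardResult shardProfile = entry.getValue();
    for (QueryProfileShardResult queryProfile : shardProfile.getQueryProfileResults()) {
        System.out.println(shardKey + " rewrite time (ns): " + queryProfile.getRewriteTime());
    }
}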
Use of org.opensearch.search.profile.SearchProfileShardResults in project OpenSearch by opensearch-project.
From the class SearchResponse, method innerFromXContent.
public static SearchResponse innerFromXContent(XContentParser parser) throws IOException {
    ensureExpectedToken(Token.FIELD_NAME, parser.currentToken(), parser);
    String currentFieldName = parser.currentName();
    SearchHits hits = null;
    Aggregations aggs = null;
    Suggest suggest = null;
    SearchProfileShardResults profile = null;
    boolean timedOut = false;
    Boolean terminatedEarly = null;
    int numReducePhases = 1;
    long tookInMillis = -1;
    int successfulShards = -1;
    int totalShards = -1;
    // 0 for BWC
    int skippedShards = 0;
    String scrollId = null;
    String searchContextId = null;
    List<ShardSearchFailure> failures = new ArrayList<>();
    Clusters clusters = Clusters.EMPTY;
    for (Token token = parser.nextToken(); token != Token.END_OBJECT; token = parser.nextToken()) {
        if (token == Token.FIELD_NAME) {
            currentFieldName = parser.currentName();
        } else if (token.isValue()) {
            if (SCROLL_ID.match(currentFieldName, parser.getDeprecationHandler())) {
                scrollId = parser.text();
            } else if (POINT_IN_TIME_ID.match(currentFieldName, parser.getDeprecationHandler())) {
                searchContextId = parser.text();
            } else if (TOOK.match(currentFieldName, parser.getDeprecationHandler())) {
                tookInMillis = parser.longValue();
            } else if (TIMED_OUT.match(currentFieldName, parser.getDeprecationHandler())) {
                timedOut = parser.booleanValue();
            } else if (TERMINATED_EARLY.match(currentFieldName, parser.getDeprecationHandler())) {
                terminatedEarly = parser.booleanValue();
            } else if (NUM_REDUCE_PHASES.match(currentFieldName, parser.getDeprecationHandler())) {
                numReducePhases = parser.intValue();
            } else {
                parser.skipChildren();
            }
        } else if (token == Token.START_OBJECT) {
            if (SearchHits.Fields.HITS.equals(currentFieldName)) {
                hits = SearchHits.fromXContent(parser);
            } else if (Aggregations.AGGREGATIONS_FIELD.equals(currentFieldName)) {
                aggs = Aggregations.fromXContent(parser);
            } else if (Suggest.NAME.equals(currentFieldName)) {
                suggest = Suggest.fromXContent(parser);
            } else if (SearchProfileShardResults.PROFILE_FIELD.equals(currentFieldName)) {
                profile = SearchProfileShardResults.fromXContent(parser);
            } else if (RestActions._SHARDS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                while ((token = parser.nextToken()) != Token.END_OBJECT) {
                    if (token == Token.FIELD_NAME) {
                        currentFieldName = parser.currentName();
                    } else if (token.isValue()) {
                        if (RestActions.FAILED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                            // we don't need it but need to consume it
                            parser.intValue();
                        } else if (RestActions.SUCCESSFUL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                            successfulShards = parser.intValue();
                        } else if (RestActions.TOTAL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                            totalShards = parser.intValue();
                        } else if (RestActions.SKIPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                            skippedShards = parser.intValue();
                        } else {
                            parser.skipChildren();
                        }
                    } else if (token == Token.START_ARRAY) {
                        if (RestActions.FAILURES_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                            while ((token = parser.nextToken()) != Token.END_ARRAY) {
                                failures.add(ShardSearchFailure.fromXContent(parser));
                            }
                        } else {
                            parser.skipChildren();
                        }
                    } else {
                        parser.skipChildren();
                    }
                }
            } else if (Clusters._CLUSTERS_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                int successful = -1;
                int total = -1;
                int skipped = -1;
                while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
                    if (token == XContentParser.Token.FIELD_NAME) {
                        currentFieldName = parser.currentName();
                    } else if (token.isValue()) {
                        if (Clusters.SUCCESSFUL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                            successful = parser.intValue();
                        } else if (Clusters.TOTAL_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                            total = parser.intValue();
                        } else if (Clusters.SKIPPED_FIELD.match(currentFieldName, parser.getDeprecationHandler())) {
                            skipped = parser.intValue();
                        } else {
                            parser.skipChildren();
                        }
                    } else {
                        parser.skipChildren();
                    }
                }
                clusters = new Clusters(total, successful, skipped);
            } else {
                parser.skipChildren();
            }
        }
    }
    SearchResponseSections searchResponseSections = new SearchResponseSections(hits, aggs, suggest, timedOut, terminatedEarly,
            profile, numReducePhases);
    return new SearchResponse(searchResponseSections, scrollId, totalShards, successfulShards, skippedShards, tookInMillis,
            failures.toArray(ShardSearchFailure.EMPTY_ARRAY), clusters, searchContextId);
}
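This parser is normally reached through SearchResponse.fromXContent, which checks the opening token before delegating here. A minimal sketch of parsing a raw JSON body (the JSON string is illustrative; a real setup would pass a NamedXContentRegistry containing the search module's entries so aggregations and suggestions can be parsed):

// Parse a bare-bones search response body; EMPTY registry is fine here
// because the sample JSON contains no aggregations or suggestions.
String json = "{\"took\":42,\"timed_out\":false,"
        + "\"_shards\":{\"total\":5,\"successful\":5,\"skipped\":0,\"failed\":0},"
        + "\"hits\":{\"total\":{\"value\":0,\"relation\":\"eq\"},\"hits\":[]}}";
try (XContentParser parser = JsonXContent.jsonXContent.createParser(
        NamedXContentRegistry.EMPTY, DeprecationHandler.THROW_UNSUPPORTED_OPERATION, json)) {
    SearchResponse parsed = SearchResponse.fromXContent(parser);
    System.out.println("took=" + parsed.getTook() + ", totalShards=" + parsed.getTotalShards());
}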