use of org.opensearch.search.SearchShardTarget in project OpenSearch by opensearch-project.
the class SearchPhaseControllerTests method testConsumerConcurrently.
public void testConsumerConcurrently() throws Exception {
    int expectedNumResults = randomIntBetween(1, 100);
    int bufferSize = randomIntBetween(2, 200);
    SearchRequest request = randomSearchRequest();
    request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo")));
    request.setBatchedReduceSize(bufferSize);
    ArraySearchPhaseResults<SearchPhaseResult> consumer = searchPhaseController.newSearchPhaseResults(
        fixedExecutor,
        new NoopCircuitBreaker(CircuitBreaker.REQUEST),
        SearchProgressListener.NOOP,
        request,
        expectedNumResults,
        exc -> {}
    );
    AtomicInteger max = new AtomicInteger();
    Thread[] threads = new Thread[expectedNumResults];
    CountDownLatch latch = new CountDownLatch(expectedNumResults);
    // One thread per simulated shard result, each feeding the consumer concurrently.
    for (int i = 0; i < expectedNumResults; i++) {
        int id = i;
        threads[i] = new Thread(() -> {
            int number = randomIntBetween(1, 1000);
            max.updateAndGet(prev -> Math.max(prev, number));
            QuerySearchResult result = new QuerySearchResult(
                new ShardSearchContextId("", id),
                new SearchShardTarget("node", new ShardId("a", "b", id), null, OriginalIndices.NONE),
                null
            );
            result.topDocs(
                new TopDocsAndMaxScore(
                    new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(0, number) }),
                    number
                ),
                new DocValueFormat[0]
            );
            InternalAggregations aggs = InternalAggregations.from(
                Collections.singletonList(new InternalMax("test", (double) number, DocValueFormat.RAW, Collections.emptyMap()))
            );
            result.aggregations(aggs);
            result.setShardIndex(id);
            result.size(1);
            consumer.consumeResult(result, latch::countDown);
        });
        threads[i].start();
    }
    for (int i = 0; i < expectedNumResults; i++) {
        threads[i].join();
    }
    // Wait until every result has actually been consumed before reducing.
    latch.await();
    SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce();
    assertAggReduction(request);
    // The reduced max aggregation and the top score must both equal the largest generated value.
    InternalMax internalMax = (InternalMax) reduce.aggregations.asList().get(0);
    assertEquals(max.get(), internalMax.getValue(), 0.0D);
    assertEquals(1, reduce.sortedTopDocs.scoreDocs.length);
    assertEquals(max.get(), reduce.maxScore, 0.0f);
    assertEquals(expectedNumResults, reduce.totalHits.value);
    assertEquals(max.get(), reduce.sortedTopDocs.scoreDocs[0].score, 0.0f);
    assertFalse(reduce.sortedTopDocs.isSortedByField);
    assertNull(reduce.sortedTopDocs.sortFields);
    assertNull(reduce.sortedTopDocs.collapseField);
    assertNull(reduce.sortedTopDocs.collapseValues);
}
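The concurrency scaffolding above (one thread per simulated shard, an AtomicInteger tracking the largest generated score, and a CountDownLatch released by the consumer's completion callback) can be illustrated in isolation. Below is a minimal, self-contained sketch of that pattern using only java.util.concurrent; the consume method is a hypothetical stand-in for consumer.consumeResult and is not part of the OpenSearch API.

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger;

public class ConcurrentConsumeSketch {

    // Hypothetical stand-in for consumer.consumeResult(result, latch::countDown):
    // accept a value, then invoke the completion callback.
    static void consume(int value, Runnable onDone) {
        onDone.run();
    }

    public static void main(String[] args) throws InterruptedException {
        int shards = 16;
        AtomicInteger max = new AtomicInteger();
        CountDownLatch latch = new CountDownLatch(shards);
        Thread[] threads = new Thread[shards];
        for (int i = 0; i < shards; i++) {
            threads[i] = new Thread(() -> {
                int number = ThreadLocalRandom.current().nextInt(1, 1001);
                max.updateAndGet(prev -> Math.max(prev, number)); // remember the global maximum
                consume(number, latch::countDown);                // hand the "shard result" to the consumer
            });
            threads[i].start();
        }
        for (Thread t : threads) {
            t.join();    // every producer thread has finished submitting
        }
        latch.await();   // every result has been consumed (the callback may run asynchronously)
        System.out.println("max across shards = " + max.get());
    }
}

Because the consumer in the test is backed by an executor, the completion callback may fire on a different thread than the one that submitted the result, which is why the test waits on both the thread joins and the latch.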
use of org.opensearch.search.SearchShardTarget in project OpenSearch by opensearch-project.
the class SearchPhaseControllerTests method generateQueryResults.
/**
* Generate random query results received from the provided number of shards, including the provided
* number of search hits and randomly generated completion suggestions based on the name and size of the provided ones.
 * Note that <code>shardIndex</code> is already set on the generated completion suggestions to simulate what
* {@link SearchPhaseController#reducedQueryPhase} does,
* meaning that the returned query results can be fed directly to {@link SearchPhaseController#sortDocs}
*/
private static AtomicArray<SearchPhaseResult> generateQueryResults(
    int nShards, List<CompletionSuggestion> suggestions, int searchHitsSize, boolean useConstantScore) {
    AtomicArray<SearchPhaseResult> queryResults = new AtomicArray<>(nShards);
    for (int shardIndex = 0; shardIndex < nShards; shardIndex++) {
        String clusterAlias = randomBoolean() ? null : "remote";
        SearchShardTarget searchShardTarget = new SearchShardTarget(
            "",
            new ShardId("", "", shardIndex),
            clusterAlias,
            OriginalIndices.NONE
        );
        QuerySearchResult querySearchResult = new QuerySearchResult(
            new ShardSearchContextId("", shardIndex),
            searchShardTarget,
            null
        );
        final TopDocs topDocs;
        float maxScore = 0;
        if (searchHitsSize == 0) {
            topDocs = Lucene.EMPTY_TOP_DOCS;
        } else {
            int nDocs = randomIntBetween(0, searchHitsSize);
            ScoreDoc[] scoreDocs = new ScoreDoc[nDocs];
            for (int i = 0; i < nDocs; i++) {
                float score = useConstantScore ? 1.0F : Math.abs(randomFloat());
                scoreDocs[i] = new ScoreDoc(i, score);
                maxScore = Math.max(score, maxScore);
            }
            topDocs = new TopDocs(new TotalHits(scoreDocs.length, TotalHits.Relation.EQUAL_TO), scoreDocs);
        }
        List<CompletionSuggestion> shardSuggestion = new ArrayList<>();
        for (CompletionSuggestion completionSuggestion : suggestions) {
            CompletionSuggestion suggestion = new CompletionSuggestion(
                completionSuggestion.getName(),
                completionSuggestion.getSize(),
                false
            );
            final CompletionSuggestion.Entry completionEntry = new CompletionSuggestion.Entry(new Text(""), 0, 5);
            suggestion.addTerm(completionEntry);
            int optionSize = randomIntBetween(1, suggestion.getSize());
            float maxScoreValue = randomIntBetween(suggestion.getSize(), (int) Float.MAX_VALUE);
            for (int i = 0; i < optionSize; i++) {
                completionEntry.addOption(
                    new CompletionSuggestion.Entry.Option(i, new Text(""), maxScoreValue, Collections.emptyMap())
                );
                float dec = randomIntBetween(0, optionSize);
                if (dec <= maxScoreValue) {
                    maxScoreValue -= dec;
                }
            }
            suggestion.setShardIndex(shardIndex);
            shardSuggestion.add(suggestion);
        }
        querySearchResult.topDocs(new TopDocsAndMaxScore(topDocs, maxScore), null);
        querySearchResult.size(searchHitsSize);
        querySearchResult.suggest(new Suggest(new ArrayList<>(shardSuggestion)));
        querySearchResult.setShardIndex(shardIndex);
        queryResults.set(shardIndex, querySearchResult);
    }
    return queryResults;
}
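For readers less familiar with the Lucene types involved, the per-shard top-docs structure that this helper fabricates can be shown with plain Lucene classes alone. The sketch below uses illustrative fixed scores rather than random ones and involves no OpenSearch types.

import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TotalHits;

public class ShardTopDocsSketch {
    public static void main(String[] args) {
        // Three hits for one shard, scores in no particular order; track the maximum
        // the same way generateQueryResults does.
        float[] scores = { 0.42f, 0.91f, 0.17f };
        ScoreDoc[] scoreDocs = new ScoreDoc[scores.length];
        float maxScore = 0;
        for (int doc = 0; doc < scores.length; doc++) {
            scoreDocs[doc] = new ScoreDoc(doc, scores[doc]);   // (local doc id, score)
            maxScore = Math.max(maxScore, scores[doc]);
        }
        // TotalHits carries the hit count plus whether it is exact or a lower bound.
        TopDocs topDocs = new TopDocs(new TotalHits(scoreDocs.length, TotalHits.Relation.EQUAL_TO), scoreDocs);
        System.out.println(topDocs.totalHits + ", maxScore=" + maxScore);
    }
}

generateQueryResults wraps exactly this structure in a TopDocsAndMaxScore, attaches the randomly generated completion suggestions, and stores the result in the AtomicArray keyed by shard index.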
use of org.opensearch.search.SearchShardTarget in project OpenSearch by opensearch-project.
the class SearchPhaseControllerTests method testConsumerOnlyAggs.
public void testConsumerOnlyAggs() throws Exception {
    int expectedNumResults = randomIntBetween(1, 100);
    int bufferSize = randomIntBetween(2, 200);
    SearchRequest request = randomSearchRequest();
    request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo")).size(0));
    request.setBatchedReduceSize(bufferSize);
    QueryPhaseResultConsumer consumer = searchPhaseController.newSearchPhaseResults(
        fixedExecutor,
        new NoopCircuitBreaker(CircuitBreaker.REQUEST),
        SearchProgressListener.NOOP,
        request,
        expectedNumResults,
        exc -> {}
    );
    AtomicInteger max = new AtomicInteger();
    CountDownLatch latch = new CountDownLatch(expectedNumResults);
    for (int i = 0; i < expectedNumResults; i++) {
        int number = randomIntBetween(1, 1000);
        max.updateAndGet(prev -> Math.max(prev, number));
        QuerySearchResult result = new QuerySearchResult(
            new ShardSearchContextId("", i),
            new SearchShardTarget("node", new ShardId("a", "b", i), null, OriginalIndices.NONE),
            null
        );
        result.topDocs(
            new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[0]), number),
            new DocValueFormat[0]
        );
        InternalAggregations aggs = InternalAggregations.from(
            Collections.singletonList(new InternalMax("test", (double) number, DocValueFormat.RAW, Collections.emptyMap()))
        );
        result.aggregations(aggs);
        result.setShardIndex(i);
        result.size(1);
        consumer.consumeResult(result, latch::countDown);
    }
    latch.await();
    SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce();
    assertAggReduction(request);
    InternalMax internalMax = (InternalMax) reduce.aggregations.asList().get(0);
    assertEquals(max.get(), internalMax.getValue(), 0.0D);
    assertEquals(0, reduce.sortedTopDocs.scoreDocs.length);
    assertEquals(max.get(), reduce.maxScore, 0.0f);
    assertEquals(expectedNumResults, reduce.totalHits.value);
    assertFalse(reduce.sortedTopDocs.isSortedByField);
    assertNull(reduce.sortedTopDocs.sortFields);
    assertNull(reduce.sortedTopDocs.collapseField);
    assertNull(reduce.sortedTopDocs.collapseValues);
}
use of org.opensearch.search.SearchShardTarget in project OpenSearch by opensearch-project.
the class SearchPhaseControllerTests method testProgressListener.
public void testProgressListener() throws Exception {
    int expectedNumResults = randomIntBetween(10, 100);
    // Repeat the scenario with several batched reduce sizes, including small ones that force partial reduces.
    for (int bufferSize : new int[] { expectedNumResults, expectedNumResults / 2, expectedNumResults / 4, 2 }) {
        SearchRequest request = randomSearchRequest();
        request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo")));
        request.setBatchedReduceSize(bufferSize);
        AtomicInteger numQueryResultListener = new AtomicInteger();
        AtomicInteger numQueryFailureListener = new AtomicInteger();
        AtomicInteger numReduceListener = new AtomicInteger();
        AtomicReference<InternalAggregations> finalAggsListener = new AtomicReference<>();
        AtomicReference<TotalHits> totalHitsListener = new AtomicReference<>();
        // Records every query result, failure, and reduce phase the consumer reports.
        SearchProgressListener progressListener = new SearchProgressListener() {
            @Override
            public void onQueryResult(int shardIndex) {
                assertThat(shardIndex, lessThan(expectedNumResults));
                numQueryResultListener.incrementAndGet();
            }

            @Override
            public void onQueryFailure(int shardIndex, SearchShardTarget shardTarget, Exception exc) {
                assertThat(shardIndex, lessThan(expectedNumResults));
                numQueryFailureListener.incrementAndGet();
            }

            @Override
            public void onPartialReduce(List<SearchShard> shards, TotalHits totalHits, InternalAggregations aggs, int reducePhase) {
                assertEquals(numReduceListener.incrementAndGet(), reducePhase);
            }

            @Override
            public void onFinalReduce(List<SearchShard> shards, TotalHits totalHits, InternalAggregations aggs, int reducePhase) {
                totalHitsListener.set(totalHits);
                finalAggsListener.set(aggs);
                assertEquals(numReduceListener.incrementAndGet(), reducePhase);
            }
        };
        QueryPhaseResultConsumer consumer = searchPhaseController.newSearchPhaseResults(
            fixedExecutor,
            new NoopCircuitBreaker(CircuitBreaker.REQUEST),
            progressListener,
            request,
            expectedNumResults,
            exc -> {}
        );
        AtomicInteger max = new AtomicInteger();
        Thread[] threads = new Thread[expectedNumResults];
        CountDownLatch latch = new CountDownLatch(expectedNumResults);
        for (int i = 0; i < expectedNumResults; i++) {
            int id = i;
            threads[i] = new Thread(() -> {
                int number = randomIntBetween(1, 1000);
                max.updateAndGet(prev -> Math.max(prev, number));
                QuerySearchResult result = new QuerySearchResult(
                    new ShardSearchContextId("", id),
                    new SearchShardTarget("node", new ShardId("a", "b", id), null, OriginalIndices.NONE),
                    null
                );
                result.topDocs(
                    new TopDocsAndMaxScore(
                        new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(0, number) }),
                        number
                    ),
                    new DocValueFormat[0]
                );
                InternalAggregations aggs = InternalAggregations.from(
                    Collections.singletonList(new InternalMax("test", (double) number, DocValueFormat.RAW, Collections.emptyMap()))
                );
                result.aggregations(aggs);
                result.setShardIndex(id);
                result.size(1);
                consumer.consumeResult(result, latch::countDown);
            });
            threads[i].start();
        }
        for (int i = 0; i < expectedNumResults; i++) {
            threads[i].join();
        }
        latch.await();
        SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce();
        assertAggReduction(request);
        InternalMax internalMax = (InternalMax) reduce.aggregations.asList().get(0);
        assertEquals(max.get(), internalMax.getValue(), 0.0D);
        assertEquals(1, reduce.sortedTopDocs.scoreDocs.length);
        assertEquals(max.get(), reduce.maxScore, 0.0f);
        assertEquals(expectedNumResults, reduce.totalHits.value);
        assertEquals(max.get(), reduce.sortedTopDocs.scoreDocs[0].score, 0.0f);
        assertFalse(reduce.sortedTopDocs.isSortedByField);
        assertNull(reduce.sortedTopDocs.sortFields);
        assertNull(reduce.sortedTopDocs.collapseField);
        assertNull(reduce.sortedTopDocs.collapseValues);
        // The listener must have seen the final aggregations, the total hits, every shard result, and every reduce phase.
        assertEquals(reduce.aggregations, finalAggsListener.get());
        assertEquals(reduce.totalHits, totalHitsListener.get());
        assertEquals(expectedNumResults, numQueryResultListener.get());
        assertEquals(0, numQueryFailureListener.get());
        assertEquals(numReduceListener.get(), reduce.numReducePhases);
    }
}
use of org.opensearch.search.SearchShardTarget in project OpenSearch by opensearch-project.
the class CanMatchPreFilterSearchPhaseTests method testLotsOfShards.
/*
 * In cases where a query coordinating node held all the shards for a query, the can-match phase would recurse and end in a
 * stack overflow when subjected to max concurrent search requests. This test covers that situation.
*/
public void testLotsOfShards() throws InterruptedException {
    final TransportSearchAction.SearchTimeProvider timeProvider =
        new TransportSearchAction.SearchTimeProvider(0, System.nanoTime(), System::nanoTime);
    final Map<String, Transport.Connection> lookup = new ConcurrentHashMap<>();
    final DiscoveryNode primaryNode = new DiscoveryNode("node_1", buildNewFakeTransportAddress(), Version.CURRENT);
    final DiscoveryNode replicaNode = new DiscoveryNode("node_2", buildNewFakeTransportAddress(), Version.CURRENT);
    lookup.put("node1", new SearchAsyncActionTests.MockConnection(primaryNode));
    lookup.put("node2", new SearchAsyncActionTests.MockConnection(replicaNode));
    // A transport service whose can-match requests complete immediately with a random answer.
    final SearchTransportService searchTransportService = new SearchTransportService(null, null) {
        @Override
        public void sendCanMatch(Transport.Connection connection, ShardSearchRequest request, SearchTask task,
                                 ActionListener<SearchService.CanMatchResponse> listener) {
            listener.onResponse(new SearchService.CanMatchResponse(randomBoolean(), null));
        }
    };
    final CountDownLatch latch = new CountDownLatch(1);
    final OriginalIndices originalIndices = new OriginalIndices(new String[] { "idx" }, SearchRequest.DEFAULT_INDICES_OPTIONS);
    // 4096 shards, all hosted on the two nodes above.
    final GroupShardsIterator<SearchShardIterator> shardsIter = SearchAsyncActionTests.getShardsIter(
        "idx", originalIndices, 4096, randomBoolean(), primaryNode, replicaNode);
    final ExecutorService executor = Executors.newFixedThreadPool(randomIntBetween(1, Runtime.getRuntime().availableProcessors()));
    final SearchRequest searchRequest = new SearchRequest();
    searchRequest.allowPartialSearchResults(true);
    SearchTransportService transportService = new SearchTransportService(null, null);
    ActionListener<SearchResponse> responseListener = ActionListener.wrap(response -> {}, (e) -> {
        throw new AssertionError("unexpected", e);
    });
    Map<String, AliasFilter> aliasFilters = Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY));
    final CanMatchPreFilterSearchPhase canMatchPhase = new CanMatchPreFilterSearchPhase(
        logger, searchTransportService, (clusterAlias, node) -> lookup.get(node),
        Collections.singletonMap("_na_", new AliasFilter(null, Strings.EMPTY_ARRAY)),
        Collections.emptyMap(), Collections.emptyMap(), OpenSearchExecutors.newDirectExecutorService(),
        searchRequest, null, shardsIter, timeProvider, ClusterState.EMPTY_STATE, null,
        // The filtered iterator is handed to a stubbed async action that answers each shard
        // with a random success or failure.
        (iter) -> new AbstractSearchAsyncAction<SearchPhaseResult>("test", logger, transportService, (cluster, node) -> {
            assert cluster == null : "cluster was not null: " + cluster;
            return lookup.get(node);
        }, aliasFilters, Collections.emptyMap(), Collections.emptyMap(), executor, searchRequest, responseListener, iter,
            new TransportSearchAction.SearchTimeProvider(0, 0, () -> 0), ClusterState.EMPTY_STATE, null,
            new ArraySearchPhaseResults<>(iter.size()), randomIntBetween(1, 32), SearchResponse.Clusters.EMPTY) {

            @Override
            protected SearchPhase getNextPhase(SearchPhaseResults<SearchPhaseResult> results, SearchPhaseContext context) {
                return new SearchPhase("test") {
                    @Override
                    public void run() {
                        latch.countDown();
                    }
                };
            }

            @Override
            protected void executePhaseOnShard(final SearchShardIterator shardIt, final SearchShardTarget shard,
                                               final SearchActionListener<SearchPhaseResult> listener) {
                if (randomBoolean()) {
                    listener.onResponse(new SearchPhaseResult() {
                    });
                } else {
                    listener.onFailure(new Exception("failure"));
                }
            }
        },
        SearchResponse.Clusters.EMPTY
    );
    canMatchPhase.start();
    latch.await();
    executor.shutdown();
}
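The stack-overflow scenario described in the comment above is not specific to OpenSearch: whenever a completion callback synchronously starts the next unit of work, stack depth grows with the number of pending items. The sketch below is a generic illustration of that failure mode and of the usual fix (draining a queue iteratively); it is not the OpenSearch implementation.

import java.util.ArrayDeque;
import java.util.Deque;

public class RecursiveDispatchSketch {

    // Risky pattern: each completed item synchronously dispatches the next one,
    // so stack depth grows linearly with the number of remaining items.
    static void dispatchRecursively(Deque<Runnable> work) {
        Runnable next = work.poll();
        if (next != null) {
            next.run();
            dispatchRecursively(work);
        }
    }

    // Safer pattern: drain the queue in a loop so stack depth stays constant.
    static void dispatchIteratively(Deque<Runnable> work) {
        Runnable next;
        while ((next = work.poll()) != null) {
            next.run();
        }
    }

    public static void main(String[] args) {
        Deque<Runnable> work = new ArrayDeque<>();
        for (int shard = 0; shard < 4096; shard++) {
            work.add(() -> { /* pretend to send one per-shard request */ });
        }
        dispatchIteratively(work);
        System.out.println("dispatched all shards with constant stack depth");
    }
}

The test above exercises that situation directly: 4096 shards are processed on a direct executor, so any unbounded recursion in the phase would tend to surface as a stack overflow.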