
Example 26 with QuerySearchResult

Use of org.opensearch.search.query.QuerySearchResult in project OpenSearch by opensearch-project.

From the class IndicesService, method loadIntoContext:

/**
 * Loads the cache result, computing it if needed by executing the query phase, and otherwise deserializing the cached
 * value into the {@link SearchContext#queryResult() context's query result}. Combining load and compute into a single
 * load operation means that concurrent requests with the same key wait until the value is loaded and then reuse the
 * same cache entry.
 */
public void loadIntoContext(ShardSearchRequest request, SearchContext context, QueryPhase queryPhase) throws Exception {
    assert canCache(request, context);
    final DirectoryReader directoryReader = context.searcher().getDirectoryReader();
    // single-element array so the loader lambda below can record whether it actually ran
    boolean[] loadedFromCache = new boolean[] { true };
    BytesReference bytesReference = cacheShardLevelResult(context.indexShard(), directoryReader, request.cacheKey(), out -> {
        queryPhase.execute(context);
        context.queryResult().writeToNoId(out);
        loadedFromCache[0] = false;
    });
    if (loadedFromCache[0]) {
        // restore the cached query result into the context
        final QuerySearchResult result = context.queryResult();
        StreamInput in = new NamedWriteableAwareStreamInput(bytesReference.streamInput(), namedWriteableRegistry);
        result.readFromWithId(context.id(), in);
        result.setSearchShardTarget(context.shardTarget());
    } else if (context.queryResult().searchTimedOut()) {
        // We have to invalidate the cache entry if we cached a query result from a request that timed out.
        // We can't throw an exception from the loading code to signal a timed-out search to the outside world, because
        // if multiple requests are waiting for the cache entry to be computed they would all fail with the same exception.
        // Instead we allow caching such a result for the time being, return the timed-out result to all other searches
        // with that cache key, and invalidate the entry in the thread that caused the timeout. This ends up being simpler
        // and eventually correct, since a search that is re-run while this stale result is still cached will likely time
        // out again. Another option would be to not cache requests that time out at all...
        indicesRequestCache.invalidate(new IndexShardCacheEntity(context.indexShard()), directoryReader, request.cacheKey());
        if (logger.isTraceEnabled()) {
            logger.trace("Query timed out, invalidating cache entry for request on shard [{}]:\n {}", request.shardId(), request.source());
        }
    }
}
Also used : BytesReference(org.opensearch.common.bytes.BytesReference) DirectoryReader(org.apache.lucene.index.DirectoryReader) QuerySearchResult(org.opensearch.search.query.QuerySearchResult) NamedWriteableAwareStreamInput(org.opensearch.common.io.stream.NamedWriteableAwareStreamInput) StreamInput(org.opensearch.common.io.stream.StreamInput)
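
As an aside, the load-or-compute contract described in the Javadoc above can be sketched with nothing but the JDK: ConcurrentHashMap.computeIfAbsent runs the loader at most once per key while concurrent callers for the same key block and then reuse the computed value, and a one-element boolean array plays the same role as loadedFromCache in loadIntoContext. The class and method names below are illustrative only and are not OpenSearch API.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Supplier;

// Minimal sketch of the single-flight "load or compute" pattern; hypothetical names.
final class SimpleQueryCache<K, V> {

    private final Map<K, V> cache = new ConcurrentHashMap<>();

    // Returns the cached value for key, computing it at most once. Concurrent callers
    // with the same key wait for the computation and then reuse the same entry.
    V loadOrCompute(K key, Supplier<V> loader, boolean[] loadedFromCache) {
        loadedFromCache[0] = true;
        return cache.computeIfAbsent(key, k -> {
            // only the thread that actually runs the loader flips the flag
            loadedFromCache[0] = false;
            return loader.get();
        });
    }

    // Drop an entry, e.g. one that was cached from a query that timed out.
    void invalidate(K key) {
        cache.remove(key);
    }
}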

Example 27 with QuerySearchResult

Use of org.opensearch.search.query.QuerySearchResult in project OpenSearch by opensearch-project.

From the class SearchPhaseControllerTests, method testConsumerSortByField:

public void testConsumerSortByField() throws Exception {
    int expectedNumResults = randomIntBetween(1, 100);
    int bufferSize = randomIntBetween(2, 200);
    SearchRequest request = randomSearchRequest();
    int size = randomIntBetween(1, 10);
    request.setBatchedReduceSize(bufferSize);
    QueryPhaseResultConsumer consumer = searchPhaseController.newSearchPhaseResults(fixedExecutor, new NoopCircuitBreaker(CircuitBreaker.REQUEST), SearchProgressListener.NOOP, request, expectedNumResults, exc -> {
    });
    AtomicInteger max = new AtomicInteger();
    SortField[] sortFields = { new SortField("field", SortField.Type.INT, true) }; // reverse = true, so the sort is descending
    DocValueFormat[] docValueFormats = { DocValueFormat.RAW };
    CountDownLatch latch = new CountDownLatch(expectedNumResults);
    for (int i = 0; i < expectedNumResults; i++) {
        int number = randomIntBetween(1, 1000);
        max.updateAndGet(prev -> Math.max(prev, number));
        FieldDoc[] fieldDocs = { new FieldDoc(0, Float.NaN, new Object[] { number }) };
        TopDocs topDocs = new TopFieldDocs(new TotalHits(1, Relation.EQUAL_TO), fieldDocs, sortFields);
        QuerySearchResult result = new QuerySearchResult(new ShardSearchContextId("", i), new SearchShardTarget("node", new ShardId("a", "b", i), null, OriginalIndices.NONE), null);
        result.topDocs(new TopDocsAndMaxScore(topDocs, Float.NaN), docValueFormats);
        result.setShardIndex(i);
        result.size(size);
        consumer.consumeResult(result, latch::countDown);
    }
    latch.await();
    SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce();
    assertAggReduction(request);
    assertEquals(Math.min(expectedNumResults, size), reduce.sortedTopDocs.scoreDocs.length);
    assertEquals(expectedNumResults, reduce.totalHits.value);
    assertEquals(max.get(), ((FieldDoc) reduce.sortedTopDocs.scoreDocs[0]).fields[0]);
    assertTrue(reduce.sortedTopDocs.isSortedByField);
    assertEquals(1, reduce.sortedTopDocs.sortFields.length);
    assertEquals("field", reduce.sortedTopDocs.sortFields[0].getField());
    assertEquals(SortField.Type.INT, reduce.sortedTopDocs.sortFields[0].getType());
    assertNull(reduce.sortedTopDocs.collapseField);
    assertNull(reduce.sortedTopDocs.collapseValues);
}
Also used : TotalHits(org.apache.lucene.search.TotalHits) FieldDoc(org.apache.lucene.search.FieldDoc) DocValueFormat(org.opensearch.search.DocValueFormat) CollapseTopFieldDocs(org.apache.lucene.search.grouping.CollapseTopFieldDocs) TopFieldDocs(org.apache.lucene.search.TopFieldDocs) SortField(org.apache.lucene.search.SortField) CountDownLatch(java.util.concurrent.CountDownLatch) TopDocsAndMaxScore(org.opensearch.common.lucene.search.TopDocsAndMaxScore) TopDocs(org.apache.lucene.search.TopDocs) ShardId(org.opensearch.index.shard.ShardId) ShardSearchContextId(org.opensearch.search.internal.ShardSearchContextId) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) QuerySearchResult(org.opensearch.search.query.QuerySearchResult) SearchShardTarget(org.opensearch.search.SearchShardTarget) NoopCircuitBreaker(org.opensearch.common.breaker.NoopCircuitBreaker)
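
To make the sort-by-field reduction above more concrete, here is a standalone Lucene-only sketch (an illustration of the idea, not the QueryPhaseResultConsumer implementation) that merges two per-shard TopFieldDocs with TopDocs.merge. With the same descending integer sort used in the test, the largest field value ends up first in the merged scoreDocs, which is what the assertion against reduce.sortedTopDocs checks.

import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldDocs;
import org.apache.lucene.search.TotalHits;

// Sketch: merge field-sorted shard results with plain Lucene.
public final class FieldSortMergeSketch {

    public static void main(String[] args) {
        // reverse = true gives a descending sort on the int field, as in the test
        SortField sortField = new SortField("field", SortField.Type.INT, true);
        Sort sort = new Sort(sortField);

        // one TopFieldDocs per "shard"; fields[0] carries the sort value of the hit
        TopFieldDocs shard0 = new TopFieldDocs(
            new TotalHits(1, TotalHits.Relation.EQUAL_TO),
            new FieldDoc[] { new FieldDoc(0, Float.NaN, new Object[] { 42 }) },
            new SortField[] { sortField });
        TopFieldDocs shard1 = new TopFieldDocs(
            new TotalHits(1, TotalHits.Relation.EQUAL_TO),
            new FieldDoc[] { new FieldDoc(0, Float.NaN, new Object[] { 7 }) },
            new SortField[] { sortField });

        // merge keeps the top N across shards according to the sort
        TopFieldDocs merged = TopDocs.merge(sort, 10, new TopFieldDocs[] { shard0, shard1 });
        FieldDoc best = (FieldDoc) merged.scoreDocs[0];
        System.out.println("top sort value = " + best.fields[0]); // prints 42
    }
}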

Example 28 with QuerySearchResult

Use of org.opensearch.search.query.QuerySearchResult in project OpenSearch by opensearch-project.

From the class SearchPhaseControllerTests, method testConsumerSuggestions:

public void testConsumerSuggestions() throws Exception {
    int expectedNumResults = randomIntBetween(1, 100);
    int bufferSize = randomIntBetween(2, 200);
    SearchRequest request = randomSearchRequest();
    request.setBatchedReduceSize(bufferSize);
    QueryPhaseResultConsumer consumer = searchPhaseController.newSearchPhaseResults(fixedExecutor, new NoopCircuitBreaker(CircuitBreaker.REQUEST), SearchProgressListener.NOOP, request, expectedNumResults, exc -> {
    });
    int maxScoreTerm = -1;
    int maxScorePhrase = -1;
    int maxScoreCompletion = -1;
    CountDownLatch latch = new CountDownLatch(expectedNumResults);
    for (int i = 0; i < expectedNumResults; i++) {
        QuerySearchResult result = new QuerySearchResult(new ShardSearchContextId("", i), new SearchShardTarget("node", new ShardId("a", "b", i), null, OriginalIndices.NONE), null);
        List<Suggest.Suggestion<? extends Suggest.Suggestion.Entry<? extends Suggest.Suggestion.Entry.Option>>> suggestions = new ArrayList<>();
        {
            TermSuggestion termSuggestion = new TermSuggestion("term", 1, SortBy.SCORE);
            TermSuggestion.Entry entry = new TermSuggestion.Entry(new Text("entry"), 0, 10);
            int numOptions = randomIntBetween(1, 10);
            for (int j = 0; j < numOptions; j++) {
                int score = numOptions - j;
                maxScoreTerm = Math.max(maxScoreTerm, score);
                entry.addOption(new TermSuggestion.Entry.Option(new Text("option"), randomInt(), score));
            }
            termSuggestion.addTerm(entry);
            suggestions.add(termSuggestion);
        }
        {
            PhraseSuggestion phraseSuggestion = new PhraseSuggestion("phrase", 1);
            PhraseSuggestion.Entry entry = new PhraseSuggestion.Entry(new Text("entry"), 0, 10);
            int numOptions = randomIntBetween(1, 10);
            for (int j = 0; j < numOptions; j++) {
                int score = numOptions - j;
                maxScorePhrase = Math.max(maxScorePhrase, score);
                entry.addOption(new PhraseSuggestion.Entry.Option(new Text("option"), new Text("option"), score));
            }
            phraseSuggestion.addTerm(entry);
            suggestions.add(phraseSuggestion);
        }
        {
            CompletionSuggestion completionSuggestion = new CompletionSuggestion("completion", 1, false);
            CompletionSuggestion.Entry entry = new CompletionSuggestion.Entry(new Text("entry"), 0, 10);
            int numOptions = randomIntBetween(1, 10);
            for (int j = 0; j < numOptions; j++) {
                int score = numOptions - j;
                maxScoreCompletion = Math.max(maxScoreCompletion, score);
                CompletionSuggestion.Entry.Option option = new CompletionSuggestion.Entry.Option(j, new Text("option"), score, Collections.emptyMap());
                entry.addOption(option);
            }
            completionSuggestion.addTerm(entry);
            suggestions.add(completionSuggestion);
        }
        result.suggest(new Suggest(suggestions));
        result.topDocs(new TopDocsAndMaxScore(Lucene.EMPTY_TOP_DOCS, Float.NaN), new DocValueFormat[0]);
        result.setShardIndex(i);
        result.size(0);
        consumer.consumeResult(result, latch::countDown);
    }
    latch.await();
    SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce();
    assertEquals(3, reduce.suggest.size());
    {
        TermSuggestion term = reduce.suggest.getSuggestion("term");
        assertEquals(1, term.getEntries().size());
        assertEquals(1, term.getEntries().get(0).getOptions().size());
        assertEquals(maxScoreTerm, term.getEntries().get(0).getOptions().get(0).getScore(), 0f);
    }
    {
        PhraseSuggestion phrase = reduce.suggest.getSuggestion("phrase");
        assertEquals(1, phrase.getEntries().size());
        assertEquals(1, phrase.getEntries().get(0).getOptions().size());
        assertEquals(maxScorePhrase, phrase.getEntries().get(0).getOptions().get(0).getScore(), 0f);
    }
    {
        CompletionSuggestion completion = reduce.suggest.getSuggestion("completion");
        assertEquals(1, completion.getSize());
        assertEquals(1, completion.getOptions().size());
        CompletionSuggestion.Entry.Option option = completion.getOptions().get(0);
        assertEquals(maxScoreCompletion, option.getScore(), 0f);
    }
    assertAggReduction(request);
    assertEquals(1, reduce.sortedTopDocs.scoreDocs.length);
    assertEquals(maxScoreCompletion, reduce.sortedTopDocs.scoreDocs[0].score, 0f);
    assertEquals(0, reduce.sortedTopDocs.scoreDocs[0].doc);
    assertNotEquals(-1, reduce.sortedTopDocs.scoreDocs[0].shardIndex);
    assertEquals(0, reduce.totalHits.value);
    assertFalse(reduce.sortedTopDocs.isSortedByField);
    assertNull(reduce.sortedTopDocs.sortFields);
    assertNull(reduce.sortedTopDocs.collapseField);
    assertNull(reduce.sortedTopDocs.collapseValues);
}
Also used : CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) ArrayList(java.util.ArrayList) Suggest(org.opensearch.search.suggest.Suggest) TopDocsAndMaxScore(org.opensearch.common.lucene.search.TopDocsAndMaxScore) ShardId(org.opensearch.index.shard.ShardId) CompletionSuggestion(org.opensearch.search.suggest.completion.CompletionSuggestion) PhraseSuggestion(org.opensearch.search.suggest.phrase.PhraseSuggestion) TermSuggestion(org.opensearch.search.suggest.term.TermSuggestion) NoopCircuitBreaker(org.opensearch.common.breaker.NoopCircuitBreaker) Text(org.opensearch.common.text.Text) CountDownLatch(java.util.concurrent.CountDownLatch) ShardSearchContextId(org.opensearch.search.internal.ShardSearchContextId) QuerySearchResult(org.opensearch.search.query.QuerySearchResult) SearchShardTarget(org.opensearch.search.SearchShardTarget)

Example 29 with QuerySearchResult

Use of org.opensearch.search.query.QuerySearchResult in project OpenSearch by opensearch-project.

From the class SearchPhaseControllerTests, method testReduceCase:

private void testReduceCase(boolean shouldFail) throws Exception {
    int expectedNumResults = randomIntBetween(20, 200);
    int bufferSize = randomIntBetween(2, expectedNumResults - 1);
    SearchRequest request = new SearchRequest();
    request.source(new SearchSourceBuilder().aggregation(AggregationBuilders.avg("foo")).size(0));
    request.setBatchedReduceSize(bufferSize);
    AtomicBoolean hasConsumedFailure = new AtomicBoolean();
    AssertingCircuitBreaker circuitBreaker = new AssertingCircuitBreaker(CircuitBreaker.REQUEST);
    // when true, arm the breaker before consuming so a partial (buffered) reduce trips it rather than the final reduce
    boolean shouldFailPartial = shouldFail && randomBoolean();
    if (shouldFailPartial) {
        circuitBreaker.shouldBreak.set(true);
    }
    QueryPhaseResultConsumer consumer = searchPhaseController.newSearchPhaseResults(fixedExecutor, circuitBreaker, SearchProgressListener.NOOP, request, expectedNumResults, exc -> hasConsumedFailure.set(true));
    CountDownLatch latch = new CountDownLatch(expectedNumResults);
    Thread[] threads = new Thread[expectedNumResults];
    for (int i = 0; i < expectedNumResults; i++) {
        final int index = i;
        threads[index] = new Thread(() -> {
            QuerySearchResult result = new QuerySearchResult(new ShardSearchContextId(UUIDs.randomBase64UUID(), index), new SearchShardTarget("node", new ShardId("a", "b", index), null, OriginalIndices.NONE), null);
            result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(0, TotalHits.Relation.EQUAL_TO), Lucene.EMPTY_SCORE_DOCS), Float.NaN), new DocValueFormat[0]);
            InternalAggregations aggs = InternalAggregations.from(Collections.singletonList(new InternalMax("test", 0d, DocValueFormat.RAW, Collections.emptyMap())));
            result.aggregations(aggs);
            result.setShardIndex(index);
            result.size(1);
            consumer.consumeResult(result, latch::countDown);
        });
        threads[index].start();
    }
    for (int i = 0; i < expectedNumResults; i++) {
        threads[i].join();
    }
    latch.await();
    if (shouldFail) {
        if (shouldFailPartial == false) {
            circuitBreaker.shouldBreak.set(true);
        }
        CircuitBreakingException exc = expectThrows(CircuitBreakingException.class, () -> consumer.reduce());
        assertEquals(shouldFailPartial, hasConsumedFailure.get());
        assertThat(exc.getMessage(), containsString("<reduce_aggs>"));
        circuitBreaker.shouldBreak.set(false);
    } else {
        SearchPhaseController.ReducedQueryPhase phase = consumer.reduce();
    }
    consumer.close();
    assertThat(circuitBreaker.allocated, equalTo(0L));
}
Also used : TotalHits(org.apache.lucene.search.TotalHits) InternalMax(org.opensearch.search.aggregations.metrics.InternalMax) DocValueFormat(org.opensearch.search.DocValueFormat) CountDownLatch(java.util.concurrent.CountDownLatch) SearchSourceBuilder(org.opensearch.search.builder.SearchSourceBuilder) TopDocsAndMaxScore(org.opensearch.common.lucene.search.TopDocsAndMaxScore) ShardId(org.opensearch.index.shard.ShardId) TopDocs(org.apache.lucene.search.TopDocs) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) ShardSearchContextId(org.opensearch.search.internal.ShardSearchContextId) InternalAggregations(org.opensearch.search.aggregations.InternalAggregations) QuerySearchResult(org.opensearch.search.query.QuerySearchResult) CircuitBreakingException(org.opensearch.common.breaker.CircuitBreakingException) SearchShardTarget(org.opensearch.search.SearchShardTarget)
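
AssertingCircuitBreaker in the test above is a helper defined elsewhere in the same test class; only its shouldBreak flag and allocated counter are visible here. The following is a hypothetical, trimmed-down sketch of what such a test-only breaker could look like; it does not implement the real org.opensearch.common.breaker.CircuitBreaker interface, and the method names are only meant to suggest the shape.

import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;

// Hypothetical test-only breaker: can be armed to fail the next reservation,
// and tracks the currently reserved bytes so a test can assert they return to 0.
final class TogglableBreaker {

    final AtomicBoolean shouldBreak = new AtomicBoolean(false);
    final AtomicLong allocated = new AtomicLong();

    void reserve(long bytes, String label) {
        if (shouldBreak.get()) {
            // in the real breaker this would be a CircuitBreakingException carrying the label
            throw new IllegalStateException("[" + label + "] would trip the breaker");
        }
        allocated.addAndGet(bytes);
    }

    void release(long bytes) {
        allocated.addAndGet(-bytes);
    }
}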

Example 30 with QuerySearchResult

Use of org.opensearch.search.query.QuerySearchResult in project OpenSearch by opensearch-project.

From the class SearchPhaseControllerTests, method testConsumerOnlyHits:

public void testConsumerOnlyHits() throws Exception {
    int expectedNumResults = randomIntBetween(1, 100);
    int bufferSize = randomIntBetween(2, 200);
    SearchRequest request = randomSearchRequest();
    if (randomBoolean()) {
        request.source(new SearchSourceBuilder().size(randomIntBetween(1, 10)));
    }
    request.setBatchedReduceSize(bufferSize);
    QueryPhaseResultConsumer consumer = searchPhaseController.newSearchPhaseResults(fixedExecutor, new NoopCircuitBreaker(CircuitBreaker.REQUEST), SearchProgressListener.NOOP, request, expectedNumResults, exc -> {
    });
    AtomicInteger max = new AtomicInteger();
    CountDownLatch latch = new CountDownLatch(expectedNumResults);
    for (int i = 0; i < expectedNumResults; i++) {
        int number = randomIntBetween(1, 1000);
        max.updateAndGet(prev -> Math.max(prev, number));
        QuerySearchResult result = new QuerySearchResult(new ShardSearchContextId("", i), new SearchShardTarget("node", new ShardId("a", "b", i), null, OriginalIndices.NONE), null);
        result.topDocs(new TopDocsAndMaxScore(new TopDocs(new TotalHits(1, TotalHits.Relation.EQUAL_TO), new ScoreDoc[] { new ScoreDoc(0, number) }), number), new DocValueFormat[0]);
        result.setShardIndex(i);
        result.size(1);
        consumer.consumeResult(result, latch::countDown);
    }
    latch.await();
    SearchPhaseController.ReducedQueryPhase reduce = consumer.reduce();
    assertAggReduction(request);
    assertEquals(1, reduce.sortedTopDocs.scoreDocs.length);
    assertEquals(max.get(), reduce.maxScore, 0.0f);
    assertEquals(expectedNumResults, reduce.totalHits.value);
    assertEquals(max.get(), reduce.sortedTopDocs.scoreDocs[0].score, 0.0f);
    assertFalse(reduce.sortedTopDocs.isSortedByField);
    assertNull(reduce.sortedTopDocs.sortFields);
    assertNull(reduce.sortedTopDocs.collapseField);
    assertNull(reduce.sortedTopDocs.collapseValues);
}
Also used : TotalHits(org.apache.lucene.search.TotalHits) CountDownLatch(java.util.concurrent.CountDownLatch) SearchSourceBuilder(org.opensearch.search.builder.SearchSourceBuilder) TopDocsAndMaxScore(org.opensearch.common.lucene.search.TopDocsAndMaxScore) ScoreDoc(org.apache.lucene.search.ScoreDoc) ShardId(org.opensearch.index.shard.ShardId) TopDocs(org.apache.lucene.search.TopDocs) ShardSearchContextId(org.opensearch.search.internal.ShardSearchContextId) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) QuerySearchResult(org.opensearch.search.query.QuerySearchResult) SearchShardTarget(org.opensearch.search.SearchShardTarget) NoopCircuitBreaker(org.opensearch.common.breaker.NoopCircuitBreaker)

Aggregations

QuerySearchResult (org.opensearch.search.query.QuerySearchResult) 37
SearchShardTarget (org.opensearch.search.SearchShardTarget) 26
TopDocs (org.apache.lucene.search.TopDocs) 25
TopDocsAndMaxScore (org.opensearch.common.lucene.search.TopDocsAndMaxScore) 25
TotalHits (org.apache.lucene.search.TotalHits) 23
ShardId (org.opensearch.index.shard.ShardId) 23
NoopCircuitBreaker (org.opensearch.common.breaker.NoopCircuitBreaker) 21
ShardSearchContextId (org.opensearch.search.internal.ShardSearchContextId) 20
ScoreDoc (org.apache.lucene.search.ScoreDoc) 18
CountDownLatch (java.util.concurrent.CountDownLatch) 14
ArrayList (java.util.ArrayList) 11
SearchPhaseResult (org.opensearch.search.SearchPhaseResult) 10
SearchSourceBuilder (org.opensearch.search.builder.SearchSourceBuilder) 10
InternalAggregations (org.opensearch.search.aggregations.InternalAggregations) 9
FetchSearchResult (org.opensearch.search.fetch.FetchSearchResult) 9
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 8
QueryFetchSearchResult (org.opensearch.search.fetch.QueryFetchSearchResult) 8
SearchHit (org.opensearch.search.SearchHit) 7
SearchHits (org.opensearch.search.SearchHits) 7
CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList) 6