
Example 21 with FieldDoc

Use of org.apache.lucene.search.FieldDoc in project lucene-solr by apache.

Class TestGrouping, method assertEquals.

private void assertEquals(int[] docIDtoID, TopGroups<BytesRef> expected, TopGroups<BytesRef> actual, boolean verifyGroupValues, boolean verifyTotalGroupCount, boolean verifySortValues, boolean testScores, boolean idvBasedImplsUsed) {
    if (expected == null) {
        assertNull(actual);
        return;
    }
    assertNotNull(actual);
    assertEquals("expected.groups.length != actual.groups.length", expected.groups.length, actual.groups.length);
    assertEquals("expected.totalHitCount != actual.totalHitCount", expected.totalHitCount, actual.totalHitCount);
    assertEquals("expected.totalGroupedHitCount != actual.totalGroupedHitCount", expected.totalGroupedHitCount, actual.totalGroupedHitCount);
    if (expected.totalGroupCount != null && verifyTotalGroupCount) {
        assertEquals("expected.totalGroupCount != actual.totalGroupCount", expected.totalGroupCount, actual.totalGroupCount);
    }
    for (int groupIDX = 0; groupIDX < expected.groups.length; groupIDX++) {
        if (VERBOSE) {
            System.out.println("  check groupIDX=" + groupIDX);
        }
        final GroupDocs<BytesRef> expectedGroup = expected.groups[groupIDX];
        final GroupDocs<BytesRef> actualGroup = actual.groups[groupIDX];
        if (verifyGroupValues) {
            if (idvBasedImplsUsed) {
                if (actualGroup.groupValue.length == 0) {
                    assertNull(expectedGroup.groupValue);
                } else {
                    assertEquals(expectedGroup.groupValue, actualGroup.groupValue);
                }
            } else {
                assertEquals(expectedGroup.groupValue, actualGroup.groupValue);
            }
        }
        if (verifySortValues) {
            assertArrayEquals(expectedGroup.groupSortValues, actualGroup.groupSortValues);
        }
        // TODO
        // assertEquals(expectedGroup.maxScore, actualGroup.maxScore);
        assertEquals(expectedGroup.totalHits, actualGroup.totalHits);
        final ScoreDoc[] expectedFDs = expectedGroup.scoreDocs;
        final ScoreDoc[] actualFDs = actualGroup.scoreDocs;
        assertEquals(expectedFDs.length, actualFDs.length);
        for (int docIDX = 0; docIDX < expectedFDs.length; docIDX++) {
            final FieldDoc expectedFD = (FieldDoc) expectedFDs[docIDX];
            final FieldDoc actualFD = (FieldDoc) actualFDs[docIDX];
            //System.out.println("  actual doc=" + docIDtoID[actualFD.doc] + " score=" + actualFD.score);
            assertEquals(expectedFD.doc, docIDtoID[actualFD.doc]);
            if (testScores) {
                assertEquals(expectedFD.score, actualFD.score, 0.1);
            } else {
            // TODO: too anal for now
            //assertEquals(Float.NaN, actualFD.score);
            }
            if (verifySortValues) {
                assertArrayEquals(expectedFD.fields, actualFD.fields);
            }
        }
    }
}
Also used: FieldDoc (org.apache.lucene.search.FieldDoc), BytesRef (org.apache.lucene.util.BytesRef), ScoreDoc (org.apache.lucene.search.ScoreDoc)
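
A note on the FieldDoc cast in the inner loop: it is safe only because the grouping search ran with an explicit sort and filled each hit's sort values. Below is a minimal sketch of the same pattern outside the test harness, assuming the Lucene 6.x GroupingSearch API, an existing IndexSearcher named searcher, and illustrative field names ("group", "rank"):

// Sketch only: GroupingSearch, GroupDocs and TopGroups come from org.apache.lucene.search.grouping.
GroupingSearch groupingSearch = new GroupingSearch("group");
groupingSearch.setGroupSort(new Sort(new SortField("rank", SortField.Type.LONG)));
groupingSearch.setSortWithinGroup(new Sort(new SortField("rank", SortField.Type.LONG)));
// Ask the collectors to fill FieldDoc.fields and groupSortValues, as the test expects.
groupingSearch.setFillSortFields(true);
TopGroups<BytesRef> groups = groupingSearch.search(searcher, new MatchAllDocsQuery(), 0, 10);
for (GroupDocs<BytesRef> group : groups.groups) {
    for (ScoreDoc sd : group.scoreDocs) {
        FieldDoc fd = (FieldDoc) sd;          // safe: sorted search with filled sort fields
        Object firstSortValue = fd.fields[0]; // the "rank" value for this hit
    }
}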

Example 22 with FieldDoc

Use of org.apache.lucene.search.FieldDoc in project lucene-solr by apache.

Class TestNumericDocValuesUpdates, method testUpdateSegmentWithNoDocValues2.

@Test
public void testUpdateSegmentWithNoDocValues2() throws Exception {
    Directory dir = newDirectory();
    IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
    // prevent merges, otherwise by the time updates are applied
    // (writer.close()), the segments might have merged and that update becomes
    // legit.
    conf.setMergePolicy(NoMergePolicy.INSTANCE);
    IndexWriter writer = new IndexWriter(dir, conf);
    // first segment with NDV
    Document doc = new Document();
    doc.add(new StringField("id", "doc0", Store.NO));
    doc.add(new NumericDocValuesField("ndv", 3));
    writer.addDocument(doc);
    doc = new Document();
    // document without 'ndv' field
    doc.add(new StringField("id", "doc4", Store.NO));
    writer.addDocument(doc);
    writer.commit();
    // second segment with no NDV, but another dv field "foo"
    doc = new Document();
    doc.add(new StringField("id", "doc1", Store.NO));
    doc.add(new NumericDocValuesField("foo", 3));
    writer.addDocument(doc);
    doc = new Document();
    // document that isn't updated
    doc.add(new StringField("id", "doc2", Store.NO));
    writer.addDocument(doc);
    writer.commit();
    // update document in the first segment - should not affect docsWithField of
    // the document without NDV field
    writer.updateNumericDocValue(new Term("id", "doc0"), "ndv", 5L);
    // update document in the second segment - field should be added and we should
    // be able to handle the other document correctly (e.g. no NPE)
    writer.updateNumericDocValue(new Term("id", "doc1"), "ndv", 5L);
    writer.close();
    DirectoryReader reader = DirectoryReader.open(dir);
    for (LeafReaderContext context : reader.leaves()) {
        LeafReader r = context.reader();
        NumericDocValues ndv = r.getNumericDocValues("ndv");
        assertEquals(0, ndv.nextDoc());
        assertEquals(5L, ndv.longValue());
        assertTrue(ndv.nextDoc() > 1);
    }
    reader.close();
    TestUtil.checkIndex(dir);
    conf = newIndexWriterConfig(new MockAnalyzer(random()));
    writer = new IndexWriter(dir, conf);
    writer.forceMerge(1);
    writer.close();
    reader = DirectoryReader.open(dir);
    LeafReader ar = getOnlyLeafReader(reader);
    assertEquals(DocValuesType.NUMERIC, ar.getFieldInfos().fieldInfo("foo").getDocValuesType());
    IndexSearcher searcher = new IndexSearcher(reader);
    TopFieldDocs td;
    // doc0
    td = searcher.search(new TermQuery(new Term("id", "doc0")), 1, new Sort(new SortField("ndv", SortField.Type.LONG)));
    assertEquals(5L, ((FieldDoc) td.scoreDocs[0]).fields[0]);
    // doc1
    td = searcher.search(new TermQuery(new Term("id", "doc1")), 1, new Sort(new SortField("ndv", SortField.Type.LONG), new SortField("foo", SortField.Type.LONG)));
    assertEquals(5L, ((FieldDoc) td.scoreDocs[0]).fields[0]);
    assertEquals(3L, ((FieldDoc) td.scoreDocs[0]).fields[1]);
    // doc2
    td = searcher.search(new TermQuery(new Term("id", "doc2")), 1, new Sort(new SortField("ndv", SortField.Type.LONG)));
    assertEquals(0L, ((FieldDoc) td.scoreDocs[0]).fields[0]);
    // doc4
    td = searcher.search(new TermQuery(new Term("id", "doc4")), 1, new Sort(new SortField("ndv", SortField.Type.LONG)));
    assertEquals(0L, ((FieldDoc) td.scoreDocs[0]).fields[0]);
    reader.close();
    dir.close();
}
Also used: IndexSearcher (org.apache.lucene.search.IndexSearcher), TermQuery (org.apache.lucene.search.TermQuery), FieldDoc (org.apache.lucene.search.FieldDoc), TopFieldDocs (org.apache.lucene.search.TopFieldDocs), SortField (org.apache.lucene.search.SortField), Document (org.apache.lucene.document.Document), MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer), NumericDocValuesField (org.apache.lucene.document.NumericDocValuesField), StringField (org.apache.lucene.document.StringField), Sort (org.apache.lucene.search.Sort), Directory (org.apache.lucene.store.Directory), NRTCachingDirectory (org.apache.lucene.store.NRTCachingDirectory), Test (org.junit.Test)
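
The API under test, IndexWriter.updateNumericDocValue, rewrites a numeric doc-values field in place without re-indexing the document. A condensed sketch of the round trip, with illustrative field names ("id", "price"):

// Sketch only: also uses RAMDirectory, StandardAnalyzer, Field.Store and DocIdSetIterator.
Directory dir = new RAMDirectory();
IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()));
Document d = new Document();
d.add(new StringField("id", "a", Field.Store.NO));
d.add(new NumericDocValuesField("price", 10L));
w.addDocument(d);
w.commit();
// In-place doc-values update, resolved against the "id" term when the writer applies it.
w.updateNumericDocValue(new Term("id", "a"), "price", 42L);
w.close();
DirectoryReader r = DirectoryReader.open(dir);
for (LeafReaderContext ctx : r.leaves()) {
    NumericDocValues dv = ctx.reader().getNumericDocValues("price");
    if (dv != null && dv.nextDoc() != DocIdSetIterator.NO_MORE_DOCS) {
        assert dv.longValue() == 42L; // the updated value, not the original 10
    }
}
r.close();
dir.close();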

Example 23 with FieldDoc

Use of org.apache.lucene.search.FieldDoc in project elasticsearch by elastic.

Class SearchService, method parseSource.

private void parseSource(DefaultSearchContext context, SearchSourceBuilder source) throws SearchContextException {
    // nothing to parse...
    if (source == null) {
        return;
    }
    QueryShardContext queryShardContext = context.getQueryShardContext();
    context.from(source.from());
    context.size(source.size());
    Map<String, InnerHitBuilder> innerHitBuilders = new HashMap<>();
    if (source.query() != null) {
        InnerHitBuilder.extractInnerHits(source.query(), innerHitBuilders);
        context.parsedQuery(queryShardContext.toQuery(source.query()));
    }
    if (source.postFilter() != null) {
        InnerHitBuilder.extractInnerHits(source.postFilter(), innerHitBuilders);
        context.parsedPostFilter(queryShardContext.toQuery(source.postFilter()));
    }
    if (innerHitBuilders.size() > 0) {
        for (Map.Entry<String, InnerHitBuilder> entry : innerHitBuilders.entrySet()) {
            try {
                entry.getValue().build(context, context.innerHits());
            } catch (IOException e) {
                throw new SearchContextException(context, "failed to build inner_hits", e);
            }
        }
    }
    if (source.sorts() != null) {
        try {
            Optional<SortAndFormats> optionalSort = SortBuilder.buildSort(source.sorts(), context.getQueryShardContext());
            if (optionalSort.isPresent()) {
                context.sort(optionalSort.get());
            }
        } catch (IOException e) {
            throw new SearchContextException(context, "failed to create sort elements", e);
        }
    }
    context.trackScores(source.trackScores());
    if (source.minScore() != null) {
        context.minimumScore(source.minScore());
    }
    if (source.profile()) {
        context.setProfilers(new Profilers(context.searcher()));
    }
    if (source.timeout() != null) {
        context.timeout(source.timeout());
    }
    context.terminateAfter(source.terminateAfter());
    if (source.aggregations() != null) {
        try {
            AggregatorFactories factories = source.aggregations().build(context, null);
            factories.validate();
            context.aggregations(new SearchContextAggregations(factories));
        } catch (IOException e) {
            throw new AggregationInitializationException("Failed to create aggregators", e);
        }
    }
    if (source.suggest() != null) {
        try {
            context.suggest(source.suggest().build(queryShardContext));
        } catch (IOException e) {
            throw new SearchContextException(context, "failed to create SuggestionSearchContext", e);
        }
    }
    if (source.rescores() != null) {
        try {
            for (RescoreBuilder<?> rescore : source.rescores()) {
                context.addRescore(rescore.build(queryShardContext));
            }
        } catch (IOException e) {
            throw new SearchContextException(context, "failed to create RescoreSearchContext", e);
        }
    }
    if (source.explain() != null) {
        context.explain(source.explain());
    }
    if (source.fetchSource() != null) {
        context.fetchSourceContext(source.fetchSource());
    }
    if (source.docValueFields() != null) {
        context.docValueFieldsContext(new DocValueFieldsContext(source.docValueFields()));
    }
    if (source.highlighter() != null) {
        HighlightBuilder highlightBuilder = source.highlighter();
        try {
            context.highlight(highlightBuilder.build(queryShardContext));
        } catch (IOException e) {
            throw new SearchContextException(context, "failed to create SearchContextHighlighter", e);
        }
    }
    if (source.scriptFields() != null) {
        for (org.elasticsearch.search.builder.SearchSourceBuilder.ScriptField field : source.scriptFields()) {
            SearchScript searchScript = scriptService.search(context.lookup(), field.script(), ScriptContext.Standard.SEARCH);
            context.scriptFields().add(new ScriptField(field.fieldName(), searchScript, field.ignoreFailure()));
        }
    }
    if (source.ext() != null) {
        for (SearchExtBuilder searchExtBuilder : source.ext()) {
            context.addSearchExt(searchExtBuilder);
        }
    }
    if (source.version() != null) {
        context.version(source.version());
    }
    if (source.stats() != null) {
        context.groupStats(source.stats());
    }
    if (source.searchAfter() != null && source.searchAfter().length > 0) {
        if (context.scrollContext() != null) {
            throw new SearchContextException(context, "`search_after` cannot be used in a scroll context.");
        }
        if (context.from() > 0) {
            throw new SearchContextException(context, "`from` parameter must be set to 0 when `search_after` is used.");
        }
        FieldDoc fieldDoc = SearchAfterBuilder.buildFieldDoc(context.sort(), source.searchAfter());
        context.searchAfter(fieldDoc);
    }
    if (source.slice() != null) {
        if (context.scrollContext() == null) {
            throw new SearchContextException(context, "`slice` cannot be used outside of a scroll context");
        }
        context.sliceBuilder(source.slice());
    }
    if (source.storedFields() != null) {
        if (source.storedFields().fetchFields() == false) {
            if (context.version()) {
                throw new SearchContextException(context, "`stored_fields` cannot be disabled if version is requested");
            }
            if (context.sourceRequested()) {
                throw new SearchContextException(context, "`stored_fields` cannot be disabled if _source is requested");
            }
        }
        context.storedFieldsContext(source.storedFields());
    }
    if (source.collapse() != null) {
        final CollapseContext collapseContext = source.collapse().build(context);
        context.collapse(collapseContext);
    }
}
Also used: FieldDoc (org.apache.lucene.search.FieldDoc), HashMap (java.util.HashMap), InnerHitBuilder (org.elasticsearch.index.query.InnerHitBuilder), Profilers (org.elasticsearch.search.profile.Profilers), SearchSourceBuilder (org.elasticsearch.search.builder.SearchSourceBuilder), ScriptField (org.elasticsearch.search.fetch.subphase.ScriptFieldsContext.ScriptField), QueryShardContext (org.elasticsearch.index.query.QueryShardContext), AggregatorFactories (org.elasticsearch.search.aggregations.AggregatorFactories), DocValueFieldsContext (org.elasticsearch.search.fetch.subphase.DocValueFieldsContext), SearchContextAggregations (org.elasticsearch.search.aggregations.SearchContextAggregations), AggregationInitializationException (org.elasticsearch.search.aggregations.AggregationInitializationException), IOException (java.io.IOException), SortAndFormats (org.elasticsearch.search.sort.SortAndFormats), SearchScript (org.elasticsearch.script.SearchScript), Map (java.util.Map), HighlightBuilder (org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder), CollapseContext (org.elasticsearch.search.collapse.CollapseContext)
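
At the Lucene level, the FieldDoc built by SearchAfterBuilder is the cursor that IndexSearcher.searchAfter resumes from; this is also why a non-zero from is rejected above, since the cursor already encodes the position. A minimal paging sketch, assuming an existing IndexSearcher named searcher and an illustrative LONG-sortable "timestamp" field:

Sort sort = new Sort(new SortField("timestamp", SortField.Type.LONG));
TopFieldDocs firstPage = searcher.search(new MatchAllDocsQuery(), 10, sort);
// Every hit of a sorted search is a FieldDoc carrying its sort values;
// the last hit of the page doubles as the cursor for the next page.
FieldDoc after = (FieldDoc) firstPage.scoreDocs[firstPage.scoreDocs.length - 1];
TopDocs secondPage = searcher.searchAfter(after, new MatchAllDocsQuery(), 10, sort);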

Example 24 with FieldDoc

Use of org.apache.lucene.search.FieldDoc in project elasticsearch by elastic.

Class SearchPhaseController, method sortDocs.

/**
     * Returns a score doc array of top N search docs across all shards, followed by top suggest docs for each
     * named completion suggestion across all shards. If more than one named completion suggestion is specified in the
     * request, the suggest docs for a named suggestion are ordered by the suggestion name.
     *
     * Note: The order of the sorted score docs depends on the shard index in the result array if the merge process needs to disambiguate
     * the result. In order to obtain stable results, the shard index (the index of the result in the result array) must be the same.
     *
     * @param ignoreFrom Whether to ignore the from and sort all hits in each shard result.
     *                   Enabled only for scroll search, because that only retrieves hits of length 'size' in the query phase.
     * @param resultsArr Shard result holder
     */
public ScoreDoc[] sortDocs(boolean ignoreFrom, AtomicArray<? extends QuerySearchResultProvider> resultsArr) throws IOException {
    List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> results = resultsArr.asList();
    if (results.isEmpty()) {
        return EMPTY_DOCS;
    }
    final QuerySearchResult result;
    boolean canOptimize = false;
    int shardIndex = -1;
    if (results.size() == 1) {
        canOptimize = true;
        result = results.get(0).value.queryResult();
        shardIndex = results.get(0).index;
    } else {
        boolean hasResult = false;
        QuerySearchResult resultToOptimize = null;
        // lets see if we only got hits from a single shard, if so, we can optimize...
        for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : results) {
            if (entry.value.queryResult().hasHits()) {
                if (hasResult) {
                    // we already have one, can't really optimize
                    canOptimize = false;
                    break;
                }
                canOptimize = true;
                hasResult = true;
                resultToOptimize = entry.value.queryResult();
                shardIndex = entry.index;
            }
        }
        result = canOptimize ? resultToOptimize : results.get(0).value.queryResult();
        assert result != null;
    }
    if (canOptimize) {
        int offset = result.from();
        if (ignoreFrom) {
            offset = 0;
        }
        ScoreDoc[] scoreDocs = result.topDocs().scoreDocs;
        ScoreDoc[] docs;
        int numSuggestDocs = 0;
        final Suggest suggest = result.queryResult().suggest();
        final List<CompletionSuggestion> completionSuggestions;
        if (suggest != null) {
            completionSuggestions = suggest.filter(CompletionSuggestion.class);
            for (CompletionSuggestion suggestion : completionSuggestions) {
                numSuggestDocs += suggestion.getOptions().size();
            }
        } else {
            completionSuggestions = Collections.emptyList();
        }
        int docsOffset = 0;
        if (scoreDocs.length == 0 || scoreDocs.length < offset) {
            docs = new ScoreDoc[numSuggestDocs];
        } else {
            int resultDocsSize = result.size();
            if ((scoreDocs.length - offset) < resultDocsSize) {
                resultDocsSize = scoreDocs.length - offset;
            }
            docs = new ScoreDoc[resultDocsSize + numSuggestDocs];
            for (int i = 0; i < resultDocsSize; i++) {
                ScoreDoc scoreDoc = scoreDocs[offset + i];
                scoreDoc.shardIndex = shardIndex;
                docs[i] = scoreDoc;
                docsOffset++;
            }
        }
        for (CompletionSuggestion suggestion : completionSuggestions) {
            for (CompletionSuggestion.Entry.Option option : suggestion.getOptions()) {
                ScoreDoc doc = option.getDoc();
                doc.shardIndex = shardIndex;
                docs[docsOffset++] = doc;
            }
        }
        return docs;
    }
    final int topN = result.queryResult().size();
    final int from = ignoreFrom ? 0 : result.queryResult().from();
    final TopDocs mergedTopDocs;
    final int numShards = resultsArr.length();
    if (result.queryResult().topDocs() instanceof CollapseTopFieldDocs) {
        CollapseTopFieldDocs firstTopDocs = (CollapseTopFieldDocs) result.queryResult().topDocs();
        final Sort sort = new Sort(firstTopDocs.fields);
        final CollapseTopFieldDocs[] shardTopDocs = new CollapseTopFieldDocs[numShards];
        fillTopDocs(shardTopDocs, results, new CollapseTopFieldDocs(firstTopDocs.field, 0, new FieldDoc[0], sort.getSort(), new Object[0], Float.NaN));
        mergedTopDocs = CollapseTopFieldDocs.merge(sort, from, topN, shardTopDocs);
    } else if (result.queryResult().topDocs() instanceof TopFieldDocs) {
        TopFieldDocs firstTopDocs = (TopFieldDocs) result.queryResult().topDocs();
        final Sort sort = new Sort(firstTopDocs.fields);
        final TopFieldDocs[] shardTopDocs = new TopFieldDocs[resultsArr.length()];
        fillTopDocs(shardTopDocs, results, new TopFieldDocs(0, new FieldDoc[0], sort.getSort(), Float.NaN));
        mergedTopDocs = TopDocs.merge(sort, from, topN, shardTopDocs, true);
    } else {
        final TopDocs[] shardTopDocs = new TopDocs[resultsArr.length()];
        fillTopDocs(shardTopDocs, results, Lucene.EMPTY_TOP_DOCS);
        mergedTopDocs = TopDocs.merge(from, topN, shardTopDocs, true);
    }
    ScoreDoc[] scoreDocs = mergedTopDocs.scoreDocs;
    final Map<String, List<Suggestion<CompletionSuggestion.Entry>>> groupedCompletionSuggestions = new HashMap<>();
    // group suggestions and assign shard index
    for (AtomicArray.Entry<? extends QuerySearchResultProvider> sortedResult : results) {
        Suggest shardSuggest = sortedResult.value.queryResult().suggest();
        if (shardSuggest != null) {
            for (CompletionSuggestion suggestion : shardSuggest.filter(CompletionSuggestion.class)) {
                suggestion.setShardIndex(sortedResult.index);
                List<Suggestion<CompletionSuggestion.Entry>> suggestions = groupedCompletionSuggestions.computeIfAbsent(suggestion.getName(), s -> new ArrayList<>());
                suggestions.add(suggestion);
            }
        }
    }
    if (groupedCompletionSuggestions.isEmpty() == false) {
        int numSuggestDocs = 0;
        List<Suggestion<? extends Entry<? extends Entry.Option>>> completionSuggestions = new ArrayList<>(groupedCompletionSuggestions.size());
        for (List<Suggestion<CompletionSuggestion.Entry>> groupedSuggestions : groupedCompletionSuggestions.values()) {
            final CompletionSuggestion completionSuggestion = CompletionSuggestion.reduceTo(groupedSuggestions);
            assert completionSuggestion != null;
            numSuggestDocs += completionSuggestion.getOptions().size();
            completionSuggestions.add(completionSuggestion);
        }
        scoreDocs = new ScoreDoc[mergedTopDocs.scoreDocs.length + numSuggestDocs];
        System.arraycopy(mergedTopDocs.scoreDocs, 0, scoreDocs, 0, mergedTopDocs.scoreDocs.length);
        int offset = mergedTopDocs.scoreDocs.length;
        Suggest suggestions = new Suggest(completionSuggestions);
        for (CompletionSuggestion completionSuggestion : suggestions.filter(CompletionSuggestion.class)) {
            for (CompletionSuggestion.Entry.Option option : completionSuggestion.getOptions()) {
                scoreDocs[offset++] = option.getDoc();
            }
        }
    }
    return scoreDocs;
}
Also used: AtomicArray (org.elasticsearch.common.util.concurrent.AtomicArray), FieldDoc (org.apache.lucene.search.FieldDoc), ObjectObjectHashMap (com.carrotsearch.hppc.ObjectObjectHashMap), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), IntArrayList (com.carrotsearch.hppc.IntArrayList), CollapseTopFieldDocs (org.apache.lucene.search.grouping.CollapseTopFieldDocs), TopFieldDocs (org.apache.lucene.search.TopFieldDocs), Suggest (org.elasticsearch.search.suggest.Suggest), ScoreDoc (org.apache.lucene.search.ScoreDoc), TopDocs (org.apache.lucene.search.TopDocs), CompletionSuggestion (org.elasticsearch.search.suggest.completion.CompletionSuggestion), Suggestion (org.elasticsearch.search.suggest.Suggest.Suggestion), Entry (org.elasticsearch.search.suggest.Suggest.Suggestion.Entry), Sort (org.apache.lucene.search.Sort), List (java.util.List), QuerySearchResult (org.elasticsearch.search.query.QuerySearchResult)
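
All three merge branches funnel into static Lucene merge helpers, with the same signature shape as the TopDocs.merge call above. A minimal sketch of the sorted path, assuming two hypothetical per-shard searchers (searcher0, searcher1), a shared query, and an illustrative LONG "rank" sort field:

Sort sort = new Sort(new SortField("rank", SortField.Type.LONG));
TopFieldDocs shard0 = searcher0.search(query, 5, sort);
TopFieldDocs shard1 = searcher1.search(query, 5, sort);
TopFieldDocs[] shardHits = new TopFieldDocs[] { shard0, shard1 };
// from=0, topN=5; the trailing "true" asks merge() to stamp shardIndex on every hit,
// which later phases use to route fetch requests back to the owning shard.
TopDocs merged = TopDocs.merge(sort, 0, 5, shardHits, true);
for (ScoreDoc sd : merged.scoreDocs) {
    FieldDoc fd = (FieldDoc) sd;        // a sorted merge yields FieldDocs
    Object topSortValue = fd.fields[0]; // the "rank" value that decided the order
}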

Example 25 with FieldDoc

Use of org.apache.lucene.search.FieldDoc in project elasticsearch by elastic.

Class InternalTopHits, method doEquals.

// Equals and hashcode implemented for testing round trips
@Override
protected boolean doEquals(Object obj) {
    InternalTopHits other = (InternalTopHits) obj;
    if (from != other.from)
        return false;
    if (size != other.size)
        return false;
    if (topDocs.totalHits != other.topDocs.totalHits)
        return false;
    if (topDocs.scoreDocs.length != other.topDocs.scoreDocs.length)
        return false;
    for (int d = 0; d < topDocs.scoreDocs.length; d++) {
        ScoreDoc thisDoc = topDocs.scoreDocs[d];
        ScoreDoc otherDoc = other.topDocs.scoreDocs[d];
        if (thisDoc.doc != otherDoc.doc)
            return false;
        if (Double.compare(thisDoc.score, otherDoc.score) != 0)
            return false;
        if (thisDoc.shardIndex != otherDoc.shardIndex)
            return false;
        if (thisDoc instanceof FieldDoc) {
            if (false == (otherDoc instanceof FieldDoc))
                return false;
            FieldDoc thisFieldDoc = (FieldDoc) thisDoc;
            FieldDoc otherFieldDoc = (FieldDoc) otherDoc;
            if (thisFieldDoc.fields.length != otherFieldDoc.fields.length)
                return false;
            for (int f = 0; f < thisFieldDoc.fields.length; f++) {
                if (false == thisFieldDoc.fields[f].equals(otherFieldDoc.fields[f]))
                    return false;
            }
        }
    }
    return searchHits.equals(other.searchHits);
}
Also used: FieldDoc (org.apache.lucene.search.FieldDoc), ScoreDoc (org.apache.lucene.search.ScoreDoc)
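
The field-by-field loop is equivalent to java.util.Arrays.equals over FieldDoc.fields. A compact standalone sketch of the same check (a hypothetical helper, not part of InternalTopHits):

static boolean scoreDocEquals(ScoreDoc a, ScoreDoc b) {
    if (a.doc != b.doc || Double.compare(a.score, b.score) != 0 || a.shardIndex != b.shardIndex) {
        return false;
    }
    if ((a instanceof FieldDoc) != (b instanceof FieldDoc)) {
        return false;
    }
    // Arrays.equals performs the same element-wise equals() walk as the loop above.
    return (a instanceof FieldDoc) == false
        || java.util.Arrays.equals(((FieldDoc) a).fields, ((FieldDoc) b).fields);
}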

Aggregations

FieldDoc (org.apache.lucene.search.FieldDoc): 50
Sort (org.apache.lucene.search.Sort): 26
TopFieldDocs (org.apache.lucene.search.TopFieldDocs): 23
SortField (org.apache.lucene.search.SortField): 21
ScoreDoc (org.apache.lucene.search.ScoreDoc): 20
MatchAllDocsQuery (org.apache.lucene.search.MatchAllDocsQuery): 18
IndexSearcher (org.apache.lucene.search.IndexSearcher): 17
ArrayList (java.util.ArrayList): 14
Directory (org.apache.lucene.store.Directory): 14
TopDocs (org.apache.lucene.search.TopDocs): 12
RandomIndexWriter (org.apache.lucene.index.RandomIndexWriter): 10
TermQuery (org.apache.lucene.search.TermQuery): 10
Document (org.apache.lucene.document.Document): 9
Term (org.apache.lucene.index.Term): 9
BytesRef (org.apache.lucene.util.BytesRef): 9
HashMap (java.util.HashMap): 8
Query (org.apache.lucene.search.Query): 8
IndexReader (org.apache.lucene.index.IndexReader): 7
HashSet (java.util.HashSet): 5
DirectoryReader (org.apache.lucene.index.DirectoryReader): 5