Search in sources :

Example 6 with TopFieldDocs

use of org.apache.lucene.search.TopFieldDocs in project elasticsearch by elastic.

Source: class SearchPhaseController, method getHits.

/**
 * Builds the merged {@link SearchHits} page for a reduced query phase by walking the
 * globally sorted docs and pulling each hit's detail from the owning shard's fetch result.
 *
 * @param reducedQueryPhase the reduced (merged) query-phase state; supplies from/size,
 *                          total hits, max score and one representative shard result
 * @param ignoreFrom        when true the {@code from} offset is treated as 0
 * @param sortedDocs        globally sorted score docs; {@code shardIndex} routes each doc
 *                          back to its shard's fetch result
 * @param fetchResultsArr   per-shard fetch results, indexed by shard
 * @return the merged hits page
 */
private SearchHits getHits(ReducedQueryPhase reducedQueryPhase, boolean ignoreFrom, ScoreDoc[] sortedDocs, AtomicArray<? extends QuerySearchResultProvider> fetchResultsArr) {
    List<? extends AtomicArray.Entry<? extends QuerySearchResultProvider>> fetchResults = fetchResultsArr.asList();
    boolean sorted = false;
    int sortScoreIndex = -1;
    // NOTE(review): the instanceof check uses oneResult.topDocs() while the cast below uses
    // oneResult.queryResult().topDocs() — presumably these return the same object; confirm.
    if (reducedQueryPhase.oneResult.topDocs() instanceof TopFieldDocs) {
        TopFieldDocs fieldDocs = (TopFieldDocs) reducedQueryPhase.oneResult.queryResult().topDocs();
        if (fieldDocs instanceof CollapseTopFieldDocs && fieldDocs.fields.length == 1 && fieldDocs.fields[0].getType() == SortField.Type.SCORE) {
            // Collapsing on score alone: treat as unsorted so hits keep their shard scores.
            sorted = false;
        } else {
            sorted = true;
            // Remember which sort field (if any) carries the score, so it can be copied
            // onto each hit below.
            for (int i = 0; i < fieldDocs.fields.length; i++) {
                if (fieldDocs.fields[i].getType() == SortField.Type.SCORE) {
                    sortScoreIndex = i;
                }
            }
        }
    }
    // clean the fetch counter
    for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : fetchResults) {
        entry.value.fetchResult().initCounter();
    }
    int from = ignoreFrom ? 0 : reducedQueryPhase.oneResult.queryResult().from();
    int numSearchHits = (int) Math.min(reducedQueryPhase.fetchHits - from, reducedQueryPhase.oneResult.size());
    // with collapsing we can have more fetch hits than sorted docs
    numSearchHits = Math.min(sortedDocs.length, numSearchHits);
    // merge hits
    List<SearchHit> hits = new ArrayList<>();
    if (!fetchResults.isEmpty()) {
        for (int i = 0; i < numSearchHits; i++) {
            ScoreDoc shardDoc = sortedDocs[i];
            QuerySearchResultProvider fetchResultProvider = fetchResultsArr.get(shardDoc.shardIndex);
            if (fetchResultProvider == null) {
                // Shard produced no fetch result (e.g. failure); skip this doc.
                continue;
            }
            FetchSearchResult fetchResult = fetchResultProvider.fetchResult();
            // Each shard's hits are consumed in order via a per-shard counter.
            int index = fetchResult.counterGetAndIncrement();
            if (index < fetchResult.hits().internalHits().length) {
                SearchHit searchHit = fetchResult.hits().internalHits()[index];
                searchHit.score(shardDoc.score);
                searchHit.shard(fetchResult.shardTarget());
                if (sorted) {
                    FieldDoc fieldDoc = (FieldDoc) shardDoc;
                    searchHit.sortValues(fieldDoc.fields, reducedQueryPhase.oneResult.sortValueFormats());
                    if (sortScoreIndex != -1) {
                        // The real score lives in the sort values; overwrite the doc score.
                        searchHit.score(((Number) fieldDoc.fields[sortScoreIndex]).floatValue());
                    }
                }
                hits.add(searchHit);
            }
        }
    }
    return new SearchHits(hits.toArray(new SearchHit[hits.size()]), reducedQueryPhase.totalHits, reducedQueryPhase.maxScore);
}
Also used : AtomicArray(org.elasticsearch.common.util.concurrent.AtomicArray) QuerySearchResultProvider(org.elasticsearch.search.query.QuerySearchResultProvider) SearchHit(org.elasticsearch.search.SearchHit) FieldDoc(org.apache.lucene.search.FieldDoc) FetchSearchResult(org.elasticsearch.search.fetch.FetchSearchResult) ArrayList(java.util.ArrayList) IntArrayList(com.carrotsearch.hppc.IntArrayList) CollapseTopFieldDocs(org.apache.lucene.search.grouping.CollapseTopFieldDocs) TopFieldDocs(org.apache.lucene.search.TopFieldDocs) ScoreDoc(org.apache.lucene.search.ScoreDoc) SearchHits(org.elasticsearch.search.SearchHits) CollapseTopFieldDocs(org.apache.lucene.search.grouping.CollapseTopFieldDocs)

Example 7 with TopFieldDocs

use of org.apache.lucene.search.TopFieldDocs in project elasticsearch by elastic.

Source: class CollapsingTopDocsCollectorTests, method assertSearchCollapse.

/**
 * Randomized check that {@code CollapsingTopDocsCollector} returns one doc per group, that
 * each returned doc matches the first not-yet-seen group in a plain top-field search over
 * the same index, and that per-segment collapse results merge back to the full result.
 *
 * @param dvProducers creates the collapse doc-values field and its matching SortField
 * @param numeric     whether to collapse on a numeric (vs keyword) field
 * @param multivalued whether the collapse field is multi-valued
 * @throws IOException on index I/O failure
 */
private <T extends Comparable> void assertSearchCollapse(CollapsingDocValuesProducer<T> dvProducers, boolean numeric, boolean multivalued) throws IOException {
    final int numDocs = randomIntBetween(1000, 2000);
    int maxGroup = randomIntBetween(2, 500);
    final Directory dir = newDirectory();
    final RandomIndexWriter w = new RandomIndexWriter(random(), dir);
    Set<T> values = new HashSet<>();
    int totalHits = 0;
    // Index numDocs docs; each gets a random group value plus two numeric sort fields.
    for (int i = 0; i < numDocs; i++) {
        final T value = dvProducers.randomGroup(maxGroup);
        values.add(value);
        Document doc = new Document();
        dvProducers.add(doc, value, multivalued);
        doc.add(new NumericDocValuesField("sort1", randomIntBetween(0, 10)));
        doc.add(new NumericDocValuesField("sort2", randomLong()));
        w.addDocument(doc);
        totalHits++;
    }
    List<T> valueList = new ArrayList<>(values);
    Collections.sort(valueList);
    final IndexReader reader = w.getReader();
    final IndexSearcher searcher = newSearcher(reader);
    final SortField collapseField = dvProducers.sortField(multivalued);
    final SortField sort1 = new SortField("sort1", SortField.Type.INT);
    final SortField sort2 = new SortField("sort2", SortField.Type.LONG);
    Sort sort = new Sort(sort1, sort2, collapseField);
    // Ask for as many groups as distinct values, so every group appears once.
    int expectedNumGroups = values.size();
    final CollapsingTopDocsCollector collapsingCollector;
    if (numeric) {
        collapsingCollector = CollapsingTopDocsCollector.createNumeric(collapseField.getField(), sort, expectedNumGroups, false);
    } else {
        collapsingCollector = CollapsingTopDocsCollector.createKeyword(collapseField.getField(), sort, expectedNumGroups, false);
    }
    // Reference collector: same sort, all docs, no collapsing.
    TopFieldCollector topFieldCollector = TopFieldCollector.create(sort, totalHits, true, false, false);
    searcher.search(new MatchAllDocsQuery(), collapsingCollector);
    searcher.search(new MatchAllDocsQuery(), topFieldCollector);
    CollapseTopFieldDocs collapseTopFieldDocs = collapsingCollector.getTopDocs();
    TopFieldDocs topDocs = topFieldCollector.topDocs();
    assertEquals(collapseField.getField(), collapseTopFieldDocs.field);
    assertEquals(expectedNumGroups, collapseTopFieldDocs.scoreDocs.length);
    assertEquals(totalHits, collapseTopFieldDocs.totalHits);
    assertEquals(totalHits, topDocs.scoreDocs.length);
    assertEquals(totalHits, topDocs.totalHits);
    Set<Object> seen = new HashSet<>();
    // collapse field is the last sort
    int collapseIndex = sort.getSort().length - 1;
    int topDocsIndex = 0;
    // For each collapsed group, the collapsed doc must equal the first doc in the
    // uncollapsed ordering whose group value has not been seen yet.
    for (int i = 0; i < expectedNumGroups; i++) {
        FieldDoc fieldDoc = null;
        for (; topDocsIndex < totalHits; topDocsIndex++) {
            fieldDoc = (FieldDoc) topDocs.scoreDocs[topDocsIndex];
            if (seen.contains(fieldDoc.fields[collapseIndex]) == false) {
                break;
            }
        }
        FieldDoc collapseFieldDoc = (FieldDoc) collapseTopFieldDocs.scoreDocs[i];
        assertNotNull(fieldDoc);
        assertEquals(collapseFieldDoc.doc, fieldDoc.doc);
        assertArrayEquals(collapseFieldDoc.fields, fieldDoc.fields);
        seen.add(fieldDoc.fields[fieldDoc.fields.length - 1]);
    }
    // Every remaining uncollapsed doc must belong to an already-seen group.
    for (; topDocsIndex < totalHits; topDocsIndex++) {
        FieldDoc fieldDoc = (FieldDoc) topDocs.scoreDocs[topDocsIndex];
        assertTrue(seen.contains(fieldDoc.fields[collapseIndex]));
    }
    // check merge
    final IndexReaderContext ctx = searcher.getTopReaderContext();
    final SegmentSearcher[] subSearchers;
    final int[] docStarts;
    // Build one "shard" searcher per leaf (or a single one for a single-segment index).
    if (ctx instanceof LeafReaderContext) {
        subSearchers = new SegmentSearcher[1];
        docStarts = new int[1];
        subSearchers[0] = new SegmentSearcher((LeafReaderContext) ctx, ctx);
        docStarts[0] = 0;
    } else {
        final CompositeReaderContext compCTX = (CompositeReaderContext) ctx;
        final int size = compCTX.leaves().size();
        subSearchers = new SegmentSearcher[size];
        docStarts = new int[size];
        int docBase = 0;
        for (int searcherIDX = 0; searcherIDX < subSearchers.length; searcherIDX++) {
            final LeafReaderContext leave = compCTX.leaves().get(searcherIDX);
            subSearchers[searcherIDX] = new SegmentSearcher(leave, compCTX);
            docStarts[searcherIDX] = docBase;
            docBase += leave.reader().maxDoc();
        }
    }
    final CollapseTopFieldDocs[] shardHits = new CollapseTopFieldDocs[subSearchers.length];
    final Weight weight = searcher.createNormalizedWeight(new MatchAllDocsQuery(), false);
    // Collapse independently per shard, then merge and compare with the global result.
    for (int shardIDX = 0; shardIDX < subSearchers.length; shardIDX++) {
        final SegmentSearcher subSearcher = subSearchers[shardIDX];
        final CollapsingTopDocsCollector c;
        if (numeric) {
            c = CollapsingTopDocsCollector.createNumeric(collapseField.getField(), sort, expectedNumGroups, false);
        } else {
            c = CollapsingTopDocsCollector.createKeyword(collapseField.getField(), sort, expectedNumGroups, false);
        }
        subSearcher.search(weight, c);
        shardHits[shardIDX] = c.getTopDocs();
    }
    CollapseTopFieldDocs mergedFieldDocs = CollapseTopFieldDocs.merge(sort, 0, expectedNumGroups, shardHits);
    assertTopDocsEquals(mergedFieldDocs, collapseTopFieldDocs);
    w.close();
    reader.close();
    dir.close();
}
Also used : IndexSearcher(org.apache.lucene.search.IndexSearcher) FieldDoc(org.apache.lucene.search.FieldDoc) ArrayList(java.util.ArrayList) CollapseTopFieldDocs(org.apache.lucene.search.grouping.CollapseTopFieldDocs) TopFieldDocs(org.apache.lucene.search.TopFieldDocs) SortField(org.apache.lucene.search.SortField) SortedSetSortField(org.apache.lucene.search.SortedSetSortField) SortedNumericSortField(org.apache.lucene.search.SortedNumericSortField) Document(org.apache.lucene.document.Document) SortedNumericDocValuesField(org.apache.lucene.document.SortedNumericDocValuesField) NumericDocValuesField(org.apache.lucene.document.NumericDocValuesField) Sort(org.apache.lucene.search.Sort) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) TopFieldCollector(org.apache.lucene.search.TopFieldCollector) CollapseTopFieldDocs(org.apache.lucene.search.grouping.CollapseTopFieldDocs) Directory(org.apache.lucene.store.Directory) HashSet(java.util.HashSet) MatchAllDocsQuery(org.apache.lucene.search.MatchAllDocsQuery) IndexReaderContext(org.apache.lucene.index.IndexReaderContext) Weight(org.apache.lucene.search.Weight) CompositeReaderContext(org.apache.lucene.index.CompositeReaderContext) IndexReader(org.apache.lucene.index.IndexReader) CollapsingTopDocsCollector(org.apache.lucene.search.grouping.CollapsingTopDocsCollector) RandomIndexWriter(org.apache.lucene.index.RandomIndexWriter)

Example 8 with TopFieldDocs

use of org.apache.lucene.search.TopFieldDocs in project bigbluebutton by bigbluebutton.

Source: class SearchController, method onSubmit.

/*
	 * (non-Javadoc)
	 * 
	 * @see
	 * org.springframework.web.servlet.mvc.SimpleFormController#onSubmit(javax
	 * .servlet.http.HttpServletRequest, javax.servlet.http.HttpServletResponse,
	 * java.lang.Object, org.springframework.validation.BindException)
	 */
@Override
protected ModelAndView onSubmit(HttpServletRequest request, HttpServletResponse response, Object command, BindException errors) throws Exception {
    SearchCriteriaCommand srchCriteriaCommand = (SearchCriteriaCommand) command;
    // Integer.parseInt replaces the deprecated `new Integer(...).intValue()` boxing dance.
    int startFrom = Integer.parseInt(srchCriteriaCommand.getStartFrom());
    int endIndex = 0;
    String queryStr = srchCriteriaCommand.getKeyWords();
    String sortBy = srchCriteriaCommand.getSort();
    String operator = srchCriteriaCommand.getOperator();
    String relRange = srchCriteriaCommand.getRangeValue();
    // "Smart" mode groups hits per recorded session using a relevance range.
    boolean bSmart = (relRange != null) && (!relRange.isEmpty());
    boolean bSortByScore = sortBy.equalsIgnoreCase("byScore");
    if (logger.isInfoEnabled()) {
        logger.info("---search offset=" + startFrom + " sortBy=" + sortBy + "qryString=" + queryStr + "operator=" + operator);
    }
    Map<String, Object> model = new HashMap<String, Object>();
    // LinkedHashMap preserves result order for the view layer.
    LinkedHashMap<String, MatchVO> sortedMap = new LinkedHashMap<String, MatchVO>();
    Map<String, SessionHitsOrganizer> hitsOrganizerMap = new HashMap<String, SessionHitsOrganizer>();
    Map<String, String> resultMap = new HashMap<String, String>();
    // Serialize against the indexer so the index is not swapped mid-search.
    synchronized (Index.getInstance()) {
        Search search = Search.getInstance();
        search.startSearch();
        TopDocs tps = null;
        Searcher searcher = null;
        ScoreDoc[] hits = null;
        if (bSortByScore) {
            Search.TopDocCollectorSearchResult result = search.searchByScore(queryStr, startFrom, operator);
            TopDocCollector collector = result.getCollector();
            if (collector != null) {
                tps = collector.topDocs();
            }
            // FIX: tps stays null when the collector is null; the original dereferenced it
            // unconditionally and threw a NullPointerException on empty results. Guard it,
            // mirroring the null check in the by-session branch below.
            if (tps != null) {
                hits = tps.scoreDocs;
            }
            searcher = result.getSearcher();
        } else {
            Search.TopFieldDocsSearchResult result = search.searchBySession(queryStr, startFrom, operator);
            TopFieldDocs tfd = result.getTopFieldDocs();
            if (tfd != null) {
                hits = tfd.scoreDocs;
            }
            searcher = result.getSearcher();
        }
        if (hits == null) {
            if (logger.isInfoEnabled()) {
                logger.info("---No hit");
            }
        } else {
            int start = startFrom;
            int end = hits.length;
            endIndex = end;
            if (logger.isInfoEnabled()) {
                logger.info("total match number=" + endIndex);
            }
            String currentSession = "0";
            String lastSession = "0";
            SessionHitsOrganizer hitsOrganizer = null;
            // Convert each Lucene hit into a MatchVO for the view; optionally group by session.
            for (int i = start; i < end; i++) {
                float score = hits[i].score;
                Document doc = searcher.doc(hits[i].doc);
                String path = doc.get("path");
                if (path != null) {
                    MatchVO matchVO = new MatchVO();
                    matchVO.setFilePath(path);
                    String fullContent = doc.get("title");
                    String summary = getKeywordContext(queryStr, fullContent);
                    matchVO.setContentSummary(summary);
                    String fileName = doc.get("fileName");
                    matchVO.setFileName(fileName);
                    String indexSummary = doc.get("summary");
                    matchVO.setIndexingSummary(indexSummary);
                    matchVO.setScore(score);
                    String title = indexSummary + ": " + fileName + " (Match Score = " + score + ")";
                    String allData = title + "%" + summary;
                    if (doc.get("slideTime") != null) {
                        allData += "%" + doc.get("slideTime");
                        matchVO.setSlidePlayTime(doc.get("slideTime"));
                    }
                    sortedMap.put(path, matchVO);
                    if (logger.isInfoEnabled()) {
                        logger.info("----" + allData);
                        logger.info((i + 1) + ". " + path);
                    }
                    if (title != null) {
                        if (logger.isInfoEnabled()) {
                            logger.info("   Title: " + doc.get("title"));
                        }
                    }
                    if (bSmart) {
                        // Group hits by the session number embedded in the file URL.
                        currentSession = getSessionNumberFromFileURL(path);
                        // get existing current session organizer
                        hitsOrganizer = hitsOrganizerMap.get(currentSession);
                        if (hitsOrganizer == null) {
                            // create a new session organizer object
                            hitsOrganizer = new SessionHitsOrganizer();
                            hitsOrganizer.setSessionNum(currentSession);
                            hitsOrganizerMap.put(currentSession, hitsOrganizer);
                        }
                        // Float.parseFloat replaces the deprecated `new Float(...)` constructor.
                        hitsOrganizer.setReleventRange(Float.parseFloat(relRange));
                        hitsOrganizer.addExactHits(path, score);
                        matchVO.setSessionHitOrganier(hitsOrganizer);
                    }
                } else {
                    System.out.println((i + 1) + ". " + "No path for this document");
                }
            }
        }
        search.finishSearch();
        // Post-processing: let each session organizer build its result groups.
        // (values() iteration replaces the raw keySet Iterator + get lookup.)
        for (SessionHitsOrganizer organizer : hitsOrganizerMap.values()) {
            organizer.generateResultGroup();
        }
        model.put("result", sortedMap);
        if (bSmart) {
            model.put("hitsOrganizer", hitsOrganizerMap);
        }
        model.put("searchKeyword", queryStr);
        model.put("startFrom", Integer.toString(startFrom));
        model.put("endAt", Integer.toString(endIndex));
        model.put("sortBy", sortBy);
        model.put("operator", operator);
        model.put("rangeValue", relRange);
    }
    ModelAndView mav = new ModelAndView(this.getSuccessView(), model);
    return mav;
}
Also used : SearchCriteriaCommand(org.bigbluebutton.webminer.web.model.SearchCriteriaCommand) HashMap(java.util.HashMap) LinkedHashMap(java.util.LinkedHashMap) ModelAndView(org.springframework.web.servlet.ModelAndView) MatchVO(org.bigbluebutton.webminer.web.model.MatchVO) TopFieldDocs(org.apache.lucene.search.TopFieldDocs) Document(org.apache.lucene.document.Document) LinkedHashMap(java.util.LinkedHashMap) ScoreDoc(org.apache.lucene.search.ScoreDoc) TopDocs(org.apache.lucene.search.TopDocs) SessionHitsOrganizer(org.bigbluebutton.webminer.web.model.SessionHitsOrganizer) Search(org.bigbluebutton.webminer.search.Search) Iterator(java.util.Iterator) Searcher(org.apache.lucene.search.Searcher) TopDocCollector(org.apache.lucene.search.TopDocCollector)

Example 9 with TopFieldDocs

use of org.apache.lucene.search.TopFieldDocs in project bigbluebutton by bigbluebutton.

Source: class Search, method searchBySession.

/**
 * Runs a query against the "contents" field and returns the results sorted by the
 * "summary" field (reverse order) instead of by relevance score.
 *
 * @param queryStr the raw user query; trimmed and parsed with QueryParser
 * @param startFrom paging offset — one extra page (10 docs) past it is requested
 * @param operator  "AND" selects AND as the default parser operator, anything else OR
 * @return the top field docs plus the searcher used, or {@code null} if the query could
 *         not be parsed or the index could not be read (best-effort: errors are logged)
 */
public TopFieldDocsSearchResult searchBySession(String queryStr, int startFrom, String operator) {
    try {
        queryStr = queryStr.trim();
        QueryParser parser = new QueryParser("contents", analyzer);
        // FIX: removed the unused local `Operator op = QueryParser.AND_OPERATOR;`.
        if (QueryParser.AND_OPERATOR.toString().equalsIgnoreCase(operator)) {
            parser.setDefaultOperator(QueryParser.AND_OPERATOR);
        } else {
            parser.setDefaultOperator(QueryParser.OR_OPERATOR);
        }
        Query query = parser.parse(queryStr);
        // Sort on "summary", reversed; fetch enough docs to cover the requested page.
        Sort sort = new Sort("summary", true);
        TopFieldDocs tfd = searcher.search(query, null, startFrom + 10, sort);
        return new TopFieldDocsSearchResult(tfd, searcher);
    } catch (ParseException e) {
        // Best-effort: an unparsable query returns null rather than failing the request.
        e.printStackTrace();
    } catch (IOException e) {
        // Best-effort: index read failures also fall through to the null return.
        e.printStackTrace();
    }
    return null;
}
Also used : Operator(org.apache.lucene.queryParser.QueryParser.Operator) QueryParser(org.apache.lucene.queryParser.QueryParser) Query(org.apache.lucene.search.Query) Sort(org.apache.lucene.search.Sort) TopFieldDocs(org.apache.lucene.search.TopFieldDocs) ParseException(org.apache.lucene.queryParser.ParseException) IOException(java.io.IOException)

Example 10 with TopFieldDocs

use of org.apache.lucene.search.TopFieldDocs in project lucene-solr by apache.

Source: class TestNumericDocValuesUpdates, method testUpdateSegmentWithNoDocValues2.

@Test
public void testUpdateSegmentWithNoDocValues2() throws Exception {
    // Verifies that a numeric doc-values update applied to a segment that has no "ndv"
    // field (but another DV field "foo") adds the field correctly, and that docs without
    // the field sort as 0 (missing value) before and after a force-merge.
    Directory dir = newDirectory();
    IndexWriterConfig conf = newIndexWriterConfig(new MockAnalyzer(random()));
    // prevent merges, otherwise by the time updates are applied
    // (writer.close()), the segments might have merged and that update becomes
    // legit.
    conf.setMergePolicy(NoMergePolicy.INSTANCE);
    IndexWriter writer = new IndexWriter(dir, conf);
    // first segment with NDV
    Document doc = new Document();
    doc.add(new StringField("id", "doc0", Store.NO));
    doc.add(new NumericDocValuesField("ndv", 3));
    writer.addDocument(doc);
    doc = new Document();
    // document without 'ndv' field
    doc.add(new StringField("id", "doc4", Store.NO));
    writer.addDocument(doc);
    writer.commit();
    // second segment with no NDV, but another dv field "foo"
    doc = new Document();
    doc.add(new StringField("id", "doc1", Store.NO));
    doc.add(new NumericDocValuesField("foo", 3));
    writer.addDocument(doc);
    doc = new Document();
    // document that isn't updated
    doc.add(new StringField("id", "doc2", Store.NO));
    writer.addDocument(doc);
    writer.commit();
    // update document in the first segment - should not affect docsWithField of
    // the document without NDV field
    writer.updateNumericDocValue(new Term("id", "doc0"), "ndv", 5L);
    // update document in the second segment - field should be added and we should
    // be able to handle the other document correctly (e.g. no NPE)
    writer.updateNumericDocValue(new Term("id", "doc1"), "ndv", 5L);
    writer.close();
    DirectoryReader reader = DirectoryReader.open(dir);
    // In every segment, exactly the first doc has "ndv" (value 5); the second doc
    // must NOT have gained the field from the update.
    for (LeafReaderContext context : reader.leaves()) {
        LeafReader r = context.reader();
        NumericDocValues ndv = r.getNumericDocValues("ndv");
        assertEquals(0, ndv.nextDoc());
        assertEquals(5L, ndv.longValue());
        assertTrue(ndv.nextDoc() > 1);
    }
    reader.close();
    TestUtil.checkIndex(dir);
    // Force-merge into a single segment and re-check via sorted searches.
    conf = newIndexWriterConfig(new MockAnalyzer(random()));
    writer = new IndexWriter(dir, conf);
    writer.forceMerge(1);
    writer.close();
    reader = DirectoryReader.open(dir);
    LeafReader ar = getOnlyLeafReader(reader);
    // The "foo" DV field must survive the merge with its numeric type intact.
    assertEquals(DocValuesType.NUMERIC, ar.getFieldInfos().fieldInfo("foo").getDocValuesType());
    IndexSearcher searcher = new IndexSearcher(reader);
    TopFieldDocs td;
    // doc0
    td = searcher.search(new TermQuery(new Term("id", "doc0")), 1, new Sort(new SortField("ndv", SortField.Type.LONG)));
    assertEquals(5L, ((FieldDoc) td.scoreDocs[0]).fields[0]);
    // doc1
    td = searcher.search(new TermQuery(new Term("id", "doc1")), 1, new Sort(new SortField("ndv", SortField.Type.LONG), new SortField("foo", SortField.Type.LONG)));
    assertEquals(5L, ((FieldDoc) td.scoreDocs[0]).fields[0]);
    assertEquals(3L, ((FieldDoc) td.scoreDocs[0]).fields[1]);
    // doc2 — never had "ndv"; missing numeric DV sorts as 0
    td = searcher.search(new TermQuery(new Term("id", "doc2")), 1, new Sort(new SortField("ndv", SortField.Type.LONG)));
    assertEquals(0L, ((FieldDoc) td.scoreDocs[0]).fields[0]);
    // doc4 — also never had "ndv"; must not be affected by doc0's update
    td = searcher.search(new TermQuery(new Term("id", "doc4")), 1, new Sort(new SortField("ndv", SortField.Type.LONG)));
    assertEquals(0L, ((FieldDoc) td.scoreDocs[0]).fields[0]);
    reader.close();
    dir.close();
}
Also used : IndexSearcher(org.apache.lucene.search.IndexSearcher) TermQuery(org.apache.lucene.search.TermQuery) FieldDoc(org.apache.lucene.search.FieldDoc) TopFieldDocs(org.apache.lucene.search.TopFieldDocs) SortField(org.apache.lucene.search.SortField) Document(org.apache.lucene.document.Document) MockAnalyzer(org.apache.lucene.analysis.MockAnalyzer) NumericDocValuesField(org.apache.lucene.document.NumericDocValuesField) StringField(org.apache.lucene.document.StringField) Sort(org.apache.lucene.search.Sort) Directory(org.apache.lucene.store.Directory) NRTCachingDirectory(org.apache.lucene.store.NRTCachingDirectory) Test(org.junit.Test)

Aggregations

TopFieldDocs (org.apache.lucene.search.TopFieldDocs)41 Sort (org.apache.lucene.search.Sort)30 SortField (org.apache.lucene.search.SortField)24 FieldDoc (org.apache.lucene.search.FieldDoc)23 IndexSearcher (org.apache.lucene.search.IndexSearcher)19 MatchAllDocsQuery (org.apache.lucene.search.MatchAllDocsQuery)18 TermQuery (org.apache.lucene.search.TermQuery)15 Document (org.apache.lucene.document.Document)14 ScoreDoc (org.apache.lucene.search.ScoreDoc)12 ArrayList (java.util.ArrayList)11 Term (org.apache.lucene.index.Term)11 Query (org.apache.lucene.search.Query)11 BytesRef (org.apache.lucene.util.BytesRef)9 TopDocs (org.apache.lucene.search.TopDocs)8 Directory (org.apache.lucene.store.Directory)8 HashMap (java.util.HashMap)6 StringField (org.apache.lucene.document.StringField)6 CollapseTopFieldDocs (org.apache.lucene.search.grouping.CollapseTopFieldDocs)5 IOException (java.io.IOException)4 MockAnalyzer (org.apache.lucene.analysis.MockAnalyzer)4