
Example 26 with DocIdSet

use of org.apache.lucene.search.DocIdSet in project lucene-solr by apache.

the class AnalyticsStats method execute.

/**
   * Calculates the analytics requested in the parameters.
   * 
   * @return List of results formatted to mirror the input XML.
   * @throws IOException if execution fails
   */
public NamedList<?> execute() throws IOException {
    statsCollector.startRequest();
    NamedList<Object> res = new NamedList<>();
    List<AnalyticsRequest> requests;
    requests = AnalyticsRequestFactory.parse(searcher.getSchema(), params);
    if (requests == null || requests.size() == 0) {
        return res;
    }
    statsCollector.addRequests(requests.size());
    // Get filter to all docs
    Filter filter = docs.getTopFilter();
    // Computing each Analytics Request Separately
    for (AnalyticsRequest areq : requests) {
        // The Accumulator which will control the statistics generation
        // for the entire analytics request
        ValueAccumulator accumulator;
        // The number of total facet requests
        int facets = areq.getFieldFacets().size() + areq.getRangeFacets().size() + areq.getQueryFacets().size();
        try {
            if (facets == 0) {
                accumulator = BasicAccumulator.create(searcher, docs, areq);
            } else {
                accumulator = FacetingAccumulator.create(searcher, docs, areq, req);
            }
        } catch (IOException e) {
            log.warn("Analytics request '" + areq.getName() + "' failed", e);
            continue;
        }
        statsCollector.addStatsCollected(((BasicAccumulator) accumulator).getNumStatsCollectors());
        statsCollector.addStatsRequests(areq.getExpressions().size());
        statsCollector.addFieldFacets(areq.getFieldFacets().size());
        statsCollector.addRangeFacets(areq.getRangeFacets().size());
        statsCollector.addQueryFacets(areq.getQueryFacets().size());
        statsCollector.addQueries(((BasicAccumulator) accumulator).getNumQueries());
        // Loop through the documents returned by the query and add to accumulator
        List<LeafReaderContext> contexts = searcher.getTopReaderContext().leaves();
        for (int leafNum = 0; leafNum < contexts.size(); leafNum++) {
            LeafReaderContext context = contexts.get(leafNum);
            // solr docsets already exclude any deleted docs
            DocIdSet dis = filter.getDocIdSet(context, null);
            DocIdSetIterator disi = null;
            if (dis != null) {
                disi = dis.iterator();
            }
            if (disi != null) {
                // getLeafCollector() has the side effect of pointing the accumulator at this
                // segment; the returned collector is not needed because collect(doc) is called
                // directly on the accumulator below
                accumulator.getLeafCollector(context);
                int doc = disi.nextDoc();
                while (doc != DocIdSetIterator.NO_MORE_DOCS) {
                    // Add a document to the statistics being generated
                    accumulator.collect(doc);
                    doc = disi.nextDoc();
                }
            }
        }
        // do some post-processing
        accumulator.postProcess();
        // compute the stats
        accumulator.compute();
        res.add(areq.getName(), accumulator.export());
    }
    statsCollector.endRequest();
    return res;
}
Also used : NamedList(org.apache.solr.common.util.NamedList) ValueAccumulator(org.apache.solr.analytics.accumulator.ValueAccumulator) DocIdSet(org.apache.lucene.search.DocIdSet) IOException(java.io.IOException) Filter(org.apache.solr.search.Filter) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) DocIdSetIterator(org.apache.lucene.search.DocIdSetIterator)
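This example (like Example 27 below) follows the same per-segment idiom: obtain a top-level Filter from a Solr DocSet, ask it for a DocIdSet per LeafReaderContext, and walk the iterator. The sketch below isolates that pattern in a self-contained helper; DocIdSetWalker and LeafDocConsumer are illustrative names and are not part of lucene-solr.

import java.io.IOException;
import java.util.List;

import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.Filter;
import org.apache.solr.search.SolrIndexSearcher;

/** Illustrative helper that visits every document selected by a DocSet, one segment at a time. */
public class DocIdSetWalker {

    /** Callback invoked for each matching document; doc ids are segment-local. */
    public interface LeafDocConsumer {
        void accept(LeafReaderContext leaf, int segmentDocId) throws IOException;
    }

    public static void walk(SolrIndexSearcher searcher, DocSet docs, LeafDocConsumer consumer) throws IOException {
        // Expose the DocSet as a top-level Filter, as the examples above do
        Filter filter = docs.getTopFilter();
        List<LeafReaderContext> leaves = searcher.getTopReaderContext().leaves();
        for (LeafReaderContext leaf : leaves) {
            // Solr DocSets already exclude deleted docs, so no acceptDocs Bits are passed
            DocIdSet dis = filter.getDocIdSet(leaf, null);
            if (dis == null) {
                // no documents from this segment are in the set
                continue;
            }
            DocIdSetIterator disi = dis.iterator();
            if (disi == null) {
                // an empty DocIdSet may legally return a null iterator
                continue;
            }
            for (int doc = disi.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = disi.nextDoc()) {
                // doc is relative to the segment; add leaf.docBase for an index-wide id
                consumer.accept(leaf, doc);
            }
        }
    }
}

Guarding against both a null DocIdSet and a null iterator matters because either one is a legal way for a segment to report "no matches".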

Example 27 with DocIdSet

use of org.apache.lucene.search.DocIdSet in project lucene-solr by apache.

the class BlockJoinDocSetFacetComponent method process.

@Override
public void process(ResponseBuilder rb) throws IOException {
    final BlockJoinParentQParser.AllParentsAware bjq = (BlockJoinParentQParser.AllParentsAware) rb.req.getContext().get(bjqKey);
    if (bjq != null) {
        final DocSet parentResult = rb.getResults().docSet;
        final BitDocSet allParentsBitsDocSet = rb.req.getSearcher().getDocSetBits(bjq.getParentQuery());
        final DocSet allChildren = BlockJoin.toChildren(parentResult, allParentsBitsDocSet, rb.req.getSearcher().getDocSetBits(new MatchAllDocsQuery()), QueryContext.newContext(rb.req.getSearcher()));
        final DocSet childQueryDocSet = rb.req.getSearcher().getDocSet(bjq.getChildQuery());
        final DocSet selectedChildren = allChildren.intersection(childQueryDocSet);
        // don't include parent into facet counts
        //childResult = childResult.union(parentResult);// just to mimic the current logic
        final List<LeafReaderContext> leaves = rb.req.getSearcher().getIndexReader().leaves();
        Filter filter = selectedChildren.getTopFilter();
        final BlockJoinFacetAccsHolder facetCounter = new BlockJoinFacetAccsHolder(rb.req);
        for (int subIdx = 0; subIdx < leaves.size(); subIdx++) {
            LeafReaderContext subCtx = leaves.get(subIdx);
            // solr docsets already exclude any deleted docs
            DocIdSet dis = filter.getDocIdSet(subCtx, null);
            AggregatableDocIter iter = new SegmentChildren(subCtx, dis, allParentsBitsDocSet);
            if (iter.hasNext()) {
                facetCounter.doSetNextReader(subCtx);
                facetCounter.countFacets(iter);
            }
        }
        facetCounter.finish();
        rb.req.getContext().put(COLLECTOR_CONTEXT_PARAM, facetCounter);
        super.process(rb);
    }
}
Also used : AggregatableDocIter(org.apache.solr.search.join.BlockJoinFieldFacetAccumulator.AggregatableDocIter) DocIdSet(org.apache.lucene.search.DocIdSet) MatchAllDocsQuery(org.apache.lucene.search.MatchAllDocsQuery) BitDocSet(org.apache.solr.search.BitDocSet) Filter(org.apache.solr.search.Filter) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) BitDocSet(org.apache.solr.search.BitDocSet) DocSet(org.apache.solr.search.DocSet)
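Example 27 consumes a DocIdSet in the same way; on the producer side, the simplest way to materialize one is to wrap a FixedBitSet in a BitDocIdSet, two types the Aggregations list below shows appearing in several of the other examples. The snippet below is a minimal, self-contained sketch; BitDocIdSetExample and markEvenDocs are illustrative names, not lucene-solr code.

import java.io.IOException;

import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.FixedBitSet;

public class BitDocIdSetExample {

    /** Builds a DocIdSet over maxDoc documents containing every even doc id (illustrative). */
    public static DocIdSet markEvenDocs(int maxDoc) {
        FixedBitSet bits = new FixedBitSet(maxDoc);
        for (int doc = 0; doc < maxDoc; doc += 2) {
            bits.set(doc);
        }
        // BitDocIdSet adapts a Lucene BitSet (here a FixedBitSet) to the DocIdSet API
        return new BitDocIdSet(bits);
    }

    public static void main(String[] args) throws IOException {
        DocIdSet dis = markEvenDocs(10);
        DocIdSetIterator disi = dis.iterator();
        for (int doc = disi.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = disi.nextDoc()) {
            System.out.println("doc " + doc);
        }
    }
}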

Aggregations

DocIdSet (org.apache.lucene.search.DocIdSet) 27
LeafReaderContext (org.apache.lucene.index.LeafReaderContext) 16
DocIdSetIterator (org.apache.lucene.search.DocIdSetIterator) 14
Filter (org.apache.solr.search.Filter) 6
Bits (org.apache.lucene.util.Bits) 5
FixedBitSet (org.apache.lucene.util.FixedBitSet) 5
LeafReader (org.apache.lucene.index.LeafReader) 4
SortedDocValues (org.apache.lucene.index.SortedDocValues) 4
SortedSetDocValues (org.apache.lucene.index.SortedSetDocValues) 4
BitDocIdSet (org.apache.lucene.util.BitDocIdSet) 4
IOException (java.io.IOException) 3
ConstantScoreScorer (org.apache.lucene.search.ConstantScoreScorer) 3
IndexSearcher (org.apache.lucene.search.IndexSearcher) 3
Scorer (org.apache.lucene.search.Scorer) 3
Weight (org.apache.lucene.search.Weight) 3
IndexReader (org.apache.lucene.index.IndexReader) 2
IndexReaderContext (org.apache.lucene.index.IndexReaderContext) 2
MultiDocValues (org.apache.lucene.index.MultiDocValues) 2
MultiSortedSetDocValues (org.apache.lucene.index.MultiDocValues.MultiSortedSetDocValues) 2
OrdinalMap (org.apache.lucene.index.MultiDocValues.OrdinalMap) 2