Search in sources :

Example 1 with ValueAccumulator

Use of org.apache.solr.analytics.accumulator.ValueAccumulator in the lucene-solr project by Apache.

From the class AnalyticsStats, the method execute.

/**
 * Calculates the analytics requested in the Parameters.
 *
 * @return List of results formatted to mirror the input XML.
 * @throws IOException if execution fails
 */
public NamedList<?> execute() throws IOException {
    statsCollector.startRequest();
    NamedList<Object> res = new NamedList<>();
    List<AnalyticsRequest> requests = AnalyticsRequestFactory.parse(searcher.getSchema(), params);
    if (requests == null || requests.isEmpty()) {
        return res;
    }
    statsCollector.addRequests(requests.size());
    // Filter matching all docs in the result set
    Filter filter = docs.getTopFilter();
    // Compute each analytics request separately
    for (AnalyticsRequest areq : requests) {
        // The accumulator which will control the statistics generation
        // for the entire analytics request
        ValueAccumulator accumulator;
        // The number of total facet requests across field, range, and query facets
        int facets = areq.getFieldFacets().size() + areq.getRangeFacets().size() + areq.getQueryFacets().size();
        try {
            if (facets == 0) {
                accumulator = BasicAccumulator.create(searcher, docs, areq);
            } else {
                accumulator = FacetingAccumulator.create(searcher, docs, areq, req);
            }
        } catch (IOException e) {
            // A failing request is skipped rather than aborting the whole batch.
            // Parameterized logging avoids eager string concatenation; the
            // throwable as the last argument still records the stack trace.
            log.warn("Analytics request '{}' failed", areq.getName(), e);
            continue;
        }
        statsCollector.addStatsCollected(((BasicAccumulator) accumulator).getNumStatsCollectors());
        statsCollector.addStatsRequests(areq.getExpressions().size());
        statsCollector.addFieldFacets(areq.getFieldFacets().size());
        statsCollector.addRangeFacets(areq.getRangeFacets().size());
        statsCollector.addQueryFacets(areq.getQueryFacets().size());
        statsCollector.addQueries(((BasicAccumulator) accumulator).getNumQueries());
        // Loop through the documents returned by the query and add to accumulator
        for (LeafReaderContext context : searcher.getTopReaderContext().leaves()) {
            // solr docsets already exclude any deleted docs
            DocIdSet dis = filter.getDocIdSet(context, null);
            if (dis == null) {
                // no matching documents in this segment
                continue;
            }
            DocIdSetIterator disi = dis.iterator();
            if (disi == null) {
                continue;
            }
            // Point the accumulator at the current leaf before collecting
            accumulator.getLeafCollector(context);
            for (int doc = disi.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = disi.nextDoc()) {
                // Add a document to the statistics being generated
                accumulator.collect(doc);
            }
        }
        // do some post-processing
        accumulator.postProcess();
        // compute the stats
        accumulator.compute();
        res.add(areq.getName(), accumulator.export());
    }
    statsCollector.endRequest();
    return res;
}
Also used : NamedList(org.apache.solr.common.util.NamedList) ValueAccumulator(org.apache.solr.analytics.accumulator.ValueAccumulator) DocIdSet(org.apache.lucene.search.DocIdSet) IOException(java.io.IOException) Filter(org.apache.solr.search.Filter) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) DocIdSetIterator(org.apache.lucene.search.DocIdSetIterator)

Aggregations

IOException (java.io.IOException)1 LeafReaderContext (org.apache.lucene.index.LeafReaderContext)1 DocIdSet (org.apache.lucene.search.DocIdSet)1 DocIdSetIterator (org.apache.lucene.search.DocIdSetIterator)1 ValueAccumulator (org.apache.solr.analytics.accumulator.ValueAccumulator)1 NamedList (org.apache.solr.common.util.NamedList)1 Filter (org.apache.solr.search.Filter)1