Use of org.apache.lucene.search.DocIdSet in project lucene-solr by apache.
Class AnalyticsStats, method execute.
/**
 * Calculates the analytics requested in the parameters.
 *
 * @return List of results formatted to mirror the input XML.
 * @throws IOException if execution fails
 */
public NamedList<?> execute() throws IOException {
  statsCollector.startRequest();
  NamedList<Object> res = new NamedList<>();
  List<AnalyticsRequest> requests = AnalyticsRequestFactory.parse(searcher.getSchema(), params);
  if (requests == null || requests.size() == 0) {
    return res;
  }
  statsCollector.addRequests(requests.size());

  // Get filter to all docs
  Filter filter = docs.getTopFilter();

  // Compute each analytics request separately
  for (AnalyticsRequest areq : requests) {
    // The accumulator which will control the statistics generation
    // for the entire analytics request
    ValueAccumulator accumulator;

    // The number of total facet requests
    int facets = areq.getFieldFacets().size() + areq.getRangeFacets().size() + areq.getQueryFacets().size();
    try {
      if (facets == 0) {
        accumulator = BasicAccumulator.create(searcher, docs, areq);
      } else {
        accumulator = FacetingAccumulator.create(searcher, docs, areq, req);
      }
    } catch (IOException e) {
      log.warn("Analytics request '" + areq.getName() + "' failed", e);
      continue;
    }

    statsCollector.addStatsCollected(((BasicAccumulator) accumulator).getNumStatsCollectors());
    statsCollector.addStatsRequests(areq.getExpressions().size());
    statsCollector.addFieldFacets(areq.getFieldFacets().size());
    statsCollector.addRangeFacets(areq.getRangeFacets().size());
    statsCollector.addQueryFacets(areq.getQueryFacets().size());
    statsCollector.addQueries(((BasicAccumulator) accumulator).getNumQueries());

    // Loop through the documents returned by the query and add to accumulator
    List<LeafReaderContext> contexts = searcher.getTopReaderContext().leaves();
    for (int leafNum = 0; leafNum < contexts.size(); leafNum++) {
      LeafReaderContext context = contexts.get(leafNum);
      // Solr DocSets already exclude any deleted docs
      DocIdSet dis = filter.getDocIdSet(context, null);
      DocIdSetIterator disi = null;
      if (dis != null) {
        disi = dis.iterator();
      }
      if (disi != null) {
        accumulator.getLeafCollector(context);
        int doc = disi.nextDoc();
        while (doc != DocIdSetIterator.NO_MORE_DOCS) {
          // Add a document to the statistics being generated
          accumulator.collect(doc);
          doc = disi.nextDoc();
        }
      }
    }

    // Do some post-processing
    accumulator.postProcess();
    // Compute the stats
    accumulator.compute();
    res.add(areq.getName(), accumulator.export());
  }
  statsCollector.endRequest();
  return res;
}
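A quick aside on the contract those two null checks guard: getDocIdSet may return null for a segment with no matches, and DocIdSet.iterator() is itself allowed to return null for an empty set, so both checks are required. Below is a minimal, self-contained sketch of that iteration contract, using a FixedBitSet-backed BitDocIdSet in place of the filter-produced set above (the class name DocIdSetContractDemo is ours, not part of lucene-solr):

import java.io.IOException;

import org.apache.lucene.search.DocIdSet;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BitDocIdSet;
import org.apache.lucene.util.FixedBitSet;

public class DocIdSetContractDemo {
  public static void main(String[] args) throws IOException {
    // Build a small set with doc ids 3, 7, and 12 set.
    FixedBitSet bits = new FixedBitSet(16);
    bits.set(3);
    bits.set(7);
    bits.set(12);
    DocIdSet dis = new BitDocIdSet(bits);

    // iterator() may return null to signal an empty set, which is
    // why the snippet above checks both dis and disi before looping.
    DocIdSetIterator disi = dis.iterator();
    if (disi != null) {
      for (int doc = disi.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = disi.nextDoc()) {
        System.out.println("matched doc " + doc);
      }
    }
  }
}

Note that ids produced this way are segment-local; code that needs index-wide ids adds the segment's context.docBase, while the accumulator above collects per leaf and can use them directly.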
Use of org.apache.lucene.search.DocIdSet in project lucene-solr by apache.
Class BlockJoinDocSetFacetComponent, method process.
@Override
public void process(ResponseBuilder rb) throws IOException {
  final BlockJoinParentQParser.AllParentsAware bjq =
      (BlockJoinParentQParser.AllParentsAware) rb.req.getContext().get(bjqKey);
  if (bjq != null) {
    final DocSet parentResult = rb.getResults().docSet;
    final BitDocSet allParentsBitsDocSet = rb.req.getSearcher().getDocSetBits(bjq.getParentQuery());
    final DocSet allChildren = BlockJoin.toChildren(parentResult, allParentsBitsDocSet,
        rb.req.getSearcher().getDocSetBits(new MatchAllDocsQuery()),
        QueryContext.newContext(rb.req.getSearcher()));
    final DocSet childQueryDocSet = rb.req.getSearcher().getDocSet(bjq.getChildQuery());
    final DocSet selectedChildren = allChildren.intersection(childQueryDocSet);
    // don't include parents in facet counts
    //childResult = childResult.union(parentResult);// just to mimic the current logic
    final List<LeafReaderContext> leaves = rb.req.getSearcher().getIndexReader().leaves();
    Filter filter = selectedChildren.getTopFilter();
    final BlockJoinFacetAccsHolder facetCounter = new BlockJoinFacetAccsHolder(rb.req);
    for (int subIdx = 0; subIdx < leaves.size(); subIdx++) {
      LeafReaderContext subCtx = leaves.get(subIdx);
      // Solr DocSets already exclude any deleted docs
      DocIdSet dis = filter.getDocIdSet(subCtx, null);
      AggregatableDocIter iter = new SegmentChildren(subCtx, dis, allParentsBitsDocSet);
      if (iter.hasNext()) {
        facetCounter.doSetNextReader(subCtx);
        facetCounter.countFacets(iter);
      }
    }
    facetCounter.finish();
    rb.req.getContext().put(COLLECTOR_CONTEXT_PARAM, facetCounter);
    super.process(rb);
  }
}
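Here SegmentChildren adapts the per-segment DocIdSet into an AggregatableDocIter so the facet counter can walk child documents while knowing which docs are parents. Its internals are not shown on this page; the following is only a rough, hypothetical sketch of how such an iterator can consult a parent bitset to skip parent documents (ChildOnlyIterator and nextChild are illustrative names, not lucene-solr API):

import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.Bits;

/** Illustrative only: the real SegmentChildren in lucene-solr may differ. */
class ChildOnlyIterator {
  private final DocIdSetIterator disi; // per-segment matches, e.g. dis.iterator()
  private final Bits parents;          // segment-local view of the parent bitset

  ChildOnlyIterator(DocIdSetIterator disi, Bits parents) {
    this.disi = disi;
    this.parents = parents;
  }

  /** Returns the next non-parent doc id, or DocIdSetIterator.NO_MORE_DOCS when exhausted. */
  int nextChild() throws IOException {
    int doc = disi.nextDoc();
    while (doc != DocIdSetIterator.NO_MORE_DOCS && parents.get(doc)) {
      doc = disi.nextDoc(); // skip docs flagged as parents
    }
    return doc;
  }
}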