Use of org.apache.lucene.search.DocIdSetIterator in project lucene-solr by apache.
From class TestFixedBitSet, method doIterate1.
void doIterate1(java.util.BitSet a, FixedBitSet b) throws IOException {
  assertEquals(a.cardinality(), b.cardinality());
  int aa = -1, bb = -1;
  DocIdSetIterator iterator = new BitSetIterator(b, 0);
  do {
    aa = a.nextSetBit(aa + 1);
    // randomly exercise both traversal methods: nextDoc() and advance()
    bb = (bb < b.length() && random().nextBoolean()) ? iterator.nextDoc() : iterator.advance(bb + 1);
    assertEquals(aa == -1 ? DocIdSetIterator.NO_MORE_DOCS : aa, bb);
  } while (aa >= 0);
}
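For context, the contract this test verifies: a BitSetIterator walks the set bits of a FixedBitSet in increasing order, nextDoc() moves to the next set bit, advance(target) jumps to the first set bit at or beyond target, and both return DocIdSetIterator.NO_MORE_DOCS once the set is exhausted. A minimal standalone sketch of that consumption pattern (the bit positions and the cost argument are illustrative, not taken from the test):

import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BitSetIterator;
import org.apache.lucene.util.FixedBitSet;

public class BitSetIteratorSketch {
  public static void main(String[] args) throws IOException {
    FixedBitSet bits = new FixedBitSet(16); // capacity of 16 bits
    bits.set(2);
    bits.set(5);
    bits.set(11);
    // the second argument is the iterator's cost estimate; the set's
    // cardinality is a natural choice
    DocIdSetIterator it = new BitSetIterator(bits, bits.cardinality());
    for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
      System.out.println("doc=" + doc); // prints 2, 5, 11
    }
  }
}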
Use of org.apache.lucene.search.DocIdSetIterator in project lucene-solr by apache.
From class BaseDocIdSetTestCase, method assertEquals.
/** Assert that the content of the {@link DocIdSet} is the same as the content of the {@link BitSet}. */
public void assertEquals(int numBits, BitSet ds1, T ds2) throws IOException {
  // nextDoc
  DocIdSetIterator it2 = ds2.iterator();
  if (it2 == null) {
    assertEquals(-1, ds1.nextSetBit(0));
  } else {
    assertEquals(-1, it2.docID());
    for (int doc = ds1.nextSetBit(0); doc != -1; doc = ds1.nextSetBit(doc + 1)) {
      assertEquals(doc, it2.nextDoc());
      assertEquals(doc, it2.docID());
    }
    assertEquals(DocIdSetIterator.NO_MORE_DOCS, it2.nextDoc());
    assertEquals(DocIdSetIterator.NO_MORE_DOCS, it2.docID());
  }
  // nextDoc / advance
  it2 = ds2.iterator();
  if (it2 == null) {
    assertEquals(-1, ds1.nextSetBit(0));
  } else {
    for (int doc = -1; doc != DocIdSetIterator.NO_MORE_DOCS; ) {
      if (random().nextBoolean()) {
        doc = ds1.nextSetBit(doc + 1);
        if (doc == -1) {
          doc = DocIdSetIterator.NO_MORE_DOCS;
        }
        assertEquals(doc, it2.nextDoc());
        assertEquals(doc, it2.docID());
      } else {
        final int target = doc + 1 + random().nextInt(random().nextBoolean() ? 64 : Math.max(numBits / 8, 1));
        doc = ds1.nextSetBit(target);
        if (doc == -1) {
          doc = DocIdSetIterator.NO_MORE_DOCS;
        }
        assertEquals(doc, it2.advance(target));
        assertEquals(doc, it2.docID());
      }
    }
  }
  // bits()
  final Bits bits = ds2.bits();
  if (bits != null) {
    // test consistency between bits and iterator
    it2 = ds2.iterator();
    for (int previousDoc = -1, doc = it2.nextDoc(); ; previousDoc = doc, doc = it2.nextDoc()) {
      final int max = doc == DocIdSetIterator.NO_MORE_DOCS ? bits.length() : doc;
      for (int i = previousDoc + 1; i < max; ++i) {
        assertEquals(false, bits.get(i));
      }
      if (doc == DocIdSetIterator.NO_MORE_DOCS) {
        break;
      }
      assertEquals(true, bits.get(doc));
    }
  }
}
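The test above pins down the full DocIdSetIterator contract: docID() is -1 before iteration starts, nextDoc() and advance(target) only move forward, advance(target) lands on the first document at or beyond target, and all three methods report NO_MORE_DOCS after exhaustion. A minimal sketch of a conforming implementation over a sorted int array (this class is hypothetical, not part of Lucene; real implementations typically replace the linear scan in advance() with something smarter):

import org.apache.lucene.search.DocIdSetIterator;

final class SortedIntsIterator extends DocIdSetIterator {
  private final int[] docs; // sorted, distinct doc ids
  private int idx = -1;     // -1 means iteration has not started

  SortedIntsIterator(int[] docs) {
    this.docs = docs;
  }

  @Override
  public int docID() {
    if (idx == -1) {
      return -1; // before the first nextDoc()/advance() call
    }
    return idx < docs.length ? docs[idx] : NO_MORE_DOCS;
  }

  @Override
  public int nextDoc() {
    idx++;
    return docID();
  }

  @Override
  public int advance(int target) {
    // move past the current doc to the first doc >= target
    do {
      idx++;
    } while (idx < docs.length && docs[idx] < target);
    return docID();
  }

  @Override
  public long cost() {
    return docs.length; // an upper bound on the number of docs returned
  }
}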
Use of org.apache.lucene.search.DocIdSetIterator in project lucene-solr by apache.
From class AnalyticsStats, method execute.
/**
 * Calculates the analytics requested in the parameters.
 *
 * @return List of results formatted to mirror the input XML.
 * @throws IOException if execution fails
 */
public NamedList<?> execute() throws IOException {
  statsCollector.startRequest();
  NamedList<Object> res = new NamedList<>();
  List<AnalyticsRequest> requests;
  requests = AnalyticsRequestFactory.parse(searcher.getSchema(), params);
  if (requests == null || requests.size() == 0) {
    return res;
  }
  statsCollector.addRequests(requests.size());
  // Get filter to all docs
  Filter filter = docs.getTopFilter();
  // Compute each analytics request separately
  for (AnalyticsRequest areq : requests) {
    // The Accumulator which will control the statistics generation
    // for the entire analytics request
    ValueAccumulator accumulator;
    // The number of total facet requests
    int facets = areq.getFieldFacets().size() + areq.getRangeFacets().size() + areq.getQueryFacets().size();
    try {
      if (facets == 0) {
        accumulator = BasicAccumulator.create(searcher, docs, areq);
      } else {
        accumulator = FacetingAccumulator.create(searcher, docs, areq, req);
      }
    } catch (IOException e) {
      log.warn("Analytics request '" + areq.getName() + "' failed", e);
      continue;
    }
    statsCollector.addStatsCollected(((BasicAccumulator) accumulator).getNumStatsCollectors());
    statsCollector.addStatsRequests(areq.getExpressions().size());
    statsCollector.addFieldFacets(areq.getFieldFacets().size());
    statsCollector.addRangeFacets(areq.getRangeFacets().size());
    statsCollector.addQueryFacets(areq.getQueryFacets().size());
    statsCollector.addQueries(((BasicAccumulator) accumulator).getNumQueries());
    // Loop through the documents returned by the query and add to accumulator
    List<LeafReaderContext> contexts = searcher.getTopReaderContext().leaves();
    for (int leafNum = 0; leafNum < contexts.size(); leafNum++) {
      LeafReaderContext context = contexts.get(leafNum);
      // Solr DocSets already exclude any deleted docs
      DocIdSet dis = filter.getDocIdSet(context, null);
      DocIdSetIterator disi = null;
      if (dis != null) {
        disi = dis.iterator();
      }
      if (disi != null) {
        accumulator.getLeafCollector(context);
        int doc = disi.nextDoc();
        while (doc != DocIdSetIterator.NO_MORE_DOCS) {
          // Add a document to the statistics being generated
          accumulator.collect(doc);
          doc = disi.nextDoc();
        }
      }
    }
    // do some post-processing
    accumulator.postProcess();
    // compute the stats
    accumulator.compute();
    res.add(areq.getName(), accumulator.export());
  }
  statsCollector.endRequest();
  return res;
}
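Beyond driving a single accumulator, the nextDoc()/advance() pair is what makes iterators composable. A minimal sketch (a hypothetical helper, not part of AnalyticsStats) of the classic leapfrog intersection of two DocIdSetIterators over the same segment, built from exactly the primitives used above:

import java.io.IOException;

import org.apache.lucene.search.DocIdSetIterator;

final class Leapfrog {
  /** Walks the documents present in both iterators, in increasing order. */
  static void collectIntersection(DocIdSetIterator a, DocIdSetIterator b) throws IOException {
    int docA = a.nextDoc();
    int docB = b.nextDoc();
    while (docA != DocIdSetIterator.NO_MORE_DOCS && docB != DocIdSetIterator.NO_MORE_DOCS) {
      if (docA == docB) {
        System.out.println("match doc=" + docA); // "collect" the shared doc
        docA = a.nextDoc();
        docB = b.nextDoc();
      } else if (docA < docB) {
        docA = a.advance(docB); // skip a forward to b's position
      } else {
        docB = b.advance(docA); // skip b forward to a's position
      }
    }
  }
}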