Use of org.apache.lucene.search.DocIdSetIterator in the lucene-solr project by Apache.
From the class TestDocsWithFieldSet, the method testDense.
/**
 * Verifies DocsWithFieldSet iteration in the dense case: an empty set yields an
 * immediately exhausted iterator, a single added doc is returned exactly once,
 * and 1000 consecutive docs iterate back in order without the set allocating
 * any additional memory after the first add.
 */
public void testDense() throws IOException {
  DocsWithFieldSet set = new DocsWithFieldSet();

  // An empty set's iterator is exhausted from the start.
  DocIdSetIterator it = set.iterator();
  assertEquals(DocIdSetIterator.NO_MORE_DOCS, it.nextDoc());

  // A single doc comes back exactly once.
  set.add(0);
  it = set.iterator();
  assertEquals(0, it.nextDoc());
  assertEquals(DocIdSetIterator.NO_MORE_DOCS, it.nextDoc());

  // Adding consecutive docs must not grow memory usage (dense representation).
  final long bytesBefore = set.ramBytesUsed();
  for (int doc = 1; doc < 1000; ++doc) {
    set.add(doc);
  }
  assertEquals(bytesBefore, set.ramBytesUsed());

  // All 1000 docs iterate back in increasing order, then the iterator ends.
  it = set.iterator();
  for (int doc = 0; doc < 1000; ++doc) {
    assertEquals(doc, it.nextDoc());
  }
  assertEquals(DocIdSetIterator.NO_MORE_DOCS, it.nextDoc());
}
Use of org.apache.lucene.search.DocIdSetIterator in the lucene-solr project by Apache.
From the class Filter, the method createWeight.
//
// Query compatibility
//
// Bridges the deprecated Filter API onto the Query/Weight API: the returned
// Weight produces constant-score (0f) scorers over the filter's DocIdSet.
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
return new Weight(this) {
@Override
public void extractTerms(Set<Term> terms) {
// A filter matches by doc id, not by terms, so there is nothing to extract.
}
@Override
public Explanation explain(LeafReaderContext context, int doc) throws IOException {
// A doc matches iff this weight's scorer can advance exactly onto it.
final Scorer scorer = scorer(context);
final boolean match = (scorer != null && scorer.iterator().advance(doc) == doc);
if (match) {
// Filters never contribute to the score.
assert scorer.score() == 0f;
return Explanation.match(0f, "Match on id " + doc);
} else {
// NOTE(review): the non-matching branch also returns Explanation.match(0f, ...)
// rather than a no-match explanation — presumably deliberate for this
// compatibility shim, but confirm against upstream Filter semantics.
return Explanation.match(0f, "No match on id " + doc);
}
}
@Override
public Scorer scorer(LeafReaderContext context) throws IOException {
final DocIdSet set = getDocIdSet(context, null);
if (set == null) {
// No docs match in this segment.
return null;
}
// When random-access bits exist and lazy application is requested, expose
// the filter as a two-phase iterator: a match-all approximation refined by
// a per-doc bits check, so the check only runs on candidate docs.
if (applyLazily && set.bits() != null) {
final Bits bits = set.bits();
final DocIdSetIterator approximation = DocIdSetIterator.all(context.reader().maxDoc());
final TwoPhaseIterator twoPhase = new TwoPhaseIterator(approximation) {
@Override
public boolean matches() throws IOException {
return bits.get(approximation.docID());
}
@Override
public float matchCost() {
// TODO use cost of bits.get()
return 10;
}
};
return new ConstantScoreScorer(this, 0f, twoPhase);
}
// Otherwise iterate the set directly; a null iterator also means no docs match.
final DocIdSetIterator iterator = set.iterator();
if (iterator == null) {
return null;
}
return new ConstantScoreScorer(this, 0f, iterator);
}
};
}
Use of org.apache.lucene.search.DocIdSetIterator in the lucene-solr project by Apache.
From the class SortedSetDocValuesFacetCounts, the method countOneSegment.
/**
 * Counts facet ordinals for a single segment. When an OrdinalMap is supplied,
 * per-segment ordinals are remapped into global ordinal space — either on the
 * fly (sparse hits) or via a temporary per-segment count array that is migrated
 * in one pass afterwards (dense hits). Without a map, segment ordinals are
 * aggregated directly into {@code counts}.
 */
private void countOneSegment(MultiDocValues.OrdinalMap ordinalMap, LeafReader reader, int segOrd, MatchingDocs hits) throws IOException {
  SortedSetDocValues segValues = reader.getSortedSetDocValues(field);
  if (segValues == null) {
    // No doc values for this field in this segment: nothing to count.
    return;
  }

  // Iterate every doc with a value, or only hit docs that also have a value.
  final DocIdSetIterator it = (hits == null)
      ? segValues
      : ConjunctionDISI.intersectIterators(Arrays.asList(hits.bits.iterator(), segValues));

  if (ordinalMap == null) {
    // No remapping needed: aggregate segment ordinals directly.
    for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
      for (long ord = segValues.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = segValues.nextOrd()) {
        counts[(int) ord]++;
      }
    }
    return;
  }

  final LongValues ordMap = ordinalMap.getGlobalOrds(segOrd);
  final int numSegOrds = (int) segValues.getValueCount();

  if (hits != null && hits.totalHits < numSegOrds / 10) {
    // Sparse hits: cheaper to remap each ord to its global ord as we iterate.
    for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
      for (long ord = segValues.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = segValues.nextOrd()) {
        counts[(int) ordMap.get((int) ord)]++;
      }
    }
  } else {
    // Dense hits: count in segment-ord space first ...
    final int[] segCounts = new int[numSegOrds];
    for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
      for (long ord = segValues.nextOrd(); ord != SortedSetDocValues.NO_MORE_ORDS; ord = segValues.nextOrd()) {
        segCounts[(int) ord]++;
      }
    }
    // ... then migrate only the non-zero buckets into global-ord space.
    for (int ord = 0; ord < numSegOrds; ord++) {
      final int count = segCounts[ord];
      if (count != 0) {
        counts[(int) ordMap.get(ord)] += count;
      }
    }
  }
}
Use of org.apache.lucene.search.DocIdSetIterator in the lucene-solr project by Apache.
From the class FastTaxonomyFacetCounts, the method count.
/**
 * Counts taxonomy ordinals over all matching docs. Each doc's binary payload
 * holds a sequence of variable-length, delta-encoded ordinals: bytes with the
 * high bit set are continuation bytes carrying 7 payload bits, and each fully
 * decoded value is added to the previously decoded ordinal to recover the
 * absolute ordinal. Finishes with {@link #rollup()}.
 */
private final void count(List<MatchingDocs> matchingDocs) throws IOException {
  for (MatchingDocs hits : matchingDocs) {
    BinaryDocValues dv = hits.context.reader().getBinaryDocValues(indexFieldName);
    if (dv == null) {
      // This reader has no doc values for the requested category list.
      continue;
    }
    // Walk only hit docs that also carry an ordinals payload.
    DocIdSetIterator it = ConjunctionDISI.intersectIterators(Arrays.asList(hits.bits.iterator(), dv));
    int doc = it.nextDoc();
    while (doc != DocIdSetIterator.NO_MORE_DOCS) {
      final BytesRef ref = dv.binaryValue();
      final byte[] data = ref.bytes;
      final int limit = ref.offset + ref.length;
      int pos = ref.offset;
      int ord = 0;
      int previous = 0;
      while (pos < limit) {
        final byte b = data[pos++];
        if (b >= 0) {
          // Final byte of the vint: fold in the delta and count the ordinal.
          ord = ((ord << 7) | b) + previous;
          ++values[ord];
          previous = ord;
          ord = 0;
        } else {
          // Continuation byte: accumulate its low 7 bits.
          ord = (ord << 7) | (b & 0x7F);
        }
      }
      doc = it.nextDoc();
    }
  }
  rollup();
}
Use of org.apache.lucene.search.DocIdSetIterator in the lucene-solr project by Apache.
From the class TaxonomyFacetSumFloatAssociations, the method sumValues.
/**
 * Sums float associations per ordinal over all matching docs. Each doc's
 * binary payload is a sequence of fixed-width pairs — a big-endian 4-byte
 * ordinal followed by the big-endian 4-byte raw bits of a float — and the
 * decoded float is accumulated into {@code values[ordinal]}.
 */
private final void sumValues(List<MatchingDocs> matchingDocs) throws IOException {
  for (MatchingDocs hits : matchingDocs) {
    BinaryDocValues dv = hits.context.reader().getBinaryDocValues(indexFieldName);
    if (dv == null) {
      // This reader has no doc values for the requested category list.
      continue;
    }
    DocIdSetIterator docs = hits.bits.iterator();
    for (int doc = docs.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = docs.nextDoc()) {
      // BytesRef getAssociation()?
      // Advance the doc-values cursor up to (at most) the current hit.
      if (dv.docID() < doc) {
        dv.advance(doc);
      }
      if (dv.docID() != doc) {
        // This hit carries no association payload.
        continue;
      }
      final BytesRef ref = dv.binaryValue();
      final byte[] data = ref.bytes;
      final int limit = ref.offset + ref.length;
      int pos = ref.offset;
      while (pos < limit) {
        // Big-endian int: the ordinal.
        final int ord = ((data[pos] & 0xFF) << 24) | ((data[pos + 1] & 0xFF) << 16) | ((data[pos + 2] & 0xFF) << 8) | (data[pos + 3] & 0xFF);
        pos += 4;
        // Big-endian int: raw bits of the float association value.
        final int bits = ((data[pos] & 0xFF) << 24) | ((data[pos + 1] & 0xFF) << 16) | ((data[pos + 2] & 0xFF) << 8) | (data[pos + 3] & 0xFF);
        pos += 4;
        values[ord] += Float.intBitsToFloat(bits);
      }
    }
  }
}
Aggregations