Example use of org.apache.lucene.search.DocIdSetIterator in the Apache lucene-solr project: the sumValues method of the TaxonomyFacetSumIntAssociations class.
/**
 * Accumulates int association values for all matching documents.
 * <p>
 * For every hit, reads the binary doc values of the configured index field and
 * decodes its payload as a sequence of big-endian (ordinal, value) int pairs,
 * adding each value into {@code values[ordinal]}.
 *
 * @param matchingDocs per-segment collections of matching documents
 * @throws IOException if reading doc values fails
 */
private final void sumValues(List<MatchingDocs> matchingDocs) throws IOException {
  for (MatchingDocs hits : matchingDocs) {
    BinaryDocValues dv = hits.context.reader().getBinaryDocValues(indexFieldName);
    if (dv == null) {
      // Segment has no doc values for the requested category list field; skip it.
      continue;
    }
    DocIdSetIterator disi = hits.bits.iterator();
    for (int docId = disi.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = disi.nextDoc()) {
      // Align the doc-values cursor with the current hit; dv may not contain every doc.
      if (dv.docID() < docId) {
        dv.advance(docId);
      }
      if (dv.docID() == docId) {
        final BytesRef ref = dv.binaryValue();
        final byte[] buf = ref.bytes;
        int pos = ref.offset;
        final int limit = ref.offset + ref.length;
        // Each 8-byte chunk is one big-endian (ordinal, value) pair.
        while (pos < limit) {
          int ord = ((buf[pos] & 0xFF) << 24) | ((buf[pos + 1] & 0xFF) << 16) | ((buf[pos + 2] & 0xFF) << 8) | (buf[pos + 3] & 0xFF);
          int value = ((buf[pos + 4] & 0xFF) << 24) | ((buf[pos + 5] & 0xFF) << 16) | ((buf[pos + 6] & 0xFF) << 8) | (buf[pos + 7] & 0xFF);
          pos += 8;
          values[ord] += value;
        }
      }
    }
  }
}
Example use of org.apache.lucene.search.DocIdSetIterator in the Apache lucene-solr project: the sumValues method of the TaxonomyFacetSumValueSource class.
/**
 * Sums the value source's per-document value into every taxonomy ordinal of
 * each matching document, then rolls the totals up the taxonomy hierarchy.
 *
 * @param matchingDocs per-segment collections of matching documents
 * @param keepScores   whether scores were kept at collection time and should
 *                     be exposed to the value source
 * @param valueSource  produces the per-document double value to aggregate
 * @throws IOException if reading ordinals or values fails
 */
private void sumValues(List<MatchingDocs> matchingDocs, boolean keepScores, DoubleValuesSource valueSource) throws IOException {
  final IntsRef ordBuffer = new IntsRef();
  for (MatchingDocs hits : matchingDocs) {
    OrdinalsReader.OrdinalsSegmentReader ords = ordinalsReader.getReader(hits.context);
    DoubleValues scoreValues = keepScores ? scores(hits) : null;
    DoubleValues functionValues = valueSource.getValues(hits.context, scoreValues);
    DocIdSetIterator disi = hits.bits.iterator();
    for (int docId = disi.nextDoc(); docId != DocIdSetIterator.NO_MORE_DOCS; docId = disi.nextDoc()) {
      // Load this doc's taxonomy ordinals into the reusable buffer.
      ords.get(docId, ordBuffer);
      // Skip docs for which the value source has no value.
      if (functionValues.advanceExact(docId)) {
        float v = (float) functionValues.doubleValue();
        for (int i = 0; i < ordBuffer.length; i++) {
          values[ordBuffer.ints[i]] += v;
        }
      }
    }
  }
  rollup();
}
Example use of org.apache.lucene.search.DocIdSetIterator in the Apache lucene-solr project: the count method of the LongRangeFacetCounts class.
/**
 * Counts each matching document's long value into the configured ranges.
 * <p>
 * Optionally intersects the hits with {@code fastMatchQuery} (when set) so
 * documents outside that query are skipped without evaluating the value
 * source. Documents with no value are tallied as missing and subtracted
 * from {@code totCount} at the end.
 *
 * @param valueSource  produces the per-document long value to bucket
 * @param matchingDocs per-segment collections of matching documents
 * @throws IOException if evaluating the value source or queries fails
 */
private void count(LongValuesSource valueSource, List<MatchingDocs> matchingDocs) throws IOException {
  LongRange[] ranges = (LongRange[]) this.ranges;
  LongRangeCounter counter = new LongRangeCounter(ranges);
  int missingCount = 0;
  for (MatchingDocs hits : matchingDocs) {
    LongValues fv = valueSource.getValues(hits.context, null);
    totCount += hits.totalHits;
    // Build an iterator over fastMatchQuery for this segment, if configured;
    // null means "no extra filter".
    final DocIdSetIterator fastMatchDocs;
    if (fastMatchQuery != null) {
      final IndexReaderContext topLevelContext = ReaderUtil.getTopLevelContext(hits.context);
      final IndexSearcher searcher = new IndexSearcher(topLevelContext);
      // Disable caching: this throwaway searcher is used once per segment.
      searcher.setQueryCache(null);
      final Weight fastMatchWeight = searcher.createNormalizedWeight(fastMatchQuery, false);
      Scorer s = fastMatchWeight.scorer(hits.context);
      if (s == null) {
        // fastMatchQuery matches nothing in this segment; no hit can survive
        // the intersection, so skip the whole segment.
        continue;
      }
      fastMatchDocs = s.iterator();
    } else {
      fastMatchDocs = null;
    }
    DocIdSetIterator docs = hits.bits.iterator();
    // Leapfrog intersection: advance whichever iterator is behind until both
    // agree on a doc, then consume that doc. Note the loop header has no
    // increment — `doc` is advanced explicitly inside the body.
    for (int doc = docs.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; ) {
      if (fastMatchDocs != null) {
        int fastMatchDoc = fastMatchDocs.docID();
        if (fastMatchDoc < doc) {
          fastMatchDoc = fastMatchDocs.advance(doc);
        }
        if (doc != fastMatchDoc) {
          // Hits iterator is behind the filter; catch up and re-check.
          doc = docs.advance(fastMatchDoc);
          continue;
        }
      }
      // Skip missing docs:
      if (fv.advanceExact(doc)) {
        counter.add(fv.longValue());
      } else {
        missingCount++;
      }
      doc = docs.nextDoc();
    }
  }
  // fillCounts returns the number of counted values that fell outside every
  // range; treat those as missing too.
  int x = counter.fillCounts(counts);
  missingCount += x;
  totCount -= missingCount;
}
Example use of org.apache.lucene.search.DocIdSetIterator in the Apache lucene-solr project: the check method of the CheckJoinIndex class.
/**
 * Check that the given index is good to use for block joins.
 * <p>
 * Verifies, per segment, that: there is at least one parent document, the
 * last document is a parent, and (when deletes exist) every child shares its
 * parent's live/deleted status — blocks must be deleted atomically.
 *
 * @param reader        the index to validate
 * @param parentsFilter identifies parent documents per segment
 * @throws IllegalStateException if the index does not have an appropriate structure
 * @throws IOException if reading the index fails
 */
public static void check(IndexReader reader, BitSetProducer parentsFilter) throws IOException {
  for (LeafReaderContext context : reader.leaves()) {
    if (context.reader().maxDoc() == 0) {
      continue;
    }
    final BitSet parents = parentsFilter.getBitSet(context);
    if (parents == null || parents.cardinality() == 0) {
      throw new IllegalStateException("Every segment should have at least one parent, but " + context.reader() + " does not have any");
    }
    if (parents.get(context.reader().maxDoc() - 1) == false) {
      throw new IllegalStateException("The last document of a segment must always be a parent, but " + context.reader() + " has a child as a last doc");
    }
    final Bits liveDocs = context.reader().getLiveDocs();
    if (liveDocs != null) {
      int prevParentDoc = -1;
      DocIdSetIterator it = new BitSetIterator(parents, 0L);
      for (int parentDoc = it.nextDoc(); parentDoc != DocIdSetIterator.NO_MORE_DOCS; parentDoc = it.nextDoc()) {
        final boolean parentIsLive = liveDocs.get(parentDoc);
        // Children of parentDoc are the docs after the previous parent, up to
        // (but excluding) parentDoc itself.
        for (int child = prevParentDoc + 1; child != parentDoc; child++) {
          final boolean childIsLive = liveDocs.get(child);
          if (parentIsLive != childIsLive) {
            // BUG FIX: the two messages were swapped. Inside this branch the
            // statuses differ, so childIsLive == true implies the PARENT is
            // deleted while the child is live, and vice versa.
            if (childIsLive) {
              throw new IllegalStateException("Parent doc " + parentDoc + " of segment " + context.reader() + " is deleted but has a live child document " + child);
            } else {
              throw new IllegalStateException("Parent doc " + parentDoc + " of segment " + context.reader() + " is live but has a deleted child document " + child);
            }
          }
        }
        prevParentDoc = parentDoc;
      }
    }
  }
}
Example use of org.apache.lucene.search.DocIdSetIterator in the Apache lucene-solr project: the createWeight method of the ParentChildrenBlockJoinQuery class.
@Override
public Weight createWeight(IndexSearcher searcher, boolean needsScores, float boost) throws IOException {
  final Weight childWeight = childQuery.createWeight(searcher, needsScores, boost);
  // Locate the single segment that contains parentDocId; child docs of a block
  // always live in the same segment as their parent.
  final int readerIndex = ReaderUtil.subIndex(parentDocId, searcher.getIndexReader().leaves());
  return new Weight(this) {
    @Override
    public void extractTerms(Set<Term> terms) {
      childWeight.extractTerms(terms);
    }
    @Override
    public Explanation explain(LeafReaderContext context, int doc) throws IOException {
      return Explanation.noMatch("Not implemented, use ToParentBlockJoinQuery explain why a document matched");
    }
    @Override
    public Scorer scorer(LeafReaderContext context) throws IOException {
      // Child docs only reside in a single segment, so no need to evaluate all segments
      if (context.ord != readerIndex) {
        return null;
      }
      final int localParentDocId = parentDocId - context.docBase;
      // Child docs are stored before their parent doc, so if the parent is the
      // segment's first doc there can be no child docs before it.
      if (localParentDocId == 0) {
        return null;
      }
      final BitSet parents = parentFilter.getBitSet(context);
      // First child is the doc right after the previous parent (or segment start).
      final int firstChildDocId = parents.prevSetBit(localParentDocId - 1) + 1;
      // A parent doc doesn't have child docs, so we can early exit here:
      if (firstChildDocId == localParentDocId) {
        return null;
      }
      final Scorer childrenScorer = childWeight.scorer(context);
      if (childrenScorer == null) {
        return null;
      }
      DocIdSetIterator childrenIterator = childrenScorer.iterator();
      // Wrap the child iterator so it is clipped to [firstChildDocId, localParentDocId).
      final DocIdSetIterator it = new DocIdSetIterator() {
        int doc = -1;
        @Override
        public int docID() {
          return doc;
        }
        @Override
        public int nextDoc() throws IOException {
          return advance(doc + 1);
        }
        @Override
        public int advance(int target) throws IOException {
          // Never advance to a doc before the block's first child.
          target = Math.max(firstChildDocId, target);
          if (target >= localParentDocId) {
            // We're outside the child nested scope, so it is done
            return doc = NO_MORE_DOCS;
          } else {
            int advanced = childrenIterator.advance(target);
            if (advanced >= localParentDocId) {
              // We're outside the child nested scope, so it is done
              return doc = NO_MORE_DOCS;
            } else {
              return doc = advanced;
            }
          }
        }
        @Override
        public long cost() {
          // Block size bounds the cost even if the child query matches more docs.
          return Math.min(childrenIterator.cost(), localParentDocId - firstChildDocId);
        }
      };
      // Delegate scoring to the child scorer; only iteration is restricted.
      return new Scorer(this) {
        @Override
        public int docID() {
          return it.docID();
        }
        @Override
        public float score() throws IOException {
          return childrenScorer.score();
        }
        @Override
        public int freq() throws IOException {
          return childrenScorer.freq();
        }
        @Override
        public DocIdSetIterator iterator() {
          return it;
        }
      };
    }
  };
}
Aggregations