Example 1 with MultiPostingsEnum

Use of org.apache.lucene.index.MultiPostingsEnum in project lucene-solr by apache.

From the class SimpleFacets, method getFacetTermEnumCounts.

/**
   * Returns a list of terms in the specified field along with the 
   * corresponding count of documents in the set that match that constraint.
   * This method uses the FilterCache to get the intersection count between <code>docs</code>
   * and the DocSet for each term in the filter.
   *
   * @see FacetParams#FACET_LIMIT
   * @see FacetParams#FACET_ZEROS
   * @see FacetParams#FACET_MISSING
   */
public NamedList<Integer> getFacetTermEnumCounts(SolrIndexSearcher searcher, DocSet docs, String field, int offset, int limit, int mincount, boolean missing, String sort, String prefix, Predicate<BytesRef> termFilter, boolean intersectsCheck) throws IOException {
    /* :TODO: potential optimization...
     * cache the Terms with the highest docFreq and try them first
     * don't enum if we get our max from them
     */
    // Minimum term docFreq in order to use the filterCache for that term.
    int minDfFilterCache = global.getFieldInt(field, FacetParams.FACET_ENUM_CACHE_MINDF, 0);
    // make sure we have a set that is fast for random access, if we will use it for that
    DocSet fastForRandomSet = docs;
    if (minDfFilterCache > 0 && docs instanceof SortedIntDocSet) {
        SortedIntDocSet sset = (SortedIntDocSet) docs;
        fastForRandomSet = new HashDocSet(sset.getDocs(), 0, sset.size());
    }
    IndexSchema schema = searcher.getSchema();
    FieldType ft = schema.getFieldType(field);
    assert !ft.isPointField() : "Point Fields don't support enum method";
    LeafReader r = searcher.getSlowAtomicReader();
    boolean sortByCount = sort.equals("count") || sort.equals("true");
    final int maxsize = limit >= 0 ? offset + limit : Integer.MAX_VALUE - 1;
    final BoundedTreeSet<CountPair<BytesRef, Integer>> queue = sortByCount ? new BoundedTreeSet<CountPair<BytesRef, Integer>>(maxsize) : null;
    final NamedList<Integer> res = new NamedList<>();
    // the smallest value in the top 'N' values    
    int min = mincount - 1;
    int off = offset;
    int lim = limit >= 0 ? limit : Integer.MAX_VALUE;
    BytesRef prefixTermBytes = null;
    if (prefix != null) {
        String indexedPrefix = ft.toInternal(prefix);
        prefixTermBytes = new BytesRef(indexedPrefix);
    }
    Fields fields = r.fields();
    Terms terms = fields == null ? null : fields.terms(field);
    TermsEnum termsEnum = null;
    SolrIndexSearcher.DocsEnumState deState = null;
    BytesRef term = null;
    if (terms != null) {
        termsEnum = terms.iterator();
        if (prefixTermBytes != null) {
            if (termsEnum.seekCeil(prefixTermBytes) == TermsEnum.SeekStatus.END) {
                termsEnum = null;
            } else {
                term = termsEnum.term();
            }
        } else {
            // position termsEnum on first term
            term = termsEnum.next();
        }
    }
    PostingsEnum postingsEnum = null;
    CharsRefBuilder charsRef = new CharsRefBuilder();
    if (docs.size() >= mincount) {
        while (term != null) {
            if (prefixTermBytes != null && !StringHelper.startsWith(term, prefixTermBytes))
                break;
            if (termFilter == null || termFilter.test(term)) {
                int df = termsEnum.docFreq();
                // If we are sorting, we can use df>min (rather than >=) since we are going
                // in index order.  For certain term distributions this can make a large
                // difference (for example, many terms with df=1).
                if (df > 0 && df > min) {
                    int c;
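                    // frequent term: compute the count through the searcher, which
                    // can satisfy it from the filterCache via the shared DocsEnumState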
                    if (df >= minDfFilterCache) {
                        if (deState == null) {
                            deState = new SolrIndexSearcher.DocsEnumState();
                            deState.fieldName = field;
                            deState.liveDocs = r.getLiveDocs();
                            deState.termsEnum = termsEnum;
                            deState.postingsEnum = postingsEnum;
                        }
                        if (intersectsCheck) {
                            c = searcher.intersects(docs, deState) ? 1 : 0;
                        } else {
                            c = searcher.numDocs(docs, deState);
                        }
                        postingsEnum = deState.postingsEnum;
                    } else {
                        // iterate over TermDocs to calculate the intersection
                        // TODO: specialize when base docset is a bitset or hash set (skipDocs)?  or does it matter for this?
                        // TODO: do this per-segment for better efficiency (MultiDocsEnum just uses base class impl)
                        // TODO: would passing deleted docs lead to better efficiency over checking the fastForRandomSet?
                        postingsEnum = termsEnum.postings(postingsEnum, PostingsEnum.NONE);
                        c = 0;
                        if (postingsEnum instanceof MultiPostingsEnum) {
                            MultiPostingsEnum.EnumWithSlice[] subs = ((MultiPostingsEnum) postingsEnum).getSubs();
                            int numSubs = ((MultiPostingsEnum) postingsEnum).getNumSubs();
                            SEGMENTS_LOOP: for (int subindex = 0; subindex < numSubs; subindex++) {
                                MultiPostingsEnum.EnumWithSlice sub = subs[subindex];
                                if (sub.postingsEnum == null)
                                    continue;
                                int base = sub.slice.start;
                                int docid;
                                while ((docid = sub.postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                                    if (fastForRandomSet.exists(docid + base)) {
                                        c++;
                                        if (intersectsCheck) {
                                            assert c == 1;
                                            break SEGMENTS_LOOP;
                                        }
                                    }
                                }
                            }
                        } else {
                            int docid;
                            while ((docid = postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                                if (fastForRandomSet.exists(docid)) {
                                    c++;
                                    if (intersectsCheck) {
                                        assert c == 1;
                                        break;
                                    }
                                }
                            }
                        }
                    }
                    if (sortByCount) {
                        if (c > min) {
                            BytesRef termCopy = BytesRef.deepCopyOf(term);
                            queue.add(new CountPair<>(termCopy, c));
                            if (queue.size() >= maxsize)
                                min = queue.last().val;
                        }
                    } else {
                        if (c >= mincount && --off < 0) {
                            if (--lim < 0)
                                break;
                            ft.indexedToReadable(term, charsRef);
                            res.add(charsRef.toString(), c);
                        }
                    }
                }
            }
            term = termsEnum.next();
        }
    }
    if (sortByCount) {
        for (CountPair<BytesRef, Integer> p : queue) {
            if (--off >= 0)
                continue;
            if (--lim < 0)
                break;
            ft.indexedToReadable(p.key, charsRef);
            res.add(charsRef.toString(), p.val);
        }
    }
    if (missing) {
        res.add(null, getFieldMissingCount(searcher, docs, field));
    }
    return res;
}
Also used: SortedIntDocSet (org.apache.solr.search.SortedIntDocSet), HashDocSet (org.apache.solr.search.HashDocSet), BitDocSet (org.apache.solr.search.BitDocSet), DocSet (org.apache.solr.search.DocSet), TermsEnum (org.apache.lucene.index.TermsEnum), Terms (org.apache.lucene.index.Terms), Fields (org.apache.lucene.index.Fields), LeafReader (org.apache.lucene.index.LeafReader), MultiPostingsEnum (org.apache.lucene.index.MultiPostingsEnum), PostingsEnum (org.apache.lucene.index.PostingsEnum), BytesRef (org.apache.lucene.util.BytesRef), CharsRefBuilder (org.apache.lucene.util.CharsRefBuilder), NamedList (org.apache.solr.common.util.NamedList), SolrIndexSearcher (org.apache.solr.search.SolrIndexSearcher), FieldType (org.apache.solr.schema.FieldType), IndexSchema (org.apache.solr.schema.IndexSchema)
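
The instanceof MultiPostingsEnum branch above is the core pattern all three examples share: walk each segment's postings directly and rebase the segment-local docids by slice.start. Below is a minimal standalone sketch of that pattern, not taken from the Solr source; it assumes a Lucene 6.x-era classpath (matching lucene-solr here, where MultiFields.getTerms still exists), and reader, field, wanted, and inBaseSet are hypothetical inputs.

import java.io.IOException;
import java.util.function.IntPredicate;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiFields;
import org.apache.lucene.index.MultiPostingsEnum;
import org.apache.lucene.index.PostingsEnum;
import org.apache.lucene.index.Terms;
import org.apache.lucene.index.TermsEnum;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.BytesRef;

public class MultiPostingsCount {

    /** Counts documents containing {@code wanted} that also pass {@code inBaseSet}. */
    static int countIntersection(IndexReader reader, String field, BytesRef wanted,
                                 IntPredicate inBaseSet) throws IOException {
        // composite Terms view over all segments of the reader
        Terms terms = MultiFields.getTerms(reader, field);
        if (terms == null) return 0;
        TermsEnum termsEnum = terms.iterator();
        if (!termsEnum.seekExact(wanted)) return 0;
        PostingsEnum postingsEnum = termsEnum.postings(null, PostingsEnum.NONE);
        int count = 0;
        if (postingsEnum instanceof MultiPostingsEnum) {
            // iterate each segment's postings directly rather than through the
            // composite nextDoc(); docids are segment-local, so rebase by slice.start
            MultiPostingsEnum.EnumWithSlice[] subs = ((MultiPostingsEnum) postingsEnum).getSubs();
            int numSubs = ((MultiPostingsEnum) postingsEnum).getNumSubs();
            for (int subindex = 0; subindex < numSubs; subindex++) {
                MultiPostingsEnum.EnumWithSlice sub = subs[subindex];
                if (sub.postingsEnum == null) continue;
                int base = sub.slice.start;
                int docid;
                while ((docid = sub.postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                    if (inBaseSet.test(docid + base)) count++;
                }
            }
        } else {
            // single-segment reader: docids are already global
            int docid;
            while ((docid = postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                if (inBaseSet.test(docid)) count++;
            }
        }
        return count;
    }
}

Iterating the sub-enums directly avoids the per-document dispatch of the composite enum, which is what the "do this per-segment for better efficiency" TODO in Example 1 is getting at.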

Example 2 with MultiPostingsEnum

Use of org.apache.lucene.index.MultiPostingsEnum in project lucene-solr by apache.

From the class SolrIndexSearcher, method getDocSet.

/** @lucene.internal */
public DocSet getDocSet(DocsEnumState deState) throws IOException {
    int largestPossible = deState.termsEnum.docFreq();
    boolean useCache = filterCache != null && largestPossible >= deState.minSetSizeCached;
    TermQuery key = null;
    if (useCache) {
        key = new TermQuery(new Term(deState.fieldName, deState.termsEnum.term()));
        DocSet result = filterCache.get(key);
        if (result != null)
            return result;
    }
    int smallSetSize = DocSetUtil.smallSetSize(maxDoc());
    int scratchSize = Math.min(smallSetSize, largestPossible);
    if (deState.scratch == null || deState.scratch.length < scratchSize)
        deState.scratch = new int[scratchSize];
    final int[] docs = deState.scratch;
    int upto = 0;
    int bitsSet = 0;
    FixedBitSet fbs = null;
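    // pull (or reuse) the postings and filter them against liveDocs so that
    // deleted documents are skipped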
    PostingsEnum postingsEnum = deState.termsEnum.postings(deState.postingsEnum, PostingsEnum.NONE);
    postingsEnum = BitsFilteredPostingsEnum.wrap(postingsEnum, deState.liveDocs);
    if (deState.postingsEnum == null) {
        deState.postingsEnum = postingsEnum;
    }
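    // a MultiPostingsEnum exposes one sub-enum per segment; sub docids are
    // segment-local, so they are rebased by slice.start below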
    if (postingsEnum instanceof MultiPostingsEnum) {
        MultiPostingsEnum.EnumWithSlice[] subs = ((MultiPostingsEnum) postingsEnum).getSubs();
        int numSubs = ((MultiPostingsEnum) postingsEnum).getNumSubs();
        for (int subindex = 0; subindex < numSubs; subindex++) {
            MultiPostingsEnum.EnumWithSlice sub = subs[subindex];
            if (sub.postingsEnum == null)
                continue;
            int base = sub.slice.start;
            int docid;
            if (largestPossible > docs.length) {
                if (fbs == null)
                    fbs = new FixedBitSet(maxDoc());
                while ((docid = sub.postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                    fbs.set(docid + base);
                    bitsSet++;
                }
            } else {
                while ((docid = sub.postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                    docs[upto++] = docid + base;
                }
            }
        }
    } else {
        int docid;
        if (largestPossible > docs.length) {
            fbs = new FixedBitSet(maxDoc());
            while ((docid = postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                fbs.set(docid);
                bitsSet++;
            }
        } else {
            while ((docid = postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                docs[upto++] = docid;
            }
        }
    }
    DocSet result;
    if (fbs != null) {
        for (int i = 0; i < upto; i++) {
            fbs.set(docs[i]);
        }
        bitsSet += upto;
        result = new BitDocSet(fbs, bitsSet);
    } else {
        result = upto == 0 ? DocSet.EMPTY : new SortedIntDocSet(Arrays.copyOf(docs, upto));
    }
    if (useCache) {
        filterCache.put(key, result);
    }
    return result;
}
Also used: Term (org.apache.lucene.index.Term), IndexFingerprint (org.apache.solr.update.IndexFingerprint), MultiPostingsEnum (org.apache.lucene.index.MultiPostingsEnum), PostingsEnum (org.apache.lucene.index.PostingsEnum), FixedBitSet (org.apache.lucene.util.FixedBitSet)
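
The part of getDocSet worth isolating is the representation switch: when the term's docFreq fits within the scratch array, doc ids accumulate into a sorted int[] (wrapped as a SortedIntDocSet); otherwise they are set in a FixedBitSet (wrapped as a BitDocSet). A hedged sketch of the two collection paths follows, with the DocSet wrapping omitted; a caller would pick collectLarge when largestPossible exceeds the small-set threshold (the role DocSetUtil.smallSetSize plays above) and collectSmall otherwise.

import java.io.IOException;
import java.util.Arrays;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.util.FixedBitSet;

public class DocSetCollect {

    /** Large path: one bit per document in the index. */
    static FixedBitSet collectLarge(DocIdSetIterator postings, int maxDoc) throws IOException {
        FixedBitSet bits = new FixedBitSet(maxDoc);
        int docid;
        while ((docid = postings.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
            bits.set(docid);
        }
        return bits; // Solr wraps this in a BitDocSet
    }

    /** Small path: postings arrive in increasing docid order, so the array is already sorted. */
    static int[] collectSmall(DocIdSetIterator postings, int largestPossible) throws IOException {
        int[] docs = new int[largestPossible];
        int upto = 0;
        int docid;
        while ((docid = postings.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
            docs[upto++] = docid;
        }
        return Arrays.copyOf(docs, upto); // Solr wraps this in a SortedIntDocSet
    }
}

The sorted int[] is the win here: for small sets it is far more compact than a bitset over maxDoc() bits and intersects quickly, which is also why getDocSet sizes its scratch array to min(smallSetSize, largestPossible).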

Example 3 with MultiPostingsEnum

Use of org.apache.lucene.index.MultiPostingsEnum in project lucene-solr by apache.

From the class FacetFieldProcessorByEnumTermsStream, method _nextBucket.

private SimpleOrderedMap<Object> _nextBucket() throws IOException {
    DocSet termSet = null;
    try {
        while (term != null) {
            if (startTermBytes != null && !StringHelper.startsWith(term, startTermBytes)) {
                break;
            }
            int df = termsEnum.docFreq();
            if (df < effectiveMincount) {
                term = termsEnum.next();
                continue;
            }
            if (termSet != null) {
                // termSet.decref(); // OFF-HEAP
                termSet = null;
            }
            int c = 0;
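            // frequent terms (or sub-facets that need the actual doc ids) go
            // through the searcher so the filterCache can be leveraged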
            if (hasSubFacets || df >= minDfFilterCache) {
                if (deState == null) {
                    deState = new SolrIndexSearcher.DocsEnumState();
                    deState.fieldName = sf.getName();
                    deState.liveDocs = fcontext.searcher.getSlowAtomicReader().getLiveDocs();
                    deState.termsEnum = termsEnum;
                    deState.postingsEnum = postingsEnum;
                    deState.minSetSizeCached = minDfFilterCache;
                }
                if (hasSubFacets || !countOnly) {
                    DocSet termsAll = fcontext.searcher.getDocSet(deState);
                    termSet = docs.intersection(termsAll);
                    // termsAll.decref(); // OFF-HEAP
                    c = termSet.size();
                } else {
                    c = fcontext.searcher.numDocs(docs, deState);
                }
                postingsEnum = deState.postingsEnum;
                resetStats();
                if (!countOnly) {
                    collect(termSet, 0);
                }
            } else {
                // We don't need the docset here (meaning no sub-facets).
                // If we are not countOnly, other stats are collected per document below.
                resetStats();
                // lazy convert to fastForRandomSet
                if (fastForRandomSet == null) {
                    fastForRandomSet = docs;
                    if (docs instanceof SortedIntDocSet) {
                        // OFF-HEAP todo: also check for native version
                        SortedIntDocSet sset = (SortedIntDocSet) docs;
                        fastForRandomSet = new HashDocSet(sset.getDocs(), 0, sset.size());
                    }
                }
                // iterate over TermDocs to calculate the intersection
                postingsEnum = termsEnum.postings(postingsEnum, PostingsEnum.NONE);
                if (postingsEnum instanceof MultiPostingsEnum) {
                    MultiPostingsEnum.EnumWithSlice[] subs = ((MultiPostingsEnum) postingsEnum).getSubs();
                    int numSubs = ((MultiPostingsEnum) postingsEnum).getNumSubs();
                    for (int subindex = 0; subindex < numSubs; subindex++) {
                        MultiPostingsEnum.EnumWithSlice sub = subs[subindex];
                        if (sub.postingsEnum == null)
                            continue;
                        int base = sub.slice.start;
                        int docid;
                        if (countOnly) {
                            while ((docid = sub.postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                                if (fastForRandomSet.exists(docid + base))
                                    c++;
                            }
                        } else {
                            setNextReader(leaves[sub.slice.readerIndex]);
                            while ((docid = sub.postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                                if (fastForRandomSet.exists(docid + base)) {
                                    c++;
                                    collect(docid, 0);
                                }
                            }
                        }
                    }
                } else {
                    int docid;
                    if (countOnly) {
                        while ((docid = postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                            if (fastForRandomSet.exists(docid))
                                c++;
                        }
                    } else {
                        setNextReader(leaves[0]);
                        while ((docid = postingsEnum.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) {
                            if (fastForRandomSet.exists(docid)) {
                                c++;
                                collect(docid, 0);
                            }
                        }
                    }
                }
            }
            if (c < effectiveMincount) {
                term = termsEnum.next();
                continue;
            }
            // handle offset and limit
            if (bucketsToSkip > 0) {
                bucketsToSkip--;
                term = termsEnum.next();
                continue;
            }
            if (freq.limit >= 0 && ++bucketsReturned > freq.limit) {
                return null;
            }
            // set count in case other stats depend on it
            countAcc.incrementCount(0, c);
            // OK, we have a good bucket to return... first get bucket value before moving to next term
            Object bucketVal = sf.getType().toObject(sf, term);
            TermQuery bucketQuery = hasSubFacets ? new TermQuery(new Term(freq.field, term)) : null;
            term = termsEnum.next();
            SimpleOrderedMap<Object> bucket = new SimpleOrderedMap<>();
            bucket.add("val", bucketVal);
            addStats(bucket, 0);
            if (hasSubFacets) {
                processSubs(bucket, bucketQuery, termSet, false, null);
            }
            return bucket;
        }
    } finally {
        if (termSet != null) {
            // termSet.decref();  // OFF-HEAP
            termSet = null;
        }
    }
    // end of the iteration
    return null;
}
Also used: SortedIntDocSet (org.apache.solr.search.SortedIntDocSet), HashDocSet (org.apache.solr.search.HashDocSet), DocSet (org.apache.solr.search.DocSet), TermQuery (org.apache.lucene.search.TermQuery), Term (org.apache.lucene.index.Term), SolrIndexSearcher (org.apache.solr.search.SolrIndexSearcher), SimpleOrderedMap (org.apache.solr.common.util.SimpleOrderedMap), MultiPostingsEnum (org.apache.lucene.index.MultiPostingsEnum)
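
What sets this example apart from the previous two is the collection path: sub.slice.readerIndex records which leaf each sub-enum came from, so the processor can position its collector with setNextReader(leaves[sub.slice.readerIndex]) before draining that slice. A small sketch of the slice-to-leaf mapping, assuming a top-level composite reader; the docBase alignment asserted below is an assumption of that setup, not an API guarantee.

import java.util.List;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.index.MultiPostingsEnum;

public class SliceToLeaf {

    /** Visits each sub-enum together with the leaf it was built from. */
    static void visitSubs(IndexReader reader, MultiPostingsEnum mpe) {
        List<LeafReaderContext> leaves = reader.leaves();
        MultiPostingsEnum.EnumWithSlice[] subs = mpe.getSubs();
        for (int subindex = 0; subindex < mpe.getNumSubs(); subindex++) {
            MultiPostingsEnum.EnumWithSlice sub = subs[subindex];
            if (sub.postingsEnum == null) continue;
            LeafReaderContext leaf = leaves.get(sub.slice.readerIndex);
            // for a top-level composite reader the slice offset matches the
            // leaf's docBase, so either value can rebase segment-local docids
            assert leaf.docBase == sub.slice.start;
            // a per-segment collector would be positioned on 'leaf' here (the role
            // setNextReader plays above) and then fed docids from sub.postingsEnum
        }
    }
}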

Aggregations

MultiPostingsEnum (org.apache.lucene.index.MultiPostingsEnum): 3 examples
PostingsEnum (org.apache.lucene.index.PostingsEnum): 2 examples
Term (org.apache.lucene.index.Term): 2 examples
DocSet (org.apache.solr.search.DocSet): 2 examples
HashDocSet (org.apache.solr.search.HashDocSet): 2 examples
SolrIndexSearcher (org.apache.solr.search.SolrIndexSearcher): 2 examples
SortedIntDocSet (org.apache.solr.search.SortedIntDocSet): 2 examples
Fields (org.apache.lucene.index.Fields): 1 example
LeafReader (org.apache.lucene.index.LeafReader): 1 example
Terms (org.apache.lucene.index.Terms): 1 example
TermsEnum (org.apache.lucene.index.TermsEnum): 1 example
TermQuery (org.apache.lucene.search.TermQuery): 1 example
BytesRef (org.apache.lucene.util.BytesRef): 1 example
CharsRefBuilder (org.apache.lucene.util.CharsRefBuilder): 1 example
FixedBitSet (org.apache.lucene.util.FixedBitSet): 1 example
NamedList (org.apache.solr.common.util.NamedList): 1 example
SimpleOrderedMap (org.apache.solr.common.util.SimpleOrderedMap): 1 example
FieldType (org.apache.solr.schema.FieldType): 1 example
IndexSchema (org.apache.solr.schema.IndexSchema): 1 example
BitDocSet (org.apache.solr.search.BitDocSet): 1 example