Use of org.apache.solr.search.BitDocSet in the SearchServices project by Alfresco.
Example: method createOwnerScorer of class SolrOwnerScorer.
/**
 * Creates a scorer that matches exactly the documents owned by {@code authority}.
 *
 * <p>Owned doc sets are resolved via the ALFRESCO_OWNERLOOKUP_CACHE; on a miss the
 * index is queried by the owner field and the result is cached for later requests.
 *
 * @param weight    the weight driving this scorer
 * @param context   the leaf reader context the scorer operates on
 * @param searcher  the searcher used for cache access and doc-set resolution
 * @param authority the authority whose owned documents should match
 * @return a scorer over the owned documents, or over an empty set for non-user authorities
 * @throws IOException if the index lookup fails
 */
public static SolrOwnerScorer createOwnerScorer(Weight weight, LeafReaderContext context, SolrIndexSearcher searcher, String authority) throws IOException {
    if (AuthorityType.getAuthorityType(authority) != AuthorityType.USER) {
        // Only users can own documents; any other authority type matches nothing.
        return new SolrOwnerScorer(weight, new BitDocSet(new FixedBitSet(0)), context, searcher);
    }
    DocSet ownedDocs = (DocSet) searcher.cacheLookup(CacheConstants.ALFRESCO_OWNERLOOKUP_CACHE, authority);
    if (ownedDocs == null) {
        // Cache miss: resolve the owned documents from the index and remember the result.
        ownedDocs = searcher.getDocSet(new TermQuery(new Term(QueryConstants.FIELD_OWNER, authority)));
        searcher.cacheInsert(CacheConstants.ALFRESCO_OWNERLOOKUP_CACHE, authority, ownedDocs);
    }
    return new SolrOwnerScorer(weight, ownedDocs, context, searcher);
}
Use of org.apache.solr.search.BitDocSet in the lucene-solr project by Apache.
Example: method computeDocSet of class SimpleFacets.
/**
 * Computes the base doc set for faceting, honouring any tag-based filter exclusions.
 *
 * <p>If there is nothing to exclude (no tag map, no ResponseBuilder, or no matching
 * excluded queries) the original base set is returned unchanged. Otherwise the doc set
 * is recomputed from the base query plus all non-excluded filters. When group
 * truncation is enabled, the result is further reduced to the group-head documents.
 *
 * @param baseDocSet     the doc set computed from the full query and filters
 * @param excludeTagList tags identifying filter queries to leave out of the facet domain
 * @return the (possibly recomputed) doc set to facet over
 * @throws SyntaxError if a query cannot be parsed
 * @throws IOException if index access fails
 */
protected DocSet computeDocSet(DocSet baseDocSet, List<String> excludeTagList) throws SyntaxError, IOException {
    Map<?, ?> tagMap = (Map<?, ?>) req.getContext().get("tags");
    // rb can be null if facets are being calculated from a RequestHandler e.g. MoreLikeThisHandler
    if (tagMap == null || rb == null) {
        return baseDocSet;
    }
    // Identity map: the same Query object attached to an excluded tag must be skipped,
    // regardless of Query.equals semantics.
    IdentityHashMap<Query, Boolean> excludeSet = new IdentityHashMap<>();
    for (String excludeTag : excludeTagList) {
        Object olst = tagMap.get(excludeTag);
        // tagMap has entries of List<String,List<QParser>>, but subject to change in the future
        if (!(olst instanceof Collection))
            continue;
        for (Object o : (Collection<?>) olst) {
            if (!(o instanceof QParser))
                continue;
            QParser qp = (QParser) o;
            excludeSet.put(qp.getQuery(), Boolean.TRUE);
        }
    }
    // Nothing actually excluded: the original base set is still valid.
    if (excludeSet.isEmpty())
        return baseDocSet;
    List<Query> qlist = new ArrayList<>();
    // add the base query unless it is itself excluded
    if (!excludeSet.containsKey(rb.getQuery())) {
        qlist.add(rb.getQuery());
    }
    // add every filter that is not excluded
    if (rb.getFilters() != null) {
        for (Query q : rb.getFilters()) {
            if (!excludeSet.containsKey(q)) {
                qlist.add(q);
            }
        }
    }
    // get the new base docset for this facet
    DocSet base = searcher.getDocSet(qlist);
    if (rb.grouping() && rb.getGroupingSpec().isTruncateGroups()) {
        Grouping grouping = new Grouping(searcher, null, rb.getQueryCommand(), false, 0, false);
        grouping.setWithinGroupSort(rb.getGroupingSpec().getSortWithinGroup());
        if (rb.getGroupingSpec().getFields().length > 0) {
            grouping.addFieldCommand(rb.getGroupingSpec().getFields()[0], req);
        } else if (rb.getGroupingSpec().getFunctions().length > 0) {
            grouping.addFunctionCommand(rb.getGroupingSpec().getFunctions()[0], req);
        } else {
            // Grouping is configured but neither by field nor function: nothing to truncate.
            return base;
        }
        AllGroupHeadsCollector allGroupHeadsCollector = grouping.getCommands().get(0).createAllGroupCollector();
        // Restrict the facet domain to one representative document per group.
        searcher.search(base.getTopFilter(), allGroupHeadsCollector);
        return new BitDocSet(allGroupHeadsCollector.retrieveGroupHeads(searcher.maxDoc()));
    } else {
        return base;
    }
}
Use of org.apache.solr.search.BitDocSet in the lucene-solr project by Apache.
Example: method createDocSet of class SolrRangeQuery.
/**
 * Materialises this range query as a DocSet by walking the matching terms
 * of every segment and accumulating their postings.
 *
 * @param searcher the searcher whose index is enumerated
 * @param cost     an estimate of the number of matching documents, used to
 *                 size the builder
 * @return the set of matching live documents, normalised via DocSetUtil
 * @throws IOException if term or postings enumeration fails
 */
private DocSet createDocSet(SolrIndexSearcher searcher, long cost) throws IOException {
    final int maxDoc = searcher.maxDoc();
    final BitDocSet liveDocs = searcher.getLiveDocs();
    // When every document is live there is nothing to filter against.
    final FixedBitSet liveBits;
    if (liveDocs.size() == maxDoc) {
        liveBits = null;
    } else {
        liveBits = liveDocs.getBits();
    }
    final DocSetBuilder builder = new DocSetBuilder(maxDoc, cost);
    int largestTermCount = 0;
    for (LeafReaderContext leaf : searcher.getTopReaderContext().leaves()) {
        TermsEnum termsEnum = getTermsEnum(leaf);
        int visited = builder.add(termsEnum, leaf.docBase);
        largestTermCount = Math.max(largestTermCount, visited);
    }
    // A single term per segment yields docs already unique and in order,
    // allowing the cheaper build path.
    final DocSet result;
    if (largestTermCount <= 1) {
        result = builder.buildUniqueInOrder(liveBits);
    } else {
        result = builder.build(liveBits);
    }
    return DocSetUtil.getDocSet(result, searcher);
}
Use of org.apache.solr.search.BitDocSet in the lucene-solr project by Apache.
Example: method computeGroupedDocSet of class CommandHandler.
/**
 * Computes the doc set of group-head documents for the first grouping command.
 *
 * <p>An AllGroupHeadsCollector is selected based on the grouping field's type
 * (value-source based for numeric fields, term based otherwise), run alongside any
 * other collectors, and its group heads are returned as a BitDocSet.
 *
 * @param query      the main query to execute
 * @param filter     the processed filter restricting the search
 * @param collectors additional collectors to run in the same pass; the group-heads
 *                   collector is appended to this list when it is non-empty
 * @return a doc set containing one representative (head) document per group
 * @throws IOException if the search fails
 */
private DocSet computeGroupedDocSet(Query query, ProcessedFilter filter, List<Collector> collectors) throws IOException {
    Command firstCommand = commands.get(0);
    String field = firstCommand.getKey();
    SchemaField sf = searcher.getSchema().getField(field);
    FieldType fieldType = sf.getType();
    final AllGroupHeadsCollector allGroupHeadsCollector;
    if (fieldType.getNumberType() != null) {
        // Numeric fields are grouped via their value source rather than by term.
        ValueSource vs = fieldType.getValueSource(sf, null);
        allGroupHeadsCollector = AllGroupHeadsCollector.newCollector(new ValueSourceGroupSelector(vs, new HashMap<>()), firstCommand.getWithinGroupSort());
    } else {
        allGroupHeadsCollector = AllGroupHeadsCollector.newCollector(new TermGroupSelector(firstCommand.getKey()), firstCommand.getWithinGroupSort());
    }
    if (collectors.isEmpty()) {
        searchWithTimeLimiter(query, filter, allGroupHeadsCollector);
    } else {
        // Run the group-heads collector in the same pass as the other collectors.
        collectors.add(allGroupHeadsCollector);
        searchWithTimeLimiter(query, filter, MultiCollector.wrap(collectors.toArray(new Collector[0])));
    }
    return new BitDocSet(allGroupHeadsCollector.retrieveGroupHeads(searcher.maxDoc()));
}
Use of org.apache.solr.search.BitDocSet in the lucene-solr project by Apache.
Example: method process of class BlockJoinDocSetFacetComponent.
/**
 * Computes block-join child facets for the current request.
 *
 * <p>Only active when a block-join parent query was registered in the request
 * context under {@code bjqKey}. The matched parents are mapped to their children,
 * intersected with the child query's doc set, and the resulting children are
 * counted segment by segment into a BlockJoinFacetAccsHolder, which is then
 * published back into the request context before delegating to the superclass.
 *
 * @param rb the response builder for the current request
 * @throws IOException if index access fails
 */
@Override
public void process(ResponseBuilder rb) throws IOException {
    final BlockJoinParentQParser.AllParentsAware bjq = (BlockJoinParentQParser.AllParentsAware) rb.req.getContext().get(bjqKey);
    if (bjq != null) {
        final DocSet parentResult = rb.getResults().docSet;
        final BitDocSet allParentsBitsDocSet = rb.req.getSearcher().getDocSetBits(bjq.getParentQuery());
        // Expand the matched parents to their child documents.
        final DocSet allChildren = BlockJoin.toChildren(parentResult, allParentsBitsDocSet, rb.req.getSearcher().getDocSetBits(new MatchAllDocsQuery()), QueryContext.newContext(rb.req.getSearcher()));
        final DocSet childQueryDocSet = rb.req.getSearcher().getDocSet(bjq.getChildQuery());
        // Keep only the children that also satisfy the child query.
        final DocSet selectedChildren = allChildren.intersection(childQueryDocSet);
        // don't include parent into facet counts
        //childResult = childResult.union(parentResult);// just to mimic the current logic
        final Filter childrenFilter = selectedChildren.getTopFilter();
        final BlockJoinFacetAccsHolder facetCounter = new BlockJoinFacetAccsHolder(rb.req);
        for (LeafReaderContext leafCtx : rb.req.getSearcher().getIndexReader().leaves()) {
            // solr docsets already exclude any deleted docs
            DocIdSet segmentDocs = childrenFilter.getDocIdSet(leafCtx, null);
            AggregatableDocIter childIter = new SegmentChildren(leafCtx, segmentDocs, allParentsBitsDocSet);
            if (childIter.hasNext()) {
                facetCounter.doSetNextReader(leafCtx);
                facetCounter.countFacets(childIter);
            }
        }
        facetCounter.finish();
        // Expose the accumulated counts to the later phases of the component chain.
        rb.req.getContext().put(COLLECTOR_CONTEXT_PARAM, facetCounter);
        super.process(rb);
    }
}
Aggregations