Example 1 with DocSet

Use of org.apache.solr.search.DocSet in the project lucene-solr by apache.

From the class PivotFacetProcessor, the method doPivots:

/**
   * Recursive function to compute all the pivot counts for the values under the specified field
   */
protected List<NamedList<Object>> doPivots(NamedList<Integer> superFacets, String field, String subField,
                                           Deque<String> fnames, Deque<String> vnames, ParsedParams parsed,
                                           List<StatsField> statsFields, List<FacetComponent.FacetBase> facetQueries,
                                           List<RangeFacetRequest> facetRanges) throws IOException {
    boolean isShard = rb.req.getParams().getBool(ShardParams.IS_SHARD, false);
    SolrIndexSearcher searcher = rb.req.getSearcher();
    // TODO: optimize to avoid converting to an external string and then having to convert back to internal below
    SchemaField sfield = searcher.getSchema().getField(field);
    FieldType ftype = sfield.getType();
    String nextField = fnames.poll();
    // re-usable BytesRefBuilder for conversion of term values to Objects
    BytesRefBuilder termval = new BytesRefBuilder();
    List<NamedList<Object>> values = new ArrayList<>(superFacets.size());
    for (Map.Entry<String, Integer> kv : superFacets) {
        // Only sub-facet if the parent facet count meets the field's mincount - there still may not be any values for the sub-field though
        if (kv.getValue() >= getMinCountForField(field)) {
            final String fieldValue = kv.getKey();
            final int pivotCount = kv.getValue();
            SimpleOrderedMap<Object> pivot = new SimpleOrderedMap<>();
            pivot.add("field", field);
            if (null == fieldValue) {
                pivot.add("value", null);
            } else {
                ftype.readableToIndexed(fieldValue, termval);
                pivot.add("value", ftype.toObject(sfield, termval.get()));
            }
            pivot.add("count", pivotCount);
            final DocSet subset = getSubset(parsed.docs, sfield, fieldValue);
            addPivotQueriesAndRanges(pivot, params, subset, facetQueries, facetRanges);
            if (subField != null) {
                NamedList<Integer> facetCounts;
                if (!vnames.isEmpty()) {
                    String val = vnames.pop();
                    facetCounts = new NamedList<>();
                    facetCounts.add(val, getSubsetSize(subset, searcher.getSchema().getField(subField), val));
                } else {
                    facetCounts = this.getTermCountsForPivots(subField, parsed.withDocs(subset));
                }
                if (facetCounts.size() >= 1) {
                    pivot.add("pivot", doPivots(facetCounts, subField, nextField, fnames, vnames, parsed.withDocs(subset), statsFields, facetQueries, facetRanges));
                }
            }
            if ((isShard || 0 < pivotCount) && !statsFields.isEmpty()) {
                Map<String, StatsValues> stv = new LinkedHashMap<>();
                for (StatsField statsField : statsFields) {
                    stv.put(statsField.getOutputKey(), statsField.computeLocalStatsValues(subset));
                }
                pivot.add("stats", StatsComponent.convertToResponse(stv));
            }
            values.add(pivot);
        }
    }
    // put the field back on the list
    fnames.push(nextField);
    return values;
}
Also used: BytesRefBuilder (org.apache.lucene.util.BytesRefBuilder), NamedList (org.apache.solr.common.util.NamedList), ArrayList (java.util.ArrayList), SolrIndexSearcher (org.apache.solr.search.SolrIndexSearcher), SimpleOrderedMap (org.apache.solr.common.util.SimpleOrderedMap), FieldType (org.apache.solr.schema.FieldType), LinkedHashMap (java.util.LinkedHashMap), SchemaField (org.apache.solr.schema.SchemaField), Map (java.util.Map), DocSet (org.apache.solr.search.DocSet)
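
The per-value subset above comes from getSubset(parsed.docs, sfield, fieldValue). A minimal sketch of that idea, assuming the readable value is converted back to its indexed form and intersected with the parent DocSet (subsetFor is a hypothetical helper, not the actual Solr method; it also uses org.apache.lucene.index.Term, org.apache.lucene.search.TermQuery and org.apache.lucene.search.Query):

// Hypothetical helper: convert the readable value to its indexed bytes, build a
// TermQuery for it, and intersect the term's matches with the parent DocSet.
static DocSet subsetFor(SolrIndexSearcher searcher, DocSet parentDocs,
                        SchemaField sfield, String readableValue) throws IOException {
    BytesRefBuilder indexed = new BytesRefBuilder();
    sfield.getType().readableToIndexed(readableValue, indexed);
    Query termQuery = new TermQuery(new Term(sfield.getName(), indexed.toBytesRef()));
    // getDocSet(Query, DocSet) returns the intersection, using the filter cache when possible
    return searcher.getDocSet(termQuery, parentDocs);
}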

Example 2 with DocSet

Use of org.apache.solr.search.DocSet in the project lucene-solr by apache.

From the class SpellCheckComponent, the method maxResultsForSuggest:

private Integer maxResultsForSuggest(ResponseBuilder rb) {
    SolrParams params = rb.req.getParams();
    float maxResultsForSuggestParamValue = params.getFloat(SpellingParams.SPELLCHECK_MAX_RESULTS_FOR_SUGGEST, 0.0f);
    Integer maxResultsForSuggest = null;
    if (maxResultsForSuggestParamValue > 0.0f) {
        if (maxResultsForSuggestParamValue == (int) maxResultsForSuggestParamValue) {
            // If a whole number was passed in, this is a discrete number of documents
            maxResultsForSuggest = (int) maxResultsForSuggestParamValue;
        } else {
            // If a fractional value was passed in, this is the % of documents returned by the specified filter
            // If no specified filter, we use the most restrictive filter of the fq parameters
            String maxResultsFilterQueryString = params.get(SpellingParams.SPELLCHECK_MAX_RESULTS_FOR_SUGGEST_FQ);
            int maxResultsByFilters = Integer.MAX_VALUE;
            SolrIndexSearcher searcher = rb.req.getSearcher();
            try {
                if (maxResultsFilterQueryString != null) {
                    // Get the default Lucene query parser
                    QParser parser = QParser.getParser(maxResultsFilterQueryString, rb.req);
                    DocSet s = searcher.getDocSet(parser.getQuery());
                    maxResultsByFilters = s.size();
                } else {
                    List<Query> filters = rb.getFilters();
                    // Get the maximum possible hits within these filters (size of most restrictive filter). 
                    if (filters != null) {
                        for (Query query : filters) {
                            DocSet s = searcher.getDocSet(query);
                            if (s != null) {
                                maxResultsByFilters = Math.min(s.size(), maxResultsByFilters);
                            }
                        }
                    }
                }
            } catch (IOException e) {
                LOG.error(e.toString());
                return null;
            } catch (SyntaxError e) {
                LOG.error(e.toString());
                return null;
            }
            // Recalculate maxResultsForSuggest if filters were specified
            if (maxResultsByFilters != Integer.MAX_VALUE) {
                maxResultsForSuggest = Math.round(maxResultsByFilters * maxResultsForSuggestParamValue);
            }
        }
    }
    return maxResultsForSuggest;
}
Also used: Query (org.apache.lucene.search.Query), SyntaxError (org.apache.solr.search.SyntaxError), QParser (org.apache.solr.search.QParser), SolrParams (org.apache.solr.common.params.SolrParams), ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams), SolrIndexSearcher (org.apache.solr.search.SolrIndexSearcher), IOException (java.io.IOException), DocSet (org.apache.solr.search.DocSet)
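
In the fractional branch above, the DocSet size of the most restrictive filter becomes the document ceiling. A minimal sketch of that calculation, assuming a made-up filter query string and a parameter value of 0.5 (50%), and omitting the SyntaxError/IOException handling shown above:

// Sketch only: parse a hypothetical filter, count its matches via a DocSet,
// and scale by the fractional spellcheck.maxResultsForSuggest value.
QParser parser = QParser.getParser("inStock:true", rb.req);     // "inStock:true" is a made-up filter
DocSet matches = rb.req.getSearcher().getDocSet(parser.getQuery());
int maxResultsForSuggest = Math.round(matches.size() * 0.5f);   // 50% of the filter's hit count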

Example 3 with DocSet

Use of org.apache.solr.search.DocSet in the project lucene-solr by apache.

From the class StatsField, the method computeBaseDocSet:

/**
   * Computes a base {@link DocSet} for the current request to be used
   * when computing global stats for the local index.
   *
   * This is typically the same as the main DocSet for the {@link ResponseBuilder}
   * unless {@link CommonParams#TAG tag}ged filter queries have been excluded using 
   * the {@link CommonParams#EXCLUDE ex} local param
   */
public DocSet computeBaseDocSet() throws IOException {
    DocSet docs = rb.getResults().docSet;
    Map<?, ?> tagMap = (Map<?, ?>) rb.req.getContext().get("tags");
    if (excludeTagList.isEmpty() || null == tagMap) {
        // either the exclusion list is empty, or there
        // aren't any tagged filters to exclude anyway.
        return docs;
    }
    IdentityHashMap<Query, Boolean> excludeSet = new IdentityHashMap<Query, Boolean>();
    for (String excludeTag : excludeTagList) {
        Object olst = tagMap.get(excludeTag);
        // tagMap has entries of List<String,List<QParser>>, but subject to change in the future
        if (!(olst instanceof Collection))
            continue;
        for (Object o : (Collection<?>) olst) {
            if (!(o instanceof QParser))
                continue;
            QParser qp = (QParser) o;
            try {
                excludeSet.put(qp.getQuery(), Boolean.TRUE);
            } catch (SyntaxError e) {
                // this shouldn't be possible since the request should have already
                // failed when attempting to execute the query, but just in case...
                throw new SolrException(ErrorCode.BAD_REQUEST, "Excluded query can't be parsed: " + originalParam + " due to: " + e.getMessage(), e);
            }
        }
    }
    if (excludeSet.size() == 0)
        return docs;
    List<Query> qlist = new ArrayList<Query>();
    // add the base query
    if (!excludeSet.containsKey(rb.getQuery())) {
        qlist.add(rb.getQuery());
    }
    // add the filters
    if (rb.getFilters() != null) {
        for (Query q : rb.getFilters()) {
            if (!excludeSet.containsKey(q)) {
                qlist.add(q);
            }
        }
    }
    // get the new base docset for this facet
    return searcher.getDocSet(qlist);
}
Also used: Query (org.apache.lucene.search.Query), FunctionQuery (org.apache.lucene.queries.function.FunctionQuery), IdentityHashMap (java.util.IdentityHashMap), ArrayList (java.util.ArrayList), SyntaxError (org.apache.solr.search.SyntaxError), QParser (org.apache.solr.search.QParser), Collection (java.util.Collection), Map (java.util.Map), DocSet (org.apache.solr.search.DocSet), SolrException (org.apache.solr.common.SolrException)
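
The exclusion path above is driven by tagged filter queries, as referenced in the Javadoc. A minimal request-parameter sketch that would exercise it, with a made-up tag name and field names (it uses ModifiableSolrParams from org.apache.solr.common.params):

// Sketch only: a tagged fq plus a stats.field that excludes it, so computeBaseDocSet
// rebuilds the base DocSet without that filter.
ModifiableSolrParams p = new ModifiableSolrParams();
p.set("q", "*:*");
p.add("fq", "{!tag=catFilter}category:books");   // tagged filter; names are hypothetical
p.set("stats", "true");
p.set("stats.field", "{!ex=catFilter}price");    // exclude the tagged filter for this stats field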

Example 4 with DocSet

Use of org.apache.solr.search.DocSet in the project lucene-solr by apache.

From the class FilterQuery, the method createWeight:

@Override
public Weight createWeight(IndexSearcher searcher, boolean needScores, float boost) throws IOException {
    if (!(searcher instanceof SolrIndexSearcher)) {
        // delete-by-query won't have SolrIndexSearcher
        return new BoostQuery(new ConstantScoreQuery(q), 0).createWeight(searcher, needScores, 1f);
    }
    SolrIndexSearcher solrSearcher = (SolrIndexSearcher) searcher;
    DocSet docs = solrSearcher.getDocSet(q);
    return new BoostQuery(new SolrConstantScoreQuery(docs.getTopFilter()), 0).createWeight(searcher, needScores, 1f);
}
Also used: SolrConstantScoreQuery (org.apache.solr.search.SolrConstantScoreQuery), ConstantScoreQuery (org.apache.lucene.search.ConstantScoreQuery), SolrIndexSearcher (org.apache.solr.search.SolrIndexSearcher), BoostQuery (org.apache.lucene.search.BoostQuery), DocSet (org.apache.solr.search.DocSet)
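
The same pattern can be sketched outside createWeight: fetch the (possibly cached) DocSet for a query and wrap it as a zero-scoring constant-score query. The wrapped TermQuery below is a made-up example; solrSearcher is the SolrIndexSearcher from the method above:

// Sketch only: the DocSet typically comes from the filter cache, and the 0-boost
// BoostQuery makes the wrapper contribute nothing to the score.
Query inner = new TermQuery(new Term("category", "books"));                // hypothetical query
DocSet cached = solrSearcher.getDocSet(inner);
Query filterOnly = new BoostQuery(new SolrConstantScoreQuery(cached.getTopFilter()), 0);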

Example 5 with DocSet

Use of org.apache.solr.search.DocSet in the project lucene-solr by apache.

From the class FacetProcessor, the method handleBlockJoin:

// returns "true" if filters were applied to fcontext.base already
private boolean handleBlockJoin() throws IOException {
    boolean appliedFilters = false;
    if (!(freq.domain.toChildren || freq.domain.toParent))
        return appliedFilters;
    // TODO: avoid query parsing per-bucket somehow...
    String parentStr = freq.domain.parents;
    Query parentQuery;
    try {
        QParser parser = QParser.getParser(parentStr, fcontext.req);
        parser.setIsFilter(true);
        parentQuery = parser.getQuery();
    } catch (SyntaxError err) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Error parsing block join parent specification: " + parentStr);
    }
    BitDocSet parents = fcontext.searcher.getDocSetBits(parentQuery);
    DocSet input = fcontext.base;
    DocSet result;
    if (freq.domain.toChildren) {
        // If there are filters on this facet, then use them as acceptDocs when executing toChildren.
        // We need to remember to not redundantly re-apply these filters after.
        DocSet acceptDocs = this.filter;
        if (acceptDocs == null) {
            acceptDocs = fcontext.searcher.getLiveDocs();
        } else {
            appliedFilters = true;
        }
        result = BlockJoin.toChildren(input, parents, acceptDocs, fcontext.qcontext);
    } else {
        result = BlockJoin.toParents(input, parents, fcontext.qcontext);
    }
    fcontext.base = result;
    return appliedFilters;
}
Also used: BitDocSet (org.apache.solr.search.BitDocSet), Query (org.apache.lucene.search.Query), BooleanQuery (org.apache.lucene.search.BooleanQuery), SyntaxError (org.apache.solr.search.SyntaxError), QParser (org.apache.solr.search.QParser), SolrException (org.apache.solr.common.SolrException), DocSet (org.apache.solr.search.DocSet)
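
freq.domain.toChildren and freq.domain.toParent correspond to block-join domain changes in the JSON Facet API, with freq.domain.parents holding the parent filter string. A minimal request sketch that would take the toChildren path above (field names and the parent filter are hypothetical; ModifiableSolrParams is from org.apache.solr.common.params):

// Sketch only: a terms facet whose domain is switched from the matched parent
// documents to their children via blockChildren; "type_s:book" marks all parents.
ModifiableSolrParams p = new ModifiableSolrParams();
p.set("q", "author_s:someAuthor");
p.set("json.facet",
    "{ childCats : { type: terms, field: cat_s, " +
    "domain: { blockChildren : \"type_s:book\" } } }");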

Aggregations

DocSet (org.apache.solr.search.DocSet): 37 uses
BitDocSet (org.apache.solr.search.BitDocSet): 19 uses
Query (org.apache.lucene.search.Query): 15 uses
Term (org.apache.lucene.index.Term): 12 uses
TermQuery (org.apache.lucene.search.TermQuery): 11 uses
FixedBitSet (org.apache.lucene.util.FixedBitSet): 9 uses
DocIterator (org.apache.solr.search.DocIterator): 9 uses
BooleanQuery (org.apache.lucene.search.BooleanQuery): 8 uses
SchemaField (org.apache.solr.schema.SchemaField): 8 uses
SolrIndexSearcher (org.apache.solr.search.SolrIndexSearcher): 7 uses
LeafReaderContext (org.apache.lucene.index.LeafReaderContext): 6 uses
HashDocSet (org.apache.solr.search.HashDocSet): 6 uses
SortedIntDocSet (org.apache.solr.search.SortedIntDocSet): 6 uses
WrappedQuery (org.apache.solr.search.WrappedQuery): 6 uses
FieldType (org.apache.solr.schema.FieldType): 5 uses
ArrayList (java.util.ArrayList): 4 uses
BytesRef (org.apache.lucene.util.BytesRef): 4 uses
NamedList (org.apache.solr.common.util.NamedList): 4 uses
SimpleOrderedMap (org.apache.solr.common.util.SimpleOrderedMap): 4 uses
IdentityHashMap (java.util.IdentityHashMap): 3 uses