Example 81 with SolrException

Use of org.apache.solr.common.SolrException in the project lucene-solr by apache.

From the class DocValuesStats, method getCounts:

public static StatsValues getCounts(SolrIndexSearcher searcher, StatsField statsField, DocSet docs, String[] facet) throws IOException {
    final SchemaField schemaField = statsField.getSchemaField();
    assert null != schemaField : "DocValuesStats requires a StatsField using a SchemaField";
    final String fieldName = schemaField.getName();
    final FieldType ft = schemaField.getType();
    final StatsValues res = StatsValuesFactory.createStatsValues(statsField);
    // Initialize facet stats if facets have been passed in
    final FieldFacetStats[] facetStats = new FieldFacetStats[facet.length];
    int upto = 0;
    for (String facetField : facet) {
        SchemaField facetSchemaField = searcher.getSchema().getField(facetField);
        if (facetSchemaField.multiValued()) {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Stats can only facet on single-valued fields, not: " + facetField);
        }
        facetStats[upto++] = new FieldFacetStats(searcher, facetSchemaField, statsField);
    }
    // TODO: remove multiValuedFieldCache(), check dv type / uninversion type?
    final boolean multiValued = schemaField.multiValued() || ft.multiValuedFieldCache();
    // for term lookups only
    SortedSetDocValues si;
    // for mapping per-segment ords to global ones
    OrdinalMap ordinalMap = null;
    if (multiValued) {
        si = searcher.getSlowAtomicReader().getSortedSetDocValues(fieldName);
        if (si instanceof MultiSortedSetDocValues) {
            ordinalMap = ((MultiDocValues.MultiSortedSetDocValues) si).mapping;
        }
    } else {
        SortedDocValues single = searcher.getSlowAtomicReader().getSortedDocValues(fieldName);
        si = single == null ? null : DocValues.singleton(single);
        if (single instanceof MultiDocValues.MultiSortedDocValues) {
            ordinalMap = ((MultiDocValues.MultiSortedDocValues) single).mapping;
        }
    }
    if (si == null) {
        si = DocValues.emptySortedSet();
    }
    if (si.getValueCount() >= Integer.MAX_VALUE) {
        throw new UnsupportedOperationException("Currently this stats method is limited to " + Integer.MAX_VALUE + " unique terms");
    }
    int missingDocCountTotal = 0;
    final int nTerms = (int) si.getValueCount();
    // count collection array only needs to be as big as the number of terms we are
    // going to collect counts for.
    final int[] counts = new int[nTerms];
    Filter filter = docs.getTopFilter();
    List<LeafReaderContext> leaves = searcher.getTopReaderContext().leaves();
    for (int subIndex = 0; subIndex < leaves.size(); subIndex++) {
        LeafReaderContext leaf = leaves.get(subIndex);
        // solr docsets already exclude any deleted docs
        DocIdSet dis = filter.getDocIdSet(leaf, null);
        DocIdSetIterator disi = null;
        if (dis != null) {
            disi = dis.iterator();
        }
        if (disi != null) {
            int docBase = leaf.docBase;
            if (multiValued) {
                SortedSetDocValues sub = leaf.reader().getSortedSetDocValues(fieldName);
                if (sub == null) {
                    sub = DocValues.emptySortedSet();
                }
                SortedDocValues singleton = DocValues.unwrapSingleton(sub);
                if (singleton != null) {
                    // some codecs may optimize SORTED_SET storage for single-valued fields
                    missingDocCountTotal += accumSingle(counts, docBase, facetStats, singleton, disi, subIndex, ordinalMap);
                } else {
                    missingDocCountTotal += accumMulti(counts, docBase, facetStats, sub, disi, subIndex, ordinalMap);
                }
            } else {
                SortedDocValues sub = leaf.reader().getSortedDocValues(fieldName);
                if (sub == null) {
                    sub = DocValues.emptySorted();
                }
                missingDocCountTotal += accumSingle(counts, docBase, facetStats, sub, disi, subIndex, ordinalMap);
            }
        }
    }
    // add results in index order
    for (int ord = 0; ord < counts.length; ord++) {
        int count = counts[ord];
        if (count > 0) {
            final BytesRef value = si.lookupOrd(ord);
            res.accumulate(value, count);
            for (FieldFacetStats f : facetStats) {
                f.accumulateTermNum(ord, value);
            }
        }
    }
    res.addMissing(missingDocCountTotal);
    if (facetStats.length > 0) {
        for (FieldFacetStats f : facetStats) {
            Map<String, StatsValues> facetStatsValues = f.facetStatsValues;
            f.accumulateMissing();
            res.addFacet(f.name, facetStatsValues);
        }
    }
    return res;
}
Also used: MultiSortedSetDocValues(org.apache.lucene.index.MultiDocValues.MultiSortedSetDocValues) DocIdSet(org.apache.lucene.search.DocIdSet) MultiDocValues(org.apache.lucene.index.MultiDocValues) OrdinalMap(org.apache.lucene.index.MultiDocValues.OrdinalMap) SortedDocValues(org.apache.lucene.index.SortedDocValues) FieldType(org.apache.solr.schema.FieldType) SchemaField(org.apache.solr.schema.SchemaField) FieldFacetStats(org.apache.solr.handler.component.FieldFacetStats) SortedSetDocValues(org.apache.lucene.index.SortedSetDocValues) Filter(org.apache.solr.search.Filter) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) StatsValues(org.apache.solr.handler.component.StatsValues) DocIdSetIterator(org.apache.lucene.search.DocIdSetIterator) SolrException(org.apache.solr.common.SolrException) BytesRef(org.apache.lucene.util.BytesRef)
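
The single-valued branch above wraps a SORTED doc-values instance as a singleton SORTED_SET so that the rest of the method has only one code path. A minimal sketch of that access pattern, assuming a LeafReader and field name supplied by the caller (the class and method names here are hypothetical):

import java.io.IOException;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.SortedDocValues;
import org.apache.lucene.index.SortedSetDocValues;

public final class DocValuesAccess {

    // Prefer SORTED doc values for single-valued fields and wrap them as a
    // singleton SORTED_SET, so callers always consume SortedSetDocValues.
    static SortedSetDocValues sortedSetView(LeafReader reader, String field) throws IOException {
        SortedDocValues single = reader.getSortedDocValues(field);
        if (single != null) {
            return DocValues.singleton(single);
        }
        SortedSetDocValues multi = reader.getSortedSetDocValues(field);
        return multi == null ? DocValues.emptySortedSet() : multi;
    }
}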

Example 82 with SolrException

Use of org.apache.solr.common.SolrException in the project lucene-solr by apache.

From the class PerSegmentSingleValuedFaceting, method getFacetCounts:

NamedList<Integer> getFacetCounts(Executor executor) throws IOException {
    CompletionService<SegFacet> completionService = new ExecutorCompletionService<>(executor);
    // reuse the translation logic to go from top level set to per-segment set
    baseSet = docs.getTopFilter();
    final List<LeafReaderContext> leaves = searcher.getTopReaderContext().leaves();
    // The list of pending tasks that aren't immediately submitted
    // TODO: Is there a completion service, or a delegating executor that can
    // limit the number of concurrent tasks submitted to a bigger executor?
    LinkedList<Callable<SegFacet>> pending = new LinkedList<>();
    int threads = nThreads <= 0 ? Integer.MAX_VALUE : nThreads;
    for (final LeafReaderContext leaf : leaves) {
        final SegFacet segFacet = new SegFacet(leaf);
        Callable<SegFacet> task = () -> {
            segFacet.countTerms();
            return segFacet;
        };
        if (--threads >= 0) {
            completionService.submit(task);
        } else {
            pending.add(task);
        }
    }
    // now merge the per-segment results
    PriorityQueue<SegFacet> queue = new PriorityQueue<SegFacet>(leaves.size()) {

        @Override
        protected boolean lessThan(SegFacet a, SegFacet b) {
            return a.tempBR.compareTo(b.tempBR) < 0;
        }
    };
    boolean hasMissingCount = false;
    int missingCount = 0;
    for (int i = 0, c = leaves.size(); i < c; i++) {
        SegFacet seg = null;
        try {
            Future<SegFacet> future = completionService.take();
            seg = future.get();
            if (!pending.isEmpty()) {
                completionService.submit(pending.removeFirst());
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, e);
        } catch (ExecutionException e) {
            Throwable cause = e.getCause();
            if (cause instanceof RuntimeException) {
                throw (RuntimeException) cause;
            } else {
                throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Error in per-segment faceting on field: " + fieldName, cause);
            }
        }
        if (seg.startTermIndex < seg.endTermIndex) {
            if (seg.startTermIndex == -1) {
                hasMissingCount = true;
                missingCount += seg.counts[0];
                seg.pos = 0;
            } else {
                seg.pos = seg.startTermIndex;
            }
            if (seg.pos < seg.endTermIndex && (mincount < 1 || seg.hasAnyCount)) {
                seg.tenum = seg.si.termsEnum();
                seg.tenum.seekExact(seg.pos);
                seg.tempBR = seg.tenum.term();
                queue.add(seg);
            }
        }
    }
    FacetCollector collector;
    if (sort.equals(FacetParams.FACET_SORT_COUNT) || sort.equals(FacetParams.FACET_SORT_COUNT_LEGACY)) {
        collector = new CountSortedFacetCollector(offset, limit, mincount);
    } else {
        collector = new IndexSortedFacetCollector(offset, limit, mincount);
    }
    BytesRefBuilder val = new BytesRefBuilder();
    while (queue.size() > 0) {
        SegFacet seg = queue.top();
        boolean collect = termFilter == null || termFilter.test(seg.tempBR);
        // copy the term bytes before advancing the enum, since the BytesRef
        // returned by the term enum may be shared across calls.
        if (collect) {
            val.copyBytes(seg.tempBR);
        }
        int count = 0;
        do {
            if (collect) {
                count += seg.counts[seg.pos - seg.startTermIndex];
            }
            // if mincount > 0 then seg.pos++ can skip ahead to the next non-zero entry.
            do {
                ++seg.pos;
            } while ((seg.pos < seg.endTermIndex) // stop incrementing before we run off the end
                  && (seg.tenum.next() != null || true) // move the term enum forward in step with pos -- don't care about the value
                  && (mincount > 0) // only skip ahead if mincount > 0
                  && (seg.counts[seg.pos - seg.startTermIndex] == 0)); // skip zero counts
            if (seg.pos >= seg.endTermIndex) {
                queue.pop();
                seg = queue.top();
            } else {
                seg.tempBR = seg.tenum.term();
                seg = queue.updateTop();
            }
        } while (seg != null && val.get().compareTo(seg.tempBR) == 0);
        if (collect) {
            boolean stop = collector.collect(val.get(), count);
            if (stop)
                break;
        }
    }
    NamedList<Integer> res = collector.getFacetCounts();
    // convert labels to readable form    
    FieldType ft = searcher.getSchema().getFieldType(fieldName);
    int sz = res.size();
    for (int i = 0; i < sz; i++) {
        res.setName(i, ft.indexedToReadable(res.getName(i)));
    }
    if (missing) {
        if (!hasMissingCount) {
            missingCount = SimpleFacets.getFieldMissingCount(searcher, docs, fieldName);
        }
        res.add(null, missingCount);
    }
    return res;
}
Also used: ExecutorCompletionService(java.util.concurrent.ExecutorCompletionService) Callable(java.util.concurrent.Callable) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) ExecutionException(java.util.concurrent.ExecutionException) SolrException(org.apache.solr.common.SolrException) BytesRefBuilder(org.apache.lucene.util.BytesRefBuilder) PriorityQueue(org.apache.lucene.util.PriorityQueue) LinkedList(java.util.LinkedList) FieldType(org.apache.solr.schema.FieldType)
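
The TODO in this method asks for a completion service or delegating executor that caps concurrent submissions; the pending-list idiom it already uses (submit up to nThreads tasks, then backfill one pending task per completion) can be factored out. A self-contained sketch of that idiom under hypothetical names (BoundedCompletion, runAll and maxInFlight are not Solr API):

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public final class BoundedCompletion {

    // Run every task while keeping at most maxInFlight (>= 1) submitted at a
    // time: each result taken off the queue backfills one pending task.
    static <T> void runAll(ExecutorService executor, List<Callable<T>> tasks, int maxInFlight)
            throws InterruptedException, ExecutionException {
        CompletionService<T> cs = new ExecutorCompletionService<>(executor);
        Deque<Callable<T>> pending = new ArrayDeque<>(tasks);
        for (int i = 0; i < maxInFlight && !pending.isEmpty(); i++) {
            cs.submit(pending.removeFirst());
        }
        for (int done = 0; done < tasks.size(); done++) {
            cs.take().get(); // rethrows task failures, as the try/catch above does
            if (!pending.isEmpty()) {
                cs.submit(pending.removeFirst());
            }
        }
    }

    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        try {
            List<Callable<Integer>> tasks = List.of(() -> 1, () -> 2, () -> 3);
            runAll(pool, tasks, 2);
        } finally {
            pool.shutdown();
        }
    }
}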

Example 83 with SolrException

Use of org.apache.solr.common.SolrException in the project lucene-solr by apache.

From the class FacetProcessor, method handleBlockJoin:

// returns "true" if filters were applied to fcontext.base already
private boolean handleBlockJoin() throws IOException {
    boolean appliedFilters = false;
    if (!(freq.domain.toChildren || freq.domain.toParent))
        return appliedFilters;
    // TODO: avoid query parsing per-bucket somehow...
    String parentStr = freq.domain.parents;
    Query parentQuery;
    try {
        QParser parser = QParser.getParser(parentStr, fcontext.req);
        parser.setIsFilter(true);
        parentQuery = parser.getQuery();
    } catch (SyntaxError err) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Error parsing block join parent specification: " + parentStr, err);
    }
    BitDocSet parents = fcontext.searcher.getDocSetBits(parentQuery);
    DocSet input = fcontext.base;
    DocSet result;
    if (freq.domain.toChildren) {
        // If there are filters on this facet, then use them as acceptDocs when executing toChildren.
        // We need to remember to not redundantly re-apply these filters after.
        DocSet acceptDocs = this.filter;
        if (acceptDocs == null) {
            acceptDocs = fcontext.searcher.getLiveDocs();
        } else {
            appliedFilters = true;
        }
        result = BlockJoin.toChildren(input, parents, acceptDocs, fcontext.qcontext);
    } else {
        result = BlockJoin.toParents(input, parents, fcontext.qcontext);
    }
    fcontext.base = result;
    return appliedFilters;
}
Also used: BitDocSet(org.apache.solr.search.BitDocSet) Query(org.apache.lucene.search.Query) BooleanQuery(org.apache.lucene.search.BooleanQuery) SyntaxError(org.apache.solr.search.SyntaxError) QParser(org.apache.solr.search.QParser) SolrException(org.apache.solr.common.SolrException) DocSet(org.apache.solr.search.DocSet)
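
The catch block turns the checked SyntaxError into an HTTP 400 via SolrException.ErrorCode.BAD_REQUEST, the same pattern the other examples use. A sketch of that parse-and-wrap pattern as a reusable helper (FilterParsing and parseFilter are hypothetical names, not Solr API):

import org.apache.lucene.search.Query;
import org.apache.solr.common.SolrException;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.search.QParser;
import org.apache.solr.search.SyntaxError;

public final class FilterParsing {

    // Parse a filter query string, translating the checked SyntaxError into
    // a 400 BAD_REQUEST SolrException that preserves the cause.
    static Query parseFilter(String qstr, SolrQueryRequest req) {
        try {
            QParser parser = QParser.getParser(qstr, req);
            parser.setIsFilter(true); // filter context: no score contribution
            return parser.getQuery();
        } catch (SyntaxError e) {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
                    "Error parsing filter: " + qstr, e);
        }
    }
}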

Example 84 with SolrException

Use of org.apache.solr.common.SolrException in the project lucene-solr by apache.

From the class FacetProcessor, method createProcessor:

/** Factory method for invoking the JSON facet framework as a whole.
   * Note: this is currently only used from SimpleFacets, not from JSON Facet API itself. */
public static FacetProcessor<?> createProcessor(SolrQueryRequest req, Map<String, Object> params, DocSet docs) {
    FacetParser parser = new FacetTopParser(req);
    FacetRequest facetRequest = null;
    try {
        facetRequest = parser.parse(params);
    } catch (SyntaxError syntaxError) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, syntaxError);
    }
    FacetContext fcontext = new FacetContext();
    fcontext.base = docs;
    fcontext.req = req;
    fcontext.searcher = req.getSearcher();
    fcontext.qcontext = QueryContext.newContext(fcontext.searcher);
    return facetRequest.createFacetProcessor(fcontext);
}
Also used: SyntaxError(org.apache.solr.search.SyntaxError) SolrException(org.apache.solr.common.SolrException)
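
A hedged usage sketch for this factory, based on the note above that SimpleFacets drives it: the spec map follows the JSON Facet API shape (top-level keys are facet names), and the process()/getResponse() calls are assumptions about the processor's lifecycle rather than something shown above. The facet name and field are made up:

import java.io.IOException;
import java.util.Map;
import org.apache.solr.request.SolrQueryRequest;
import org.apache.solr.search.DocSet;
import org.apache.solr.search.facet.FacetProcessor;

public final class JsonFacetExample {

    // Build a JSON-facet-style spec, create the processor, and run it.
    // req and matchingDocs are assumed to come from the enclosing handler.
    static Object runTermsFacet(SolrQueryRequest req, DocSet matchingDocs) throws IOException {
        Map<String, Object> terms = Map.of("type", "terms", "field", "cat", "limit", 10);
        Map<String, Object> spec = Map.of("cats", terms); // "cats" names the facet in the response
        FacetProcessor<?> proc = FacetProcessor.createProcessor(req, spec, matchingDocs);
        proc.process();            // assumed: executes the facet computation over matchingDocs
        return proc.getResponse(); // assumed: the NamedList-style result tree
    }
}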

Example 85 with SolrException

Use of org.apache.solr.common.SolrException in the project lucene-solr by apache.

From the class FacetProcessor, method evalFilters:

private void evalFilters() throws IOException {
    if (freq.domain.filters == null || freq.domain.filters.isEmpty())
        return;
    List<Query> qlist = new ArrayList<>(freq.domain.filters.size());
    // TODO: prevent parsing filters each time!
    for (Object rawFilter : freq.domain.filters) {
        if (rawFilter instanceof String) {
            try {
                QParser parser = QParser.getParser((String) rawFilter, fcontext.req);
                parser.setIsFilter(true);
                Query symbolicFilter = parser.getQuery();
                qlist.add(symbolicFilter);
            } catch (SyntaxError syntaxError) {
                throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, syntaxError);
            }
        } else if (rawFilter instanceof Map) {
            Map<String, Object> m = (Map<String, Object>) rawFilter;
            String type;
            Object args;
            if (m.size() == 1) {
                Map.Entry<String, Object> entry = m.entrySet().iterator().next();
                type = entry.getKey();
                args = entry.getValue();
            } else {
                throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can't convert map to query:" + rawFilter);
            }
            if (!"param".equals(type)) {
                throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown type. Can't convert map to query:" + rawFilter);
            }
            String tag;
            if (!(args instanceof String)) {
                throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can't retrieve non-string param:" + args);
            }
            tag = (String) args;
            String[] qstrings = fcontext.req.getParams().getParams(tag);
            if (qstrings != null) {
                for (String qstring : qstrings) {
                    try {
                        QParser parser = QParser.getParser(qstring, fcontext.req);
                        parser.setIsFilter(true);
                        Query symbolicFilter = parser.getQuery();
                        qlist.add(symbolicFilter);
                    } catch (SyntaxError syntaxError) {
                        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, syntaxError);
                    }
                }
            }
        } else {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Bad query (expected a string):" + rawFilter);
        }
    }
    this.filter = fcontext.searcher.getDocSet(qlist);
}
Also used: Query(org.apache.lucene.search.Query) BooleanQuery(org.apache.lucene.search.BooleanQuery) SyntaxError(org.apache.solr.search.SyntaxError) ArrayList(java.util.ArrayList) QParser(org.apache.solr.search.QParser) SimpleOrderedMap(org.apache.solr.common.util.SimpleOrderedMap) LinkedHashMap(java.util.LinkedHashMap) Map(java.util.Map) IdentityHashMap(java.util.IdentityHashMap) SolrException(org.apache.solr.common.SolrException)
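
Each entry of freq.domain.filters can take one of two shapes: a plain query string, or a one-entry map whose only supported key is "param", which expands every request parameter registered under that tag. A short sketch of the two shapes, with made-up values:

import java.util.List;
import java.util.Map;

public final class DomainFilterShapes {

    // The two rawFilter shapes the loop above accepts; anything else is
    // rejected with BAD_REQUEST. The concrete values here are hypothetical.
    static final List<Object> FILTERS = List.of(
            "inStock:true",               // parsed directly via QParser
            Map.of("param", "myFilters")  // expands every request param named "myFilters"
    );
}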

Aggregations

SolrException (org.apache.solr.common.SolrException): 617 usages
IOException (java.io.IOException): 172 usages
ArrayList (java.util.ArrayList): 100 usages
ModifiableSolrParams (org.apache.solr.common.params.ModifiableSolrParams): 80 usages
NamedList (org.apache.solr.common.util.NamedList): 79 usages
HashMap (java.util.HashMap): 75 usages
Map (java.util.Map): 70 usages
SolrParams (org.apache.solr.common.params.SolrParams): 64 usages
KeeperException (org.apache.zookeeper.KeeperException): 60 usages
Test (org.junit.Test): 55 usages
Replica (org.apache.solr.common.cloud.Replica): 48 usages
Slice (org.apache.solr.common.cloud.Slice): 45 usages
DocCollection (org.apache.solr.common.cloud.DocCollection): 41 usages
SolrInputDocument (org.apache.solr.common.SolrInputDocument): 39 usages
SchemaField (org.apache.solr.schema.SchemaField): 39 usages
List (java.util.List): 38 usages
SimpleOrderedMap (org.apache.solr.common.util.SimpleOrderedMap): 38 usages
SolrServerException (org.apache.solr.client.solrj.SolrServerException): 37 usages
SolrQueryRequest (org.apache.solr.request.SolrQueryRequest): 34 usages
SolrCore (org.apache.solr.core.SolrCore): 33 usages