
Example 1 with DocSlice

Use of org.apache.solr.search.DocSlice in project lucene-solr by apache.

From class CarrotClusteringEngine, method getDocuments:

/**
 * Prepares Carrot2 documents for clustering.
 */
private List<Document> getDocuments(SolrDocumentList solrDocList, Map<SolrDocument, Integer> docIds, Query query, final SolrQueryRequest sreq) throws IOException {
    SolrHighlighter highlighter = null;
    SolrParams solrParams = sreq.getParams();
    SolrCore core = sreq.getCore();
    String urlField = solrParams.get(CarrotParams.URL_FIELD_NAME, "url");
    String titleFieldSpec = solrParams.get(CarrotParams.TITLE_FIELD_NAME, "title");
    String snippetFieldSpec = solrParams.get(CarrotParams.SNIPPET_FIELD_NAME, titleFieldSpec);
    String languageField = solrParams.get(CarrotParams.LANGUAGE_FIELD_NAME, null);
    // Maps Solr field names to Carrot2 custom field names
    Map<String, String> customFields = getCustomFieldsMap(solrParams);
    // Parse language code map string into a map
    Map<String, String> languageCodeMap = new HashMap<>();
    if (StringUtils.isNotBlank(languageField)) {
        for (String pair : solrParams.get(CarrotParams.LANGUAGE_CODE_MAP, "").split("[, ]")) {
            final String[] split = pair.split(":");
            if (split.length == 2 && StringUtils.isNotBlank(split[0]) && StringUtils.isNotBlank(split[1])) {
                languageCodeMap.put(split[0], split[1]);
            } else {
                log.warn("Unsupported format for " + CarrotParams.LANGUAGE_CODE_MAP + ": '" + pair + "'. Skipping this mapping.");
            }
        }
    }
    // Get the documents
    boolean produceSummary = solrParams.getBool(CarrotParams.PRODUCE_SUMMARY, false);
    SolrQueryRequest req = null;
    String[] snippetFieldAry = null;
    if (produceSummary) {
        highlighter = HighlightComponent.getHighlighter(core);
        if (highlighter != null) {
            Map<String, Object> args = new HashMap<>();
            snippetFieldAry = snippetFieldSpec.split("[, ]");
            args.put(HighlightParams.FIELDS, snippetFieldAry);
            args.put(HighlightParams.HIGHLIGHT, "true");
            //we don't care about actually highlighting the area
            args.put(HighlightParams.SIMPLE_PRE, "");
            args.put(HighlightParams.SIMPLE_POST, "");
            args.put(HighlightParams.FRAGSIZE, solrParams.getInt(CarrotParams.SUMMARY_FRAGSIZE, solrParams.getInt(HighlightParams.FRAGSIZE, 100)));
            args.put(HighlightParams.SNIPPETS, solrParams.getInt(CarrotParams.SUMMARY_SNIPPETS, solrParams.getInt(HighlightParams.SNIPPETS, 1)));
            req = new LocalSolrQueryRequest(core, query.toString(), "", 0, 1, args) {

                @Override
                public SolrIndexSearcher getSearcher() {
                    return sreq.getSearcher();
                }
            };
        } else {
            log.warn("No highlighter configured, cannot produce summary");
            produceSummary = false;
        }
    }
    Iterator<SolrDocument> docsIter = solrDocList.iterator();
    List<Document> result = new ArrayList<>(solrDocList.size());
    float[] scores = { 1.0f };
    int[] docsHolder = new int[1];
    Query theQuery = query;
    while (docsIter.hasNext()) {
        SolrDocument sdoc = docsIter.next();
        String snippet = null;
        // See comment in ClusteringComponent#finishStage().
        if (produceSummary && docIds != null) {
            docsHolder[0] = docIds.get(sdoc).intValue();
            DocList docAsList = new DocSlice(0, 1, docsHolder, scores, 1, 1.0f);
            NamedList<Object> highlights = highlighter.doHighlighting(docAsList, theQuery, req, snippetFieldAry);
            if (highlights != null && highlights.size() == 1) {
                // should only be one value given our setup
                // should only be one document
                @SuppressWarnings("unchecked") NamedList<String[]> tmp = (NamedList<String[]>) highlights.getVal(0);
                final StringBuilder sb = new StringBuilder();
                for (int j = 0; j < snippetFieldAry.length; j++) {
                    // Join fragments with a period, so that Carrot2 does not create
                    // cross-fragment phrases; such phrases rarely make sense.
                    String[] highlt = tmp.get(snippetFieldAry[j]);
                    if (highlt != null && highlt.length > 0) {
                        for (int i = 0; i < highlt.length; i++) {
                            sb.append(highlt[i]);
                            sb.append(" . ");
                        }
                    }
                }
                snippet = sb.toString();
            }
        }
        // If summaries not enabled or summary generation failed, use full content.
        if (snippet == null) {
            snippet = getConcatenated(sdoc, snippetFieldSpec);
        }
        // Create a Carrot2 document
        Document carrotDocument = new Document(getConcatenated(sdoc, titleFieldSpec), snippet, ObjectUtils.toString(sdoc.getFieldValue(urlField), ""));
        // Store the Solr id of the document; we need it to map document
        // instances found in clusters back to identifiers.
        carrotDocument.setField(SOLR_DOCUMENT_ID, sdoc.getFieldValue(idFieldName));
        // Set language
        if (StringUtils.isNotBlank(languageField)) {
            Collection<Object> languages = sdoc.getFieldValues(languageField);
            if (languages != null) {
                // Use the first Carrot2-supported language
                for (Object l : languages) {
                    String lang = ObjectUtils.toString(l, "");
                    if (languageCodeMap.containsKey(lang)) {
                        lang = languageCodeMap.get(lang);
                    }
                    // The language detection library uses dashes to separate
                    // language variants, such as 'zh-cn', but Carrot2 uses underscores.
                    if (lang.indexOf('-') > 0) {
                        lang = lang.replace('-', '_');
                    }
                    // If the language is supported by Carrot2, we'll get a non-null value
                    final LanguageCode carrot2Language = LanguageCode.forISOCode(lang);
                    if (carrot2Language != null) {
                        carrotDocument.setLanguage(carrot2Language);
                        break;
                    }
                }
            }
        }
        // Add custom fields
        if (customFields != null) {
            for (Entry<String, String> entry : customFields.entrySet()) {
                carrotDocument.setField(entry.getValue(), sdoc.getFieldValue(entry.getKey()));
            }
        }
        result.add(carrotDocument);
    }
    return result;
}
Also used : Query(org.apache.lucene.search.Query) HashMap(java.util.HashMap) SolrCore(org.apache.solr.core.SolrCore) ArrayList(java.util.ArrayList) Document(org.carrot2.core.Document) SolrDocument(org.apache.solr.common.SolrDocument) DocSlice(org.apache.solr.search.DocSlice) LanguageCode(org.carrot2.core.LanguageCode) NamedList(org.apache.solr.common.util.NamedList) SolrIndexSearcher(org.apache.solr.search.SolrIndexSearcher) SolrHighlighter(org.apache.solr.highlight.SolrHighlighter) LocalSolrQueryRequest(org.apache.solr.request.LocalSolrQueryRequest) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) SolrParams(org.apache.solr.common.params.SolrParams) DocList(org.apache.solr.search.DocList)
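The snippet above wraps a single document in a DocSlice so the highlighter can summarize it. All four examples on this page ultimately rely on the same constructor, DocSlice(offset, len, docs, scores, matches, maxScore), and read the result back through a DocIterator. Below is a minimal, self-contained sketch of that API, assuming the Solr 6.x signatures these snippets compile against; the doc ids and scores are invented for illustration.

import org.apache.solr.search.DocIterator;
import org.apache.solr.search.DocList;
import org.apache.solr.search.DocSlice;

public class DocSliceSketch {
    public static void main(String[] args) {
        int[] docs = { 12, 47, 301 };           // internal Lucene doc ids (invented)
        float[] scores = { 0.9f, 0.7f, 0.4f };  // parallel array of scores
        // Expose the first three hits of a result set that matched 10 docs in total.
        DocList page = new DocSlice(0, docs.length, docs, scores, 10, 0.9f);
        DocIterator it = page.iterator();
        while (it.hasNext()) {
            int docId = it.nextDoc();           // advances and returns the next doc id
            System.out.println("doc=" + docId + " score=" + it.score());
        }
    }
}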

Example 2 with DocSlice

Use of org.apache.solr.search.DocSlice in project lucene-solr by apache.

From class ExpandComponent, method process:

@SuppressWarnings("unchecked")
@Override
public void process(ResponseBuilder rb) throws IOException {
    if (!rb.doExpand) {
        return;
    }
    SolrQueryRequest req = rb.req;
    SolrParams params = req.getParams();
    String field = params.get(ExpandParams.EXPAND_FIELD);
    String hint = null;
    if (field == null) {
        List<Query> filters = rb.getFilters();
        if (filters != null) {
            for (Query q : filters) {
                if (q instanceof CollapsingQParserPlugin.CollapsingPostFilter) {
                    CollapsingQParserPlugin.CollapsingPostFilter cp = (CollapsingQParserPlugin.CollapsingPostFilter) q;
                    field = cp.getField();
                    hint = cp.hint;
                }
            }
        }
    }
    if (field == null) {
        throw new IOException("Expand field is null.");
    }
    String sortParam = params.get(ExpandParams.EXPAND_SORT);
    String[] fqs = params.getParams(ExpandParams.EXPAND_FQ);
    String qs = params.get(ExpandParams.EXPAND_Q);
    int limit = params.getInt(ExpandParams.EXPAND_ROWS, 5);
    Sort sort = null;
    if (sortParam != null) {
        sort = SortSpecParsing.parseSortSpec(sortParam, rb.req).getSort();
    }
    Query query;
    if (qs == null) {
        query = rb.getQuery();
    } else {
        try {
            QParser parser = QParser.getParser(qs, req);
            query = parser.getQuery();
        } catch (Exception e) {
            throw new IOException(e);
        }
    }
    List<Query> newFilters = new ArrayList<>();
    if (fqs == null) {
        List<Query> filters = rb.getFilters();
        if (filters != null) {
            for (Query q : filters) {
                if (!(q instanceof CollapsingQParserPlugin.CollapsingPostFilter)) {
                    newFilters.add(q);
                }
            }
        }
    } else {
        try {
            for (String fq : fqs) {
                if (fq != null && fq.trim().length() != 0 && !fq.equals("*:*")) {
                    QParser fqp = QParser.getParser(fq, req);
                    newFilters.add(fqp.getQuery());
                }
            }
        } catch (Exception e) {
            throw new IOException(e);
        }
    }
    SolrIndexSearcher searcher = req.getSearcher();
    LeafReader reader = searcher.getSlowAtomicReader();
    SchemaField schemaField = searcher.getSchema().getField(field);
    FieldType fieldType = schemaField.getType();
    SortedDocValues values = null;
    long nullValue = 0L;
    if (fieldType instanceof StrField) {
        //Get The Top Level SortedDocValues
        if (CollapsingQParserPlugin.HINT_TOP_FC.equals(hint)) {
            Map<String, UninvertingReader.Type> mapping = new HashMap<>();
            mapping.put(field, UninvertingReader.Type.SORTED);
            UninvertingReader uninvertingReader = new UninvertingReader(new ReaderWrapper(searcher.getSlowAtomicReader(), field), mapping);
            values = uninvertingReader.getSortedDocValues(field);
        } else {
            values = DocValues.getSorted(reader, field);
        }
    } else {
        //Get the nullValue for the numeric collapse field
        String defaultValue = searcher.getSchema().getField(field).getDefaultValue();
        final NumberType numType = fieldType.getNumberType();
        // Since the expand component depends on the operation of the collapse
        // component, which validates that numeric field types are 32-bit,
        // we don't need to handle invalid 64-bit field types here.
        if (defaultValue != null) {
            if (numType == NumberType.INTEGER) {
                nullValue = Long.parseLong(defaultValue);
            } else if (numType == NumberType.FLOAT) {
                nullValue = Float.floatToIntBits(Float.parseFloat(defaultValue));
            }
        } else if (NumberType.FLOAT.equals(numType)) {
            // Integer case already handled by nullValue defaulting to 0
            nullValue = Float.floatToIntBits(0.0f);
        }
    }
    FixedBitSet groupBits = null;
    LongHashSet groupSet = null;
    DocList docList = rb.getResults().docList;
    IntHashSet collapsedSet = new IntHashSet(docList.size() * 2);
    //Gather the groups for the current page of documents
    DocIterator idit = docList.iterator();
    int[] globalDocs = new int[docList.size()];
    int docsIndex = -1;
    while (idit.hasNext()) {
        globalDocs[++docsIndex] = idit.nextDoc();
    }
    Arrays.sort(globalDocs);
    Query groupQuery = null;
    /*
     * This code gathers the group information for the current page.
     */
    List<LeafReaderContext> contexts = searcher.getTopReaderContext().leaves();
    if (contexts.size() == 0) {
        //When no context is available we can skip the expanding
        return;
    }
    int currentContext = 0;
    int currentDocBase = contexts.get(currentContext).docBase;
    int nextDocBase = (currentContext + 1) < contexts.size() ? contexts.get(currentContext + 1).docBase : Integer.MAX_VALUE;
    IntObjectHashMap<BytesRef> ordBytes = null;
    if (values != null) {
        groupBits = new FixedBitSet(values.getValueCount());
        MultiDocValues.OrdinalMap ordinalMap = null;
        SortedDocValues[] sortedDocValues = null;
        LongValues segmentOrdinalMap = null;
        SortedDocValues currentValues = null;
        if (values instanceof MultiDocValues.MultiSortedDocValues) {
            ordinalMap = ((MultiDocValues.MultiSortedDocValues) values).mapping;
            sortedDocValues = ((MultiDocValues.MultiSortedDocValues) values).values;
            currentValues = sortedDocValues[currentContext];
            segmentOrdinalMap = ordinalMap.getGlobalOrds(currentContext);
        }
        int count = 0;
        ordBytes = new IntObjectHashMap<>();
        for (int i = 0; i < globalDocs.length; i++) {
            int globalDoc = globalDocs[i];
            while (globalDoc >= nextDocBase) {
                currentContext++;
                currentDocBase = contexts.get(currentContext).docBase;
                nextDocBase = (currentContext + 1) < contexts.size() ? contexts.get(currentContext + 1).docBase : Integer.MAX_VALUE;
                if (ordinalMap != null) {
                    currentValues = sortedDocValues[currentContext];
                    segmentOrdinalMap = ordinalMap.getGlobalOrds(currentContext);
                }
            }
            int contextDoc = globalDoc - currentDocBase;
            if (ordinalMap != null) {
                if (contextDoc > currentValues.docID()) {
                    currentValues.advance(contextDoc);
                }
                if (contextDoc == currentValues.docID()) {
                    int ord = currentValues.ordValue();
                    ++count;
                    BytesRef ref = currentValues.lookupOrd(ord);
                    ord = (int) segmentOrdinalMap.get(ord);
                    ordBytes.put(ord, BytesRef.deepCopyOf(ref));
                    groupBits.set(ord);
                    collapsedSet.add(globalDoc);
                }
            } else {
                if (globalDoc > values.docID()) {
                    values.advance(globalDoc);
                }
                if (globalDoc == values.docID()) {
                    int ord = values.ordValue();
                    ++count;
                    BytesRef ref = values.lookupOrd(ord);
                    ordBytes.put(ord, BytesRef.deepCopyOf(ref));
                    groupBits.set(ord);
                    collapsedSet.add(globalDoc);
                }
            }
        }
        if (count > 0 && count < 200) {
            try {
                groupQuery = getGroupQuery(field, count, ordBytes);
            } catch (Exception e) {
                throw new IOException(e);
            }
        }
    } else {
        groupSet = new LongHashSet(docList.size());
        NumericDocValues collapseValues = contexts.get(currentContext).reader().getNumericDocValues(field);
        int count = 0;
        for (int i = 0; i < globalDocs.length; i++) {
            int globalDoc = globalDocs[i];
            while (globalDoc >= nextDocBase) {
                currentContext++;
                currentDocBase = contexts.get(currentContext).docBase;
                nextDocBase = currentContext + 1 < contexts.size() ? contexts.get(currentContext + 1).docBase : Integer.MAX_VALUE;
                collapseValues = contexts.get(currentContext).reader().getNumericDocValues(field);
            }
            int contextDoc = globalDoc - currentDocBase;
            int valueDocID = collapseValues.docID();
            if (valueDocID < contextDoc) {
                valueDocID = collapseValues.advance(contextDoc);
            }
            long value;
            if (valueDocID == contextDoc) {
                value = collapseValues.longValue();
            } else {
                value = 0;
            }
            if (value != nullValue) {
                ++count;
                groupSet.add(value);
                collapsedSet.add(globalDoc);
            }
        }
        if (count > 0 && count < 200) {
            if (fieldType.isPointField()) {
                groupQuery = getPointGroupQuery(schemaField, count, groupSet);
            } else {
                groupQuery = getGroupQuery(field, fieldType, count, groupSet);
            }
        }
    }
    Collector collector;
    if (sort != null)
        sort = sort.rewrite(searcher);
    Collector groupExpandCollector = null;
    if (values != null) {
        //Get The Top Level SortedDocValues again so we can re-iterate:
        if (CollapsingQParserPlugin.HINT_TOP_FC.equals(hint)) {
            Map<String, UninvertingReader.Type> mapping = new HashMap<>();
            mapping.put(field, UninvertingReader.Type.SORTED);
            UninvertingReader uninvertingReader = new UninvertingReader(new ReaderWrapper(searcher.getSlowAtomicReader(), field), mapping);
            values = uninvertingReader.getSortedDocValues(field);
        } else {
            values = DocValues.getSorted(reader, field);
        }
        groupExpandCollector = new GroupExpandCollector(values, groupBits, collapsedSet, limit, sort);
    } else {
        groupExpandCollector = new NumericGroupExpandCollector(field, nullValue, groupSet, collapsedSet, limit, sort);
    }
    if (groupQuery != null) {
        //Limits the results to documents that are in the same group as the documents in the page.
        newFilters.add(groupQuery);
    }
    SolrIndexSearcher.ProcessedFilter pfilter = searcher.getProcessedFilter(null, newFilters);
    if (pfilter.postFilter != null) {
        pfilter.postFilter.setLastDelegate(groupExpandCollector);
        collector = pfilter.postFilter;
    } else {
        collector = groupExpandCollector;
    }
    if (pfilter.filter == null) {
        searcher.search(query, collector);
    } else {
        Query q = new BooleanQuery.Builder().add(query, Occur.MUST).add(pfilter.filter, Occur.FILTER).build();
        searcher.search(q, collector);
    }
    LongObjectMap<Collector> groups = ((GroupCollector) groupExpandCollector).getGroups();
    NamedList outMap = new SimpleOrderedMap();
    CharsRefBuilder charsRef = new CharsRefBuilder();
    for (LongObjectCursor<Collector> cursor : groups) {
        long groupValue = cursor.key;
        TopDocsCollector<?> topDocsCollector = TopDocsCollector.class.cast(cursor.value);
        TopDocs topDocs = topDocsCollector.topDocs();
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        if (scoreDocs.length > 0) {
            int[] docs = new int[scoreDocs.length];
            float[] scores = new float[scoreDocs.length];
            for (int i = 0; i < docs.length; i++) {
                ScoreDoc scoreDoc = scoreDocs[i];
                docs[i] = scoreDoc.doc;
                scores[i] = scoreDoc.score;
            }
            DocSlice slice = new DocSlice(0, docs.length, docs, scores, topDocs.totalHits, topDocs.getMaxScore());
            if (fieldType instanceof StrField) {
                final BytesRef bytesRef = ordBytes.get((int) groupValue);
                fieldType.indexedToReadable(bytesRef, charsRef);
                String group = charsRef.toString();
                outMap.add(group, slice);
            } else {
                outMap.add(numericToString(fieldType, groupValue), slice);
            }
        }
    }
    rb.rsp.add("expanded", outMap);
}
Also used : StrField(org.apache.solr.schema.StrField) BooleanQuery(org.apache.lucene.search.BooleanQuery) Query(org.apache.lucene.search.Query) TermInSetQuery(org.apache.lucene.search.TermInSetQuery) SolrConstantScoreQuery(org.apache.solr.search.SolrConstantScoreQuery) HashMap(java.util.HashMap) LongObjectHashMap(com.carrotsearch.hppc.LongObjectHashMap) IntObjectHashMap(com.carrotsearch.hppc.IntObjectHashMap) ArrayList(java.util.ArrayList) IntHashSet(com.carrotsearch.hppc.IntHashSet) MultiDocValues(org.apache.lucene.index.MultiDocValues) DocSlice(org.apache.solr.search.DocSlice) ScoreDoc(org.apache.lucene.search.ScoreDoc) FixedBitSet(org.apache.lucene.util.FixedBitSet) Sort(org.apache.lucene.search.Sort) LeafReaderContext(org.apache.lucene.index.LeafReaderContext) CharsRefBuilder(org.apache.lucene.util.CharsRefBuilder) SortedDocValues(org.apache.lucene.index.SortedDocValues) LongHashSet(com.carrotsearch.hppc.LongHashSet) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) NumberType(org.apache.solr.schema.NumberType) QParser(org.apache.solr.search.QParser) SolrParams(org.apache.solr.common.params.SolrParams) NumericDocValues(org.apache.lucene.index.NumericDocValues) DocIterator(org.apache.solr.search.DocIterator) SimpleOrderedMap(org.apache.solr.common.util.SimpleOrderedMap) UninvertingReader(org.apache.solr.uninverting.UninvertingReader) TopDocs(org.apache.lucene.search.TopDocs) TopFieldCollector(org.apache.lucene.search.TopFieldCollector) LeafCollector(org.apache.lucene.search.LeafCollector) Collector(org.apache.lucene.search.Collector) TopScoreDocCollector(org.apache.lucene.search.TopScoreDocCollector) TopDocsCollector(org.apache.lucene.search.TopDocsCollector) BytesRef(org.apache.lucene.util.BytesRef) LeafReader(org.apache.lucene.index.LeafReader) FilterLeafReader(org.apache.lucene.index.FilterLeafReader) NamedList(org.apache.solr.common.util.NamedList) IOException(java.io.IOException) SolrIndexSearcher(org.apache.solr.search.SolrIndexSearcher) FieldType(org.apache.solr.schema.FieldType) CollapsingQParserPlugin(org.apache.solr.search.CollapsingQParserPlugin) SchemaField(org.apache.solr.schema.SchemaField) DocValuesType(org.apache.lucene.index.DocValuesType) LongValues(org.apache.lucene.util.LongValues) DocList(org.apache.solr.search.DocList)
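The final loop in this example converts each group's TopDocs into a DocSlice before adding it to the expanded section of the response. A stand-alone sketch of just that conversion, assuming the Lucene 6.x TopDocs API (int totalHits, getMaxScore()) that this snippet compiles against:

import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.solr.search.DocSlice;

final class TopDocsToSlice {
    static DocSlice toSlice(TopDocs topDocs) {
        ScoreDoc[] scoreDocs = topDocs.scoreDocs;
        int[] docs = new int[scoreDocs.length];
        float[] scores = new float[scoreDocs.length];
        for (int i = 0; i < scoreDocs.length; i++) {
            docs[i] = scoreDocs[i].doc;      // global Lucene doc id
            scores[i] = scoreDocs[i].score;  // kept parallel to the ids
        }
        // matches comes from totalHits; maxScore carries over from the collector
        return new DocSlice(0, docs.length, docs, scores, topDocs.totalHits, topDocs.getMaxScore());
    }
}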

Example 3 with DocSlice

Use of org.apache.solr.search.DocSlice in project Solbase by Photobucket.

From class SolbaseQueryComponent, method process:

/**
 * Actually run the query
 */
@Override
public void process(ResponseBuilder rb) throws IOException {
    SolrQueryRequest req = rb.req;
    SolrQueryResponse rsp = rb.rsp;
    SolrParams params = req.getParams();
    if (!params.getBool(COMPONENT_NAME, true)) {
        return;
    }
    SolrIndexSearcher searcher = req.getSearcher();
    if (rb.getQueryCommand().getOffset() < 0) {
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "'start' parameter cannot be negative");
    }
    // -1 as flag if not set.
    long timeAllowed = (long) params.getInt(CommonParams.TIME_ALLOWED, -1);
    // Optional: This could also be implemented by the top-level searcher sending
    // a filter that lists the ids... that would be transparent to the request
    // handler, but would be more expensive (and would preserve score too if desired).
    String ids = params.get(ShardParams.IDS);
    if (ids != null) {
        List<String> idArr = StrUtils.splitSmart(ids, ",", true);
        int[] luceneIds = new int[idArr.size()];
        int docs = 0;
        for (int i = 0; i < idArr.size(); i++) {
            luceneIds[docs++] = Integer.parseInt(idArr.get(i));
        }
        // We index docId as the Solr unique id, which bounds us to
        // Integer.MAX_VALUE ~= 2 billion documents.
        // docs counts how many ids were collected.
        DocListAndSet res = new DocListAndSet();
        res.docList = new DocSlice(0, docs, luceneIds, null, docs, 0, null);
        if (rb.isNeedDocSet()) {
            List<Query> queries = new ArrayList<Query>();
            queries.add(rb.getQuery());
            List<Query> filters = rb.getFilters();
            if (filters != null)
                queries.addAll(filters);
            res.docSet = searcher.getDocSet(queries);
        }
        rb.setResults(res);
        rsp.add("response", rb.getResults().docList);
        return;
    }
    SolrIndexSearcher.QueryCommand cmd = rb.getQueryCommand();
    cmd.setTimeAllowed(timeAllowed);
    SolrIndexSearcher.QueryResult result = new SolrIndexSearcher.QueryResult();
    searcher.search(result, cmd);
    rb.setResult(result);
    rsp.add("response", rb.getResults().docList);
    rsp.getToLog().add("hits", rb.getResults().docList.matches());
    // The query cache doesn't currently store sort field values, and
    // SolrIndexSearcher doesn't currently have an option to return sort field
    // values. Because of this, we take the documents given and re-derive the
    // sort values.
    boolean fsv = req.getParams().getBool(ResponseBuilder.FIELD_SORT_VALUES, false);
    if (fsv) {
        Sort sort = rb.getSortSpec().getSort();
        SortField[] sortFields = sort == null ? new SortField[] { SortField.FIELD_SCORE } : sort.getSort();
        // order is important for the sort fields
        NamedList sortVals = new NamedList();
        // a dummy Field used when deriving sort values
        Field field = new Field("dummy", "", Field.Store.YES, Field.Index.NO);
        SolrIndexReader reader = searcher.getReader();
        SolrIndexReader[] readers = reader.getLeafReaders();
        SolrIndexReader subReader = reader;
        if (readers.length == 1) {
            // if there is a single segment, use that subReader and avoid
            // looking up each time
            subReader = readers[0];
            readers = null;
        }
        int[] offsets = reader.getLeafOffsets();
        //TODO: need to fetch sort value from collector instead of re-derive lookup from id
        for (SortField sortField : sortFields) {
            int type = sortField.getType();
            if (type == SortField.SCORE || type == SortField.DOC)
                continue;
            FieldComparator comparator = null;
            FieldComparator[] comparators = (readers == null) ? null : new FieldComparator[readers.length];
            String fieldname = sortField.getField();
            FieldType ft = fieldname == null ? null : req.getSchema().getFieldTypeNoEx(fieldname);
            DocSlice docList = (DocSlice) rb.getResults().docList;
            ArrayList<Object> vals = new ArrayList<Object>(docList.size());
            for (int i = docList.offset; i < docList.len; i++) {
                vals.add(new Integer(docList.sorts[i][((EmbeddedSortField) sortField).getFieldNumber() - 1]));
            }
            sortVals.add(fieldname, vals);
        }
        rsp.add("sort_values", sortVals);
    }
    // pre-fetch returned documents
    if (!req.getParams().getBool(ShardParams.IS_SHARD, false) && rb.getResults().docList != null && rb.getResults().docList.size() <= 50) {
        // TODO: this may depend on the highlighter component (or other
        // components?)
        SolrPluginUtils.optimizePreFetchDocs(rb.getResults().docList, rb.getQuery(), req, rsp);
    }
}
Also used : Query(org.apache.lucene.search.Query) DocListAndSet(org.apache.solr.search.DocListAndSet) ArrayList(java.util.ArrayList) SortField(org.apache.lucene.search.SortField) EmbeddedSortField(org.apache.solr.schema.EmbeddedSortField) DocSlice(org.apache.solr.search.DocSlice) SolrIndexReader(org.apache.solr.search.SolrIndexReader) Field(org.apache.lucene.document.Field) Sort(org.apache.lucene.search.Sort) SolrException(org.apache.solr.common.SolrException) SolrQueryResponse(org.apache.solr.request.SolrQueryResponse) NamedList(org.apache.solr.common.util.NamedList) SolrIndexSearcher(org.apache.solr.search.SolrIndexSearcher) FieldType(org.apache.solr.schema.FieldType) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) SolrParams(org.apache.solr.common.params.SolrParams) FieldComparator(org.apache.lucene.search.FieldComparator)
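Example 3 short-circuits the normal search when the shard request carries an explicit ids parameter: the comma-separated ids become a score-less DocSlice directly (Example 4 below does the same after resolving unique keys to internal doc ids). A distilled sketch of that path; note it uses the stock six-argument Solr constructor, not Solbase's seven-argument variant with its extra sorts array.

import java.util.List;
import org.apache.solr.common.util.StrUtils;
import org.apache.solr.search.DocList;
import org.apache.solr.search.DocSlice;

final class IdsToDocList {
    static DocList fromIdsParam(String ids) {
        List<String> idArr = StrUtils.splitSmart(ids, ",", true);
        int[] luceneIds = new int[idArr.size()];
        int docs = 0;
        for (String id : idArr) {
            luceneIds[docs++] = Integer.parseInt(id);  // ids are internal doc ids here
        }
        // null scores and maxScore 0: the list carries ids only, no relevance info
        return new DocSlice(0, docs, luceneIds, null, docs, 0);
    }
}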

Example 4 with DocSlice

Use of org.apache.solr.search.DocSlice in project lucene-solr by apache.

From class QueryComponent, method process:

/**
 * Actually run the query
 */
@Override
public void process(ResponseBuilder rb) throws IOException {
    LOG.debug("process: {}", rb.req.getParams());
    SolrQueryRequest req = rb.req;
    SolrParams params = req.getParams();
    if (!params.getBool(COMPONENT_NAME, true)) {
        return;
    }
    SolrIndexSearcher searcher = req.getSearcher();
    StatsCache statsCache = req.getCore().getStatsCache();
    int purpose = params.getInt(ShardParams.SHARDS_PURPOSE, ShardRequest.PURPOSE_GET_TOP_IDS);
    if ((purpose & ShardRequest.PURPOSE_GET_TERM_STATS) != 0) {
        statsCache.returnLocalStats(rb, searcher);
        return;
    }
    // check if we need to update the local copy of global dfs
    if ((purpose & ShardRequest.PURPOSE_SET_TERM_STATS) != 0) {
        // retrieve from request and update local cache
        statsCache.receiveGlobalStats(req);
    }
    SolrQueryResponse rsp = rb.rsp;
    IndexSchema schema = searcher.getSchema();
    // Optional: This could also be implemented by the top-level searcher sending
    // a filter that lists the ids... that would be transparent to
    // the request handler, but would be more expensive (and would preserve score
    // too if desired).
    String ids = params.get(ShardParams.IDS);
    if (ids != null) {
        SchemaField idField = schema.getUniqueKeyField();
        List<String> idArr = StrUtils.splitSmart(ids, ",", true);
        int[] luceneIds = new int[idArr.size()];
        int docs = 0;
        if (idField.getType().isPointField()) {
            for (int i = 0; i < idArr.size(); i++) {
                int id = searcher.search(idField.getType().getFieldQuery(null, idField, idArr.get(i)), 1).scoreDocs[0].doc;
                if (id >= 0) {
                    luceneIds[docs++] = id;
                }
            }
        } else {
            for (int i = 0; i < idArr.size(); i++) {
                int id = searcher.getFirstMatch(new Term(idField.getName(), idField.getType().toInternal(idArr.get(i))));
                if (id >= 0)
                    luceneIds[docs++] = id;
            }
        }
        DocListAndSet res = new DocListAndSet();
        res.docList = new DocSlice(0, docs, luceneIds, null, docs, 0);
        if (rb.isNeedDocSet()) {
            // TODO: create a cache for this!
            List<Query> queries = new ArrayList<>();
            queries.add(rb.getQuery());
            List<Query> filters = rb.getFilters();
            if (filters != null)
                queries.addAll(filters);
            res.docSet = searcher.getDocSet(queries);
        }
        rb.setResults(res);
        ResultContext ctx = new BasicResultContext(rb);
        rsp.addResponse(ctx);
        return;
    }
    // -1 as flag if not set.
    long timeAllowed = params.getLong(CommonParams.TIME_ALLOWED, -1L);
    if (null != rb.getCursorMark() && 0 < timeAllowed) {
        // fundamentally incompatible
        throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Can not search using both " + CursorMarkParams.CURSOR_MARK_PARAM + " and " + CommonParams.TIME_ALLOWED);
    }
    QueryCommand cmd = rb.getQueryCommand();
    cmd.setTimeAllowed(timeAllowed);
    req.getContext().put(SolrIndexSearcher.STATS_SOURCE, statsCache.get(req));
    QueryResult result = new QueryResult();
    cmd.setSegmentTerminateEarly(params.getBool(CommonParams.SEGMENT_TERMINATE_EARLY, CommonParams.SEGMENT_TERMINATE_EARLY_DEFAULT));
    if (cmd.getSegmentTerminateEarly()) {
        result.setSegmentTerminatedEarly(Boolean.FALSE);
    }
    //
    // grouping / field collapsing
    //
    GroupingSpecification groupingSpec = rb.getGroupingSpec();
    if (groupingSpec != null) {
        // not supported, silently ignore any segmentTerminateEarly flag
        cmd.setSegmentTerminateEarly(false);
        try {
            boolean needScores = (cmd.getFlags() & SolrIndexSearcher.GET_SCORES) != 0;
            if (params.getBool(GroupParams.GROUP_DISTRIBUTED_FIRST, false)) {
                CommandHandler.Builder topsGroupsActionBuilder = new CommandHandler.Builder()
                    .setQueryCommand(cmd)
                    .setNeedDocSet(false) // Order matters here
                    .setIncludeHitCount(true)
                    .setSearcher(searcher);
                for (String field : groupingSpec.getFields()) {
                    topsGroupsActionBuilder.addCommandField(new SearchGroupsFieldCommand.Builder().setField(schema.getField(field)).setGroupSort(groupingSpec.getGroupSort()).setTopNGroups(cmd.getOffset() + cmd.getLen()).setIncludeGroupCount(groupingSpec.isIncludeGroupCount()).build());
                }
                CommandHandler commandHandler = topsGroupsActionBuilder.build();
                commandHandler.execute();
                SearchGroupsResultTransformer serializer = new SearchGroupsResultTransformer(searcher);
                rsp.add("firstPhase", commandHandler.processResult(result, serializer));
                rsp.add("totalHitCount", commandHandler.getTotalHitCount());
                rb.setResult(result);
                return;
            } else if (params.getBool(GroupParams.GROUP_DISTRIBUTED_SECOND, false)) {
                CommandHandler.Builder secondPhaseBuilder = new CommandHandler.Builder().setQueryCommand(cmd).setTruncateGroups(groupingSpec.isTruncateGroups() && groupingSpec.getFields().length > 0).setSearcher(searcher);
                int docsToCollect = Grouping.getMax(groupingSpec.getWithinGroupOffset(), groupingSpec.getWithinGroupLimit(), searcher.maxDoc());
                docsToCollect = Math.max(docsToCollect, 1);
                for (String field : groupingSpec.getFields()) {
                    SchemaField schemaField = schema.getField(field);
                    String[] topGroupsParam = params.getParams(GroupParams.GROUP_DISTRIBUTED_TOPGROUPS_PREFIX + field);
                    if (topGroupsParam == null) {
                        topGroupsParam = new String[0];
                    }
                    List<SearchGroup<BytesRef>> topGroups = new ArrayList<>(topGroupsParam.length);
                    for (String topGroup : topGroupsParam) {
                        SearchGroup<BytesRef> searchGroup = new SearchGroup<>();
                        if (!topGroup.equals(TopGroupsShardRequestFactory.GROUP_NULL_VALUE)) {
                            BytesRefBuilder builder = new BytesRefBuilder();
                            schemaField.getType().readableToIndexed(topGroup, builder);
                            searchGroup.groupValue = builder.get();
                        }
                        topGroups.add(searchGroup);
                    }
                    secondPhaseBuilder.addCommandField(new TopGroupsFieldCommand.Builder().setField(schemaField).setGroupSort(groupingSpec.getGroupSort()).setSortWithinGroup(groupingSpec.getSortWithinGroup()).setFirstPhaseGroups(topGroups).setMaxDocPerGroup(docsToCollect).setNeedScores(needScores).setNeedMaxScore(needScores).build());
                }
                for (String query : groupingSpec.getQueries()) {
                    secondPhaseBuilder.addCommandField(new Builder().setDocsToCollect(docsToCollect).setSort(groupingSpec.getGroupSort()).setQuery(query, rb.req).setDocSet(searcher).build());
                }
                CommandHandler commandHandler = secondPhaseBuilder.build();
                commandHandler.execute();
                TopGroupsResultTransformer serializer = new TopGroupsResultTransformer(rb);
                rsp.add("secondPhase", commandHandler.processResult(result, serializer));
                rb.setResult(result);
                return;
            }
            int maxDocsPercentageToCache = params.getInt(GroupParams.GROUP_CACHE_PERCENTAGE, 0);
            boolean cacheSecondPassSearch = maxDocsPercentageToCache >= 1 && maxDocsPercentageToCache <= 100;
            Grouping.TotalCount defaultTotalCount = groupingSpec.isIncludeGroupCount() ? Grouping.TotalCount.grouped : Grouping.TotalCount.ungrouped;
            // this is normally from "rows"
            int limitDefault = cmd.getLen();
            Grouping grouping = new Grouping(searcher, result, cmd, cacheSecondPassSearch, maxDocsPercentageToCache, groupingSpec.isMain());
            grouping.setGroupSort(groupingSpec.getGroupSort()).setWithinGroupSort(groupingSpec.getSortWithinGroup()).setDefaultFormat(groupingSpec.getResponseFormat()).setLimitDefault(limitDefault).setDefaultTotalCount(defaultTotalCount).setDocsPerGroupDefault(groupingSpec.getWithinGroupLimit()).setGroupOffsetDefault(groupingSpec.getWithinGroupOffset()).setGetGroupedDocSet(groupingSpec.isTruncateGroups());
            if (groupingSpec.getFields() != null) {
                for (String field : groupingSpec.getFields()) {
                    grouping.addFieldCommand(field, rb.req);
                }
            }
            if (groupingSpec.getFunctions() != null) {
                for (String groupByStr : groupingSpec.getFunctions()) {
                    grouping.addFunctionCommand(groupByStr, rb.req);
                }
            }
            if (groupingSpec.getQueries() != null) {
                for (String groupByStr : groupingSpec.getQueries()) {
                    grouping.addQueryCommand(groupByStr, rb.req);
                }
            }
            if (rb.isNeedDocList() || rb.isDebug()) {
                // we need a single list of the returned docs
                cmd.setFlags(SolrIndexSearcher.GET_DOCLIST);
            }
            grouping.execute();
            if (grouping.isSignalCacheWarning()) {
                rsp.add("cacheWarning", String.format(Locale.ROOT, "Cache limit of %d percent relative to maxdoc has exceeded. Please increase cache size or disable caching.", maxDocsPercentageToCache));
            }
            rb.setResult(result);
            if (grouping.mainResult != null) {
                ResultContext ctx = new BasicResultContext(rb, grouping.mainResult);
                rsp.addResponse(ctx);
                rsp.getToLog().add("hits", grouping.mainResult.matches());
            } else if (!grouping.getCommands().isEmpty()) {
                // Can never be empty since grouping.execute() checks for this.
                rsp.add("grouped", result.groupedResults);
                rsp.getToLog().add("hits", grouping.getCommands().get(0).getMatches());
            }
            return;
        } catch (SyntaxError e) {
            throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, e);
        }
    }
    // normal search result
    searcher.search(result, cmd);
    rb.setResult(result);
    ResultContext ctx = new BasicResultContext(rb);
    rsp.addResponse(ctx);
    rsp.getToLog().add("hits", rb.getResults().docList.matches());
    if (!rb.req.getParams().getBool(ShardParams.IS_SHARD, false)) {
        if (null != rb.getNextCursorMark()) {
            rb.rsp.add(CursorMarkParams.CURSOR_MARK_NEXT, rb.getNextCursorMark().getSerializedTotem());
        }
    }
    if (rb.mergeFieldHandler != null) {
        rb.mergeFieldHandler.handleMergeFields(rb, searcher);
    } else {
        doFieldSortValues(rb, searcher);
    }
    doPrefetch(rb);
}
Also used : BasicResultContext(org.apache.solr.response.BasicResultContext) ResultContext(org.apache.solr.response.ResultContext) Query(org.apache.lucene.search.Query) MatchNoDocsQuery(org.apache.lucene.search.MatchNoDocsQuery) RankQuery(org.apache.solr.search.RankQuery) DocListAndSet(org.apache.solr.search.DocListAndSet) BytesRefBuilder(org.apache.lucene.util.BytesRefBuilder) Builder(org.apache.solr.search.grouping.distributed.command.QueryCommand.Builder) ArrayList(java.util.ArrayList) CommandHandler(org.apache.solr.search.grouping.CommandHandler) DocSlice(org.apache.solr.search.DocSlice) StatsCache(org.apache.solr.search.stats.StatsCache) QueryResult(org.apache.solr.search.QueryResult) SyntaxError(org.apache.solr.search.SyntaxError) SolrDocumentList(org.apache.solr.common.SolrDocumentList) DocList(org.apache.solr.search.DocList) List(java.util.List) NamedList(org.apache.solr.common.util.NamedList) GroupingSpecification(org.apache.solr.search.grouping.GroupingSpecification) SearchGroupsResultTransformer(org.apache.solr.search.grouping.distributed.shardresultserializer.SearchGroupsResultTransformer) SolrException(org.apache.solr.common.SolrException) BytesRef(org.apache.lucene.util.BytesRef) SolrQueryResponse(org.apache.solr.response.SolrQueryResponse) SearchGroup(org.apache.lucene.search.grouping.SearchGroup) Grouping(org.apache.solr.search.Grouping) SolrIndexSearcher(org.apache.solr.search.SolrIndexSearcher) Term(org.apache.lucene.index.Term) TopGroupsResultTransformer(org.apache.solr.search.grouping.distributed.shardresultserializer.TopGroupsResultTransformer) SchemaField(org.apache.solr.schema.SchemaField) SolrQueryRequest(org.apache.solr.request.SolrQueryRequest) SolrParams(org.apache.solr.common.params.SolrParams) ModifiableSolrParams(org.apache.solr.common.params.ModifiableSolrParams) QueryCommand(org.apache.solr.search.QueryCommand) IndexSchema(org.apache.solr.schema.IndexSchema)
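One guard in Example 4 deserves a call-out: a request that combines cursorMark with timeAllowed is rejected as fundamentally incompatible. A hypothetical parameter set that would trip it (the parameter names are standard Solr; the sort field is invented for illustration):

import org.apache.solr.common.params.ModifiableSolrParams;

final class CursorMarkClash {
    public static void main(String[] args) {
        ModifiableSolrParams params = new ModifiableSolrParams();
        params.set("q", "*:*");
        params.set("sort", "id asc");      // cursorMark requires a stable, unique sort
        params.set("cursorMark", "*");     // CursorMarkParams.CURSOR_MARK_PARAM
        params.set("timeAllowed", "1000"); // CommonParams.TIME_ALLOWED
        // QueryComponent.process would answer this combination with BAD_REQUEST:
        // "Can not search using both cursorMark and timeAllowed"
        System.out.println(params);
    }
}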

Aggregations

ArrayList (java.util.ArrayList): 4
Query (org.apache.lucene.search.Query): 4
SolrParams (org.apache.solr.common.params.SolrParams): 4
NamedList (org.apache.solr.common.util.NamedList): 4
SolrQueryRequest (org.apache.solr.request.SolrQueryRequest): 4
DocSlice (org.apache.solr.search.DocSlice): 4
SolrIndexSearcher (org.apache.solr.search.SolrIndexSearcher): 4
DocList (org.apache.solr.search.DocList): 3
HashMap (java.util.HashMap): 2
Sort (org.apache.lucene.search.Sort): 2
BytesRef (org.apache.lucene.util.BytesRef): 2
SolrException (org.apache.solr.common.SolrException): 2
DocListAndSet (org.apache.solr.search.DocListAndSet): 2
IntHashSet (com.carrotsearch.hppc.IntHashSet): 1
IntObjectHashMap (com.carrotsearch.hppc.IntObjectHashMap): 1
LongHashSet (com.carrotsearch.hppc.LongHashSet): 1
LongObjectHashMap (com.carrotsearch.hppc.LongObjectHashMap): 1
IOException (java.io.IOException): 1
List (java.util.List): 1
Field (org.apache.lucene.document.Field): 1