Use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by apache.
From class PivotFacetProcessor, method doPivots:
/**
* Recursive function to compute all the pivot counts for the values under the specified field
*/
protected List<NamedList<Object>> doPivots(NamedList<Integer> superFacets, String field, String subField,
    Deque<String> fnames, Deque<String> vnames, ParsedParams parsed, List<StatsField> statsFields,
    List<FacetComponent.FacetBase> facetQueries, List<RangeFacetRequest> facetRanges) throws IOException {
  boolean isShard = rb.req.getParams().getBool(ShardParams.IS_SHARD, false);
  SolrIndexSearcher searcher = rb.req.getSearcher();
  // TODO: optimize to avoid converting to an external string and then having to convert back to internal below
  SchemaField sfield = searcher.getSchema().getField(field);
  FieldType ftype = sfield.getType();
  String nextField = fnames.poll();
  // re-usable BytesRefBuilder for conversion of term values to Objects
  BytesRefBuilder termval = new BytesRefBuilder();
  List<NamedList<Object>> values = new ArrayList<>(superFacets.size());
  for (Map.Entry<String, Integer> kv : superFacets) {
    // Only sub-facet if parent facet has positive count - still may not be any values for the sub-field though
    if (kv.getValue() >= getMinCountForField(field)) {
      final String fieldValue = kv.getKey();
      final int pivotCount = kv.getValue();
      SimpleOrderedMap<Object> pivot = new SimpleOrderedMap<>();
      pivot.add("field", field);
      if (null == fieldValue) {
        pivot.add("value", null);
      } else {
        ftype.readableToIndexed(fieldValue, termval);
        pivot.add("value", ftype.toObject(sfield, termval.get()));
      }
      pivot.add("count", pivotCount);
      final DocSet subset = getSubset(parsed.docs, sfield, fieldValue);
      addPivotQueriesAndRanges(pivot, params, subset, facetQueries, facetRanges);
      if (subField != null) {
        NamedList<Integer> facetCounts;
        if (!vnames.isEmpty()) {
          String val = vnames.pop();
          facetCounts = new NamedList<>();
          facetCounts.add(val, getSubsetSize(subset, searcher.getSchema().getField(subField), val));
        } else {
          facetCounts = this.getTermCountsForPivots(subField, parsed.withDocs(subset));
        }
        if (facetCounts.size() >= 1) {
          pivot.add("pivot", doPivots(facetCounts, subField, nextField, fnames, vnames,
              parsed.withDocs(subset), statsFields, facetQueries, facetRanges));
        }
      }
      if ((isShard || 0 < pivotCount) && !statsFields.isEmpty()) {
        Map<String, StatsValues> stv = new LinkedHashMap<>();
        for (StatsField statsField : statsFields) {
          stv.put(statsField.getOutputKey(), statsField.computeLocalStatsValues(subset));
        }
        pivot.add("stats", StatsComponent.convertToResponse(stv));
      }
      values.add(pivot);
    }
  }
  // put the field back on the list
  fnames.push(nextField);
  return values;
}
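The pivot recursion above leans on SolrIndexSearcher for two things: converting each readable facet value to its indexed term (readableToIndexed) and narrowing the parent DocSet for the next level (getSubset, which is backed by the searcher). The sketch below shows that pattern in isolation. It is a minimal illustration only: the method name, field name, and value are hypothetical, it omits the null-value (missing bucket) case, and it assumes SolrIndexSearcher.getDocSet(Query, DocSet) as the intersection call rather than reproducing the exact body of getSubset.

  // Sketch: restrict a parent DocSet to documents whose field matches a readable value,
  // mirroring what getSubset(...) does above. Field name and value are illustrative only.
  static DocSet subsetForValue(SolrIndexSearcher searcher, DocSet parentDocs,
                               String fieldName, String readableValue) throws IOException {
    SchemaField sfield = searcher.getSchema().getField(fieldName);
    FieldType ftype = sfield.getType();
    BytesRefBuilder termval = new BytesRefBuilder();
    ftype.readableToIndexed(readableValue, termval);                        // external string -> indexed bytes
    Query q = new TermQuery(new Term(sfield.getName(), termval.get()));
    return searcher.getDocSet(q, parentDocs);                               // intersect with the parent pivot's DocSet
  }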
Use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by apache.
From class SpellCheckComponent, method maxResultsForSuggest:
private Integer maxResultsForSuggest(ResponseBuilder rb) {
  SolrParams params = rb.req.getParams();
  float maxResultsForSuggestParamValue = params.getFloat(SpellingParams.SPELLCHECK_MAX_RESULTS_FOR_SUGGEST, 0.0f);
  Integer maxResultsForSuggest = null;
  if (maxResultsForSuggestParamValue > 0.0f) {
    if (maxResultsForSuggestParamValue == (int) maxResultsForSuggestParamValue) {
      // If a whole number was passed in, this is a discrete number of documents
      maxResultsForSuggest = (int) maxResultsForSuggestParamValue;
    } else {
      // If a fractional value was passed in, this is the % of documents returned by the specified filter
      // If no filter is specified, we use the most restrictive filter of the fq parameters
      String maxResultsFilterQueryString = params.get(SpellingParams.SPELLCHECK_MAX_RESULTS_FOR_SUGGEST_FQ);
      int maxResultsByFilters = Integer.MAX_VALUE;
      SolrIndexSearcher searcher = rb.req.getSearcher();
      try {
        if (maxResultsFilterQueryString != null) {
          // Get the default Lucene query parser
          QParser parser = QParser.getParser(maxResultsFilterQueryString, rb.req);
          DocSet s = searcher.getDocSet(parser.getQuery());
          maxResultsByFilters = s.size();
        } else {
          List<Query> filters = rb.getFilters();
          // Get the maximum possible hits within these filters (size of most restrictive filter).
          if (filters != null) {
            for (Query query : filters) {
              DocSet s = searcher.getDocSet(query);
              if (s != null) {
                maxResultsByFilters = Math.min(s.size(), maxResultsByFilters);
              }
            }
          }
        }
      } catch (IOException e) {
        LOG.error(e.toString());
        return null;
      } catch (SyntaxError e) {
        LOG.error(e.toString());
        return null;
      }
      // Recalculate maxResultsForSuggest if filters were specified
      if (maxResultsByFilters != Integer.MAX_VALUE) {
        maxResultsForSuggest = Math.round(maxResultsByFilters * maxResultsForSuggestParamValue);
      }
    }
  }
  return maxResultsForSuggest;
}
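The fractional branch above bounds the suggest threshold by the size of a filter's DocSet. Below is a minimal sketch of that core step, assuming a live SolrQueryRequest named req and its searcher; the filter string and the 0.1 fraction are illustrative, not part of the original method.

  // Sketch: bound the suggest threshold by the size of a filter's DocSet, as the fractional branch above does.
  // Assumes a live SolrQueryRequest 'req'; the filter string and fraction are illustrative.
  int maxResultsByFilters = Integer.MAX_VALUE;
  try {
    QParser parser = QParser.getParser("inStock:true", req);        // hypothetical filter query
    DocSet matches = req.getSearcher().getDocSet(parser.getQuery());
    maxResultsByFilters = matches.size();
  } catch (SyntaxError | IOException e) {
    // mirror the method above: log and give up on computing a suggest threshold
  }
  // With spellcheck.maxResultsForSuggest=0.1, the effective threshold becomes
  // Math.round(maxResultsByFilters * 0.1f) documents.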
Use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by apache.
From class MoreLikeThisComponent, method process:
@Override
public void process(ResponseBuilder rb) throws IOException {
  SolrParams params = rb.req.getParams();
  if (params.getBool(MoreLikeThisParams.MLT, false)) {
    ReturnFields returnFields = new SolrReturnFields(rb.req);
    int flags = 0;
    if (returnFields.wantsScore()) {
      flags |= SolrIndexSearcher.GET_SCORES;
    }
    rb.setFieldFlags(flags);
    log.debug("Starting MoreLikeThis.Process. isShard: " + params.getBool(ShardParams.IS_SHARD));
    SolrIndexSearcher searcher = rb.req.getSearcher();
    if (params.getBool(ShardParams.IS_SHARD, false)) {
      if (params.get(MoreLikeThisComponent.DIST_DOC_ID) == null) {
        if (rb.getResults().docList.size() == 0) {
          // return empty response
          rb.rsp.add("moreLikeThis", new NamedList<DocList>());
          return;
        }
        MoreLikeThisHandler.MoreLikeThisHelper mlt = new MoreLikeThisHandler.MoreLikeThisHelper(params, searcher);
        NamedList<BooleanQuery> bQuery = mlt.getMoreLikeTheseQuery(rb.getResults().docList);
        NamedList<String> temp = new NamedList<>();
        Iterator<Entry<String, BooleanQuery>> idToQueryIt = bQuery.iterator();
        while (idToQueryIt.hasNext()) {
          Entry<String, BooleanQuery> idToQuery = idToQueryIt.next();
          String s = idToQuery.getValue().toString();
          log.debug("MLT Query:" + s);
          temp.add(idToQuery.getKey(), idToQuery.getValue().toString());
        }
        rb.rsp.add("moreLikeThis", temp);
      } else {
        NamedList<DocList> sim = getMoreLikeThese(rb, rb.req.getSearcher(), rb.getResults().docList, flags);
        rb.rsp.add("moreLikeThis", sim);
      }
    } else {
      // non distrib case
      NamedList<DocList> sim = getMoreLikeThese(rb, rb.req.getSearcher(), rb.getResults().docList, flags);
      rb.rsp.add("moreLikeThis", sim);
    }
  }
}
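The flags computed at the top of process only matter when a later searcher call honors them. The hedged sketch below shows how a DocList fetched with SolrIndexSearcher.GET_SCORES exposes per-document scores through its DocIterator. The query, sort, and row count are illustrative, and getDocList(Query, DocSet, Sort, int, int, int) is assumed as the retrieval call here; in the component itself the MLT helper performs the actual retrieval internally.

  // Sketch: reading scores from a DocList retrieved with the GET_SCORES flag set above.
  // 'mltQuery', the sort, and the row count are illustrative.
  DocList mltDocs = searcher.getDocList(mltQuery, (DocSet) null, Sort.RELEVANCE, 0, 5, SolrIndexSearcher.GET_SCORES);
  DocIterator it = mltDocs.iterator();
  while (it.hasNext()) {
    int docid = it.nextDoc();   // internal Lucene docid
    float score = it.score();   // only populated because GET_SCORES was requested
    log.debug("MLT hit " + docid + " score " + score);
  }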
Use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by apache.
From class RealTimeGetComponent, method getInputDocument:
/**
 * Obtains the latest document for a given id from the tlog or, if not found there, through the realtime searcher.
 * @param versionReturned If a non-null AtomicLong is passed in, it is set to the version of the update returned from the tlog.
 * @param avoidRetrievingStoredFields Setting this to true avoids fetching stored fields through the realtime searcher,
 *          but has no effect on documents obtained from the tlog.
 *          Non-stored docValues fields are populated anyway, and are not affected by this parameter. Note that if
 *          the id field is a stored field, it will not be populated if this parameter is true and the document is
 *          obtained from the index.
 * @param onlyTheseNonStoredDVs If non-null, populate only these docValues fields in the document fetched through the realtime searcher.
 *          If this is null, decorate all non-stored docValues fields (that are not targets of copy fields) from the searcher.
 * @param resolveFullDocument If the document is fetched from the tlog, it may be only a partial document when the last update
 *          was an in-place update. In that case, should this partial document be resolved to a full document (by following
 *          back prevPointer/prevVersion)?
 */
public static SolrInputDocument getInputDocument(SolrCore core, BytesRef idBytes, AtomicLong versionReturned,
    boolean avoidRetrievingStoredFields, Set<String> onlyTheseNonStoredDVs, boolean resolveFullDocument) throws IOException {
  SolrInputDocument sid = null;
  RefCounted<SolrIndexSearcher> searcherHolder = null;
  try {
    SolrIndexSearcher searcher = null;
    sid = getInputDocumentFromTlog(core, idBytes, versionReturned, onlyTheseNonStoredDVs, resolveFullDocument);
    if (sid == DELETED) {
      return null;
    }
    if (sid == null) {
      // didn't find it in the update log, so it should be in the newest searcher opened
      if (searcher == null) {
        searcherHolder = core.getRealtimeSearcher();
        searcher = searcherHolder.get();
      }
      // SolrCore.verbose("RealTimeGet using searcher ", searcher);
      SchemaField idField = core.getLatestSchema().getUniqueKeyField();
      int docid = searcher.getFirstMatch(new Term(idField.getName(), idBytes));
      if (docid < 0)
        return null;
      SolrDocumentFetcher docFetcher = searcher.getDocFetcher();
      if (avoidRetrievingStoredFields) {
        sid = new SolrInputDocument();
      } else {
        Document luceneDocument = docFetcher.doc(docid);
        sid = toSolrInputDocument(luceneDocument, core.getLatestSchema());
      }
      if (onlyTheseNonStoredDVs != null) {
        docFetcher.decorateDocValueFields(sid, docid, onlyTheseNonStoredDVs);
      } else {
        docFetcher.decorateDocValueFields(sid, docid, docFetcher.getNonStoredDVsWithoutCopyTargets());
      }
    }
  } finally {
    if (searcherHolder != null) {
      searcherHolder.decref();
    }
  }
  if (versionReturned != null) {
    if (sid.containsKey(VERSION_FIELD)) {
      versionReturned.set((long) sid.getFieldValue(VERSION_FIELD));
    }
  }
  return sid;
}
Use of org.apache.solr.search.SolrIndexSearcher in project lucene-solr by apache.
From class RealTimeGetComponent, method reopenRealtimeSearcherAndGet:
/**
 * Re-opens the realtime searcher and fetches the document referred to by idTerm from it.
 * @return the document, or null if not found.
 */
private static SolrDocument reopenRealtimeSearcherAndGet(SolrCore core, Term idTerm, ReturnFields returnFields) throws IOException {
  UpdateLog ulog = core.getUpdateHandler().getUpdateLog();
  ulog.openRealtimeSearcher();
  RefCounted<SolrIndexSearcher> searcherHolder = core.getRealtimeSearcher();
  try {
    SolrIndexSearcher searcher = searcherHolder.get();
    int docid = searcher.getFirstMatch(idTerm);
    if (docid < 0) {
      return null;
    }
    Document luceneDocument = searcher.doc(docid, returnFields.getLuceneFieldNames());
    SolrDocument doc = toSolrDoc(luceneDocument, core.getLatestSchema());
    SolrDocumentFetcher docFetcher = searcher.getDocFetcher();
    docFetcher.decorateDocValueFields(doc, docid, docFetcher.getNonStoredDVs(false));
    return doc;
  } finally {
    searcherHolder.decref();
  }
}
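For context, here is a hedged sketch of how a caller inside RealTimeGetComponent might invoke this helper after an uncommitted update. The id value and field list are hypothetical; the Term and ReturnFields construction mirrors the methods shown above, and a live SolrQueryRequest named req is assumed.

  // Sketch: invoking reopenRealtimeSearcherAndGet for a hypothetical id. The id value and
  // requested field list are illustrative; everything else follows the code shown above.
  SchemaField idField = core.getLatestSchema().getUniqueKeyField();
  BytesRefBuilder idBytes = new BytesRefBuilder();
  idField.getType().readableToIndexed("doc-42", idBytes);                // hypothetical unique key value
  Term idTerm = new Term(idField.getName(), idBytes.get());
  ReturnFields returnFields = new SolrReturnFields("id,price", req);     // hypothetical field list
  SolrDocument doc = reopenRealtimeSearcherAndGet(core, idTerm, returnFields);
  if (doc == null) {
    // the id is not present even in the freshly re-opened realtime searcher
  }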